Xenomai 3.3.2
Loading...
Searching...
No Matches
buffer.h
1/*
2 * Analogy for Linux, buffer related features
3 *
4 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
5 * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
6 *
7 * Xenomai is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * Xenomai is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with Xenomai; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
22#define _COBALT_RTDM_ANALOGY_BUFFER_H
23
#include <linux/version.h>
#include <linux/mm.h>
#include <rtdm/driver.h>
#include <rtdm/uapi/analogy.h>
#include <rtdm/analogy/rtdm_helpers.h>
#include <rtdm/analogy/context.h>
#include <rtdm/analogy/command.h>
#include <rtdm/analogy/subdevice.h>
32
33/* --- Events bits / flags --- */
34
35#define A4L_BUF_EOBUF_NR 0
36#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
37
38#define A4L_BUF_ERROR_NR 1
39#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
40
41#define A4L_BUF_EOA_NR 2
42#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
43
44/* --- Status bits / flags --- */
45
46#define A4L_BUF_BULK_NR 8
47#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
48
49#define A4L_BUF_MAP_NR 9
50#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
51
52
/* Buffer descriptor structure */
struct a4l_buffer {

	/* Owning subdevice (presumably linked when the buffer is
	   attached — see a4l_setup_buffer(); confirm against the .c file) */
	struct a4l_subdevice *subd;

	/* Buffer's first virtual page pointer */
	void *buf;

	/* Buffer's global size */
	unsigned long size;
	/* Tab containing buffer's pages pointers */
	unsigned long *pg_list;

	/* RT/NRT synchronization element */
	struct a4l_sync sync;

	/* Counters needed for transfer */
	unsigned long end_count;	/* total byte count to transfer; 0 = unlimited */
	unsigned long prd_count;	/* bytes produced (written) so far */
	unsigned long cns_count;	/* bytes consumed (read) so far */
	unsigned long tmp_count;	/* snapshot used for DMA overrun detection
					   (see the counters-management comment below) */

	/* Status + events occurring during transfer */
	unsigned long flags;

	/* Command in progress */
	struct a4l_cmd_desc *cur_cmd;

	/* Munge counter */
	unsigned long mng_count;

	/* Threshold below which the user process should not be
	   awakened */
	unsigned long wake_count;
};
89
90static inline void __dump_buffer_counters(struct a4l_buffer *buf)
91{
92 __a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf);
93 __a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n",
94 buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
95}
96
97/* --- Static inline functions related with
98 user<->kernel data transfers --- */
99
100/* The function __produce is an inline function which copies data into
101 the asynchronous buffer and takes care of the non-contiguous issue
102 when looping. This function is used in read and write operations */
103static inline int __produce(struct a4l_device_context *cxt,
104 struct a4l_buffer *buf, void *pin, unsigned long count)
105{
106 unsigned long start_ptr = (buf->prd_count % buf->size);
107 struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
108 unsigned long tmp_cnt = count;
109 int ret = 0;
110
111 while (ret == 0 && tmp_cnt != 0) {
112 /* Check the data copy can be performed contiguously */
113 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
114 buf->size - start_ptr : tmp_cnt;
115
116 /* Perform the copy */
117 if (cxt == NULL)
118 memcpy(buf->buf + start_ptr, pin, blk_size);
119 else
121 buf->buf + start_ptr,
122 pin, blk_size);
123
124 /* Update pointers/counts */
125 pin += blk_size;
126 tmp_cnt -= blk_size;
127 start_ptr = 0;
128 }
129
130 return ret;
131}
132
133/* The function __consume is an inline function which copies data from
134 the asynchronous buffer and takes care of the non-contiguous issue
135 when looping. This function is used in read and write operations */
136static inline int __consume(struct a4l_device_context *cxt,
137 struct a4l_buffer *buf, void *pout, unsigned long count)
138{
139 unsigned long start_ptr = (buf->cns_count % buf->size);
140 struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
141 unsigned long tmp_cnt = count;
142 int ret = 0;
143
144 while (ret == 0 && tmp_cnt != 0) {
145 /* Check the data copy can be performed contiguously */
146 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
147 buf->size - start_ptr : tmp_cnt;
148
149 /* Perform the copy */
150 if (cxt == NULL)
151 memcpy(pout, buf->buf + start_ptr, blk_size);
152 else
153 ret = rtdm_safe_copy_to_user(fd,
154 pout,
155 buf->buf + start_ptr,
156 blk_size);
157
158 /* Update pointers/counts */
159 pout += blk_size;
160 tmp_cnt -= blk_size;
161 start_ptr = 0;
162 }
163
164 return ret;
165}
166
167/* The function __munge is an inline function which calls the
168 subdevice specific munge callback on contiguous windows within the
169 whole buffer. This function is used in read and write operations */
170static inline void __munge(struct a4l_subdevice * subd,
171 void (*munge) (struct a4l_subdevice *,
172 void *, unsigned long),
173 struct a4l_buffer * buf, unsigned long count)
174{
175 unsigned long start_ptr = (buf->mng_count % buf->size);
176 unsigned long tmp_cnt = count;
177
178 while (tmp_cnt != 0) {
179 /* Check the data copy can be performed contiguously */
180 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
181 buf->size - start_ptr : tmp_cnt;
182
183 /* Perform the munge operation */
184 munge(subd, buf->buf + start_ptr, blk_size);
185
186 /* Update the start pointer and the count */
187 tmp_cnt -= blk_size;
188 start_ptr = 0;
189 }
190}
191
192/* The function __handle_event can only be called from process context
193 (not interrupt service routine). It allows the client process to
194 retrieve the buffer status which has been updated by the driver */
195static inline int __handle_event(struct a4l_buffer * buf)
196{
197 int ret = 0;
198
199 /* The event "End of acquisition" must not be cleaned
200 before the complete flush of the buffer */
201 if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
202 ret = -ENOENT;
203
204 if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
205 ret = -EPIPE;
206
207 return ret;
208}
209
210/* --- Counters management functions --- */
211
212/* Here, we may wonder why we need more than two counters / pointers.
213
214 Theoretically, we only need two counters (or two pointers):
215 - one which tells where the reader should be within the buffer
216 - one which tells where the writer should be within the buffer
217
218 With these two counters (or pointers), we just have to check that
219 the writer does not overtake the reader inside the ring buffer
220 BEFORE any read / write operations.
221
222 However, if one element is a DMA controller, we have to be more
223 careful. Generally a DMA transfer occurs like this:
224 DMA shot
225 |-> then DMA interrupt
226 |-> then DMA soft handler which checks the counter
227
228 So, the checkings occur AFTER the write operations.
229
230 Let's take an example: the reader is a software task and the writer
231 is a DMA controller. At the end of the DMA shot, the write counter
232 is higher than the read counter. Unfortunately, a read operation
233 occurs between the DMA shot and the DMA interrupt, so the handler
234 will not notice that an overflow occured.
235
236 That is why tmp_count comes into play: tmp_count records the
237 read/consumer current counter before the next DMA shot and once the
238 next DMA shot is done, we check that the updated writer/producer
239 counter is not higher than tmp_count. Thus we are sure that the DMA
240 writer has not overtaken the reader because it was not able to
241 overtake the n-1 value. */
242
/* Check, before a producer (e.g. DMA write) shot reaching the
   absolute position @count, that the writer cannot have overrun the
   consumer position recorded at the previous call (tmp_count); see
   the counters-management comment above for why the check is made
   against the snapshot instead of the live consumer count. On
   overrun, the error event is raised and -EPIPE returned; on
   success, tmp_count is refreshed with the current consumer count
   for the next check. */
static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
{
	if (count - buf->tmp_count > buf->size) {
		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
		return -EPIPE;
	}

	buf->tmp_count = buf->cns_count;

	return 0;
}
254
/* Relative variant of __pre_abs_put(): checks a write of @count
   bytes past the last recorded snapshot position */
static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
{
	return __pre_abs_put(buf, buf->tmp_count + count);
}
259
/* Check, before a consumer (e.g. DMA read) shot reaching the
   absolute position @count, that the reader cannot have overtaken
   the producer snapshot (tmp_count); returns -EPIPE and raises the
   error event on overrun, 0 otherwise. tmp_count is refreshed with
   the current producer count on every successful call. */
static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
{
	/* The first time, we expect the buffer to be properly filled
	before the trigger occurence; by the way, we need tmp_count to
	have been initialized and tmp_count is updated right here */
	if (buf->tmp_count == 0 || buf->cns_count == 0)
		goto out;

	/* At the end of the acquisition, the user application has
	written the defined amount of data into the buffer; so the
	last time, the DMA channel can easily overtake the tmp
	frontier because no more data were sent from user space;
	therefore no useless alarm should be sent */
	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
		goto out;

	/* Once the exception are passed, we check that the DMA
	transfer has not overtaken the last record of the production
	count (tmp_count was updated with prd_count the last time
	__pre_abs_get was called). We must understand that we cannot
	compare the current DMA count with the current production
	count because even if, right now, the production count is
	higher than the DMA count, it does not mean that the DMA count
	was not greater a few cycles before; in such case, the DMA
	channel would have retrieved the wrong data */
	if ((long)(count - buf->tmp_count) > 0) {
		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
		return -EPIPE;
	}

out:
	buf->tmp_count = buf->prd_count;

	return 0;
}
295
/* Relative variant of __pre_abs_get(): checks a read of @count
   bytes past the last recorded snapshot position */
static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
{
	return __pre_abs_get(buf, buf->tmp_count + count);
}
300
/* Commit the producer counter at the absolute position @count.
   A non-forward move is rejected with -EINVAL (the signed cast makes
   the comparison wraparound-safe). The "end of buffer" event is
   raised each time the ring wraps, and the "end of acquisition"
   event once end_count (when non-zero) is reached. */
static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
{
	unsigned long old = buf->prd_count;

	if ((long)(buf->prd_count - count) >= 0)
		return -EINVAL;

	buf->prd_count = count;

	/* One EOBUF event per full trip around the ring */
	if ((old / buf->size) != (count / buf->size))
		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);

	/* end_count == 0 means an unlimited acquisition: no EOA */
	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
		set_bit(A4L_BUF_EOA_NR, &buf->flags);

	return 0;
}
318
/* Relative variant of __abs_put(): advances the producer counter by
   @count bytes */
static inline int __put(struct a4l_buffer * buf, unsigned long count)
{
	return __abs_put(buf, buf->prd_count + count);
}
323
324static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
325{
326 unsigned long old = buf->cns_count;
327
328 if ((long)(buf->cns_count - count) >= 0)
329 return -EINVAL;
330
331 buf->cns_count = count;
332
333 if ((old / buf->size) != count / buf->size)
334 set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
335
336 if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
337 set_bit(A4L_BUF_EOA_NR, &buf->flags);
338
339 return 0;
340}
341
/* Relative variant of __abs_get(): advances the consumer counter by
   @count bytes */
static inline int __get(struct a4l_buffer * buf, unsigned long count)
{
	return __abs_get(buf, buf->cns_count + count);
}
346
347static inline unsigned long __count_to_put(struct a4l_buffer * buf)
348{
349 unsigned long ret;
350
351 if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
352 ret = buf->size + buf->cns_count - buf->prd_count;
353 else
354 ret = 0;
355
356 return ret;
357}
358
/* Return how many bytes are available for reading: produced but not
   yet consumed, clamped by end_count when the acquisition is
   finite. Returns 0 when the reader has caught up. */
static inline unsigned long __count_to_get(struct a4l_buffer * buf)
{
	unsigned long ret;

	/* If the acquisition is unlimited (end_count == 0), we must
	not take into account end_count */
	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
		ret = buf->prd_count;
	else
		ret = buf->end_count;

	/* Wraparound-safe clamp to zero via the signed cast */
	if ((long)(ret - buf->cns_count) > 0)
		ret -= buf->cns_count;
	else
		ret = 0;

	return ret;
}
377
378static inline unsigned long __count_to_end(struct a4l_buffer * buf)
379{
380 unsigned long ret = buf->end_count - buf->cns_count;
381
382 if (buf->end_count == 0)
383 return ULONG_MAX;
384
385 return ((long)ret) < 0 ? 0 : ret;
386}
387
388/* --- Buffer internal functions --- */
389
390int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);
391
392void a4l_free_buffer(struct a4l_buffer *buf_desc);
393
394void a4l_init_buffer(struct a4l_buffer * buf_desc);
395
396void a4l_cleanup_buffer(struct a4l_buffer * buf_desc);
397
398int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);
399
400void a4l_cancel_buffer(struct a4l_device_context *cxt);
401
402int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
403 unsigned long count);
404
405int a4l_buf_commit_absput(struct a4l_subdevice *subd,
406 unsigned long count);
407
408int a4l_buf_prepare_put(struct a4l_subdevice *subd,
409 unsigned long count);
410
411int a4l_buf_commit_put(struct a4l_subdevice *subd,
412 unsigned long count);
413
414int a4l_buf_put(struct a4l_subdevice *subd,
415 void *bufdata, unsigned long count);
416
417int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
418 unsigned long count);
419
420int a4l_buf_commit_absget(struct a4l_subdevice *subd,
421 unsigned long count);
422
423int a4l_buf_prepare_get(struct a4l_subdevice *subd,
424 unsigned long count);
425
426int a4l_buf_commit_get(struct a4l_subdevice *subd,
427 unsigned long count);
428
429int a4l_buf_get(struct a4l_subdevice *subd,
430 void *bufdata, unsigned long count);
431
432int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
433
434unsigned long a4l_buf_count(struct a4l_subdevice *subd);
435
436/* --- Current Command management function --- */
437
438static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
439{
440 return (subd->buf) ? subd->buf->cur_cmd : NULL;
441}
442
443/* --- Munge related function --- */
444
445int a4l_get_chan(struct a4l_subdevice *subd);
446
447/* --- IOCTL / FOPS functions --- */
448
449int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg);
450int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg);
451int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg);
452int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg);
453int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg);
454int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg);
455ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes);
456ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes);
457int a4l_select(struct a4l_device_context *cxt,
458 rtdm_selector_t *selector,
459 enum rtdm_selecttype type, unsigned fd_index);
460
461#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */
Real-Time Driver Model for Xenomai, driver API header.
static struct rtdm_fd * rtdm_private_to_fd(void *dev_private)
Locate a device file descriptor structure from its driver private area.
Definition driver.h:176
rtdm_selecttype
Definition driver.h:113
int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst, const void *src, size_t size)
Check if read/write access to user-space memory block is safe and copy specified buffer to it.
int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst, const void __user *src, size_t size)
Check if read access to user-space memory block and copy it to specified buffer.
Structure describing the asynchronous instruction.
Definition analogy.h:289
Structure describing the subdevice.
Definition subdevice.h:40
struct a4l_buffer * buf
Linked buffer.
Definition subdevice.h:51
Analogy for Linux, subdevice related features.
Analogy for Linux, UAPI bits.