Xenomai 3.1
rtskb.h
1 /***
2  *
3  * include/rtskb.h
4  *
5  * RTnet - real-time networking subsystem
6  * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>,
7  * 2003-2005 Jan Kiszka <jan.kiszka@web.de>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  */
24 
25 #ifndef __RTSKB_H_
26 #define __RTSKB_H_
27 
28 #ifdef __KERNEL__
29 
30 #include <linux/skbuff.h>
31 
32 #include <rtdm/net.h>
33 #include <rtnet_internal.h>
34 
35 /***
36 
37 rtskb Management - A Short Introduction
38 ---------------------------------------
39 
40 1. rtskbs (Real-Time Socket Buffers)
41 
42 A rtskb consists of a management structure (struct rtskb) and a fixed-size
43 (RTSKB_SIZE) data buffer. It is used to store network packets on their way from
44 the API routines through the stack to the NICs or vice versa. rtskbs are
45 allocated as one chunk of memory which contains both the management structure
46 and the buffer memory itself.
47 
48 
49 2. rtskb Queues
50 
51 A rtskb queue is described by struct rtskb_queue. A queue can contain an
52 unlimited number of rtskbs in an ordered way. A rtskb can either be added to
53 the head (rtskb_queue_head()) or the tail of a queue (rtskb_queue_tail()). When
54 a rtskb is removed from a queue (rtskb_dequeue()), it is always taken from the
55 head. Queues are normally spin-lock protected unless the __-prefixed variants
56 of the queuing functions are used.
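
A minimal usage sketch (illustrative only; "my_queue" and "skb" are
hypothetical names, error handling is omitted):

    struct rtskb_queue my_queue;

    rtskb_queue_init(&my_queue);
    rtskb_queue_tail(&my_queue, skb);       // append at the tail
    skb = rtskb_dequeue(&my_queue);         // always removes from the head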
57 
58 
59 3. Prioritized rtskb Queues
60 
61 A prioritized queue contains a number of normal rtskb queues within an array.
62 The array index of a sub-queue corresponds to the priority of the rtskbs within
63 this queue. For enqueuing a rtskb (rtskb_prio_queue_head()), its priority field
64 is evaluated and the rtskb is then placed into the appropriate sub-queue. When
65 dequeuing a rtskb, the first rtskb of the highest-priority non-empty sub-queue
66 is returned. The current implementation supports 32 different priority levels;
67 the lowest is defined by QUEUE_MIN_PRIO, the highest by
68 QUEUE_MAX_PRIO.
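
A sketch of prioritized queuing (hypothetical names; only the lower 16 bits
of skb->priority, see RTSKB_PRIO_MASK below, select the sub-queue):

    struct rtskb_prio_queue prio_queue;

    rtskb_prio_queue_init(&prio_queue);
    rtskb_prio_queue_tail(&prio_queue, skb);    // enqueue according to skb->priority
    skb = rtskb_prio_dequeue(&prio_queue);      // head of the best non-empty sub-queue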
69 
70 
71 4. rtskb Pools
72 
73 As rtskbs must not be allocated by a normal memory manager during runtime,
74 preallocated rtskbs are kept ready in several pools. Most packet producers
75 (NICs, sockets, etc.) have their own pools in order to be independent of the
76 load situation of other parts of the stack.
77 
78 When a pool is created (rtskb_pool_init()), the required rtskbs are allocated
79 from a Linux slab cache. Pools can be extended (rtskb_pool_extend()) or
80 shrunk (rtskb_pool_shrink()) at runtime. When shutting down the
81 program/module, every pool has to be released (rtskb_pool_release()). All these
82 operations must be executed in a non-real-time context.
83 
84 Pools are organized as normal rtskb queues (struct rtskb_queue). When a rtskb
85 is allocated (alloc_rtskb()), it is actually dequeued from the pool's queue.
86 When freeing a rtskb (kfree_rtskb()), the rtskb is enqueued to its owning pool.
87 rtskbs can be exchanged between pools (rtskb_acquire()). In this case, the
88 passed rtskb switches from its owning pool to the given pool, but only if
89 that pool can pass an empty rtskb from its own queue back in return.
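
A minimal pool life cycle for a kernel module (sketch; "my_pool" is a
hypothetical name, return values are not checked):

    static struct rtskb_pool my_pool;

    rtskb_module_pool_init(&my_pool, 16);               // preallocate 16 rtskbs
    struct rtskb *skb = alloc_rtskb(RTSKB_SIZE, &my_pool);
    if (skb)
        kfree_rtskb(skb);                               // returns skb to my_pool
    rtskb_pool_release(&my_pool);                       // non-real-time context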
90 
91 
92 5. rtskb Chains
93 
94 To ease the defragmentation of larger IP packets, several rtskbs can form a
95 chain. For these purposes, the first rtskb (and only the first!) provides a
96 pointer to the last rtskb in the chain. When enqueuing the first rtskb of a
97 chain, the whole chain is automatically placed into the destination queue. But
98 to dequeue a complete chain, specialized calls are required (suffix: _chain).
99 While chains also get freed en bloc (kfree_rtskb()) when passing the first
100 rtskb, it is not possible to allocate a chain from a pool (alloc_rtskb()); a
101 newly allocated rtskb is always reset to a "single rtskb chain". Furthermore,
102 the acquisition of complete chains is NOT supported (rtskb_acquire()).
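
Dequeuing and releasing a complete chain (sketch; "my_queue" is hypothetical):

    struct rtskb *first = rtskb_dequeue_chain(&my_queue);

    if (first) {
        // first->chain_end points to the last rtskb of the chain
        kfree_rtskb(first);                             // frees the whole chain en bloc
    }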
103 
104 
105 6. Capturing Support (Optional)
106 
107 When incoming or outgoing packets are captured, the assigned rtskb needs to be
108 shared between the stack, the driver, and the capturing service. In contrast to
109 many other network stacks, RTnet does not create a new rtskb head that
110 re-references the payload. Instead, additional fields at the end of the rtskb
111 structure are used for sharing a rtskb with a capturing service. If the sharing
112 bit (RTSKB_CAP_SHARED) in cap_flags is set, the rtskb will not be returned to
113 the owning pool when kfree_rtskb() is called. Instead, this bit will be reset,
114 and a compensation rtskb stored in cap_comp_skb will be returned to the owning
115 pool. cap_start and cap_len can be used to preserve the dimensions of the full
116 packet. This is required because the data and len fields will be modified while
117 walking through the stack. cap_next allows a rtskb to be added to a separate
118 queue which is independent of any queue described in section 2.
119 
120 Certain setup tasks for capturing packets cannot become part of a capturing
121 module; they have to be embedded into the stack. For this purpose, several
122 inline functions are provided. rtcap_mark_incoming() is used to save the packet
123 dimension right before it is modified by the stack. rtcap_report_incoming()
124 calls the capturing handler, if present, in order to let it process the
125 received rtskb (e.g. allocate compensation rtskb, mark original rtskb as
126 shared, and enqueue it).
127 
128 Outgoing rtskbs have to be captured by adding a hook function to the chain of
129 hard_start_xmit functions of a device. To measure the delay caused by RTmac
130 between the request and the actual transmission, a time stamp can be taken using
131 rtcap_mark_rtmac_enqueue(). This function is typically called by RTmac
132 disciplines when they add a rtskb to their internal transmission queue. In such
133 a case, the RTSKB_CAP_RTMAC_STAMP bit is set in cap_flags to indicate that the
134 cap_rtmac_stamp field now contains valid data.
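
How the stack side might use these helpers around packet reception (sketch;
the surrounding receive logic is omitted):

    rtcap_mark_incoming(skb);       // snapshot data/len before the stack modifies them
    // ... protocol processing ...
    rtcap_report_incoming(skb);     // hand the rtskb to the capturing handler, if any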
135 
136  ***/
137 
138 #ifndef CHECKSUM_PARTIAL
139 #define CHECKSUM_PARTIAL CHECKSUM_HW
140 #endif
141 
142 #define RTSKB_CAP_SHARED 1 /* rtskb shared between stack and RTcap */
143 #define RTSKB_CAP_RTMAC_STAMP 2 /* cap_rtmac_stamp is valid */
144 
145 #define RTSKB_UNMAPPED 0
146 
147 struct rtskb_queue;
148 struct rtsocket;
149 struct rtnet_device;
150 
151 /***
152  * rtskb - realtime socket buffer
153  */
154 struct rtskb {
155  struct rtskb *next; /* used for queuing rtskbs */
156  struct rtskb *chain_end; /* marks the end of a rtskb chain starting
157  with this very rtskb */
158 
159  struct rtskb_pool *pool; /* owning pool */
160 
161  unsigned int priority; /* bit 0..15: prio, 16..31: user-defined */
162 
163  struct rtsocket *sk; /* assigned socket */
164  struct rtnet_device *rtdev; /* source or destination device */
165 
166  nanosecs_abs_t time_stamp; /* arrival or transmission (RTcap) time */
167 
168  /* patch address of the transmission time stamp, can be NULL
169  * calculation: *xmit_stamp = cpu_to_be64(time_in_ns + *xmit_stamp)
170  */
171  nanosecs_abs_t *xmit_stamp;
172 
173  /* transport layer */
174  union {
175  struct tcphdr *th;
176  struct udphdr *uh;
177  struct icmphdr *icmph;
178  struct iphdr *ipihdr;
179  unsigned char *raw;
180  } h;
181 
182  /* network layer */
183  union {
184  struct iphdr *iph;
185  struct arphdr *arph;
186  unsigned char *raw;
187  } nh;
188 
189  /* link layer */
190  union {
191  struct ethhdr *ethernet;
192  unsigned char *raw;
193  } mac;
194 
195  unsigned short protocol;
196  unsigned char pkt_type;
197 
198  unsigned char ip_summed;
199  unsigned int csum;
200 
201  unsigned char *data;
202  unsigned char *tail;
203  unsigned char *end;
204  unsigned int len;
205 
206  dma_addr_t buf_dma_addr;
207 
208  unsigned char *buf_start;
209 
210 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
211  unsigned char *buf_end;
212 #endif
213 
214 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
215  int cap_flags; /* see RTSKB_CAP_xxx */
216  struct rtskb *cap_comp_skb; /* compensation rtskb */
217  struct rtskb *cap_next; /* used for capture queue */
218  unsigned char *cap_start; /* start offset for capturing */
219  unsigned int cap_len; /* capture length of this rtskb */
220  nanosecs_abs_t cap_rtmac_stamp; /* RTmac enqueuing time */
221 #endif
222 
223  struct list_head entry; /* for global rtskb list */
224 };
225 
226 struct rtskb_queue {
227  struct rtskb *first;
228  struct rtskb *last;
229  rtdm_lock_t lock;
230 };
231 
232 struct rtskb_pool_lock_ops {
233  int (*trylock)(void *cookie);
234  void (*unlock)(void *cookie);
235 };
236 
237 struct rtskb_pool {
238  struct rtskb_queue queue;
239  const struct rtskb_pool_lock_ops *lock_ops;
240  void *lock_cookie;
241 };
242 
243 #define QUEUE_MAX_PRIO 0
244 #define QUEUE_MIN_PRIO 31
245 
246 struct rtskb_prio_queue {
247  rtdm_lock_t lock;
248  unsigned long usage; /* bit array encoding non-empty sub-queues */
249  struct rtskb_queue queue[QUEUE_MIN_PRIO + 1];
250 };
251 
252 #define RTSKB_PRIO_MASK 0x0000FFFF /* bits 0..15: xmit prio */
253 #define RTSKB_CHANNEL_MASK 0xFFFF0000 /* bits 16..31: xmit channel */
254 #define RTSKB_CHANNEL_SHIFT 16
255 
256 #define RTSKB_DEF_RT_CHANNEL SOCK_DEF_RT_CHANNEL
257 #define RTSKB_DEF_NRT_CHANNEL SOCK_DEF_NRT_CHANNEL
258 #define RTSKB_USER_CHANNEL SOCK_USER_CHANNEL
259 
260 /* Note: always keep SOCK_XMIT_PARAMS consistent with definitions above! */
261 #define RTSKB_PRIO_VALUE SOCK_XMIT_PARAMS
262 
263 /* default values for the module parameter */
264 #define DEFAULT_GLOBAL_RTSKBS 0 /* default number of rtskb's in global pool */
265 #define DEFAULT_DEVICE_RTSKBS \
266  16 /* default additional rtskbs per network adapter */
267 #define DEFAULT_SOCKET_RTSKBS 16 /* default number of rtskb's in socket pools */
268 
269 #define ALIGN_RTSKB_STRUCT_LEN SKB_DATA_ALIGN(sizeof(struct rtskb))
270 #define RTSKB_SIZE 1544 /* maximum needed by pcnet32-rt */
271 
272 extern unsigned int rtskb_pools; /* current number of rtskb pools */
273 extern unsigned int rtskb_pools_max; /* maximum number of rtskb pools */
274 extern unsigned int rtskb_amount; /* current number of allocated rtskbs */
275 extern unsigned int rtskb_amount_max; /* maximum number of allocated rtskbs */
276 
277 #ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
278 extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
279 extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
280 #endif
281 
282 extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);
283 
284 extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);
285 
286 extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);
287 
288 extern void kfree_rtskb(struct rtskb *skb);
289 #define dev_kfree_rtskb(a) kfree_rtskb(a)
290 
291 static inline void rtskb_tx_timestamp(struct rtskb *skb)
292 {
293  nanosecs_abs_t *ts = skb->xmit_stamp;
294 
295  if (!ts)
296  return;
297 
298  *ts = cpu_to_be64(rtdm_clock_read() + *ts);
299 }
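
/*
 * Usage sketch: a driver's transmit path would typically patch the time stamp
 * right before handing the frame to the hardware (illustrative only; the
 * descriptor/DMA step is hypothetical):
 *
 *   rtskb_tx_timestamp(skb);       // no-op unless skb->xmit_stamp was set
 *   // ... write the TX descriptor and start DMA ...
 */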
300 
301 /***
302  * rtskb_queue_init - initialize the queue
303  * @queue
304  */
305 static inline void rtskb_queue_init(struct rtskb_queue *queue)
306 {
307  rtdm_lock_init(&queue->lock);
308  queue->first = NULL;
309  queue->last = NULL;
310 }
311 
312 /***
313  * rtskb_prio_queue_init - initialize the prioritized queue
314  * @prioqueue
315  */
316 static inline void rtskb_prio_queue_init(struct rtskb_prio_queue *prioqueue)
317 {
318  memset(prioqueue, 0, sizeof(struct rtskb_prio_queue));
319  rtdm_lock_init(&prioqueue->lock);
320 }
321 
322 /***
323  * rtskb_queue_empty
324  * @queue
325  */
326 static inline int rtskb_queue_empty(struct rtskb_queue *queue)
327 {
328  return (queue->first == NULL);
329 }
330 
331 /***
332  * rtskb_prio_queue_empty
333  * @prioqueue
334  */
335 static inline int rtskb_prio_queue_empty(struct rtskb_prio_queue *prioqueue)
336 {
337  return (prioqueue->usage == 0);
338 }
339 
340 /***
341  * __rtskb_queue_head - insert a buffer at the queue head (w/o locks)
342  * @queue: queue to use
343  * @skb: buffer to queue
344  */
345 static inline void __rtskb_queue_head(struct rtskb_queue *queue,
346  struct rtskb *skb)
347 {
348  struct rtskb *chain_end = skb->chain_end;
349 
350  chain_end->next = queue->first;
351 
352  if (queue->first == NULL)
353  queue->last = chain_end;
354  queue->first = skb;
355 }
356 
357 /***
358  * rtskb_queue_head - insert a buffer at the queue head (lock protected)
359  * @queue: queue to use
360  * @skb: buffer to queue
361  */
362 static inline void rtskb_queue_head(struct rtskb_queue *queue,
363  struct rtskb *skb)
364 {
365  rtdm_lockctx_t context;
366 
367  rtdm_lock_get_irqsave(&queue->lock, context);
368  __rtskb_queue_head(queue, skb);
369  rtdm_lock_put_irqrestore(&queue->lock, context);
370 }
371 
372 /***
373  * __rtskb_prio_queue_head - insert a buffer at the prioritized queue head
374  * (w/o locks)
375  * @prioqueue: queue to use
376  * @skb: buffer to queue
377  */
378 static inline void __rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
379  struct rtskb *skb)
380 {
381  unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
382 
383  RTNET_ASSERT(prio <= 31, prio = 31;);
384 
385  __rtskb_queue_head(&prioqueue->queue[prio], skb);
386  __set_bit(prio, &prioqueue->usage);
387 }
388 
389 /***
390  * rtskb_prio_queue_head - insert a buffer at the prioritized queue head
391  * (lock protected)
392  * @prioqueue: queue to use
393  * @skb: buffer to queue
394  */
395 static inline void rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
396  struct rtskb *skb)
397 {
398  rtdm_lockctx_t context;
399 
400  rtdm_lock_get_irqsave(&prioqueue->lock, context);
401  __rtskb_prio_queue_head(prioqueue, skb);
402  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
403 }
404 
405 /***
406  * __rtskb_queue_tail - insert a buffer at the queue tail (w/o locks)
407  * @queue: queue to use
408  * @skb: buffer to queue
409  */
410 static inline void __rtskb_queue_tail(struct rtskb_queue *queue,
411  struct rtskb *skb)
412 {
413  struct rtskb *chain_end = skb->chain_end;
414 
415  chain_end->next = NULL;
416 
417  if (queue->first == NULL)
418  queue->first = skb;
419  else
420  queue->last->next = skb;
421  queue->last = chain_end;
422 }
423 
424 /***
425  * rtskb_queue_tail - insert a buffer at the queue tail (lock protected)
426  * @queue: queue to use
427  * @skb: buffer to queue
428  */
429 static inline void rtskb_queue_tail(struct rtskb_queue *queue,
430  struct rtskb *skb)
431 {
432  rtdm_lockctx_t context;
433 
434  rtdm_lock_get_irqsave(&queue->lock, context);
435  __rtskb_queue_tail(queue, skb);
436  rtdm_lock_put_irqrestore(&queue->lock, context);
437 }
438 
439 /***
440  * rtskb_queue_tail_check - variant of rtskb_queue_tail
441  * returning true on an empty->non-empty transition.
442  * @queue: queue to use
443  * @skb: buffer to queue
444  */
445 static inline bool rtskb_queue_tail_check(struct rtskb_queue *queue,
446  struct rtskb *skb)
447 {
448  rtdm_lockctx_t context;
449  bool ret;
450 
451  rtdm_lock_get_irqsave(&queue->lock, context);
452  ret = queue->first == NULL;
453  __rtskb_queue_tail(queue, skb);
454  rtdm_lock_put_irqrestore(&queue->lock, context);
455 
456  return ret;
457 }
458 
459 /***
460  * __rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
461  * (w/o locks)
462  * @prioqueue: queue to use
463  * @skb: buffer to queue
464  */
465 static inline void __rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
466  struct rtskb *skb)
467 {
468  unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
469 
470  RTNET_ASSERT(prio <= 31, prio = 31;);
471 
472  __rtskb_queue_tail(&prioqueue->queue[prio], skb);
473  __set_bit(prio, &prioqueue->usage);
474 }
475 
476 /***
477  * rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
478  * (lock protected)
479  * @prioqueue: queue to use
480  * @skb: buffer to queue
481  */
482 static inline void rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
483  struct rtskb *skb)
484 {
485  rtdm_lockctx_t context;
486 
487  rtdm_lock_get_irqsave(&prioqueue->lock, context);
488  __rtskb_prio_queue_tail(prioqueue, skb);
489  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
490 }
491 
492 /***
493  * __rtskb_dequeue - remove from the head of the queue (w/o locks)
494  * @queue: queue to remove from
495  */
496 static inline struct rtskb *__rtskb_dequeue(struct rtskb_queue *queue)
497 {
498  struct rtskb *result;
499 
500  if ((result = queue->first) != NULL) {
501  queue->first = result->next;
502  result->next = NULL;
503  }
504 
505  return result;
506 }
507 
508 /***
509  * rtskb_dequeue - remove from the head of the queue (lock protected)
510  * @queue: queue to remove from
511  */
512 static inline struct rtskb *rtskb_dequeue(struct rtskb_queue *queue)
513 {
514  rtdm_lockctx_t context;
515  struct rtskb *result;
516 
517  rtdm_lock_get_irqsave(&queue->lock, context);
518  result = __rtskb_dequeue(queue);
519  rtdm_lock_put_irqrestore(&queue->lock, context);
520 
521  return result;
522 }
523 
524 /***
525  * __rtskb_prio_dequeue - remove from the head of the prioritized queue
526  * (w/o locks)
527  * @prioqueue: queue to remove from
528  */
529 static inline struct rtskb *
530 __rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
531 {
532  int prio;
533  struct rtskb *result = NULL;
534  struct rtskb_queue *sub_queue;
535 
536  if (prioqueue->usage) {
537  prio = ffz(~prioqueue->usage);
538  sub_queue = &prioqueue->queue[prio];
539  result = __rtskb_dequeue(sub_queue);
540  if (rtskb_queue_empty(sub_queue))
541  __change_bit(prio, &prioqueue->usage);
542  }
543 
544  return result;
545 }
546 
547 /***
548  * rtskb_prio_dequeue - remove from the head of the prioritized queue
549  * (lock protected)
550  * @prioqueue: queue to remove from
551  */
552 static inline struct rtskb *
553 rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
554 {
555  rtdm_lockctx_t context;
556  struct rtskb *result;
557 
558  rtdm_lock_get_irqsave(&prioqueue->lock, context);
559  result = __rtskb_prio_dequeue(prioqueue);
560  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
561 
562  return result;
563 }
564 
565 /***
566  * __rtskb_dequeue_chain - remove a chain from the head of the queue
567  * (w/o locks)
568  * @queue: queue to remove from
569  */
570 static inline struct rtskb *__rtskb_dequeue_chain(struct rtskb_queue *queue)
571 {
572  struct rtskb *result;
573  struct rtskb *chain_end;
574 
575  if ((result = queue->first) != NULL) {
576  chain_end = result->chain_end;
577  queue->first = chain_end->next;
578  chain_end->next = NULL;
579  }
580 
581  return result;
582 }
583 
584 /***
585  * rtskb_dequeue_chain - remove a chain from the head of the queue
586  * (lock protected)
587  * @queue: queue to remove from
588  */
589 static inline struct rtskb *rtskb_dequeue_chain(struct rtskb_queue *queue)
590 {
591  rtdm_lockctx_t context;
592  struct rtskb *result;
593 
594  rtdm_lock_get_irqsave(&queue->lock, context);
595  result = __rtskb_dequeue_chain(queue);
596  rtdm_lock_put_irqrestore(&queue->lock, context);
597 
598  return result;
599 }
600 
601 /***
602  * rtskb_prio_dequeue_chain - remove a chain from the head of the
603  * prioritized queue
604  * @prioqueue: queue to remove from
605  */
606 static inline struct rtskb *
607 rtskb_prio_dequeue_chain(struct rtskb_prio_queue *prioqueue)
608 {
609  rtdm_lockctx_t context;
610  int prio;
611  struct rtskb *result = NULL;
612  struct rtskb_queue *sub_queue;
613 
614  rtdm_lock_get_irqsave(&prioqueue->lock, context);
615  if (prioqueue->usage) {
616  prio = ffz(~prioqueue->usage);
617  sub_queue = &prioqueue->queue[prio];
618  result = __rtskb_dequeue_chain(sub_queue);
619  if (rtskb_queue_empty(sub_queue))
620  __change_bit(prio, &prioqueue->usage);
621  }
622  rtdm_lock_put_irqrestore(&prioqueue->lock, context);
623 
624  return result;
625 }
626 
627 /***
628  * rtskb_queue_purge - clean the queue
629  * @queue
630  */
631 static inline void rtskb_queue_purge(struct rtskb_queue *queue)
632 {
633  struct rtskb *skb;
634  while ((skb = rtskb_dequeue(queue)) != NULL)
635  kfree_rtskb(skb);
636 }
637 
638 static inline int rtskb_headlen(const struct rtskb *skb)
639 {
640  return skb->len;
641 }
642 
643 static inline void rtskb_reserve(struct rtskb *skb, unsigned int len)
644 {
645  skb->data += len;
646  skb->tail += len;
647 }
648 
649 static inline unsigned char *__rtskb_put(struct rtskb *skb, unsigned int len)
650 {
651  unsigned char *tmp = skb->tail;
652 
653  skb->tail += len;
654  skb->len += len;
655  return tmp;
656 }
657 
658 #define rtskb_put(skb, length) \
659  ({ \
660  struct rtskb *__rtskb = (skb); \
661  unsigned int __len = (length); \
662  unsigned char *tmp = __rtskb->tail; \
663  \
664  __rtskb->tail += __len; \
665  __rtskb->len += __len; \
666  \
667  RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end, \
668  rtskb_over_panic(__rtskb, __len, \
669  current_text_addr());); \
670  \
671  tmp; \
672  })
673 
674 static inline unsigned char *__rtskb_push(struct rtskb *skb, unsigned int len)
675 {
676  skb->data -= len;
677  skb->len += len;
678  return skb->data;
679 }
680 
681 #define rtskb_push(skb, length) \
682  ({ \
683  struct rtskb *__rtskb = (skb); \
684  unsigned int __len = (length); \
685  \
686  __rtskb->data -= __len; \
687  __rtskb->len += __len; \
688  \
689  RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start, \
690  rtskb_under_panic(__rtskb, __len, \
691  current_text_addr());); \
692  \
693  __rtskb->data; \
694  })
695 
696 static inline unsigned char *__rtskb_pull(struct rtskb *skb, unsigned int len)
697 {
698  RTNET_ASSERT(len <= skb->len, return NULL;);
699 
700  skb->len -= len;
701 
702  return skb->data += len;
703 }
704 
705 static inline unsigned char *rtskb_pull(struct rtskb *skb, unsigned int len)
706 {
707  if (len > skb->len)
708  return NULL;
709 
710  skb->len -= len;
711 
712  return skb->data += len;
713 }
714 
715 static inline void rtskb_trim(struct rtskb *skb, unsigned int len)
716 {
717  if (skb->len > len) {
718  skb->len = len;
719  skb->tail = skb->data + len;
720  }
721 }
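
/*
 * The data area manipulation helpers mirror the Linux skb API. A sketch
 * (hypothetical names "my_pool", "frame" and "frame_len"; no error handling):
 *
 *   struct rtskb *skb = alloc_rtskb(RTSKB_SIZE, &my_pool);
 *
 *   rtskb_reserve(skb, 2);                                 // reserve headroom
 *   memcpy(rtskb_put(skb, frame_len), frame, frame_len);   // append payload
 *   rtskb_pull(skb, sizeof(struct ethhdr));                // strip the link header
 */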
722 
723 static inline struct rtskb *rtskb_padto(struct rtskb *rtskb, unsigned int len)
724 {
725  RTNET_ASSERT(len <= (unsigned int)(rtskb->buf_end + 1 - rtskb->data),
726  return NULL;);
727 
728  memset(rtskb->data + rtskb->len, 0, len - rtskb->len);
729 
730  return rtskb;
731 }
732 
733 static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
734  unsigned int offset)
735 {
736  return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
737 }
738 
739 extern struct rtskb_pool global_pool;
740 
741 extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
742  unsigned int initial_size,
743  const struct rtskb_pool_lock_ops *lock_ops,
744  void *lock_cookie);
745 
746 extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
747  unsigned int initial_size,
748  struct module *module);
749 
750 #define rtskb_module_pool_init(pool, size) \
751  __rtskb_module_pool_init(pool, size, THIS_MODULE)
752 
753 extern void rtskb_pool_release(struct rtskb_pool *pool);
754 
755 extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
756  unsigned int add_rtskbs);
757 extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
758  unsigned int rem_rtskbs);
759 extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
760 extern struct rtskb *rtskb_clone(struct rtskb *rtskb, struct rtskb_pool *pool);
761 
762 extern int rtskb_pools_init(void);
763 extern void rtskb_pools_release(void);
764 
765 extern unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb,
766  int offset, u8 *to, int len,
767  unsigned int csum);
768 extern void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to);
769 
770 #if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
771 
772 extern rtdm_lock_t rtcap_lock;
773 extern void (*rtcap_handler)(struct rtskb *skb);
774 
775 static inline void rtcap_mark_incoming(struct rtskb *skb)
776 {
777  skb->cap_start = skb->data;
778  skb->cap_len = skb->len;
779 }
780 
781 static inline void rtcap_report_incoming(struct rtskb *skb)
782 {
783  rtdm_lockctx_t context;
784 
785  rtdm_lock_get_irqsave(&rtcap_lock, context);
786  if (rtcap_handler != NULL)
787  rtcap_handler(skb);
788 
789  rtdm_lock_put_irqrestore(&rtcap_lock, context);
790 }
791 
792 static inline void rtcap_mark_rtmac_enqueue(struct rtskb *skb)
793 {
794  /* rtskb start and length are probably not valid yet */
795  skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
796  skb->cap_rtmac_stamp = rtdm_clock_read();
797 }
798 
799 #else /* ifndef CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
800 
801 #define rtcap_mark_incoming(skb)
802 #define rtcap_report_incoming(skb)
803 #define rtcap_mark_rtmac_enqueue(skb)
804 
805 #endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
806 
807 #endif /* __KERNEL__ */
808 
809 #endif /* __RTSKB_H_ */