Lines Matching full:offload
9 #include <linux/can/rx-offload.h>
24 can_rx_offload_le(struct can_rx_offload *offload, in can_rx_offload_le() argument
27 if (offload->inc) in can_rx_offload_le()
34 can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) in can_rx_offload_inc() argument
36 if (offload->inc) in can_rx_offload_inc()
44 struct can_rx_offload *offload = container_of(napi, in can_rx_offload_napi_poll() local
47 struct net_device *dev = offload->dev; in can_rx_offload_napi_poll()
53 (skb = skb_dequeue(&offload->skb_queue))) { in can_rx_offload_napi_poll()
69 if (!skb_queue_empty(&offload->skb_queue)) in can_rx_offload_napi_poll()
70 napi_reschedule(&offload->napi); in can_rx_offload_napi_poll()
121 * @offload: pointer to rx_offload context
141 can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) in can_rx_offload_offload_one() argument
149 if (unlikely(skb_queue_len(&offload->skb_queue) > in can_rx_offload_offload_one()
150 offload->skb_queue_len_max)) in can_rx_offload_offload_one()
153 skb = offload->mailbox_read(offload, n, &timestamp, drop); in can_rx_offload_offload_one()
162 offload->dev->stats.rx_dropped++; in can_rx_offload_offload_one()
163 offload->dev->stats.rx_fifo_errors++; in can_rx_offload_offload_one()
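A hedged sketch of the mailbox_read() callback whose call site appears at line 153 above: the core asks the driver to read mailbox n, fill in the hardware timestamp, and return an skb (NULL if the mailbox is empty, an ERR_PTR() on error or when asked to drop). struct my_mb_regs, my_read_mb() and my_mark_mb_read() are hypothetical placeholders, not part of the rx-offload API.

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/err.h>
#include <linux/string.h>

struct my_mb_regs {			/* hypothetical mailbox snapshot */
	u32 can_id;
	u32 timestamp;
	u8 len;
	u8 data[8];
};

bool my_read_mb(struct net_device *dev, unsigned int mb,
		struct my_mb_regs *regs);		/* hypothetical */
void my_mark_mb_read(struct net_device *dev, unsigned int mb);

static struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				       unsigned int mb, u32 *timestamp,
				       bool drop)
{
	struct my_mb_regs regs;
	struct can_frame *cf;
	struct sk_buff *skb;

	if (!my_read_mb(offload->dev, mb, &regs))
		return NULL;			/* mailbox empty */

	if (unlikely(drop)) {
		/* The core's queue is full: discard the frame but still
		 * release the mailbox so the hardware keeps receiving.
		 */
		skb = ERR_PTR(-ENOBUFS);
		goto mark_read;
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_read;
	}

	*timestamp = regs.timestamp;
	cf->can_id = regs.can_id;
	cf->len = regs.len;
	memcpy(cf->data, regs.data, cf->len);

mark_read:
	my_mark_mb_read(offload->dev, mb);	/* free the mailbox in HW */
	return skb;
}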
175 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, in can_rx_offload_irq_offload_timestamp() argument
181 for (i = offload->mb_first; in can_rx_offload_irq_offload_timestamp()
182 can_rx_offload_le(offload, i, offload->mb_last); in can_rx_offload_irq_offload_timestamp()
183 can_rx_offload_inc(offload, &i)) { in can_rx_offload_irq_offload_timestamp()
189 skb = can_rx_offload_offload_one(offload, i); in can_rx_offload_irq_offload_timestamp()
193 __skb_queue_add_sort(&offload->skb_irq_queue, skb, in can_rx_offload_irq_offload_timestamp()
202 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) in can_rx_offload_irq_offload_fifo() argument
208 skb = can_rx_offload_offload_one(offload, 0); in can_rx_offload_irq_offload_fifo()
214 __skb_queue_tail(&offload->skb_irq_queue, skb); in can_rx_offload_irq_offload_fifo()
222 int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, in can_rx_offload_queue_timestamp() argument
227 if (skb_queue_len(&offload->skb_queue) > in can_rx_offload_queue_timestamp()
228 offload->skb_queue_len_max) { in can_rx_offload_queue_timestamp()
236 __skb_queue_add_sort(&offload->skb_irq_queue, skb, in can_rx_offload_queue_timestamp()
243 unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, in can_rx_offload_get_echo_skb() argument
247 struct net_device *dev = offload->dev; in can_rx_offload_get_echo_skb()
257 err = can_rx_offload_queue_timestamp(offload, skb, timestamp); in can_rx_offload_get_echo_skb()
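The lines above suggest that can_rx_offload_get_echo_skb() pulls the echo skb for a completed TX mailbox, re-queues it via can_rx_offload_queue_timestamp(), and returns the frame's byte count. A hedged TX-completion sketch follows; the trailing frame-length pointer argument is assumed from recent kernels (older versions take only three arguments), and the mailbox index and timestamp are supplied by hypothetical driver code.

#include <linux/can/rx-offload.h>
#include <linux/netdevice.h>

static void my_handle_tx_done(struct can_rx_offload *offload,
			      unsigned int mb, u32 timestamp)
{
	struct net_device *dev = offload->dev;

	/* Returns the echoed frame's length and inserts the echo skb into
	 * the timestamp-sorted IRQ queue, so TX echoes keep their order
	 * relative to received frames.
	 */
	dev->stats.tx_bytes += can_rx_offload_get_echo_skb(offload, mb,
							   timestamp, NULL);
	dev->stats.tx_packets++;
}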
267 int can_rx_offload_queue_tail(struct can_rx_offload *offload, in can_rx_offload_queue_tail() argument
270 if (skb_queue_len(&offload->skb_queue) > in can_rx_offload_queue_tail()
271 offload->skb_queue_len_max) { in can_rx_offload_queue_tail()
276 __skb_queue_tail(&offload->skb_irq_queue, skb); in can_rx_offload_queue_tail()
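can_rx_offload_queue_tail() above lets a driver feed self-constructed skbs (typically error or state frames) into the same NAPI path as offloaded RX frames. A hedged sketch, meant to run in the hard-IRQ handler before can_rx_offload_irq_finish(); the bus-error detection itself is left to hypothetical driver code.

#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/rx-offload.h>

static void my_report_bus_error(struct can_rx_offload *offload)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(offload->dev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_BUSERROR;

	/* No hardware timestamp is available here, so tail-queue instead
	 * of can_rx_offload_queue_timestamp(); the core drops the skb
	 * when skb_queue is already over skb_queue_len_max.
	 */
	can_rx_offload_queue_tail(offload, skb);
}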
282 void can_rx_offload_irq_finish(struct can_rx_offload *offload) in can_rx_offload_irq_finish() argument
287 if (skb_queue_empty_lockless(&offload->skb_irq_queue)) in can_rx_offload_irq_finish()
290 spin_lock_irqsave(&offload->skb_queue.lock, flags); in can_rx_offload_irq_finish()
291 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); in can_rx_offload_irq_finish()
292 spin_unlock_irqrestore(&offload->skb_queue.lock, flags); in can_rx_offload_irq_finish()
294 queue_len = skb_queue_len(&offload->skb_queue); in can_rx_offload_irq_finish()
295 if (queue_len > offload->skb_queue_len_max / 8) in can_rx_offload_irq_finish()
296 netdev_dbg(offload->dev, "%s: queue_len=%d\n", in can_rx_offload_irq_finish()
299 napi_schedule(&offload->napi); in can_rx_offload_irq_finish()
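can_rx_offload_irq_finish() above splices the skbs gathered during the interrupt onto the locked queue and schedules NAPI. A hedged sketch of a FIFO-mode hard-IRQ handler tying the pieces together; struct my_priv and my_rx_pending() are hypothetical.

#include <linux/interrupt.h>
#include <linux/can/rx-offload.h>

struct my_priv {			/* hypothetical driver private data */
	struct can_rx_offload offload;
	/* register mappings, locks, ... */
};

bool my_rx_pending(struct my_priv *priv);	/* hypothetical */

static irqreturn_t my_can_irq(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* Each call reads mailbox 0 through the driver's mailbox_read()
	 * callback and collects the skb on the lock-free skb_irq_queue.
	 */
	while (my_rx_pending(priv))
		can_rx_offload_irq_offload_fifo(&priv->offload);

	/* Last rx-offload call in the handler: splice skb_irq_queue onto
	 * skb_queue and kick NAPI.  A threaded handler would call
	 * can_rx_offload_threaded_irq_finish() instead.
	 */
	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}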
303 void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload) in can_rx_offload_threaded_irq_finish() argument
308 if (skb_queue_empty_lockless(&offload->skb_irq_queue)) in can_rx_offload_threaded_irq_finish()
311 spin_lock_irqsave(&offload->skb_queue.lock, flags); in can_rx_offload_threaded_irq_finish()
312 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); in can_rx_offload_threaded_irq_finish()
313 spin_unlock_irqrestore(&offload->skb_queue.lock, flags); in can_rx_offload_threaded_irq_finish()
315 queue_len = skb_queue_len(&offload->skb_queue); in can_rx_offload_threaded_irq_finish()
316 if (queue_len > offload->skb_queue_len_max / 8) in can_rx_offload_threaded_irq_finish()
317 netdev_dbg(offload->dev, "%s: queue_len=%d\n", in can_rx_offload_threaded_irq_finish()
321 napi_schedule(&offload->napi); in can_rx_offload_threaded_irq_finish()
327 struct can_rx_offload *offload, in can_rx_offload_init_queue() argument
330 offload->dev = dev; in can_rx_offload_init_queue()
333 offload->skb_queue_len_max = 2 << fls(weight); in can_rx_offload_init_queue()
334 offload->skb_queue_len_max *= 4; in can_rx_offload_init_queue()
335 skb_queue_head_init(&offload->skb_queue); in can_rx_offload_init_queue()
336 __skb_queue_head_init(&offload->skb_irq_queue); in can_rx_offload_init_queue()
338 netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll, in can_rx_offload_init_queue()
342 __func__, offload->skb_queue_len_max); in can_rx_offload_init_queue()
348 struct can_rx_offload *offload) in can_rx_offload_add_timestamp() argument
352 if (offload->mb_first > BITS_PER_LONG_LONG || in can_rx_offload_add_timestamp()
353 offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) in can_rx_offload_add_timestamp()
356 if (offload->mb_first < offload->mb_last) { in can_rx_offload_add_timestamp()
357 offload->inc = true; in can_rx_offload_add_timestamp()
358 weight = offload->mb_last - offload->mb_first; in can_rx_offload_add_timestamp()
360 offload->inc = false; in can_rx_offload_add_timestamp()
361 weight = offload->mb_first - offload->mb_last; in can_rx_offload_add_timestamp()
364 return can_rx_offload_init_queue(dev, offload, weight); in can_rx_offload_add_timestamp()
369 struct can_rx_offload *offload, unsigned int weight) in can_rx_offload_add_fifo() argument
371 if (!offload->mailbox_read) in can_rx_offload_add_fifo()
374 return can_rx_offload_init_queue(dev, offload, weight); in can_rx_offload_add_fifo()
379 struct can_rx_offload *offload, in can_rx_offload_add_manual() argument
382 if (offload->mailbox_read) in can_rx_offload_add_manual()
385 return can_rx_offload_init_queue(dev, offload, weight); in can_rx_offload_add_manual()
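The three registration helpers above select how frames reach the queue: can_rx_offload_add_timestamp() walks mailboxes mb_first..mb_last and sorts frames by hardware timestamp, can_rx_offload_add_fifo() reads a single FIFO presented as mailbox 0, and can_rx_offload_add_manual() is for drivers that queue skbs themselves via can_rx_offload_queue_tail()/_timestamp() and must leave mailbox_read unset. A hedged probe-time sketch; struct my_priv, MY_NAPI_WEIGHT, the mailbox range and my_mailbox_read() are hypothetical.

#include <linux/can/rx-offload.h>
#include <linux/netdevice.h>

#define MY_NAPI_WEIGHT	32			/* hypothetical */

struct my_priv {
	struct can_rx_offload offload;
	bool rx_timestamps;			/* hypothetical HW capability */
};

struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				unsigned int mb, u32 *timestamp, bool drop);

static int my_can_setup_offload(struct net_device *dev, struct my_priv *priv)
{
	priv->offload.mailbox_read = my_mailbox_read;

	if (priv->rx_timestamps) {
		/* Mailboxes 0..31 are scanned per interrupt and sorted by
		 * timestamp; the NAPI weight is derived from the range.
		 */
		priv->offload.mb_first = 0;
		priv->offload.mb_last = 31;
		return can_rx_offload_add_timestamp(dev, &priv->offload);
	}

	/* Single RX FIFO read as mailbox 0. */
	return can_rx_offload_add_fifo(dev, &priv->offload, MY_NAPI_WEIGHT);
}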
389 void can_rx_offload_enable(struct can_rx_offload *offload) in can_rx_offload_enable() argument
391 napi_enable(&offload->napi); in can_rx_offload_enable()
395 void can_rx_offload_del(struct can_rx_offload *offload) in can_rx_offload_del() argument
397 netif_napi_del(&offload->napi); in can_rx_offload_del()
398 skb_queue_purge(&offload->skb_queue); in can_rx_offload_del()
399 __skb_queue_purge(&offload->skb_irq_queue); in can_rx_offload_del()
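A hedged lifecycle sketch for the enable/teardown helpers above: can_rx_offload_enable() belongs in ndo_open once the controller is brought up, its counterpart can_rx_offload_disable() (a NAPI-disable wrapper assumed to live in linux/can/rx-offload.h) in ndo_stop, and can_rx_offload_del() in the remove path after the netdev is unregistered. struct my_priv and the controller start/stop steps are hypothetical.

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/netdevice.h>

struct my_priv {
	struct can_rx_offload offload;
};

static int my_can_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	int err;

	err = open_candev(dev);
	if (err)
		return err;

	can_rx_offload_enable(&priv->offload);
	/* ... request the IRQ and start the controller here ... */
	netif_start_queue(dev);

	return 0;
}

static int my_can_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	/* ... stop the controller and free the IRQ here ... */
	can_rx_offload_disable(&priv->offload);
	close_candev(dev);

	return 0;
}

static void my_can_remove(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	unregister_candev(dev);
	can_rx_offload_del(&priv->offload);	/* purges both queues */
	free_candev(dev);
}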