Lines matching refs: softnet_data (all hits are in net/core/dev.c; each entry gives the source line number and the enclosing function)
220 static inline void rps_lock_irqsave(struct softnet_data *sd, in rps_lock_irqsave()
229 static inline void rps_lock_irq_disable(struct softnet_data *sd) in rps_lock_irq_disable()
237 static inline void rps_unlock_irq_restore(struct softnet_data *sd, in rps_unlock_irq_restore()
246 static inline void rps_unlock_irq_enable(struct softnet_data *sd) in rps_unlock_irq_enable()
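The four rps_lock_* helpers wrap access to a softnet_data backlog: with CONFIG_RPS the input_pkt_queue spinlock must be taken because remote CPUs can enqueue, otherwise disabling IRQs is enough (and PREEMPT_RT skips even that). A minimal sketch of the save/restore pair, paraphrasing the in-tree helpers as of recent (~v6.x) kernels:

    static inline void rps_lock_irqsave(struct softnet_data *sd,
                                        unsigned long *flags)
    {
            if (IS_ENABLED(CONFIG_RPS))
                    /* Remote CPUs may touch input_pkt_queue: real lock. */
                    spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
            else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                    /* No RPS: IRQ-off alone serializes the queue. */
                    local_irq_save(*flags);
    }

    static inline void rps_unlock_irq_restore(struct softnet_data *sd,
                                              unsigned long *flags)
    {
            if (IS_ENABLED(CONFIG_RPS))
                    spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
            else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                    local_irq_restore(*flags);
    }

The _irq_disable/_irq_enable variants at 229 and 246 follow the same pattern with spin_lock_irq()/local_irq_disable().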
427 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
428 EXPORT_PER_CPU_SYMBOL(softnet_data);
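softnet_data itself is one cache-aligned instance per possible CPU, defined and exported here. Every other hit in this list reaches it in one of two ways, shown in this illustrative fragment (the surrounding context must have preemption or IRQs disabled, as the real callers do):

    /* Local CPU: used from softirq or IRQ-off context, where the
     * current CPU cannot change underneath us.
     */
    struct softnet_data *sd = this_cpu_ptr(&softnet_data);

    /* Explicit CPU: used when another CPU's instance is meant,
     * e.g. the RPS target picked by get_rps_cpu().
     */
    struct softnet_data *rsd = &per_cpu(softnet_data, cpu);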
3070 struct softnet_data *sd; in __netif_reschedule()
3074 sd = this_cpu_ptr(&softnet_data); in __netif_reschedule()
3138 skb->next = __this_cpu_read(softnet_data.completion_queue); in __dev_kfree_skb_irq()
3139 __this_cpu_write(softnet_data.completion_queue, skb); in __dev_kfree_skb_irq()
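__dev_kfree_skb_irq() may run in hard-IRQ context, where freeing an skb outright is not allowed; instead it pushes the skb onto the per-CPU completion_queue (a singly linked LIFO threaded through skb->next) and lets net_tx_action() do the real free later. The push, as in the function around these two lines:

    local_irq_save(flags);
    /* LIFO push; safe without a lock because IRQs are off and
     * completion_queue is strictly per-CPU.
     */
    skb->next = __this_cpu_read(softnet_data.completion_queue);
    __this_cpu_write(softnet_data.completion_queue, skb);
    /* Have net_tx_action() free the list from softirq context. */
    raise_softirq_irqoff(NET_TX_SOFTIRQ);
    local_irq_restore(flags);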
3992 return __this_cpu_read(softnet_data.xmit.skip_txqueue); in netdev_xmit_txqueue_skipped()
3997 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); in netdev_xmit_skip_txqueue()
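The xmit.skip_txqueue flag is per-CPU transmit-path state: when the egress qdisc layer has already pinned a TX queue via skb->queue_mapping, the flag tells the core xmit path to skip its own queue selection. Both accessors are single per-CPU operations, paraphrased:

    static bool netdev_xmit_txqueue_skipped(void)
    {
            return __this_cpu_read(softnet_data.xmit.skip_txqueue);
    }

    void netdev_xmit_skip_txqueue(bool skip)
    {
            __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
    }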
4344 static inline void ____napi_schedule(struct softnet_data *sd, in ____napi_schedule()
4427 per_cpu(softnet_data, next_cpu).input_queue_head; in set_rps_cpu()
4507 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - in get_rps_cpu()
4562 ((int)(per_cpu(softnet_data, cpu).input_queue_head - in rps_may_expire_flow()
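These three RPS hits all use the same idiom: input_queue_head is a free-running count of packets drained from a CPU's backlog, and a flow records the queue tail (last_qtail) at its last enqueue. The wrap-safe signed difference then says whether that CPU has consumed everything the flow queued, i.e. whether the flow can be re-steered or expired without reordering. As used in get_rps_cpu():

    /* Move the flow to next_cpu only if the old CPU is invalid,
     * offline, or has drained every packet this flow queued to it
     * (head - last_qtail >= 0 in signed, wrap-safe arithmetic).
     */
    if (unlikely(tcpu != next_cpu) &&
        (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
         ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
                rflow->last_qtail)) >= 0)) {
            tcpu = next_cpu;
            rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
    }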
4577 struct softnet_data *sd = data; in rps_trigger_softirq()
4588 struct softnet_data *sd = data; in trigger_rx_softirq()
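Both functions are csd callbacks executed on a remote CPU via IPI: rps_trigger_softirq() schedules that CPU's backlog NAPI for RPS, while trigger_rx_softirq() just raises NET_RX_SOFTIRQ so the CPU flushes its defer_list. Paraphrased:

    static void rps_trigger_softirq(void *data)
    {
            struct softnet_data *sd = data;

            /* Queue this CPU's backlog NAPI and raise NET_RX. */
            ____napi_schedule(sd, &sd->backlog);
            sd->received_rps++;
    }

    static void trigger_rx_softirq(void *data)
    {
            struct softnet_data *sd = data;

            __raise_softirq_irqoff(NET_RX_SOFTIRQ);
            /* Allow skb_attempt_defer_free() to send the next IPI. */
            smp_store_release(&sd->defer_ipi_scheduled, 0);
    }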
4599 static int napi_schedule_rps(struct softnet_data *sd) in napi_schedule_rps()
4601 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); in napi_schedule_rps()
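napi_schedule_rps() is where a packet steered to another CPU turns into deferred IPI work: instead of signalling immediately, the remote softnet_data is chained onto the local rps_ipi_list, and net_rx_action() flushes the whole batch later through net_rps_send_ipi(). Paraphrasing the in-tree function:

    static int napi_schedule_rps(struct softnet_data *sd)
    {
            struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

    #ifdef CONFIG_RPS
            if (sd != mysd) {
                    /* Remote CPU: batch it for a later IPI ... */
                    sd->rps_ipi_next = mysd->rps_ipi_list;
                    mysd->rps_ipi_list = sd;

                    /* ... and ensure net_rx_action() runs here soon
                     * to send that batch.
                     */
                    __raise_softirq_irqoff(NET_RX_SOFTIRQ);
                    return 1;
            }
    #endif
            /* Local CPU: schedule our own backlog NAPI directly. */
            __napi_schedule_irqoff(&mysd->backlog);
            return 0;
    }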
4624 struct softnet_data *sd; in skb_flow_limit()
4630 sd = this_cpu_ptr(&softnet_data); in skb_flow_limit()
4664 struct softnet_data *sd; in enqueue_to_backlog()
4669 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
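enqueue_to_backlog() ties the pieces above together: it pins the target CPU's softnet_data, takes the RPS-aware lock, and either appends to input_pkt_queue or first kicks the backlog NAPI when the queue was idle. A condensed sketch of its core (drop accounting elided):

    sd = &per_cpu(softnet_data, cpu);

    rps_lock_irqsave(sd, &flags);
    qlen = skb_queue_len(&sd->input_pkt_queue);
    if (qlen <= READ_ONCE(netdev_max_backlog) &&
        !skb_flow_limit(skb, qlen)) {
            /* Empty queue: start the backlog NAPI. The non-atomic
             * bit op is fine because we own the queue lock.
             */
            if (!qlen &&
                !__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
                    napi_schedule_rps(sd);
            __skb_queue_tail(&sd->input_pkt_queue, skb);
            rps_unlock_irq_restore(sd, &flags);
            return NET_RX_SUCCESS;
    }
    /* else: unlock, bump sd->dropped, free the skb */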
5017 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_tx_action()
5304 __this_cpu_inc(softnet_data.processed); in __netif_receive_skb_core()
5786 struct softnet_data *sd; in flush_backlog()
5789 sd = this_cpu_ptr(&softnet_data); in flush_backlog()
5814 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required()
5868 static void net_rps_send_ipi(struct softnet_data *remsd) in net_rps_send_ipi()
5872 struct softnet_data *next = remsd->rps_ipi_next; in net_rps_send_ipi()
5885 static void net_rps_action_and_irq_enable(struct softnet_data *sd) in net_rps_action_and_irq_enable()
5888 struct softnet_data *remsd = sd->rps_ipi_list; in net_rps_action_and_irq_enable()
5902 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) in sd_has_rps_ipi_waiting()
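These three functions flush the IPI batch built by napi_schedule_rps(): net_rps_action_and_irq_enable() detaches sd->rps_ipi_list with IRQs off, re-enables IRQs, and hands the chain to net_rps_send_ipi(), which fires one async IPI per remote CPU (each running rps_trigger_softirq() there). The walk, paraphrased:

    static void net_rps_send_ipi(struct softnet_data *remsd)
    {
    #ifdef CONFIG_RPS
            while (remsd) {
                    struct softnet_data *next = remsd->rps_ipi_next;

                    if (cpu_online(remsd->cpu))
                            /* Executes rps_trigger_softirq() on
                             * remsd->cpu via its csd.
                             */
                            smp_call_function_single_async(remsd->cpu,
                                                           &remsd->csd);
                    remsd = next;
            }
    #endif
    }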
5913 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
5973 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule()
6024 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule_irqoff()
6629 static void skb_defer_free_flush(struct softnet_data *sd) in skb_defer_free_flush()
6653 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_rx_action()
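skb_defer_free_flush(), called from net_rx_action(), drains sd->defer_list: the list skb_attempt_defer_free() uses to return skbs to the CPU that allocated them, so the free happens cache-warm and in softirq context. Paraphrasing the flush:

    static void skb_defer_free_flush(struct softnet_data *sd)
    {
            struct sk_buff *skb, *next;
            unsigned long flags;

            /* Paired with WRITE_ONCE() in skb_attempt_defer_free(). */
            if (!READ_ONCE(sd->defer_list))
                    return;

            spin_lock_irqsave(&sd->defer_lock, flags);
            skb = sd->defer_list;
            sd->defer_list = NULL;
            sd->defer_count = 0;
            spin_unlock_irqrestore(&sd->defer_lock, flags);

            while (skb != NULL) {
                    next = skb->next;
                    napi_consume_skb(skb, 1);
                    skb = next;
            }
    }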
11059 struct softnet_data *sd, *oldsd, *remsd = NULL; in dev_cpu_dead()
11063 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
11064 oldsd = &per_cpu(softnet_data, oldcpu); in dev_cpu_dead()
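dev_cpu_dead() is the CPU-hotplug callback that migrates a dead CPU's softnet_data into the current CPU's: it splices oldsd's completion and output queues onto sd, re-raises the TX softirq, sends any RPS IPIs oldsd still had batched, and finally re-injects the dead CPU's queued packets. A condensed sketch of the tail end:

    /* After splicing oldsd's completion/output queues onto sd: */
    raise_softirq_irqoff(NET_TX_SOFTIRQ);
    local_irq_enable();

    /* Re-inject packets the dead CPU never got to process; they
     * go through netif_rx() on this CPU instead.
     */
    while ((skb = __skb_dequeue(&oldsd->process_queue))) {
            netif_rx(skb);
            input_queue_head_incr(oldsd);
    }
    while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
            netif_rx(skb);
            input_queue_head_incr(oldsd);
    }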
11385 struct softnet_data *sd = &per_cpu(softnet_data, i); in net_dev_init()
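Finally, net_dev_init() initializes every possible CPU's instance at boot: the packet queues, the csd callbacks wired to the two trigger functions above, and the embedded backlog NAPI whose poll routine is process_backlog(). Condensed from the loop body (XFRM and flush-work setup elided):

    for_each_possible_cpu(i) {
            struct softnet_data *sd = &per_cpu(softnet_data, i);

            skb_queue_head_init(&sd->input_pkt_queue);
            skb_queue_head_init(&sd->process_queue);
            INIT_LIST_HEAD(&sd->poll_list);
            sd->output_queue_tailp = &sd->output_queue;
    #ifdef CONFIG_RPS
            INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
            sd->cpu = i;
    #endif
            INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
            spin_lock_init(&sd->defer_lock);

            init_gro_hash(&sd->backlog);
            sd->backlog.poll = process_backlog;
            sd->backlog.weight = weight_p;
    }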