Lines matching references to softnet_data in net/core/dev.c; each entry is <source line> <code snippet> in <enclosing function>(). Hedged sketches of the recurring access patterns are interleaved after the match groups they annotate.
220 static inline void rps_lock_irqsave(struct softnet_data *sd, in rps_lock_irqsave()
229 static inline void rps_lock_irq_disable(struct softnet_data *sd) in rps_lock_irq_disable()
237 static inline void rps_unlock_irq_restore(struct softnet_data *sd, in rps_unlock_irq_restore()
246 static inline void rps_unlock_irq_enable(struct softnet_data *sd) in rps_unlock_irq_enable()
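
The four helpers above (matches 220-246) wrap one pattern: when RPS is compiled in, they take the spinlock protecting sd->input_pkt_queue with interrupts saved; otherwise they only toggle interrupts. Below is a minimal userspace sketch of that conditional-locking shape, with pthreads standing in for the kernel's spinlock/IRQ primitives; the type fake_sd and the flag use_backlog_lock are invented for illustration, not kernel names.

#include <pthread.h>
#include <stdbool.h>

/* Stand-in for the lock embedded in softnet_data's input_pkt_queue. */
struct fake_sd {
        pthread_mutex_t input_pkt_queue_lock;
};

/* Invented stand-in for the compile-time RPS condition the real
 * helpers test before deciding whether the lock is required. */
static bool use_backlog_lock = true;

/* cf. rps_lock_irqsave(): lock when RPS is on; the real helper
 * otherwise only disables local interrupts (elided here). */
static void fake_rps_lock(struct fake_sd *sd)
{
        if (use_backlog_lock)
                pthread_mutex_lock(&sd->input_pkt_queue_lock);
}

/* cf. rps_unlock_irq_restore() / rps_unlock_irq_enable(). */
static void fake_rps_unlock(struct fake_sd *sd)
{
        if (use_backlog_lock)
                pthread_mutex_unlock(&sd->input_pkt_queue_lock);
}

int main(void)
{
        struct fake_sd sd = { .input_pkt_queue_lock = PTHREAD_MUTEX_INITIALIZER };

        fake_rps_lock(&sd);
        /* ... enqueue to the backlog here ... */
        fake_rps_unlock(&sd);
        return 0;
}
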
443 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
444 EXPORT_PER_CPU_SYMBOL(softnet_data);
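
Matches 443-444 define the object every other match dereferences: DEFINE_PER_CPU_ALIGNED creates one cacheline-aligned softnet_data instance per CPU, and EXPORT_PER_CPU_SYMBOL makes it visible to modules. Nearly everything else in this listing is an accessor: this_cpu_ptr() for the local CPU's instance, per_cpu(softnet_data, cpu) for a remote one. A minimal userspace analogue of that pattern follows, assuming a plain array indexed by CPU id; the kernel's real mechanism uses per-CPU linker sections and preemption-safe accessors, and all fake_* names here are invented.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS  64
#define CACHELINE 64

/* Stand-in for struct softnet_data; aligned so adjacent CPUs' slots
 * never share a cache line (the point of DEFINE_PER_CPU_ALIGNED). */
struct fake_softnet_data {
        unsigned int processed;          /* cf. match 5367 */
        unsigned int input_queue_head;   /* cf. matches 4545/4627/4682 */
} __attribute__((aligned(CACHELINE)));

static struct fake_softnet_data softnet[MAX_CPUS];

/* per_cpu(softnet_data, cpu) analogue: explicit remote-CPU access,
 * as in enqueue_to_backlog() or dev_cpu_dead(). */
static struct fake_softnet_data *per_cpu_sd(int cpu)
{
        return &softnet[cpu % MAX_CPUS];
}

/* this_cpu_ptr(&softnet_data) analogue: the calling CPU's own slot;
 * sched_getcpu() stands in for the kernel's preemption-safe access. */
static struct fake_softnet_data *this_cpu_sd(void)
{
        return per_cpu_sd(sched_getcpu());
}

int main(void)
{
        this_cpu_sd()->processed++;
        printf("cpu %d: processed=%u\n", sched_getcpu(),
               this_cpu_sd()->processed);
        return 0;
}
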
3117 struct softnet_data *sd; in __netif_reschedule()
3121 sd = this_cpu_ptr(&softnet_data); in __netif_reschedule()
3185 skb->next = __this_cpu_read(softnet_data.completion_queue); in dev_kfree_skb_irq_reason()
3186 __this_cpu_write(softnet_data.completion_queue, skb); in dev_kfree_skb_irq_reason()
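
The pair of matches at 3185-3186 is a lock-free LIFO push onto the local CPU's completion_queue: link the skb to the current head, then store the skb as the new head. No lock is needed because __this_cpu_read()/__this_cpu_write() only ever touch the calling CPU's instance; net_tx_action() (match 5145) later detaches the whole chain at once and frees it. A sketch of that push/detach pair, with invented fake_* stand-ins for struct sk_buff and struct softnet_data:

struct fake_skb {
        struct fake_skb *next;
};

struct fake_sd {
        struct fake_skb *completion_queue;
};

/* cf. dev_kfree_skb_irq_reason(): CPU-local LIFO push; safe without
 * a lock because only this CPU touches its own completion_queue. */
static void completion_push(struct fake_sd *sd, struct fake_skb *skb)
{
        skb->next = sd->completion_queue;   /* source line 3185 */
        sd->completion_queue = skb;         /* source line 3186 */
}

/* cf. net_tx_action(): detach the whole chain in one motion, then
 * walk and free it outside any critical section. */
static struct fake_skb *completion_detach(struct fake_sd *sd)
{
        struct fake_skb *head = sd->completion_queue;

        sd->completion_queue = NULL;
        return head;
}
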
3931 return __this_cpu_read(softnet_data.xmit.skip_txqueue); in netdev_xmit_txqueue_skipped()
3936 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); in netdev_xmit_skip_txqueue()
4457 static inline void ____napi_schedule(struct softnet_data *sd, in ____napi_schedule()
4545 per_cpu(softnet_data, next_cpu).input_queue_head; in set_rps_cpu()
4627 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - in get_rps_cpu()
4682 ((int)(per_cpu(softnet_data, cpu).input_queue_head - in rps_may_expire_flow()
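
Matches 4545, 4627 and 4682 all compute a signed difference of a per-CPU input_queue_head counter against a recorded queue tail. Both are free-running unsigned counters, and the cast to int makes the comparison safe across wraparound, the same trick as the kernel's time_after(). A small self-contained illustration (function name invented):

#include <assert.h>

/* cf. get_rps_cpu()/rps_may_expire_flow(): (int)(head - tail) >= 0
 * means everything enqueued up to 'tail' has been drained, even
 * after the counters wrap past UINT_MAX. */
static int queue_drained(unsigned int input_queue_head,
                         unsigned int last_qtail)
{
        return (int)(input_queue_head - last_qtail) >= 0;
}

int main(void)
{
        assert(queue_drained(100u, 50u));        /* plainly drained */
        assert(!queue_drained(50u, 100u));       /* 50 still queued */
        assert(queue_drained(5u, 0xfffffff0u));  /* safe across wrap */
        return 0;
}
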
4697 struct softnet_data *sd = data; in rps_trigger_softirq()
4708 struct softnet_data *sd = data; in trigger_rx_softirq()
4724 static void napi_schedule_rps(struct softnet_data *sd) in napi_schedule_rps()
4726 struct softnet_data *mysd = this_cpu_ptr(&softnet_data); in napi_schedule_rps()
4752 struct softnet_data *sd; in skb_flow_limit()
4758 sd = this_cpu_ptr(&softnet_data); in skb_flow_limit()
4792 struct softnet_data *sd; in enqueue_to_backlog()
4797 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
5145 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_tx_action()
5367 __this_cpu_inc(softnet_data.processed); in __netif_receive_skb_core()
5849 struct softnet_data *sd; in flush_backlog()
5852 sd = this_cpu_ptr(&softnet_data); in flush_backlog()
5877 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required()
5931 static void net_rps_send_ipi(struct softnet_data *remsd) in net_rps_send_ipi()
5935 struct softnet_data *next = remsd->rps_ipi_next; in net_rps_send_ipi()
5948 static void net_rps_action_and_irq_enable(struct softnet_data *sd) in net_rps_action_and_irq_enable()
5951 struct softnet_data *remsd = sd->rps_ipi_list; in net_rps_action_and_irq_enable()
5965 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) in sd_has_rps_ipi_waiting()
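
Matches 5931-5965 manage a singly linked list of remote softnet_data instances, chained through rps_ipi_next, that have backlog work pending; net_rps_send_ipi() walks it and kicks each owning CPU. A sketch of that walk, assuming invented fake_* stand-ins and a stub in place of the real cross-CPU call:

/* Stand-in for softnet_data's IPI chaining fields. */
struct fake_sd {
        struct fake_sd *rps_ipi_next;
        int cpu;
};

static void kick_cpu(int cpu)
{
        /* real code: smp_call_function_single_async(cpu, ...) */
        (void)cpu;
}

/* cf. net_rps_send_ipi(): read the next pointer before signalling,
 * since the remote CPU may immediately start reusing its entry. */
static void fake_net_rps_send_ipi(struct fake_sd *remsd)
{
        while (remsd) {
                struct fake_sd *next = remsd->rps_ipi_next;

                kick_cpu(remsd->cpu);
                remsd = next;
        }
}
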
5976 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
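
Match 5976 recovers the enclosing softnet_data from its embedded backlog NAPI struct: container_of() subtracts the member's offset within the type from the member's address. A compact, self-contained illustration with invented stand-in types:

#include <stddef.h>
#include <assert.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_napi { int weight; };

struct fake_sd {
        int processed;
        struct fake_napi backlog;   /* embedded, as in softnet_data */
};

int main(void)
{
        struct fake_sd sd;
        struct fake_napi *napi = &sd.backlog;

        /* cf. process_backlog(): map the NAPI pointer handed to the
         * poll function back to its containing softnet_data. */
        assert(container_of(napi, struct fake_sd, backlog) == &sd);
        return 0;
}
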
6036 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule()
6086 ____napi_schedule(this_cpu_ptr(&softnet_data), n); in __napi_schedule_irqoff()
6661 static void skb_defer_free_flush(struct softnet_data *sd) in skb_defer_free_flush()
6685 struct softnet_data *sd; in napi_threaded_poll()
6693 sd = this_cpu_ptr(&softnet_data); in napi_threaded_poll()
6721 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_rx_action()
11207 struct softnet_data *sd, *oldsd, *remsd = NULL; in dev_cpu_dead()
11211 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
11212 oldsd = &per_cpu(softnet_data, oldcpu); in dev_cpu_dead()
11536 struct softnet_data *sd = &per_cpu(softnet_data, i); in net_dev_init()