Searched refs: softnet_data (Results 1 – 7 of 7) sorted by relevance
116  static u32 softnet_backlog_len(struct softnet_data *sd)        in softnet_backlog_len()
122  static struct softnet_data *softnet_get_online(loff_t *pos)    in softnet_get_online()
124  struct softnet_data *sd = NULL;                                in softnet_get_online()
128  sd = &per_cpu(softnet_data, *pos);                             in softnet_get_online()
152  struct softnet_data *sd = v;                                   in softnet_seq_show()
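The file names are missing from these results, but this first group is likely net/core/net-procfs.c, which backs /proc/net/softnet_stat. The hits show the lookup pattern: scan CPU ids, skip offline CPUs, and take that CPU's slot with per_cpu(). A minimal sketch of the same pattern, under a hypothetical name:

#include <linux/cpumask.h>
#include <linux/netdevice.h>

/* Return the softnet_data of the first online CPU at or after *pos,
 * advancing *pos to that CPU; NULL once the ids are exhausted.
 * Hypothetical helper mirroring the softnet_get_online() hit above. */
static struct softnet_data *example_softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids) {
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		}
		++*pos;
	}
	return sd;
}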
225   static inline void rps_lock(struct softnet_data *sd)            in rps_lock()
232   static inline void rps_unlock(struct softnet_data *sd)          in rps_unlock()
406   DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
407   EXPORT_PER_CPU_SYMBOL(softnet_data);
3033  struct softnet_data *sd;                                        in __netif_reschedule()
3037  sd = this_cpu_ptr(&softnet_data);                               in __netif_reschedule()
3101  skb->next = __this_cpu_read(softnet_data.completion_queue);     in __dev_kfree_skb_irq()
3102  __this_cpu_write(softnet_data.completion_queue, skb);           in __dev_kfree_skb_irq()
4310  static inline void ____napi_schedule(struct softnet_data *sd,   in ____napi_schedule()
4391  per_cpu(softnet_data, next_cpu).input_queue_head;               in set_rps_cpu()
[all …]
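This group is likely net/core/dev.c. Lines 406–407 define softnet_data as a cache-aligned per-CPU variable and export it, while the __netif_reschedule() and __dev_kfree_skb_irq() hits access the local CPU's copy with interrupts disabled. A hedged sketch of that access pattern, using a hypothetical per-CPU struct so the kernel's real definition is not duplicated:

#include <linux/percpu.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for softnet_data's completion_queue member. */
struct example_pcpu_state {
	struct sk_buff *completion_queue;
};
static DEFINE_PER_CPU_ALIGNED(struct example_pcpu_state, example_state);

/* Push an skb onto this CPU's completion list, as the
 * __dev_kfree_skb_irq() hits do: the caller must have local IRQs
 * disabled so the read-modify-write of the per-CPU list head cannot
 * be torn by an interrupt on the same CPU. */
static void example_queue_completion(struct sk_buff *skb)
{
	skb->next = __this_cpu_read(example_state.completion_queue);
	__this_cpu_write(example_state.completion_queue, skb);
}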
124  struct softnet_data *sd;          in flow_limit_cpu_sysctl()
139  sd = &per_cpu(softnet_data, i);   in flow_limit_cpu_sysctl()
171  sd = &per_cpu(softnet_data, i);   in flow_limit_cpu_sysctl()
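These three hits (likely net/core/sysctl_net_core.c) show the other access style: indexing an arbitrary CPU's instance with per_cpu(var, cpu) while walking CPU ids, as the flow-limit sysctl handler does. Roughly:

#include <linux/cpumask.h>
#include <linux/netdevice.h>

/* Visit every possible CPU's softnet_data, flow_limit_cpu_sysctl()
 * style. What may be touched under each sd depends on that field's
 * own locking rules (the real handler uses RCU for sd->flow_limit).
 * Sketch only. */
static void example_walk_softnet(void)
{
	struct softnet_data *sd;
	int i;

	for_each_possible_cpu(i) {
		sd = &per_cpu(softnet_data, i);
		/* ... inspect or update per-CPU state via sd ... */
	}
}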
246  struct softnet_data *sd = &get_cpu_var(softnet_data);   in zap_completion_queue()
268  put_cpu_var(softnet_data);                               in zap_completion_queue()
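The netpoll hits (likely net/core/netpoll.c) use a third idiom: get_cpu_var() disables preemption and hands back the local CPU's instance, and put_cpu_var() re-enables preemption. In sketch form:

#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Hypothetical drain modeled on the zap_completion_queue() hits:
 * between get_cpu_var() and put_cpu_var() the task cannot migrate,
 * so sd reliably refers to the current CPU's softnet_data. */
static void example_drain_local(void)
{
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	/* ... detach and free sd->completion_queue entries ... */

	put_cpu_var(softnet_data);
}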
105  struct softnet_data *sd;                        in validate_xmit_xfrm()
128  sd = this_cpu_ptr(&softnet_data);               in validate_xmit_xfrm()
317  struct softnet_data *sd;                        in xfrm_dev_resume()
330  sd = this_cpu_ptr(&softnet_data);               in xfrm_dev_resume()
339  void xfrm_dev_backlog(struct softnet_data *sd)  in xfrm_dev_backlog()
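These hits (likely net/xfrm/xfrm_device.c) show softnet_data used from the IPsec offload path: validate_xmit_xfrm() and xfrm_dev_resume() both take this_cpu_ptr(&softnet_data), and xfrm_dev_backlog() later drains per-CPU state. A loose sketch of the queue-then-drain idea, assuming CONFIG_XFRM_OFFLOAD so that sd->xfrm_backlog exists, and glossing over the real code's locking details:

#include <linux/bottom_half.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Defer an skb onto this CPU's xfrm backlog. local_bh_disable()
 * keeps us on one CPU while we take the pointer; skb_queue_tail()
 * takes the queue's own lock. */
static void example_defer_to_backlog(struct sk_buff *skb)
{
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);
	skb_queue_tail(&sd->xfrm_backlog, skb);
	local_bh_enable();
}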
3327  struct softnet_data {                                                    struct
3336  struct softnet_data *rps_ipi_list;                                       argument
3360  struct softnet_data *rps_ipi_next;                                       argument
3370  static inline void input_queue_head_incr(struct softnet_data *sd)        in input_queue_head_incr() argument
3377  static inline void input_queue_tail_incr_save(struct softnet_data *sd,   in input_queue_tail_incr_save()
3385  DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3389  return this_cpu_read(softnet_data.xmit.recursion);                       in dev_recursion_level()
3395  return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >           in dev_xmit_recursion()
3401  __this_cpu_inc(softnet_data.xmit.recursion);                             in dev_xmit_recursion_inc()
3406  __this_cpu_dec(softnet_data.xmit.recursion);                             in dev_xmit_recursion_dec()
[all …]
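This group is likely include/linux/netdevice.h: the struct definition itself plus the xmit-recursion guard built on it, a per-CPU counter read with this_cpu_read() and bumped with the cheaper __this_cpu_inc()/__this_cpu_dec() variants (safe because the xmit path already runs with bottom halves disabled, pinning the task to one CPU). A sketch of the guard; the limit name and value below are assumptions, not verified against this kernel tree:

#include <linux/netdevice.h>

#define EXAMPLE_XMIT_RECURSION_LIMIT	8	/* assumed limit */

/* True when nested transmits on this CPU exceed the limit; mirrors
 * the dev_xmit_recursion() hit above. */
static inline bool example_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			EXAMPLE_XMIT_RECURSION_LIMIT);
}

static inline void example_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void example_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}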
1867  void xfrm_dev_backlog(struct softnet_data *sd);
1924  static inline void xfrm_dev_backlog(struct softnet_data *sd)  in xfrm_dev_backlog()
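The final pair of hits (likely include/net/xfrm.h) is the usual config-gated header pattern: a real prototype when the feature is compiled in, and an empty static inline stub otherwise, so callers never need their own #ifdefs. Presumably guarded along these lines:

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_backlog(struct softnet_data *sd);
#else
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}
#endif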