Lines Matching +full:foo +full:- +full:queue

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
88 * - qdisc return codes
89 * - driver transmit return codes
90 * - errno values
94 * the driver transmit return codes though - when qdiscs are used, the actual
101 /* qdisc ->enqueue() return codes. */
111 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
131 * - successful transmission (rc == NETDEV_TX_OK) in dev_xmit_complete()
132 * - error while transmitting (rc < 0) in dev_xmit_complete()
133 * - error while queueing to a different device (rc & NET_XMIT_MASK) in dev_xmit_complete()
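A minimal sketch (hypothetical example_send(), not part of this header) of how a caller typically separates the three classes above after dev_queue_xmit(): negative values are driver/queueing errors passed through unchanged, while positive qdisc verdicts are folded with net_xmit_errno(), which treats NET_XMIT_CN as success.

static int example_send(struct sk_buff *skb)
{
        int rc = dev_queue_xmit(skb);

        if (rc > 0)                     /* qdisc verdict: NET_XMIT_DROP, NET_XMIT_CN, ... */
                rc = net_xmit_errno(rc); /* CN -> 0, anything else -> -ENOBUFS */
        return rc;                      /* 0 on success, negative errno on failure */
}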
142 * Compute the worst-case header length according to the protocols
234 #define netdev_hw_addr_list_count(l) ((l)->count)
237 list_for_each_entry(ha, &(l)->list, list)
239 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
240 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
242 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
244 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
245 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
247 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
256 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
258 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
262 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
264 * dev->hard_header_len ? (dev->hard_header_len +
265 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
271 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
273 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
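A minimal sketch of how the headroom macros above are normally consumed when building a packet from scratch; example_alloc() and payload_len are hypothetical.

static struct sk_buff *example_alloc(struct net_device *dev, unsigned int payload_len)
{
        struct sk_buff *skb;

        skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
        if (!skb)
                return NULL;
        skb_reserve(skb, LL_RESERVED_SPACE(dev));       /* headroom for the hard header */
        skb_put(skb, payload_len);                      /* payload area to be filled in */
        return skb;
}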
321 * to the per-CPU poll_list, and whoever clears that bit
350 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
354 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing */
382 * enum rx_handler_result - Possible return values for rx_handlers.
386 * case skb->dev was changed by rx_handler.
394 * to register a second rx_handler will return -EBUSY.
407 * If the rx_handler changed skb->dev, to divert the skb to another
413 * are registered on exact device (ptype->dev == skb->dev).
415 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
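A minimal sketch of an rx_handler skeleton illustrating two of the return values described above; example_rx_handler() is hypothetical and would be attached with netdev_rx_handler_register().

static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (!pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                return RX_HANDLER_CONSUMED;     /* we own the skb; stop further processing */
        }

        return RX_HANDLER_PASS;                 /* continue normal protocol delivery */
}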
436 return test_bit(NAPI_STATE_DISABLE, &n->state); in napi_disable_pending()
441 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); in napi_prefer_busy_poll()
447 * napi_schedule - schedule NAPI poll
460 * napi_schedule_irqoff - schedule NAPI poll
471 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
483 * napi_complete - NAPI processing complete
498 * napi_disable - prevent NAPI from scheduling
509 * napi_synchronize - wait until NAPI is not running
519 while (test_bit(NAPI_STATE_SCHED, &n->state)) in napi_synchronize()
526 * napi_if_scheduled_mark_missed - if napi is running, set the
538 val = READ_ONCE(n->state); in napi_if_scheduled_mark_missed()
546 } while (cmpxchg(&n->state, val, new) != val); in napi_if_scheduled_mark_missed()
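A minimal sketch of the poll() callback the NAPI helpers above are written around; the RX processing and the interrupt re-enable step are hypothetical driver details.

static int example_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... process up to 'budget' packets from the RX ring, counting them in 'done' ... */

        if (done < budget && napi_complete_done(napi, done)) {
                /* ring drained: re-enable the device's RX interrupt here */
        }

        return done;
}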
568 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
571 * queue independently. The netif_xmit_*stopped functions below are called
572 * to check if the queue has been stopped by the driver or stack (either
579 * read-mostly part
592 * Number of TX timeouts for this queue
597 /* Subordinate device that the queue has been assigned to */
603 * write-mostly part
637 return q->numa_node; in netdev_queue_numa_node_read()
646 q->numa_node = node; in netdev_queue_numa_node_write()
664 * tail pointer for that CPU's input queue at the time of last enqueue, and
688 * Each entry is a 32bit value. Upper part is the high-order bits
691 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
693 * meaning we use 32-6=26 bits for the hash.
711 unsigned int index = hash & table->mask; in rps_record_sock_flow()
717 if (table->ents[index] != val) in rps_record_sock_flow()
718 table->ents[index] = val; in rps_record_sock_flow()
728 /* This structure contains an instance of an RX queue. */
743 * RX queue sysfs structures and functions.
747 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
748 ssize_t (*store)(struct netdev_rx_queue *queue,
752 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
772 - sizeof(struct xps_map)) / sizeof(u16))
778 * in nr_ids. This helps avoid accessing out-of-bounds memory.
782 * not crossing its upper bound, as the original dev->num_tc can be updated in
838 return a->id_len == b->id_len && in netdev_phys_item_id_same()
839 memcmp(a->id, b->id, a->id_len) == 0; in netdev_phys_item_id_same()
1044 * the queue before that can happen; it's for obsolete devices and weird
1045 * corner cases, but the stack really does a non-trivial amount
1061 * Called to decide which queue to use when device supports multiple
1082 * Old-style ioctl entry point. This is used internally by the
1106 * for dev->watchdog ticks.
1113 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1116 * (which should normally be dev->stats) and return a pointer to
1119 * 3. Update dev->stats asynchronously and atomically, and define
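A minimal sketch of option 1 from the list above: a driver-supplied ndo_get_stats64() filling the zero-initialised structure, assuming the device keeps per-CPU counters in dev->tstats (dev_fetch_sw_netstats() sums them).

static void example_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
{
        dev_fetch_sw_netstats(stats, dev->tstats);      /* per-CPU rx/tx packets and bytes */
        stats->rx_errors = dev->stats.rx_errors;        /* slow-path error counters */
        stats->tx_dropped = dev->stats.tx_dropped;
}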
1140 * SR-IOV management functions.
1163 * tx queues stopped. This allows the netdevice to perform queue
1174 * so the underlying device can perform whatever needed clean-ups to
1201 * FC-GS Fabric Device Management Information (FDMI) specification.
1212 * Set hardware filter for RFS. rxq_index is the target queue index;
1232 * Adjusts the requested feature flags according to device-specific
1239 * Must return >0 or -errno if it changed dev->features itself.
1265 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1269 * network cables) or protocol-dependent mechanisms (eg
1296 * Called when a user wants to set a max-rate limitation of specific
1297 * TX queue.
1325 * no frames were xmit'ed and core-caller will free all frames.
1332 * queue id bound to an AF_XDP socket. The flags field specifies if
1406 int queue, u8 *mac);
1408 int queue, u16 vlan,
1569 * enum netdev_priv_flags - &struct net_device priv_flags
1584 * release skb->dst
1586 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1691 * struct net_device - The DEVICE structure.
1694 * data with strictly "high-level" data, and it has to know about
1714 * @ptype_all: Device-specific packet handlers for all protocols
1715 * @ptype_specific: Device-specific, protocol-specific packet handlers
1719 * @hw_features: User-changeable features
1721 * @wanted_features: User-requested features
1821 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1846 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1847 * indexed by RX queue number. Assigned by driver.
1856 * @tx_queue_len: Max frames per queue allowed
1858 * @xdp_bulkq: XDP device bulk queue
1885 * @ml_priv: Mid-layer private
1886 * @ml_priv_type: Mid-layer private type
1899 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1918 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1919 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1925 * @wol_enabled: Wake-on-LAN is enabled
1929 * @net_notifier_list: List of per-net netdev notifier block
1941 * dev->addr_list_lock.
1981 /* Read-mostly cache-line for fast-path access */
1989 /* Note : dev->mtu is often read without holding a lock.
2085 /* Protocol-specific pointers */
2168 /* These may be needed for future network-power-down code. */
2208 /* mid-layer private */
2274 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2284 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2290 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2291 return -EINVAL; in netdev_set_prio_tc_map()
2293 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2305 return dev->num_tc; in netdev_get_num_tc()
2332 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2339 return &dev->_tx[index]; in netdev_get_tx_queue()
2356 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2357 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2368 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2369 (dev)->qdisc_running_key = &qdisc_running_key; \
2370 lockdep_set_class(&(dev)->addr_list_lock, \
2372 for (i = 0; i < (dev)->num_tx_queues; i++) \
2373 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2388 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2393 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2394 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2400 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2406 if (dev->ml_priv_type != type) in netdev_get_ml_priv()
2409 return dev->ml_priv; in netdev_get_ml_priv()
2416 WARN(dev->ml_priv_type && dev->ml_priv_type != type, in netdev_set_ml_priv()
2418 dev->ml_priv_type, type); in netdev_set_ml_priv()
2419 WARN(!dev->ml_priv_type && dev->ml_priv, in netdev_set_ml_priv()
2422 dev->ml_priv = ml_priv; in netdev_set_ml_priv()
2423 dev->ml_priv_type = type; in netdev_set_ml_priv()
2432 return read_pnet(&dev->nd_net); in dev_net()
2438 write_pnet(&dev->nd_net, net); in dev_net_set()
2442 * netdev_priv - access network device private data
2455 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2458 * fine-grained identification of different network device types. For
2461 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2469 * netif_napi_add - initialize a NAPI context
2476 * *any* of the other NAPI-related functions.
2482 * netif_tx_napi_add - initialize a NAPI context
2489 * to exclusively poll a TX queue.
2497 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); in netif_tx_napi_add()
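A minimal sketch pairing netdev_priv() with netif_napi_add(); struct example_priv and example_poll() (the poll callback sketched earlier) are hypothetical, and the weight argument follows the four-argument signature used by this kernel generation.

struct example_priv {
        struct napi_struct napi;
};

static void example_setup_napi(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
}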
2502 * __netif_napi_del - remove a NAPI context
2512 * netif_napi_del - remove a NAPI context
2524 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2530 /* This indicates where we are processing relative to skb->data. */
2533 /* This is non-zero if the packet cannot be merged with the new skb. */
2548 /* Used in ipv6_gro_receive() and foo-over-udp */
2551 /* This is non-zero if the packet may be of the same flow. */
2568 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2590 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2595 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; in gro_recursion_inc_test()
2604 NAPI_GRO_CB(skb)->flush |= 1;
2619 NAPI_GRO_CB(skb)->flush |= 1;
2658 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2677 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_rx_add()
2679 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_rx_add()
2680 tstats->rx_bytes += len; in dev_sw_netstats_rx_add()
2681 tstats->rx_packets++; in dev_sw_netstats_rx_add()
2682 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_rx_add()
2689 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_tx_add()
2691 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_tx_add()
2692 tstats->tx_bytes += len; in dev_sw_netstats_tx_add()
2693 tstats->tx_packets += packets; in dev_sw_netstats_tx_add()
2694 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_tx_add()
2699 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); in dev_lstats_add()
2701 u64_stats_update_begin(&lstats->syncp); in dev_lstats_add()
2702 u64_stats_add(&lstats->bytes, len); in dev_lstats_add()
2703 u64_stats_inc(&lstats->packets); in dev_lstats_add()
2704 u64_stats_update_end(&lstats->syncp); in dev_lstats_add()
2715 u64_stats_init(&stat->syncp); \
2732 u64_stats_init(&stat->syncp); \
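A minimal sketch of a virtual device's receive path feeding the per-CPU counters updated by the helpers above; it assumes the driver has already allocated dev->tstats.

static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev);       /* also sets skb->dev */
        dev_sw_netstats_rx_add(dev, skb->len);          /* per-CPU rx_packets/rx_bytes */
        netif_rx(skb);
}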
2779 - we can use this e.g. to kick TCP sessions
2865 info->dev = dev; in netdev_notifier_info_init()
2866 info->extack = NULL; in netdev_notifier_info_init()
2872 return info->dev; in netdev_notifier_info_to_dev()
2878 return info->extack; in netdev_notifier_info_to_extack()
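A minimal sketch of a netdevice notifier using the helpers above; the handler and its single NETDEV_UP case are hypothetical, and the block would be registered with register_netdevice_notifier().

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UP)
                pr_info("%s is up\n", netdev_name(dev));

        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};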
2887 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2889 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2891 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2893 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2895 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2897 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2900 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2912 lh = dev->dev_list.next; in next_net_device()
2913 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device()
2922 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
2923 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device_rcu()
2928 return list_empty(&net->dev_base_head) ? NULL : in first_net_device()
2929 net_device_entry(net->dev_base_head.next); in first_net_device()
2934 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); in first_net_device_rcu()
2936 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in first_net_device_rcu()
3012 return NAPI_GRO_CB(skb)->data_offset; in skb_gro_offset()
3017 return skb->len - NAPI_GRO_CB(skb)->data_offset; in skb_gro_len()
3022 NAPI_GRO_CB(skb)->data_offset += len; in skb_gro_pull()
3028 return NAPI_GRO_CB(skb)->frag0 + offset; in skb_gro_header_fast()
3033 return NAPI_GRO_CB(skb)->frag0_len < hlen; in skb_gro_header_hard()
3038 NAPI_GRO_CB(skb)->frag0 = NULL; in skb_gro_frag0_invalidate()
3039 NAPI_GRO_CB(skb)->frag0_len = 0; in skb_gro_frag0_invalidate()
3049 return skb->data + offset; in skb_gro_header_slow()
3054 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + in skb_gro_network_header()
3061 if (NAPI_GRO_CB(skb)->csum_valid) in skb_gro_postpull_rcsum()
3062 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, in skb_gro_postpull_rcsum()
3075 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); in skb_at_gro_remcsum_start()
3082 return ((skb->ip_summed != CHECKSUM_PARTIAL || in __skb_gro_checksum_validate_needed()
3086 NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_validate_needed()
3093 if (NAPI_GRO_CB(skb)->csum_valid && in __skb_gro_checksum_validate_complete()
3094 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) in __skb_gro_checksum_validate_complete()
3097 NAPI_GRO_CB(skb)->csum = psum; in __skb_gro_checksum_validate_complete()
3104 if (NAPI_GRO_CB(skb)->csum_cnt > 0) { in skb_gro_incr_csum_unnecessary()
3106 NAPI_GRO_CB(skb)->csum_cnt--; in skb_gro_incr_csum_unnecessary()
3140 return (NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_convert_check()
3141 !NAPI_GRO_CB(skb)->csum_valid); in __skb_gro_checksum_convert_check()
3147 NAPI_GRO_CB(skb)->csum = ~pseudo; in __skb_gro_checksum_convert()
3148 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_convert()
3165 grc->offset = 0; in skb_gro_remcsum_init()
3166 grc->delta = 0; in skb_gro_remcsum_init()
3178 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); in skb_gro_remcsum_process()
3181 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; in skb_gro_remcsum_process()
3192 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, in skb_gro_remcsum_process()
3195 /* Adjust skb->csum since we changed the packet */ in skb_gro_remcsum_process()
3196 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); in skb_gro_remcsum_process()
3198 grc->offset = off + hdrlen + offset; in skb_gro_remcsum_process()
3199 grc->delta = delta; in skb_gro_remcsum_process()
3208 size_t plen = grc->offset + sizeof(u16); in skb_gro_remcsum_cleanup()
3210 if (!grc->delta) in skb_gro_remcsum_cleanup()
3213 ptr = skb_gro_header_fast(skb, grc->offset); in skb_gro_remcsum_cleanup()
3214 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { in skb_gro_remcsum_cleanup()
3215 ptr = skb_gro_header_slow(skb, plen, grc->offset); in skb_gro_remcsum_cleanup()
3220 remcsum_unadjust((__sum16 *)ptr, grc->delta); in skb_gro_remcsum_cleanup()
3226 if (PTR_ERR(pp) != -EINPROGRESS) in skb_gro_flush_final()
3227 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
3234 if (PTR_ERR(pp) != -EINPROGRESS) { in skb_gro_flush_final_remcsum()
3235 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
3237 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
3243 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
3250 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
3252 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
3261 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
3264 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
3270 const struct net_device *dev = skb->dev; in dev_parse_header()
3272 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
3274 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
3279 const struct net_device *dev = skb->dev; in dev_parse_header_protocol()
3281 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
3283 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
3290 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
3292 if (len < dev->min_header_len) in dev_validate_header()
3296 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
3300 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
3301 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
3308 return dev->header_ops && dev->header_ops->create; in dev_has_header()
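A minimal sketch of dev_hard_header() in use; the ETH_P_IP protocol, the destination address argument, and example_push_header() are hypothetical, and the skb is assumed to already have LL_RESERVED_SPACE(dev) headroom.

static int example_push_header(struct sk_buff *skb, struct net_device *dev,
                               const void *daddr)
{
        if (!dev_has_header(dev))
                return 0;               /* e.g. pure L3 devices */

        return dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr, skb->len);
}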
3325 * Incoming packets are placed on per-CPU queues
3373 sd->input_queue_head++; in input_queue_head_incr()
3381 *qtail = ++sd->input_queue_tail; in input_queue_tail_incr_save()
3416 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3422 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_start_queue()
3426 * netif_start_queue - allow transmit
3440 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3449 * netif_wake_queue - restart transmit
3464 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3472 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_stop_queue()
3476 * netif_stop_queue - stop transmitting packets
3491 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_queue_stopped()
3495 * netif_queue_stopped - test if transmit queue is flow blocked
3498 * Test if transmit queue on device is currently unable to send.
3507 return dev_queue->state & QUEUE_STATE_ANY_XOFF; in netif_xmit_stopped()
3513 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; in netif_xmit_frozen_or_stopped()
3519 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; in netif_xmit_frozen_or_drv_stopped()
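A minimal sketch of the stop/wake flow-control idiom the helpers above support, as it typically appears in a single-queue driver's xmit path; the ring-occupancy checks are hypothetical, and the TX completion handler would later call netif_wake_queue(dev) once descriptors free up.

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (example_tx_ring_full(dev)) {        /* hypothetical ring check */
                netif_stop_queue(dev);          /* should rarely be reached */
                return NETDEV_TX_BUSY;
        }

        /* ... hand the skb to hardware ... */

        if (example_tx_ring_almost_full(dev))
                netif_stop_queue(dev);          /* stop before the ring overflows */
        return NETDEV_TX_OK;
}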
3523 * netdev_queue_set_dql_min_limit - set dql minimum limit
3524 * @dev_queue: pointer to transmit queue
3528 * defined by @min_limit is reached (or until the tx queue is
3536 dev_queue->dql.min_limit = min_limit; in netdev_queue_set_dql_min_limit()
3541 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3542 * @dev_queue: pointer to transmit queue
3550 prefetchw(&dev_queue->dql.num_queued); in netdev_txq_bql_enqueue_prefetchw()
3555 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3556 * @dev_queue: pointer to transmit queue
3564 prefetchw(&dev_queue->dql.limit); in netdev_txq_bql_complete_prefetchw()
3572 dql_queued(&dev_queue->dql, bytes); in netdev_tx_sent_queue()
3574 if (likely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3577 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3587 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3588 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3604 dql_queued(&dev_queue->dql, bytes); in __netdev_tx_sent_queue()
3613 * netdev_sent_queue - report the number of bytes queued to hardware
3615 * @bytes: number of bytes queued to the hardware device queue
3618 * device hardware queue. @bytes should be a good approximation and should
3641 dql_completed(&dev_queue->dql, bytes); in netdev_tx_completed_queue()
3645 * netdev_tx_sent_queue will miss the update and cause the queue to in netdev_tx_completed_queue()
3650 if (unlikely(dql_avail(&dev_queue->dql) < 0)) in netdev_tx_completed_queue()
3653 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) in netdev_tx_completed_queue()
3659 * netdev_completed_queue - report bytes and packets completed by device
3665 * hardware queue over the physical medium, @bytes must exactly match the
3677 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); in netdev_tx_reset_queue()
3678 dql_reset(&q->dql); in netdev_tx_reset_queue()
3683 * netdev_reset_queue - reset the packets and bytes count of a network device
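A minimal sketch of the byte-queue-limits pairing described above: bytes are reported when descriptors are posted and again when the hardware completes them (hypothetical single-queue helpers; after a ring reset the driver would also call netdev_tx_reset_queue()/netdev_reset_queue()).

/* transmit side, once the skb has been posted to the ring */
static void example_account_sent(struct net_device *dev, struct sk_buff *skb)
{
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), skb->len);
}

/* completion side, after the hardware reports finished descriptors */
static void example_account_completed(struct net_device *dev,
                                      unsigned int pkts, unsigned int bytes)
{
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}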
3695 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3697 * @queue_index: given tx queue index
3699 * Returns 0 if given tx queue index >= number of device tx queues,
3700 * otherwise returns the originally passed tx queue index.
3704 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3705 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", in netdev_cap_txqueue()
3706 dev->name, queue_index, in netdev_cap_txqueue()
3707 dev->real_num_tx_queues); in netdev_cap_txqueue()
3715 * netif_running - test if up
3722 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3733 * netif_start_subqueue - allow sending packets on subqueue
3735 * @queue_index: sub queue index
3737 * Start individual transmit queue of a device with multiple transmit queues.
3747 * netif_stop_subqueue - stop sending packets on subqueue
3749 * @queue_index: sub queue index
3751 * Stop individual transmit queue of a device with multiple transmit queues.
3760 * __netif_subqueue_stopped - test status of subqueue
3762 * @queue_index: sub queue index
3764 * Check individual transmit queue of a device with multiple transmit queues.
3775 * netif_subqueue_stopped - test status of subqueue
3777 * @skb: sub queue buffer pointer
3779 * Check individual transmit queue of a device with multiple transmit queues.
3788 * netif_wake_subqueue - allow sending packets on subqueue
3790 * @queue_index: sub queue index
3792 * Resume individual transmit queue of a device with multiple transmit queues.
3808 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3809 * @j: CPU/Rx queue index
3813 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3824 * netif_attr_test_online - Test for online CPU/Rx queue
3825 * @j: CPU/Rx queue index
3829 * Returns true if a CPU/Rx queue is online.
3844 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3845 * @n: CPU/Rx queue index
3846 * @srcp: the cpumask/Rx queue mask pointer
3854 /* -1 is a legal arg here. */ in netif_attrmask_next()
3855 if (n != -1) in netif_attrmask_next()
3865 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3866 * @n: CPU/Rx queue index
3877 /* -1 is a legal arg here. */ in netif_attrmask_next_and()
3878 if (n != -1) in netif_attrmask_next_and()
3907 * netif_is_multiqueue - test if device has multiple transmit queues
3914 return dev->num_tx_queues > 1; in netif_is_multiqueue()
3925 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
3935 return dev->_rx + rxq; in __netif_get_rx_queue()
3940 struct netdev_rx_queue *queue) in get_netdev_rx_queue_index() argument
3942 struct net_device *dev = queue->dev; in get_netdev_rx_queue_index()
3943 int index = queue - dev->_rx; in get_netdev_rx_queue_index()
3945 BUG_ON(index >= dev->num_rx_queues); in get_netdev_rx_queue_index()
4019 kfree_skb(napi->skb); in napi_free_frags()
4020 napi->skb = NULL; in napi_free_frags()
4109 if (!(dev->flags & IFF_UP)) in __is_skb_forwardable()
4115 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; in __is_skb_forwardable()
4116 if (skb->len <= len) in __is_skb_forwardable()
4134 atomic_long_inc(&dev->rx_dropped); in ____dev_forward_skb()
4139 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); in ____dev_forward_skb()
4140 skb->priority = 0; in ____dev_forward_skb()
4154 * dev_put - release reference to device
4163 this_cpu_dec(*dev->pcpu_refcnt); in dev_put()
4165 refcount_dec(&dev->dev_refcnt); in dev_put()
4171 * dev_hold - get reference to device
4180 this_cpu_inc(*dev->pcpu_refcnt); in dev_hold()
4182 refcount_inc(&dev->dev_refcnt); in dev_hold()
4201 * netif_carrier_ok - test if carrier present
4208 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
4220 * netif_dormant_on - mark device as dormant.
4227 * in a "pending" state, waiting for some external event. For "on-
4233 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
4238 * netif_dormant_off - set device as not dormant.
4245 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
4250 * netif_dormant - test if device is dormant
4257 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
4262 * netif_testing_on - mark device as under test.
4273 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_on()
4278 * netif_testing_off - set device as not under test.
4285 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_off()
4290 * netif_testing - test if device is under test
4297 return test_bit(__LINK_STATE_TESTING, &dev->state); in netif_testing()
4302 * netif_oper_up - test if device is operational
4309 return (dev->operstate == IF_OPER_UP || in netif_oper_up()
4310 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); in netif_oper_up()
4314 * netif_device_present - is device available or removed
4321 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
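A minimal sketch of a periodic link check built on the state tests above; example_phy_link_up() is hypothetical, and netif_carrier_on()/netif_carrier_off() are declared elsewhere in this header.

static void example_check_link(struct net_device *dev)
{
        bool link = example_phy_link_up(dev);

        if (link && !netif_carrier_ok(dev))
                netif_carrier_on(dev);
        else if (!link && netif_carrier_ok(dev))
                netif_carrier_off(dev);
}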
4376 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4377 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4378 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4379 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4380 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4381 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4382 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4383 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4384 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4385 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4386 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4387 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4388 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4389 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4390 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4400 return (1U << debug_value) - 1; in netif_msg_init()
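A minimal sketch of the conventional use of netif_msg_init() together with the netif_msg_* tests above; the 'debug' module parameter and the context structure holding msg_enable are hypothetical.

struct example_msg_ctx {
        u32 msg_enable;
};

static int debug = -1;                          /* -1 == use the default bitmap below */

static void example_msg_setup(struct example_msg_ctx *ctx)
{
        ctx->msg_enable = netif_msg_init(debug,
                                         NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);

        if (netif_msg_probe(ctx))
                pr_info("probe messages enabled\n");
}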
4405 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
4406 txq->xmit_lock_owner = cpu; in __netif_tx_lock()
4411 __acquire(&txq->_xmit_lock); in __netif_tx_acquire()
4417 __release(&txq->_xmit_lock); in __netif_tx_release()
4422 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
4423 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_lock_bh()
4428 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
4430 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_trylock()
4436 txq->xmit_lock_owner = -1; in __netif_tx_unlock()
4437 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
4442 txq->xmit_lock_owner = -1; in __netif_tx_unlock_bh()
4443 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
4448 if (txq->xmit_lock_owner != -1) in txq_trans_update()
4449 txq->trans_start = jiffies; in txq_trans_update()
4452 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4457 if (txq->trans_start != jiffies) in netif_trans_update()
4458 txq->trans_start = jiffies; in netif_trans_update()
4462 * netif_tx_lock - grab network device transmit lock
4472 spin_lock(&dev->tx_global_lock); in netif_tx_lock()
4474 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_lock()
4480 * the ->hard_start_xmit() handler and already in netif_tx_lock()
4484 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_lock()
4499 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_unlock()
4503 * queue is not stopped for another reason, we in netif_tx_unlock()
4506 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_unlock()
4509 spin_unlock(&dev->tx_global_lock); in netif_tx_unlock()
4519 if ((dev->features & NETIF_F_LLTX) == 0) { \
4527 (((dev->features & NETIF_F_LLTX) == 0) ? \
4532 if ((dev->features & NETIF_F_LLTX) == 0) { \
4546 spin_lock(&dev->tx_global_lock); in netif_tx_disable()
4547 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4554 spin_unlock(&dev->tx_global_lock); in netif_tx_disable()
4563 nest_level = dev->nested_level; in netif_addr_lock()
4565 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock()
4573 nest_level = dev->nested_level; in netif_addr_lock_bh()
4576 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock_bh()
4581 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4586 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4594 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4600 /* Support for loadable net-drivers */
4647 memcpy(dev->dev_addr, addr, len); in __dev_addr_set()
4652 __dev_addr_set(dev, addr, dev->addr_len); in dev_addr_set()
4659 memcpy(&dev->dev_addr[offset], addr, len); in dev_addr_mod()
4680 * __dev_uc_sync - Synchronize device's unicast list
4694 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4698 * __dev_uc_unsync - Remove synchronized addresses from device
4708 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4724 * __dev_mc_sync - Synchronize device's multicast list
4738 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4742 * __dev_mc_unsync - Remove synchronized addresses from device
4752 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
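A minimal sketch of the usual ndo_set_rx_mode() pairing for the sync/unsync helpers above; the per-address programming callbacks are hypothetical stubs.

static int example_sync_addr(struct net_device *dev, const unsigned char *addr)
{
        /* program one filter entry into hardware */
        return 0;
}

static int example_unsync_addr(struct net_device *dev, const unsigned char *addr)
{
        /* remove one filter entry from hardware */
        return 0;
}

static void example_set_rx_mode(struct net_device *dev)
{
        __dev_uc_sync(dev, example_sync_addr, example_unsync_addr);
        __dev_mc_sync(dev, example_sync_addr, example_unsync_addr);
}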
4811 if (list_empty(&dev->unlink_list)) in net_unlink_todo()
4812 list_add_tail(&dev->unlink_list, &net_unlink_list); in net_unlink_todo()
4818 for (iter = &(dev)->adj_list.upper, \
4839 for (iter = (dev)->adj_list.lower.next, \
4845 for (iter = &(dev)->adj_list.lower, \
4854 for (iter = (dev)->adj_list.lower.next, \
4988 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
4999 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
5036 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
5093 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && in skb_gso_ok()
5101 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && in netif_needs_gso()
5102 (skb->ip_summed != CHECKSUM_UNNECESSARY))); in netif_needs_gso()
5108 dev->gso_max_size = size; in netif_set_gso_max_size()
5115 skb->protocol = protocol; in skb_gso_error_unwind()
5116 skb->encapsulation = 1; in skb_gso_error_unwind()
5119 skb->mac_header = mac_offset; in skb_gso_error_unwind()
5120 skb->network_header = skb->mac_header + mac_len; in skb_gso_error_unwind()
5121 skb->mac_len = mac_len; in skb_gso_error_unwind()
5126 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
5131 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
5136 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
5141 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
5146 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
5151 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
5156 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
5161 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
5166 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
5171 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
5176 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
5181 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
5186 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
5196 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
5201 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
5216 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
5221 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
5226 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
5232 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
5239 return dev->priv_flags & IFF_MACSEC; in netif_reduces_vlan_mtu()
5250 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
5252 return dev->name; in netdev_name()
5257 return dev->reg_state == NETREG_UNREGISTERING; in netdev_unregistering()
5262 switch (dev->reg_state) { in netdev_reg_state()
5271 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); in netdev_reg_state()
5319 MODULE_ALIAS("netdev-" device)
5451 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)