Lines matching "individual", "port", "switching" (include/linux/netdevice.h)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
54 #include <net/dropreason-core.h>
100 * - qdisc return codes
101 * - driver transmit return codes
102 * - errno values
106 * the driver transmit return codes though - when qdiscs are used, the actual
113 /* qdisc ->enqueue() return codes. */
123 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
143 * - successful transmission (rc == NETDEV_TX_OK) in dev_xmit_complete()
144 * - error while transmitting (rc < 0) in dev_xmit_complete()
145 * - error while queueing to a different device (rc & NET_XMIT_MASK) in dev_xmit_complete()
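
/*
 * Illustrative sketch (not part of netdevice.h): how a caller might handle the
 * two return-code spaces described above.  dev_queue_xmit() hands back either
 * a negative errno or a positive NET_XMIT_* code; net_xmit_errno() folds the
 * latter into an errno while treating NET_XMIT_CN (congestion, the packet was
 * not necessarily lost) as success.  Assumes <linux/netdevice.h> is included.
 */
static int example_xmit_and_report(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	if (rc > 0)			/* NET_XMIT_DROP or NET_XMIT_CN */
		rc = net_xmit_errno(rc);
	return rc;			/* 0 or a negative errno */
}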
154 * Compute the worst-case header length according to the protocols
215 /* per-cpu stats, allocated on demand.
262 #define netdev_hw_addr_list_count(l) ((l)->count)
265 list_for_each_entry(ha, &(l)->list, list)
267 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
268 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
270 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
273 if ((_ha)->sync_cnt)
275 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
276 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
278 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
281 if ((_ha)->sync_cnt)
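
/*
 * Illustrative sketch (not part of netdevice.h): a driver's ndo_set_rx_mode()
 * walking the multicast list with the accessors above.  "foo_hw_add_mc_addr"
 * is a hypothetical hardware helper.
 */
static void foo_hw_add_mc_addr(struct net_device *dev, const unsigned char *addr);

static void foo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;

	netdev_for_each_mc_addr(ha, dev)
		foo_hw_add_mc_addr(dev, ha->addr);
}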
290 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
292 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
296 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
298 * dev->hard_header_len ? (dev->hard_header_len +
299 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
305 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
306 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
308 ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
309 & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
356 * to the per-CPU poll_list, and whoever clears that bit
380 /* control-path-only fields follow */
389 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
393 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing */
421 * enum rx_handler_result - Possible return values for rx_handlers.
425 * case skb->dev was changed by rx_handler.
433 * to register a second rx_handler will return -EBUSY.
446 * If the rx_handler changed skb->dev, to divert the skb to another
452 * are registered on exact device (ptype->dev == skb->dev).
454 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
475 return test_bit(NAPI_STATE_DISABLE, &n->state); in napi_disable_pending()
480 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); in napi_prefer_busy_poll()
486 * napi_schedule - schedule NAPI poll
499 * napi_schedule_irqoff - schedule NAPI poll
510 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
521 * napi_complete_done - NAPI processing complete
540 * napi_disable - prevent NAPI from scheduling
551 * napi_synchronize - wait until NAPI is not running
561 while (test_bit(NAPI_STATE_SCHED, &n->state)) in napi_synchronize()
568 * napi_if_scheduled_mark_missed - if napi is running, set the
579 val = READ_ONCE(n->state); in napi_if_scheduled_mark_missed()
588 } while (!try_cmpxchg(&n->state, &val, new)); in napi_if_scheduled_mark_missed()
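
/*
 * Illustrative sketch (not part of netdevice.h) of the scheduling contract
 * above: the interrupt handler masks device interrupts and calls
 * napi_schedule(); the poll callback processes at most "budget" packets and
 * re-enables interrupts only after napi_complete_done() accepts the
 * completion.  All "fooirq_*" names are hypothetical; assumes
 * <linux/interrupt.h>.
 */
struct fooirq_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static void fooirq_mask(struct fooirq_priv *priv);
static void fooirq_unmask(struct fooirq_priv *priv);
static int fooirq_rx_clean(struct fooirq_priv *priv, int budget);

static irqreturn_t fooirq_interrupt(int irq, void *data)
{
	struct fooirq_priv *priv = data;

	fooirq_mask(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int fooirq_poll(struct napi_struct *napi, int budget)
{
	struct fooirq_priv *priv = container_of(napi, struct fooirq_priv, napi);
	int work_done = fooirq_rx_clean(priv, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		fooirq_unmask(priv);
	return work_done;
}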
621 * read-mostly part
647 * write-mostly part
695 return q->numa_node; in netdev_queue_numa_node_read()
704 q->numa_node = node; in netdev_queue_numa_node_write()
746 * Each entry is a 32bit value. Upper part is the high-order bits
749 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
751 * meaning we use 32-6=26 bits for the hash.
769 unsigned int index = hash & table->mask; in rps_record_sock_flow()
778 if (READ_ONCE(table->ents[index]) != val) in rps_record_sock_flow()
779 WRITE_ONCE(table->ents[index], val); in rps_record_sock_flow()
789 /* XPS map type and offset of the xps map within net_device->xps_maps[]. */
809 - sizeof(struct xps_map)) / sizeof(u16))
815 * in nr_ids. This helps avoid accessing out-of-bounds memory.
819 * not crossing its upper bound, as the original dev->num_tc can be updated in
865 * physical item (port for example) used by a netdevice.
875 return a->id_len == b->id_len && in netdev_phys_item_id_same()
876 memcmp(a->id, b->id, a->id_len) == 0; in netdev_phys_item_id_same()
912 int port; member
1085 * corner cases, but the stack really does a non-trivial amount
1122 * Old-style ioctl entry point. This is used internally by the
1146 * for dev->watchdog ticks.
1153 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1156 * (which should normally be dev->stats) and return a pointer to
1159 * 3. Update dev->stats asynchronously and atomically, and define
1180 * SR-IOV management functions.
1192 * struct nlattr *port[]);
1214 * so the underlying device can perform whatever needed clean-ups to
1241 * FC-GS Fabric Device Management Information (FDMI) specification.
1246 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1272 * Adjusts the requested feature flags according to device-specific
1279 * Must return >0 or -errno if it changed dev->features itself.
1320 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1324 * network cables) or protocol-dependent mechanisms (eg
1329 * Called to get ID of physical port of this device. If driver does
1331 * multiple net devices on single physical port.
1335 * Called to get the parent ID of the physical port of this device.
1339 * Called by upper layer devices to accelerate switching or other
1351 * Called when a user wants to set a max-rate limitation of specific
1375 * no frames were transmitted and the core caller will free all frames.
1490 struct nlattr *port[]);
1651 * enum netdev_priv_flags - &struct net_device priv_flags
1666 * release skb->dst
1668 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1669 * @IFF_MACVLAN_PORT: device used as macvlan port
1670 * @IFF_BRIDGE_PORT: device used as bridge port
1671 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1674 * @IFF_TEAM_PORT: device used as team port
1778 * struct net_device - The DEVICE structure.
1781 * data with strictly "high-level" data, and it has to know about
1801 * @ptype_all: Device-specific packet handlers for all protocols
1802 * @ptype_specific: Device-specific, protocol-specific packet handlers
1806 * @hw_features: User-changeable features
1808 * @wanted_features: User-requested features
1905 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1929 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1969 * @ml_priv: Mid-layer private
1970 * @ml_priv_type: Mid-layer private type
1985 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
2008 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
2010 * @proto_down: protocol port state information can be sent to the
2012 * switch port.
2014 * @wol_enabled: Wake-on-LAN is enabled
2018 * @net_notifier_list: List of per-net netdev notifier block
2030 * dev->addr_list_lock.
2047 * @devlink_port: Pointer to related devlink port structure.
2088 /* Read-mostly cache-line for fast-path access */
2098 /* Note : dev->mtu is often read without holding a lock.
2192 /* Protocol-specific pointers */
2239 * and shinfo->gso_segs is a 16bit field.
2286 /* These may be needed for future network-power-down code. */
2327 /* mid-layer private */
2356 * and shinfo->gso_segs is a 16bit field.
2412 * Driver should use this to assign devlink port instance to a netdevice
2416 #define SET_NETDEV_DEVLINK_PORT(dev, port) \ argument
2418 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
2419 ((dev)->devlink_port = (port)); \
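
/*
 * Illustrative sketch (not part of netdevice.h): SET_NETDEV_DEVLINK_PORT()
 * must run while the netdev is still unregistered, typically right before
 * register_netdev().  The helper below is hypothetical.
 */
static int example_register_with_devlink_port(struct net_device *dev,
					      struct devlink_port *dl_port)
{
	SET_NETDEV_DEVLINK_PORT(dev, dl_port);
	return register_netdev(dev);
}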
2424 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2434 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2440 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2441 return -EINVAL; in netdev_set_prio_tc_map()
2443 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2455 return dev->num_tc; in netdev_get_num_tc()
2482 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2489 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); in netdev_get_tx_queue()
2490 return &dev->_tx[index]; in netdev_get_tx_queue()
2507 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2508 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2518 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2519 lockdep_set_class(&(dev)->addr_list_lock, \
2521 for (i = 0; i < (dev)->num_tx_queues; i++) \
2522 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2537 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2542 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2543 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2549 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2555 if (dev->ml_priv_type != type) in netdev_get_ml_priv()
2558 return dev->ml_priv; in netdev_get_ml_priv()
2565 WARN(dev->ml_priv_type && dev->ml_priv_type != type, in netdev_set_ml_priv()
2567 dev->ml_priv_type, type); in netdev_set_ml_priv()
2568 WARN(!dev->ml_priv_type && dev->ml_priv, in netdev_set_ml_priv()
2571 dev->ml_priv = ml_priv; in netdev_set_ml_priv()
2572 dev->ml_priv_type = type; in netdev_set_ml_priv()
2581 return read_pnet(&dev->nd_net); in dev_net()
2587 write_pnet(&dev->nd_net, net); in dev_net_set()
2591 * netdev_priv - access network device private data
2604 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2607 * fine-grained identification of different network device types. For
2610 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
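
/*
 * Illustrative sketch (not part of netdevice.h): allocating an Ethernet device
 * with driver-private data and reaching it again via netdev_priv().
 * "myeth_priv" is hypothetical; assumes <linux/etherdevice.h>.
 */
struct myeth_priv {
	int link_up;
};

static struct net_device *myeth_alloc(struct device *parent)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct myeth_priv));
	struct myeth_priv *priv;

	if (!dev)
		return NULL;
	priv = netdev_priv(dev);
	priv->link_up = 0;
	SET_NETDEV_DEV(dev, parent);	/* make "parent" the sysfs parent */
	return dev;
}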
2621 * netif_napi_add() - initialize a NAPI context
2627 * *any* of the other NAPI-related functions.
2642 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); in netif_napi_add_tx_weight()
2647 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2664 * __netif_napi_del - remove a NAPI context
2674 * netif_napi_del - remove a NAPI context
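
/*
 * Illustrative sketch (not part of netdevice.h): a NAPI context is registered
 * with netif_napi_add() before the device is brought up and removed with
 * netif_napi_del() once no poll can still be running.  "bar_poll" would be a
 * poll callback like the one sketched earlier; the priv layout is hypothetical.
 */
struct bar_priv {
	struct napi_struct napi;
};

static int bar_poll(struct napi_struct *napi, int budget);

static void bar_napi_setup(struct net_device *dev, struct bar_priv *priv)
{
	netif_napi_add(dev, &priv->napi, bar_poll);
}

static void bar_napi_teardown(struct bar_priv *priv)
{
	netif_napi_del(&priv->napi);
}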
2719 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2738 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_rx_add()
2740 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_rx_add()
2741 u64_stats_add(&tstats->rx_bytes, len); in dev_sw_netstats_rx_add()
2742 u64_stats_inc(&tstats->rx_packets); in dev_sw_netstats_rx_add()
2743 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_rx_add()
2750 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_tx_add()
2752 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_tx_add()
2753 u64_stats_add(&tstats->tx_bytes, len); in dev_sw_netstats_tx_add()
2754 u64_stats_add(&tstats->tx_packets, packets); in dev_sw_netstats_tx_add()
2755 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_tx_add()
2760 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); in dev_lstats_add()
2762 u64_stats_update_begin(&lstats->syncp); in dev_lstats_add()
2763 u64_stats_add(&lstats->bytes, len); in dev_lstats_add()
2764 u64_stats_inc(&lstats->packets); in dev_lstats_add()
2765 u64_stats_update_end(&lstats->syncp); in dev_lstats_add()
2776 u64_stats_init(&stat->syncp); \
2793 u64_stats_init(&stat->syncp); \
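
/*
 * Illustrative sketch (not part of netdevice.h): a software device updating
 * the per-CPU counters above from its transmit path.  It assumes dev->tstats
 * has already been allocated (e.g. via netdev_alloc_pcpu_stats());
 * "vdev_forward" is hypothetical.
 */
static int vdev_forward(struct sk_buff *skb);

static netdev_tx_t vdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	if (likely(vdev_forward(skb) == 0))
		dev_sw_netstats_tx_add(dev, 1, len);
	else
		DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}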
2840 - we can use this e.g. to kick TCP sessions
2968 info->dev = dev; in netdev_notifier_info_init()
2969 info->extack = NULL; in netdev_notifier_info_init()
2975 return info->dev; in netdev_notifier_info_to_dev()
2981 return info->extack; in netdev_notifier_info_to_extack()
2991 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2993 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2995 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2997 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2999 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
3001 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
3004 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
3011 xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
3019 lh = dev->dev_list.next; in next_net_device()
3020 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device()
3029 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
3030 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device_rcu()
3035 return list_empty(&net->dev_base_head) ? NULL : in first_net_device()
3036 net_device_entry(net->dev_base_head.next); in first_net_device()
3041 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); in first_net_device_rcu()
3043 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in first_net_device_rcu()
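
/*
 * Illustrative sketch (not part of netdevice.h): walking a namespace's device
 * list with the iterators above.  The RCU variant only needs rcu_read_lock();
 * the non-RCU variants are typically walked under the RTNL.
 */
static int example_count_running(struct net *net)
{
	struct net_device *dev;
	int count = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (netif_running(dev))
			count++;
	rcu_read_unlock();
	return count;
}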
3133 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
3136 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
3142 const struct net_device *dev = skb->dev; in dev_parse_header()
3144 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
3146 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
3151 const struct net_device *dev = skb->dev; in dev_parse_header_protocol()
3153 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
3155 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
3162 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
3164 if (len < dev->min_header_len) in dev_validate_header()
3168 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
3172 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
3173 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
3180 return dev->header_ops && dev->header_ops->create; in dev_has_header()
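
/*
 * Illustrative sketch (not part of netdevice.h): filling in the link-layer
 * header on an skb that already has enough headroom (see the
 * LL_RESERVED_SPACE sketch earlier).  Passing a NULL source address lets the
 * device use its own dev_addr.
 */
static int example_push_ll_header(struct sk_buff *skb, struct net_device *dev,
				  const void *daddr, __be16 proto)
{
	if (dev_hard_header(skb, dev, ntohs(proto), daddr, NULL, skb->len) < 0)
		return -EINVAL;
	return 0;
}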
3184 * Incoming packets are placed on per-CPU queues
3245 sd->input_queue_head++; in input_queue_head_incr()
3253 *qtail = ++sd->input_queue_tail; in input_queue_tail_incr_save()
3288 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3294 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_start_queue()
3298 * netif_start_queue - allow transmit
3312 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3321 * netif_wake_queue - restart transmit
3336 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3345 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_stop_queue()
3349 * netif_stop_queue - stop the transmit queue
3364 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_queue_stopped()
3368 * netif_queue_stopped - test if the transmit queue is flow-blocked
3380 return dev_queue->state & QUEUE_STATE_ANY_XOFF; in netif_xmit_stopped()
3386 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; in netif_xmit_frozen_or_stopped()
3392 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; in netif_xmit_frozen_or_drv_stopped()
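
/*
 * Illustrative sketch (not part of netdevice.h) of the flow-control pattern
 * behind the helpers above: a single-queue driver stops the queue when its TX
 * ring cannot take another worst-case frame, and the completion handler wakes
 * it once descriptors have been reclaimed.  "ring_priv" and "ring_free_slots"
 * are hypothetical.
 */
struct ring_priv;
static unsigned int ring_free_slots(struct ring_priv *priv);

static void example_after_enqueue(struct net_device *dev, struct ring_priv *priv)
{
	if (ring_free_slots(priv) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);
}

static void example_after_tx_clean(struct net_device *dev, struct ring_priv *priv)
{
	if (netif_queue_stopped(dev) &&
	    ring_free_slots(priv) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);
}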
3396 * netdev_queue_set_dql_min_limit - set dql minimum limit
3409 dev_queue->dql.min_limit = min_limit; in netdev_queue_set_dql_min_limit()
3414 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3423 prefetchw(&dev_queue->dql.num_queued); in netdev_txq_bql_enqueue_prefetchw()
3428 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3437 prefetchw(&dev_queue->dql.limit); in netdev_txq_bql_complete_prefetchw()
3442 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
3455 dql_queued(&dev_queue->dql, bytes); in netdev_tx_sent_queue()
3457 if (likely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3460 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3470 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3471 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3487 dql_queued(&dev_queue->dql, bytes); in __netdev_tx_sent_queue()
3496 * netdev_sent_queue - report the number of bytes queued to hardware
3519 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
3525 * individual packet), so that BQL can adjust its limits appropriately.
3534 dql_completed(&dev_queue->dql, bytes); in netdev_tx_completed_queue()
3543 if (unlikely(dql_avail(&dev_queue->dql) < 0)) in netdev_tx_completed_queue()
3546 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) in netdev_tx_completed_queue()
3552 * netdev_completed_queue - report bytes and packets completed by device
3570 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); in netdev_tx_reset_queue()
3571 dql_reset(&q->dql); in netdev_tx_reset_queue()
3576 * netdev_reset_queue - reset the packets and bytes count of a network device
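
/*
 * Illustrative sketch (not part of netdevice.h): the two halves of BQL
 * accounting with the helpers above.  Bytes are reported when frames are
 * posted to the ring and again from the completion handler, letting dynamic
 * queue limits bound how much data sits in the hardware queue.
 */
static void example_bql_post(struct net_device *dev, struct sk_buff *skb)
{
	/* after the skb has been handed to the hardware ring */
	netdev_sent_queue(dev, skb->len);
}

static void example_bql_complete(struct net_device *dev,
				 unsigned int pkts, unsigned int bytes)
{
	/* from the TX completion path, covering all reclaimed frames */
	netdev_completed_queue(dev, pkts, bytes);
}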
3588 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3597 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3599 dev->name, queue_index, in netdev_cap_txqueue()
3600 dev->real_num_tx_queues); in netdev_cap_txqueue()
3608 * netif_running - test if up
3615 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3626 * netif_start_subqueue - allow sending packets on subqueue
3630 * Start individual transmit queue of a device with multiple transmit queues.
3640 * netif_stop_subqueue - stop sending packets on subqueue
3644 * Stop individual transmit queue of a device with multiple transmit queues.
3653 * __netif_subqueue_stopped - test status of subqueue
3657 * Check individual transmit queue of a device with multiple transmit queues.
3668 * netif_subqueue_stopped - test status of subqueue
3672 * Check individual transmit queue of a device with multiple transmit queues.
3681 * netif_wake_subqueue - allow sending packets on subqueue
3685 * Resume individual transmit queue of a device with multiple transmit queues.
3701 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3717 * netif_attr_test_online - Test for online CPU/Rx queue
3737 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3747 /* -1 is a legal arg here. */ in netif_attrmask_next()
3748 if (n != -1) in netif_attrmask_next()
3758 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3770 /* -1 is a legal arg here. */ in netif_attrmask_next_and()
3771 if (n != -1) in netif_attrmask_next_and()
3800 * netif_is_multiqueue - test if device has multiple transmit queues
3807 return dev->num_tx_queues > 1; in netif_is_multiqueue()
3818 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
3890 kfree_skb(napi->skb); in napi_free_frags()
3891 napi->skb = NULL; in napi_free_frags()
3964 if (!(dev->flags & IFF_UP)) in __is_skb_forwardable()
3970 len = dev->mtu + dev->hard_header_len + vlan_hdr_len; in __is_skb_forwardable()
3971 if (skb->len <= len) in __is_skb_forwardable()
3988 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); in dev_core_stats()
4003 this_cpu_inc(p->FIELD); \
4021 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); in DEV_CORE_STATS_INC()
4022 skb->priority = 0; in DEV_CORE_STATS_INC()
4033 this_cpu_dec(*dev->pcpu_refcnt); in __dev_put()
4035 refcount_dec(&dev->dev_refcnt); in __dev_put()
4044 this_cpu_inc(*dev->pcpu_refcnt); in __dev_hold()
4046 refcount_inc(&dev->dev_refcnt); in __dev_hold()
4056 ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); in __netdev_tracker_alloc()
4067 refcount_dec(&dev->refcnt_tracker.no_tracker); in netdev_tracker_alloc()
4076 ref_tracker_free(&dev->refcnt_tracker, tracker); in netdev_tracker_free()
4099 * dev_hold - get reference to device
4111 * dev_put - release reference to device
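
/*
 * Illustrative sketch (not part of netdevice.h): taking a reference so the
 * device cannot go away once the RCU read-side section ends; the caller is
 * responsible for the matching dev_put().
 */
static struct net_device *example_get_dev(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}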
4148 * netif_carrier_ok - test if carrier present
4155 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
4167 * netif_dormant_on - mark device as dormant.
4174 * in a "pending" state, waiting for some external event. For "on-
4180 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
4185 * netif_dormant_off - set device as not dormant.
4192 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
4197 * netif_dormant - test if device is dormant
4204 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
4209 * netif_testing_on - mark device as under test.
4220 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_on()
4225 * netif_testing_off - set device as not under test.
4232 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_off()
4237 * netif_testing - test if device is under test
4244 return test_bit(__LINK_STATE_TESTING, &dev->state); in netif_testing()
4249 * netif_oper_up - test if device is operational
4256 return (dev->operstate == IF_OPER_UP || in netif_oper_up()
4257 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); in netif_oper_up()
4261 * netif_device_present - is device available or removed
4268 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
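
/*
 * Illustrative sketch (not part of netdevice.h): a driver's link-status
 * handler driving the carrier state tested by netif_carrier_ok() above.
 * "phy_link_is_up" is a hypothetical hardware/PHY query.
 */
static bool phy_link_is_up(struct net_device *dev);

static void example_link_change(struct net_device *dev)
{
	if (phy_link_is_up(dev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
}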
4323 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4324 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4325 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4326 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4327 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4328 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4329 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4330 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4331 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4332 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4333 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4334 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4335 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4336 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4337 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4347 return (1U << debug_value) - 1; in netif_msg_init()
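
/*
 * Illustrative sketch (not part of netdevice.h): deriving msg_enable from a
 * module parameter with netif_msg_init() and gating output on the per-topic
 * netif_msg_*() tests above.  The "msg_priv" layout and "debug" parameter are
 * hypothetical.
 */
struct msg_priv {
	u32 msg_enable;
};

static int debug = -1;			/* -1: use the driver default below */

static void example_msg_init(struct msg_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
}

static void example_report_link(struct msg_priv *priv, struct net_device *dev)
{
	if (netif_msg_link(priv))
		netdev_info(dev, "link up\n");
}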
4352 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
4354 WRITE_ONCE(txq->xmit_lock_owner, cpu); in __netif_tx_lock()
4359 __acquire(&txq->_xmit_lock); in __netif_tx_acquire()
4365 __release(&txq->_xmit_lock); in __netif_tx_release()
4370 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
4372 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_lock_bh()
4377 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
4381 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); in __netif_tx_trylock()
4389 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock()
4390 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
4396 WRITE_ONCE(txq->xmit_lock_owner, -1); in __netif_tx_unlock_bh()
4397 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
4401 * txq->trans_start can be read locklessly from dev_watchdog()
4405 if (txq->xmit_lock_owner != -1) in txq_trans_update()
4406 WRITE_ONCE(txq->trans_start, jiffies); in txq_trans_update()
4413 if (READ_ONCE(txq->trans_start) != now) in txq_trans_cond_update()
4414 WRITE_ONCE(txq->trans_start, now); in txq_trans_cond_update()
4417 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4426 * netif_tx_lock - grab network device transmit lock
4448 if ((dev->features & NETIF_F_LLTX) == 0) { \
4456 (((dev->features & NETIF_F_LLTX) == 0) ? \
4461 if ((dev->features & NETIF_F_LLTX) == 0) { \
4475 spin_lock(&dev->tx_global_lock); in netif_tx_disable()
4476 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4483 spin_unlock(&dev->tx_global_lock); in netif_tx_disable()
4492 nest_level = dev->nested_level; in netif_addr_lock()
4494 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock()
4502 nest_level = dev->nested_level; in netif_addr_lock_bh()
4505 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock_bh()
4510 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4515 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4523 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4529 /* Support for loadable net-drivers */
4584 __dev_addr_set(dev, addr, dev->addr_len); in dev_addr_set()
4603 * __dev_uc_sync - Synchronize device's unicast list
4617 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4621 * __dev_uc_unsync - Remove synchronized addresses from device
4631 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4647 * __dev_mc_sync - Synchronize device's multicast list
4661 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4665 * __dev_mc_unsync - Remove synchronized addresses from device
4675 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
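
/*
 * Illustrative sketch (not part of netdevice.h): the usual ndo_set_rx_mode()
 * pairing of __dev_uc_sync() with per-address callbacks that program the
 * hardware filter.  The "nicx_hw_*" helpers are hypothetical.
 */
static int nicx_hw_add_filter(struct net_device *dev, const unsigned char *addr);
static int nicx_hw_del_filter(struct net_device *dev, const unsigned char *addr);

static int nicx_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	return nicx_hw_add_filter(dev, addr);
}

static int nicx_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	return nicx_hw_del_filter(dev, addr);
}

static void nicx_set_rx_mode(struct net_device *dev)
{
	__dev_uc_sync(dev, nicx_uc_sync, nicx_uc_unsync);
}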
4723 for (iter = &(dev)->adj_list.upper, \
4744 for (iter = (dev)->adj_list.lower.next, \
4750 for (iter = &(dev)->adj_list.lower, \
4759 for (iter = (dev)->adj_list.lower.next, \
4876 const struct net_device_ops *ops = dev->netdev_ops; in netdev_get_tstamp()
4878 if (ops->ndo_get_tstamp) in netdev_get_tstamp()
4879 return ops->ndo_get_tstamp(dev, hwtstamps, cycles); in netdev_get_tstamp()
4881 return hwtstamps->hwtstamp; in netdev_get_tstamp()
4889 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
4900 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
4935 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
4993 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && in skb_gso_ok()
5001 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && in netif_needs_gso()
5002 (skb->ip_summed != CHECKSUM_UNNECESSARY))); in netif_needs_gso()
5012 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
5017 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
5022 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
5027 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
5032 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
5037 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
5042 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
5047 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
5052 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
5059 return dev->ifindex; in dev_sdif()
5066 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
5071 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
5076 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
5081 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
5096 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
5101 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
5116 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
5121 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
5126 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
5132 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
5150 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
5152 return dev->name; in netdev_name()
5157 switch (dev->reg_state) { in netdev_reg_state()
5166 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); in netdev_reg_state()
5171 MODULE_ALIAS("netdev-" device)
5206 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
5213 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5214 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5216 atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
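
/*
 * Illustrative sketch (not part of netdevice.h): bumping one of the shared
 * atomic counters above on a slow drop path where no per-CPU or per-queue
 * counter exists.
 */
static void example_drop(struct net_device *dev, struct sk_buff *skb)
{
	DEV_STATS_INC(dev, rx_dropped);
	kfree_skb(skb);
}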