Lines matching full:block (identifier search, net/sched/cls_api.c)

60 struct tcf_block *block = chain->block; in tcf_proto_signal_destroying() local
62 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
63 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, in tcf_proto_signal_destroying()
65 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
84 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, in tcf_proto_exists_destroying()
99 struct tcf_block *block = chain->block; in tcf_proto_signal_destroyed() local
101 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
104 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
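
The three functions above implement the proto-destroy notification hashtable: adds and removes happen under block->proto_destroy_lock, while lookups walk the bucket under RCU. A minimal sketch of the same pattern, assuming a hypothetical destroy_obj in place of struct tcf_proto (the real lookup also compares chain index and prio before declaring a match):

	#include <linux/hashtable.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	/* Hypothetical stand-in for struct tcf_proto and its hash key. */
	struct destroy_obj {
		u32 key;
		struct hlist_node node;
	};

	static DEFINE_MUTEX(destroy_lock);	/* plays proto_destroy_lock */
	static DEFINE_HASHTABLE(destroy_ht, 7);	/* plays proto_destroy_ht */

	static void signal_destroying(struct destroy_obj *obj)
	{
		mutex_lock(&destroy_lock);
		hash_add_rcu(destroy_ht, &obj->node, obj->key);
		mutex_unlock(&destroy_lock);
	}

	static bool exists_destroying(u32 key)
	{
		struct destroy_obj *iter;
		bool found = false;

		rcu_read_lock();
		hash_for_each_possible_rcu(destroy_ht, iter, node, key) {
			if (iter->key == key) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

	static void signal_destroyed(struct destroy_obj *obj)
	{
		mutex_lock(&destroy_lock);
		hash_del_rcu(&obj->node);
		mutex_unlock(&destroy_lock);
	}
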
359 #define ASSERT_BLOCK_LOCKED(block) \ argument
360 lockdep_assert_held(&(block)->lock)
368 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, in tcf_chain_create() argument
373 ASSERT_BLOCK_LOCKED(block); in tcf_chain_create()
378 list_add_tail(&chain->list, &block->chain_list); in tcf_chain_create()
380 chain->block = block; in tcf_chain_create()
384 block->chain0.chain = chain; in tcf_chain_create()
399 struct tcf_block *block = chain->block; in tcf_chain0_head_change() local
404 mutex_lock(&block->lock); in tcf_chain0_head_change()
405 list_for_each_entry(item, &block->chain0.filter_chain_list, list) in tcf_chain0_head_change()
407 mutex_unlock(&block->lock); in tcf_chain0_head_change()
410 /* Returns true if block can be safely freed. */
414 struct tcf_block *block = chain->block; in tcf_chain_detach() local
416 ASSERT_BLOCK_LOCKED(block); in tcf_chain_detach()
420 block->chain0.chain = NULL; in tcf_chain_detach()
422 if (list_empty(&block->chain_list) && in tcf_chain_detach()
423 refcount_read(&block->refcnt) == 0) in tcf_chain_detach()
429 static void tcf_block_destroy(struct tcf_block *block) in tcf_block_destroy() argument
431 mutex_destroy(&block->lock); in tcf_block_destroy()
432 mutex_destroy(&block->proto_destroy_lock); in tcf_block_destroy()
433 kfree_rcu(block, rcu); in tcf_block_destroy()
438 struct tcf_block *block = chain->block; in tcf_chain_destroy() local
443 tcf_block_destroy(block); in tcf_chain_destroy()
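
Taken together, these hits encode the block free protocol: tcf_chain_detach() returns true only when it removed the last chain from a block whose refcount has already dropped to zero, and tcf_chain_destroy() then calls tcf_block_destroy(). A condensed restatement of that hand-off (a sketch, not a verbatim copy; fields as in cls_api.c, caller holds block->lock):

	/* Condensed sketch of the detach path above: detaching the last
	 * chain from a block whose refcount is already zero makes the
	 * block itself eligible for freeing by the caller.
	 */
	static bool chain_detach_sketch(struct tcf_chain *chain)
	{
		struct tcf_block *block = chain->block;

		lockdep_assert_held(&block->lock);

		list_del(&chain->list);
		if (!chain->index)
			block->chain0.chain = NULL;

		/* true => caller must tcf_block_destroy() after unlocking */
		return list_empty(&block->chain_list) &&
		       refcount_read(&block->refcnt) == 0;
	}
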
448 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_hold()
455 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_held_by_acts_only()
463 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, in tcf_chain_lookup() argument
468 ASSERT_BLOCK_LOCKED(block); in tcf_chain_lookup()
470 list_for_each_entry(chain, &block->chain_list, list) { in tcf_chain_lookup()
480 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, in __tcf_chain_get() argument
487 mutex_lock(&block->lock); in __tcf_chain_get()
488 chain = tcf_chain_lookup(block, chain_index); in __tcf_chain_get()
494 chain = tcf_chain_create(block, chain_index); in __tcf_chain_get()
502 mutex_unlock(&block->lock); in __tcf_chain_get()
516 mutex_unlock(&block->lock); in __tcf_chain_get()
520 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, in tcf_chain_get() argument
523 return __tcf_chain_get(block, chain_index, create, false); in tcf_chain_get()
526 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) in tcf_chain_get_by_act() argument
528 return __tcf_chain_get(block, chain_index, true, true); in tcf_chain_get_by_act()
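
tcf_chain_get_by_act() is the variant exported to actions (create = true, by_act = true), so a goto-chain action can create and hold its destination chain. A hedged sketch of the expected pairing with the exported release helper tcf_chain_put_by_act(); init_goto_chain() and cleanup_goto_chain() are hypothetical callers:

	static struct tcf_chain *init_goto_chain(struct tcf_block *block, u32 dest)
	{
		struct tcf_chain *chain;

		/* Takes (or creates) the chain and marks it as held by an
		 * action, keeping it alive independently of its filters.
		 */
		chain = tcf_chain_get_by_act(block, dest);
		if (!chain)
			return ERR_PTR(-ENOMEM);
		return chain;
	}

	static void cleanup_goto_chain(struct tcf_chain *chain)
	{
		if (chain)
			tcf_chain_put_by_act(chain); /* drops the action hold */
	}
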
536 struct tcf_block *block, struct sk_buff *oskb,
542 struct tcf_block *block = chain->block; in __tcf_chain_put() local
548 mutex_lock(&block->lock); in __tcf_chain_put()
551 mutex_unlock(&block->lock); in __tcf_chain_put()
560 /* tc_chain_notify_delete can't be called while holding block lock. in __tcf_chain_put()
561 * However, when block is unlocked chain can be changed concurrently, so in __tcf_chain_put()
571 block, NULL, 0, 0, false); in __tcf_chain_put()
578 mutex_unlock(&block->lock); in __tcf_chain_put()
626 static int tcf_block_setup(struct tcf_block *block,
630 struct tcf_block *block, in tc_indr_block_ing_cmd() argument
639 .block_shared = tcf_block_non_null_shared(block), in tc_indr_block_ing_cmd()
643 if (!block) in tc_indr_block_ing_cmd()
646 bo.block = &block->flow_block; in tc_indr_block_ing_cmd()
648 down_write(&block->cb_lock); in tc_indr_block_ing_cmd()
651 tcf_block_setup(block, &bo); in tc_indr_block_ing_cmd()
652 up_write(&block->cb_lock); in tc_indr_block_ing_cmd()
682 struct tcf_block *block = tc_dev_ingress_block(dev); in tc_indr_block_get_and_ing_cmd() local
684 tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command); in tc_indr_block_get_and_ing_cmd()
687 static void tc_indr_block_call(struct tcf_block *block, in tc_indr_block_call() argument
697 .block = &block->flow_block, in tc_indr_block_call()
698 .block_shared = tcf_block_shared(block), in tc_indr_block_call()
704 tcf_block_setup(block, &bo); in tc_indr_block_call()
707 static bool tcf_block_offload_in_use(struct tcf_block *block) in tcf_block_offload_in_use() argument
709 return atomic_read(&block->offloadcnt); in tcf_block_offload_in_use()
712 static int tcf_block_offload_cmd(struct tcf_block *block, in tcf_block_offload_cmd() argument
724 bo.block = &block->flow_block; in tcf_block_offload_cmd()
725 bo.block_shared = tcf_block_shared(block); in tcf_block_offload_cmd()
733 return tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
736 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_bind() argument
743 down_write(&block->cb_lock); in tcf_block_offload_bind()
747 /* If tc offload feature is disabled and the block we try to bind in tcf_block_offload_bind()
750 if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) { in tcf_block_offload_bind()
751 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); in tcf_block_offload_bind()
756 err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
762 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
763 up_write(&block->cb_lock); in tcf_block_offload_bind()
767 if (tcf_block_offload_in_use(block)) { in tcf_block_offload_bind()
772 block->nooffloaddevcnt++; in tcf_block_offload_bind()
773 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
775 up_write(&block->cb_lock); in tcf_block_offload_bind()
779 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_unbind() argument
785 down_write(&block->cb_lock); in tcf_block_offload_unbind()
786 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); in tcf_block_offload_unbind()
790 err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); in tcf_block_offload_unbind()
793 up_write(&block->cb_lock); in tcf_block_offload_unbind()
797 WARN_ON(block->nooffloaddevcnt-- == 0); in tcf_block_offload_unbind()
798 up_write(&block->cb_lock); in tcf_block_offload_unbind()
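
tcf_block_offload_bind()/tcf_block_offload_unbind() deliver FLOW_BLOCK_BIND/FLOW_BLOCK_UNBIND through tcf_block_offload_cmd(), which lands in the driver's ndo_setup_tc(TC_SETUP_BLOCK, ...). On the driver side this commonly reduces to flow_block_cb_setup_simple(); a sketch with hypothetical foo_* names (foo_configure_flower() stands in for real rule programming):

	struct foo_priv { /* hypothetical driver state */ };

	static LIST_HEAD(foo_block_cb_list);

	static int foo_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
	{
		switch (type) {
		case TC_SETUP_CLSFLOWER:
			/* hypothetical driver helper */
			return foo_configure_flower(cb_priv, type_data);
		default:
			return -EOPNOTSUPP;
		}
	}

	static int foo_ndo_setup_tc(struct net_device *dev,
				    enum tc_setup_type type, void *type_data)
	{
		struct foo_priv *priv = netdev_priv(dev);

		switch (type) {
		case TC_SETUP_BLOCK:
			/* Registers/unregisters the per-block callback for
			 * both the BIND and UNBIND commands.
			 */
			return flow_block_cb_setup_simple(type_data,
							  &foo_block_cb_list,
							  foo_setup_tc_block_cb,
							  priv, priv, true);
		default:
			return -EOPNOTSUPP;
		}
	}
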
802 tcf_chain0_head_change_cb_add(struct tcf_block *block, in tcf_chain0_head_change_cb_add() argument
817 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
818 chain0 = block->chain0.chain; in tcf_chain0_head_change_cb_add()
822 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
823 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
834 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
835 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
836 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
846 tcf_chain0_head_change_cb_del(struct tcf_block *block, in tcf_chain0_head_change_cb_del() argument
851 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_del()
852 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { in tcf_chain0_head_change_cb_del()
856 if (block->chain0.chain) in tcf_chain0_head_change_cb_del()
859 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
865 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
876 static int tcf_block_insert(struct tcf_block *block, struct net *net, in tcf_block_insert() argument
884 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, in tcf_block_insert()
892 static void tcf_block_remove(struct tcf_block *block, struct net *net) in tcf_block_remove() argument
897 idr_remove(&tn->idr, block->index); in tcf_block_remove()
905 struct tcf_block *block; in tcf_block_create() local
907 block = kzalloc(sizeof(*block), GFP_KERNEL); in tcf_block_create()
908 if (!block) { in tcf_block_create()
909 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); in tcf_block_create()
912 mutex_init(&block->lock); in tcf_block_create()
913 mutex_init(&block->proto_destroy_lock); in tcf_block_create()
914 init_rwsem(&block->cb_lock); in tcf_block_create()
915 flow_block_init(&block->flow_block); in tcf_block_create()
916 INIT_LIST_HEAD(&block->chain_list); in tcf_block_create()
917 INIT_LIST_HEAD(&block->owner_list); in tcf_block_create()
918 INIT_LIST_HEAD(&block->chain0.filter_chain_list); in tcf_block_create()
920 refcount_set(&block->refcnt, 1); in tcf_block_create()
921 block->net = net; in tcf_block_create()
922 block->index = block_index; in tcf_block_create()
925 if (!tcf_block_shared(block)) in tcf_block_create()
926 block->q = q; in tcf_block_create()
927 return block; in tcf_block_create()
939 struct tcf_block *block; in tcf_block_refcnt_get() local
942 block = tcf_block_lookup(net, block_index); in tcf_block_refcnt_get()
943 if (block && !refcount_inc_not_zero(&block->refcnt)) in tcf_block_refcnt_get()
944 block = NULL; in tcf_block_refcnt_get()
947 return block; in tcf_block_refcnt_get()
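
tcf_block_refcnt_get() pairs the idr lookup with refcount_inc_not_zero() so a lookup racing against the final put fails cleanly instead of resurrecting a dying block. Reconstructed sketch; the rcu_read_lock()/rcu_read_unlock() lines don't contain the search term and are therefore absent from the hits above:

	static struct tcf_block *block_refcnt_get_sketch(struct net *net,
							 u32 index)
	{
		struct tcf_block *block;

		rcu_read_lock();
		block = tcf_block_lookup(net, index);
		if (block && !refcount_inc_not_zero(&block->refcnt))
			block = NULL;	/* lost the race with the final put */
		rcu_read_unlock();

		return block;
	}
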
951 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in __tcf_get_next_chain() argument
953 mutex_lock(&block->lock); in __tcf_get_next_chain()
955 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
958 chain = list_first_entry_or_null(&block->chain_list, in __tcf_get_next_chain()
963 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
968 mutex_unlock(&block->lock); in __tcf_get_next_chain()
974 * block. It properly obtains block->lock and takes reference to chain before
983 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in tcf_get_next_chain() argument
985 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); in tcf_get_next_chain()
1048 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) in tcf_block_flush_all_chains() argument
1052 /* Last reference to block. At this point chains cannot be added or in tcf_block_flush_all_chains()
1055 for (chain = tcf_get_next_chain(block, NULL); in tcf_block_flush_all_chains()
1057 chain = tcf_get_next_chain(block, chain)) { in tcf_block_flush_all_chains()
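
Per the comment fragment at 974, tcf_get_next_chain() obtains block->lock and takes a chain reference internally, releasing the reference on the previous chain when fetching the next one, so the flush loop above needs no explicit locking. Sketch of the canonical walk; some_stop_condition() is a hypothetical predicate, and on an early break dropping the reference is the caller's job (tcf_chain_put() is file-local to cls_api.c):

	static void walk_chains_sketch(struct tcf_block *block)
	{
		struct tcf_chain *chain;

		for (chain = tcf_get_next_chain(block, NULL);
		     chain;
		     chain = tcf_get_next_chain(block, chain)) {
			if (some_stop_condition(chain)) {
				tcf_chain_put(chain); /* early exit: drop ref */
				break;
			}
			/* ... per-chain work, tolerant of concurrent
			 * chain insertion/deletion ...
			 */
		}
	}
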
1167 struct tcf_block *block; in __tcf_block_find() local
1170 block = tcf_block_refcnt_get(net, block_index); in __tcf_block_find()
1171 if (!block) { in __tcf_block_find()
1172 NL_SET_ERR_MSG(extack, "Block of given index was not found"); in __tcf_block_find()
1178 block = cops->tcf_block(q, cl, extack); in __tcf_block_find()
1179 if (!block) in __tcf_block_find()
1182 if (tcf_block_shared(block)) { in __tcf_block_find()
1183 …NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the … in __tcf_block_find()
1187 /* Always take reference to block in order to support execution in __tcf_block_find()
1189 * must release block when it is finished using it. 'if' block in __tcf_block_find()
1190 * of this conditional obtain reference to block by calling in __tcf_block_find()
1193 refcount_inc(&block->refcnt); in __tcf_block_find()
1196 return block; in __tcf_block_find()
1199 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, in __tcf_block_put() argument
1202 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { in __tcf_block_put()
1203 /* Flushing/putting all chains will cause the block to be in __tcf_block_put()
1205 * is empty, block has to be manually deallocated. After block in __tcf_block_put()
1207 * increment it or add new chains to block. in __tcf_block_put()
1209 bool free_block = list_empty(&block->chain_list); in __tcf_block_put()
1211 mutex_unlock(&block->lock); in __tcf_block_put()
1212 if (tcf_block_shared(block)) in __tcf_block_put()
1213 tcf_block_remove(block, block->net); in __tcf_block_put()
1216 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1219 tcf_block_destroy(block); in __tcf_block_put()
1221 tcf_block_flush_all_chains(block, rtnl_held); in __tcf_block_put()
1223 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1227 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) in tcf_block_refcnt_put() argument
1229 __tcf_block_put(block, NULL, NULL, rtnl_held); in tcf_block_refcnt_put()
1232 /* Find tcf block.
1241 struct tcf_block *block; in tcf_block_find() local
1254 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); in tcf_block_find()
1255 if (IS_ERR(block)) { in tcf_block_find()
1256 err = PTR_ERR(block); in tcf_block_find()
1260 return block; in tcf_block_find()
1270 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, in tcf_block_release() argument
1273 if (!IS_ERR_OR_NULL(block)) in tcf_block_release()
1274 tcf_block_refcnt_put(block, rtnl_held); in tcf_block_release()
1291 tcf_block_owner_netif_keep_dst(struct tcf_block *block, in tcf_block_owner_netif_keep_dst() argument
1295 if (block->keep_dst && in tcf_block_owner_netif_keep_dst()
1301 void tcf_block_netif_keep_dst(struct tcf_block *block) in tcf_block_netif_keep_dst() argument
1305 block->keep_dst = true; in tcf_block_netif_keep_dst()
1306 list_for_each_entry(item, &block->owner_list, list) in tcf_block_netif_keep_dst()
1307 tcf_block_owner_netif_keep_dst(block, item->q, in tcf_block_netif_keep_dst()
1312 static int tcf_block_owner_add(struct tcf_block *block, in tcf_block_owner_add() argument
1323 list_add(&item->list, &block->owner_list); in tcf_block_owner_add()
1327 static void tcf_block_owner_del(struct tcf_block *block, in tcf_block_owner_del() argument
1333 list_for_each_entry(item, &block->owner_list, list) { in tcf_block_owner_del()
1348 struct tcf_block *block = NULL; in tcf_block_get_ext() local
1352 /* block_index not 0 means the shared block is requested */ in tcf_block_get_ext()
1353 block = tcf_block_refcnt_get(net, ei->block_index); in tcf_block_get_ext()
1355 if (!block) { in tcf_block_get_ext()
1356 block = tcf_block_create(net, q, ei->block_index, extack); in tcf_block_get_ext()
1357 if (IS_ERR(block)) in tcf_block_get_ext()
1358 return PTR_ERR(block); in tcf_block_get_ext()
1359 if (tcf_block_shared(block)) { in tcf_block_get_ext()
1360 err = tcf_block_insert(block, net, extack); in tcf_block_get_ext()
1366 err = tcf_block_owner_add(block, q, ei->binder_type); in tcf_block_get_ext()
1370 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); in tcf_block_get_ext()
1372 err = tcf_chain0_head_change_cb_add(block, ei, extack); in tcf_block_get_ext()
1376 err = tcf_block_offload_bind(block, q, ei, extack); in tcf_block_get_ext()
1380 *p_block = block; in tcf_block_get_ext()
1384 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_get_ext()
1386 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_get_ext()
1389 tcf_block_refcnt_put(block, true); in tcf_block_get_ext()
1418 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, in tcf_block_put_ext() argument
1421 if (!block) in tcf_block_put_ext()
1423 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_put_ext()
1424 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_put_ext()
1426 __tcf_block_put(block, q, ei, true); in tcf_block_put_ext()
1430 void tcf_block_put(struct tcf_block *block) in tcf_block_put() argument
1434 if (!block) in tcf_block_put()
1436 tcf_block_put_ext(block, block->q, &ei); in tcf_block_put()
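
tcf_block_put() is the plain destructor paired with tcf_block_get(): a qdisc typically attaches its block in .init and releases it in .destroy. A hedged sketch (struct foo_sched_data and the foo_* ops are hypothetical; the two tcf_block_* helpers are the real exported pair):

	struct foo_sched_data {
		struct tcf_proto __rcu *filter_list;
		struct tcf_block *block;
	};

	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
	{
		struct foo_sched_data *q = qdisc_priv(sch);

		/* Attach a (possibly shared) filter block to this qdisc. */
		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
	}

	static void foo_destroy(struct Qdisc *sch)
	{
		struct foo_sched_data *q = qdisc_priv(sch);

		tcf_block_put(q->block);	/* drops the qdisc's reference */
	}
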
1442 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, in tcf_block_playback_offloads() argument
1450 lockdep_assert_held(&block->cb_lock); in tcf_block_playback_offloads()
1452 for (chain = __tcf_get_next_chain(block, NULL); in tcf_block_playback_offloads()
1455 chain = __tcf_get_next_chain(block, chain), in tcf_block_playback_offloads()
1479 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, in tcf_block_playback_offloads()
1484 static int tcf_block_bind(struct tcf_block *block, in tcf_block_bind() argument
1490 lockdep_assert_held(&block->cb_lock); in tcf_block_bind()
1493 err = tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1495 tcf_block_offload_in_use(block), in tcf_block_bind()
1500 block->lockeddevcnt++; in tcf_block_bind()
1504 list_splice(&bo->cb_list, &block->flow_block.cb_list); in tcf_block_bind()
1512 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1514 tcf_block_offload_in_use(block), in tcf_block_bind()
1517 block->lockeddevcnt--; in tcf_block_bind()
1525 static void tcf_block_unbind(struct tcf_block *block, in tcf_block_unbind() argument
1530 lockdep_assert_held(&block->cb_lock); in tcf_block_unbind()
1533 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_unbind()
1535 tcf_block_offload_in_use(block), in tcf_block_unbind()
1540 block->lockeddevcnt--; in tcf_block_unbind()
1544 static int tcf_block_setup(struct tcf_block *block, in tcf_block_setup() argument
1551 err = tcf_block_bind(block, bo); in tcf_block_setup()
1555 tcf_block_unbind(block, bo); in tcf_block_setup()
1619 tp->chain->block->index, in tcf_classify()
1789 struct tcf_proto *tp, struct tcf_block *block, in tcf_fill_node() argument
1810 tcm->tcm_block_index = block->index; in tcf_fill_node()
1835 struct tcf_block *block, struct Qdisc *q, in tfilter_notify() argument
1847 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_notify()
1867 struct tcf_block *block, struct Qdisc *q, in tfilter_del_notify() argument
1879 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_del_notify()
1907 struct tcf_block *block, struct Qdisc *q, in tfilter_notify_chain() argument
1916 tfilter_notify(net, oskb, n, tp, block, in tfilter_notify_chain()
1941 struct tcf_block *block; in tc_new_tfilter() local
1967 block = NULL; in tc_new_tfilter()
1995 * block is shared (no qdisc found), qdisc is not unlocked, classifier in tc_new_tfilter()
2009 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_new_tfilter()
2011 if (IS_ERR(block)) { in tc_new_tfilter()
2012 err = PTR_ERR(block); in tc_new_tfilter()
2022 chain = tcf_chain_get(block, chain_index, true); in tc_new_tfilter()
2115 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_new_tfilter()
2133 tcf_block_release(q, block, rtnl_held); in tc_new_tfilter()
2167 struct tcf_block *block = NULL; in tc_del_tfilter() local
2203 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc in tc_del_tfilter()
2218 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_del_tfilter()
2220 if (IS_ERR(block)) { in tc_del_tfilter()
2221 err = PTR_ERR(block); in tc_del_tfilter()
2231 chain = tcf_chain_get(block, chain_index, false); in tc_del_tfilter()
2246 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_del_tfilter()
2270 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_del_tfilter()
2285 err = tfilter_del_notify(net, skb, n, tp, block, in tc_del_tfilter()
2301 tcf_block_release(q, block, rtnl_held); in tc_del_tfilter()
2327 struct tcf_block *block = NULL; in tc_get_tfilter() local
2360 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not in tc_get_tfilter()
2374 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_get_tfilter()
2376 if (IS_ERR(block)) { in tc_get_tfilter()
2377 err = PTR_ERR(block); in tc_get_tfilter()
2387 chain = tcf_chain_get(block, chain_index, false); in tc_get_tfilter()
2414 err = tfilter_notify(net, skb, n, tp, block, q, parent, in tc_get_tfilter()
2427 tcf_block_release(q, block, rtnl_held); in tc_get_tfilter()
2439 struct tcf_block *block; member
2449 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, in tcf_node_dump()
2460 struct tcf_block *block = chain->block; in tcf_chain_dump() local
2483 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, in tcf_chain_dump()
2495 arg.block = block; in tcf_chain_dump()
2522 struct tcf_block *block; in tc_dump_tfilter() local
2538 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_tfilter()
2539 if (!block) in tc_dump_tfilter()
2541 /* If we work with block index, q is NULL and parent value in tc_dump_tfilter()
2576 block = cops->tcf_block(q, cl, NULL); in tc_dump_tfilter()
2577 if (!block) in tc_dump_tfilter()
2579 if (tcf_block_shared(block)) in tc_dump_tfilter()
2586 for (chain = __tcf_get_next_chain(block, NULL); in tc_dump_tfilter()
2589 chain = __tcf_get_next_chain(block, chain), in tc_dump_tfilter()
2603 tcf_block_refcnt_put(block, true); in tc_dump_tfilter()
2616 struct tcf_block *block, in tc_chain_fill_node() argument
2636 if (block->q) { in tc_chain_fill_node()
2637 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; in tc_chain_fill_node()
2638 tcm->tcm_parent = block->q->handle; in tc_chain_fill_node()
2641 tcm->tcm_block_index = block->index; in tc_chain_fill_node()
2667 struct tcf_block *block = chain->block; in tc_chain_notify() local
2668 struct net *net = block->net; in tc_chain_notify()
2677 chain->index, net, skb, block, portid, in tc_chain_notify()
2696 struct tcf_block *block, struct sk_buff *oskb, in tc_chain_notify_delete() argument
2700 struct net *net = block->net; in tc_chain_notify_delete()
2708 block, portid, seq, flags, RTM_DELCHAIN) <= 0) { in tc_chain_notify_delete()
2771 struct tcf_block *block; in tc_ctl_chain() local
2789 block = tcf_block_find(net, &q, &parent, &cl, in tc_ctl_chain()
2791 if (IS_ERR(block)) in tc_ctl_chain()
2792 return PTR_ERR(block); in tc_ctl_chain()
2801 mutex_lock(&block->lock); in tc_ctl_chain()
2802 chain = tcf_chain_lookup(block, chain_index); in tc_ctl_chain()
2821 chain = tcf_chain_create(block, chain_index); in tc_ctl_chain()
2838 /* Modifying chain requires holding parent block lock. In case in tc_ctl_chain()
2846 mutex_unlock(&block->lock); in tc_ctl_chain()
2860 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_ctl_chain()
2884 tcf_block_release(q, block, true); in tc_ctl_chain()
2891 mutex_unlock(&block->lock); in tc_ctl_chain()
2901 struct tcf_block *block; in tc_dump_chain() local
2918 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_chain()
2919 if (!block) in tc_dump_chain()
2921 /* If we work with block index, q is NULL and parent value in tc_dump_chain()
2956 block = cops->tcf_block(q, cl, NULL); in tc_dump_chain()
2957 if (!block) in tc_dump_chain()
2959 if (tcf_block_shared(block)) in tc_dump_chain()
2966 mutex_lock(&block->lock); in tc_dump_chain()
2967 list_for_each_entry(chain, &block->chain_list, list) { in tc_dump_chain()
2978 chain->index, net, skb, block, in tc_dump_chain()
2986 mutex_unlock(&block->lock); in tc_dump_chain()
2989 tcf_block_refcnt_put(block, true); in tc_dump_chain()
3128 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) in tcf_block_offload_inc() argument
3133 atomic_inc(&block->offloadcnt); in tcf_block_offload_inc()
3136 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) in tcf_block_offload_dec() argument
3141 atomic_dec(&block->offloadcnt); in tcf_block_offload_dec()
3144 static void tc_cls_offload_cnt_update(struct tcf_block *block, in tc_cls_offload_cnt_update() argument
3148 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_update()
3153 tcf_block_offload_inc(block, flags); in tc_cls_offload_cnt_update()
3158 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_update()
3164 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, in tc_cls_offload_cnt_reset() argument
3167 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_reset()
3170 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_reset()
3176 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in __tc_setup_cb_call() argument
3183 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { in __tc_setup_cb_call()
3195 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in tc_setup_cb_call() argument
3198 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_call()
3204 down_read(&block->cb_lock); in tc_setup_cb_call()
3205 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_call()
3206 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_call()
3209 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_call()
3210 up_read(&block->cb_lock); in tc_setup_cb_call()
3215 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_call()
3217 up_read(&block->cb_lock); in tc_setup_cb_call()
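
tc_setup_cb_call() fans one setup command out to every callback on block->flow_block.cb_list; the READ_ONCE(block->lockeddevcnt) check decides up front whether rtnl must be taken before cb_lock. Sketch of a caller pulling hardware stats, modeled on the cls_matchall pattern (stats plumbing trimmed):

	static void update_hw_stats_sketch(struct tcf_block *block,
					   struct tcf_proto *tp,
					   unsigned long cookie, bool rtnl_held)
	{
		struct tc_cls_matchall_offload cls_mall = {};

		tc_cls_common_offload_init(&cls_mall.common, tp, 0, NULL);
		cls_mall.command = TC_CLSMATCHALL_STATS;
		cls_mall.cookie = cookie;

		/* err_stop=false: a failing callback must not abort the fan-out */
		tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false,
				 rtnl_held);
	}
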
3225 * successfully offloaded, increment block offloads counter. On failure,
3230 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_add() argument
3234 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_add()
3240 down_read(&block->cb_lock); in tc_setup_cb_add()
3241 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_add()
3242 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_add()
3245 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_add()
3246 up_read(&block->cb_lock); in tc_setup_cb_add()
3251 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_add()
3252 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_add()
3257 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_add()
3264 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, in tc_setup_cb_add()
3267 up_read(&block->cb_lock); in tc_setup_cb_add()
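
tc_setup_cb_add() additionally enforces the skip_sw contract: with err_stop set it bails out if any bound netdev lacks offload support (nooffloaddevcnt), and on success it bumps the filter's in_hw accounting through tc_cls_offload_cnt_update(). Sketch of an add-side call modeled on cls_flower:

	static int offload_add_sketch(struct tcf_block *block,
				      struct tcf_proto *tp,
				      struct flow_cls_offload *cls_flower,
				      u32 *flags, unsigned int *in_hw_count,
				      bool rtnl_held)
	{
		bool skip_sw = tc_skip_sw(*flags);
		int err;

		cls_flower->command = FLOW_CLS_REPLACE;
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, cls_flower,
				      skip_sw, flags, in_hw_count, rtnl_held);
		if (err)
			return err;

		/* skip_sw filters are useless unless actually in hardware */
		if (skip_sw && !tc_in_hw(*flags))
			return -EINVAL;
		return 0;
	}
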
3275 * successfully offloaded, increment block offload counter. On failure,
3280 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_replace() argument
3286 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_replace()
3292 down_read(&block->cb_lock); in tc_setup_cb_replace()
3293 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_replace()
3294 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_replace()
3297 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_replace()
3298 up_read(&block->cb_lock); in tc_setup_cb_replace()
3303 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_replace()
3304 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_replace()
3309 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); in tc_setup_cb_replace()
3313 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_replace()
3320 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, in tc_setup_cb_replace()
3323 up_read(&block->cb_lock); in tc_setup_cb_replace()
3330 /* Destroy filter and decrement block offload counter, if filter was previously
3334 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_destroy() argument
3338 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_destroy()
3344 down_read(&block->cb_lock); in tc_setup_cb_destroy()
3345 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_destroy()
3346 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_destroy()
3349 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_destroy()
3350 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3355 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_destroy()
3357 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); in tc_setup_cb_destroy()
3361 up_read(&block->cb_lock); in tc_setup_cb_destroy()
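
tc_setup_cb_destroy() is the matching teardown: it replays the command to all callbacks and then resets the filter's in_hw accounting via tc_cls_offload_cnt_reset(). Sketch of the destroy side, same hypothetical cls_flower-style caller as above:

	static void offload_del_sketch(struct tcf_block *block,
				       struct tcf_proto *tp,
				       struct flow_cls_offload *cls_flower,
				       u32 *flags, unsigned int *in_hw_count,
				       bool rtnl_held)
	{
		cls_flower->command = FLOW_CLS_DESTROY;
		tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, cls_flower,
				    false, flags, in_hw_count, rtnl_held);
	}
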
3368 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_reoffload() argument
3379 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, in tc_setup_cb_reoffload()