Lines Matching full:node in kernel/bpf/bpf_lru_list.c (the BPF LRU list implementation)
Matches are grouped by enclosing function; non-matching lines elided from a match group are marked with "...".

In bpf_lru_node_is_ref(), lines 42-44:

    static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
    {
        return node->ref;
    }
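For orientation, this is the node layout the whole listing revolves around, as declared in kernel/bpf/bpf_lru_list.h for kernels of this vintage (quoted from memory, so verify against your tree):

    struct bpf_lru_node {
        struct list_head list;
        u16 cpu;    /* CPU whose local list owns the node (common LRU) */
        u8 type;    /* which list the node currently sits on */
        u8 ref;     /* "referenced" bit, giving the node a second chance */
    };

Newer trees access ref through READ_ONCE()/WRITE_ONCE(); the direct node->ref accesses in this listing date it to an earlier version.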
In __bpf_lru_node_move_to_free(), lines 62-78:

    static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
                                            struct bpf_lru_node *node,
                                            struct list_head *free_list,
                                            enum bpf_lru_list_type tgt_free_type)
    {
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
            return;

        /* If the removing node is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer also.
         */
        if (&node->list == l->next_inactive_rotation)
            l->next_inactive_rotation = l->next_inactive_rotation->prev;

        bpf_lru_list_count_dec(l, node->type);
        node->type = tgt_free_type;
        list_move(&node->list, free_list);
    }
In __bpf_lru_node_move_in(), lines 83-93:

    static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
                                       struct bpf_lru_node *node,
                                       enum bpf_lru_list_type tgt_type)
    {
        if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
            return;

        bpf_lru_list_count_inc(l, tgt_type);
        node->type = tgt_type;
        node->ref = 0;
        list_move(&node->list, &l->lists[tgt_type]);
    }
In __bpf_lru_node_move(), lines 101-121:

    static void __bpf_lru_node_move(struct bpf_lru_list *l,
                                    struct bpf_lru_node *node,
                                    enum bpf_lru_list_type tgt_type)
    {
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
            return;

        if (node->type != tgt_type) {
            bpf_lru_list_count_dec(l, node->type);
            bpf_lru_list_count_inc(l, tgt_type);
            node->type = tgt_type;
        }
        node->ref = 0;

        /* If the moving node is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer also.
         */
        if (&node->list == l->next_inactive_rotation)
            l->next_inactive_rotation = l->next_inactive_rotation->prev;

        list_move(&node->list, &l->lists[tgt_type]);
    }
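The tgt_type values seen throughout come from an enum in the same header; the split between global and per-CPU local list types is exactly what IS_LOCAL_LIST_TYPE() tests. Again quoted from memory, so treat it as a sketch:

    enum bpf_lru_list_type {
        BPF_LRU_LIST_T_ACTIVE,        /* global active list */
        BPF_LRU_LIST_T_INACTIVE,      /* global inactive list */
        BPF_LRU_LIST_T_FREE,          /* global free list */
        BPF_LRU_LOCAL_LIST_T_FREE,    /* per-CPU local free list */
        BPF_LRU_LOCAL_LIST_T_PENDING, /* per-CPU local pending list */
    };

The move helpers above keep node->type in sync with actual list membership, which is what lets push/pop decide cheaply where a node currently lives.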
In __bpf_lru_list_rotate_active(), lines 132-153:

    /* Rotate the active list:
     * 1. Start from the tail
     * 2. If the node has the ref bit set, it will be rotated
     *    back to the head of the active list.
     *    Give this node one more chance to survive in the active list.
     * 3. Otherwise, move it to the tail of the inactive list.
     */
    ...
    struct bpf_lru_node *node, *tmp_node, *first_node;
    ...
    list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
        if (bpf_lru_node_is_ref(node))
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        else
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

        if (++i == lru->nr_scans || node == first_node)
            break;
    }
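The loop above is a classic second-chance sweep. Here is a minimal userspace model of just that policy; all names are illustrative (not the kernel's), and the list helpers are simplified stand-ins for <linux/list.h>:

    #include <stdio.h>

    struct node {
        struct node *prev, *next;
        int ref;
        int id;
    };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_del_node(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_head(struct node *h, struct node *n)
    {
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
    }

    /* Walk the active list from the tail: ref bit set means one more
     * round at the head (second chance); otherwise demote the node to
     * the inactive list. */
    static void rotate_active(struct node *active, struct node *inactive,
                              int nr_scans)
    {
        struct node *n = active->prev;

        while (n != active && nr_scans--) {
            struct node *prev = n->prev;

            list_del_node(n);
            if (n->ref) {
                n->ref = 0;
                list_add_head(active, n);
            } else {
                list_add_head(inactive, n);
            }
            n = prev;
        }
    }

    int main(void)
    {
        struct node active, inactive;
        struct node v[4] = {
            { .ref = 1, .id = 0 }, { .ref = 0, .id = 1 },
            { .ref = 1, .id = 2 }, { .ref = 0, .id = 3 },
        };

        list_init(&active);
        list_init(&inactive);
        for (int i = 0; i < 4; i++)
            list_add_head(&active, &v[i]);

        rotate_active(&active, &inactive, 4);

        /* ids 1 and 3 had no ref bit, so they get demoted */
        for (struct node *n = inactive.next; n != &inactive; n = n->next)
            printf("demoted: id %d\n", n->id);
        return 0;
    }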
In __bpf_lru_list_rotate_inactive(), lines 159-191:

    /* Rotate the inactive list.  It starts from the next_inactive_rotation
     * candidate:
     * 1. If the node has the ref bit set, it will be moved to the head
     *    of the active list with the ref bit cleared.
     * 2. If the node does not have the ref bit set, it will leave it
     *    where it is, so it can be considered by the next inactive-list
     *    shrink.
     */
    ...
    struct bpf_lru_node *node;
    ...
    node = list_entry(cur, struct bpf_lru_node, list);
    ...
    if (bpf_lru_node_is_ref(node))
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
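next_inactive_rotation behaves like a clock hand: each bounded scan resumes where the previous one stopped, so successive rotations eventually visit the whole inactive list even though each call inspects at most nr_scans nodes. A toy illustration of that resume behavior (indices stand in for list positions; purely illustrative):

    #include <stdio.h>

    int main(void)
    {
        int list_len = 10, nr_scans = 3, hand = 0;

        for (int round = 0; round < 4; round++) {
            printf("round %d scans:", round);
            for (int i = 0; i < nr_scans; i++) {
                printf(" %d", hand);
                hand = (hand + 1) % list_len; /* advance the clock hand */
            }
            printf("\n");
        }
        return 0;
    }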
In __bpf_lru_list_shrink_inactive(), lines 213-221:

    struct bpf_lru_node *node, *tmp_node;
    ...
    list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
        if (bpf_lru_node_is_ref(node)) {
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        } else if (lru->del_from_htab(lru->del_arg, node)) {
            __bpf_lru_node_move_to_free(l, node, free_list,
                                        tgt_free_type);
            ...
        }
        ...
    }
In __bpf_lru_list_shrink(), lines 249-280:

    /* If it cannot get a free node after calling
     * __bpf_lru_list_shrink_inactive(), it will remove
     * one node from either the inactive or the active list without
     * honoring the ref bit, preferring the inactive list.
     */
    ...
    struct bpf_lru_node *node, *tmp_node;
    ...
    list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
                                     list) {
        if (lru->del_from_htab(lru->del_arg, node)) {
            __bpf_lru_node_move_to_free(l, node, free_list,
                                        tgt_free_type);
            return 1;
        }
    }
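Both shrink paths gate reuse on lru->del_from_htab(), the callback the hash table registers at LRU init: a node may only move to a free list once its owner has unlinked the element from the table and returned true. The callback's shape, as I recall it from kernel/bpf/bpf_lru_list.h (the typedef name below is hypothetical, added only for illustration):

    #include <stdbool.h>

    struct bpf_lru_node; /* as in the layout sketch near the top */

    /* Hypothetical typedef; the kernel stores this function pointer as
     * the del_from_htab member of struct bpf_lru, alongside del_arg. */
    typedef bool (*bpf_lru_del_from_htab_t)(void *arg,
                                            struct bpf_lru_node *node);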
In __local_list_flush(), lines 293-300:

    struct bpf_lru_node *node, *tmp_node;

    list_for_each_entry_safe_reverse(node, tmp_node,
                                     local_pending_list(loc_l), list) {
        if (bpf_lru_node_is_ref(node))
            __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
        else
            __bpf_lru_node_move_in(l, node,
                                   BPF_LRU_LIST_T_INACTIVE);
    }
In bpf_lru_list_push_free(), lines 306-314:

    static void bpf_lru_list_push_free(struct bpf_lru_list *l,
                                       struct bpf_lru_node *node)
    {
        unsigned long flags;

        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
            return;

        raw_spin_lock_irqsave(&l->lock, flags);
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
        raw_spin_unlock_irqrestore(&l->lock, flags);
    }
In bpf_lru_list_pop_free_to_local(), lines 322-333:

    struct bpf_lru_node *node, *tmp_node;
    ...
    list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
                             list) {
        __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
                                    BPF_LRU_LOCAL_LIST_T_FREE);
        if (++nfree == LOCAL_FREE_TARGET)
            break;
    }
In __local_list_add_pending(), lines 350-357:

    static void __local_list_add_pending(struct bpf_lru *lru,
                                         struct bpf_lru_locallist *loc_l,
                                         int cpu,
                                         struct bpf_lru_node *node,
                                         u32 hash)
    {
        *(u32 *)((void *)node + lru->hash_offset) = hash;
        node->cpu = cpu;
        node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
        node->ref = 0;
        list_add(&node->list, local_pending_list(loc_l));
    }
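The *(u32 *)((void *)node + lru->hash_offset) = hash store deserves a note: the LRU node is embedded inside the map's element, and hash_offset (recorded at LRU init) is, as I read it, the offset of the element's hash field relative to the embedded node, so the allocator can stamp the new hash without knowing the element layout. The same store appears again in bpf_percpu_lru_pop_free() below.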
In __local_list_pop_free(), lines 363-371:

    static struct bpf_lru_node *
    __local_list_pop_free(struct bpf_lru_locallist *loc_l)
    {
        struct bpf_lru_node *node;

        node = list_first_entry_or_null(local_free_list(loc_l),
                                        struct bpf_lru_node,
                                        list);
        if (node)
            list_del(&node->list);

        return node;
    }
In __local_list_pop_pending(), lines 377-387:

    struct bpf_lru_node *node;
    bool force = false;
    ...
    /* Get from the tail (i.e. the older element) of the pending list. */
    list_for_each_entry_reverse(node, local_pending_list(loc_l),
                                list) {
        if ((!bpf_lru_node_is_ref(node) || force) &&
            lru->del_from_htab(lru->del_arg, node)) {
            list_del(&node->list);
            return node;
        }
    }
    ...
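The force flag in the condition implies a two-pass policy: honor the ref bit first, and only if nothing could be reclaimed, retry while ignoring it. A compact userspace model of that fallback (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Two-pass victim selection: honor the ref bit, then ignore it. */
    static int pick_victim(const bool ref[], int n)
    {
        bool force = false;

    again:
        for (int i = n - 1; i >= 0; i--) /* oldest (tail) first */
            if (!ref[i] || force)
                return i;
        if (!force) {
            force = true;
            goto again;
        }
        return -1;
    }

    int main(void)
    {
        bool ref[] = { true, true, true }; /* everything recently used */
        printf("victim index: %d\n", pick_victim(ref, 3)); /* 2: forced */
        return 0;
    }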
In bpf_percpu_lru_pop_free(), lines 403-428:

    struct bpf_lru_node *node = NULL;
    ...
    node = list_first_entry(free_list, struct bpf_lru_node, list);
    *(u32 *)((void *)node + lru->hash_offset) = hash;
    node->ref = 0;
    __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
    ...
    return node;
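In the per-CPU flavor, each CPU rotates and shrinks only its own bpf_lru_list under its own lock (the free_list here is that CPU's BPF_LRU_LIST_T_FREE list), so a pop never touches other CPUs. That is cheaper under contention, at the price of a per-CPU rather than global notion of "least recently used".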
In bpf_common_lru_pop_free(), lines 436-491:

    struct bpf_lru_node *node;
    ...
    node = __local_list_pop_free(loc_l);
    if (!node) {
        bpf_lru_list_pop_free_to_local(lru, loc_l);
        node = __local_list_pop_free(loc_l);
    }

    if (node)
        __local_list_add_pending(lru, loc_l, cpu, node, hash);
    ...
    if (node)
        return node;

    /* No free node on this CPU: steal from other CPUs' local lists. */
    do {
        ...
        node = __local_list_pop_free(steal_loc_l);
        if (!node)
            node = __local_list_pop_pending(lru, steal_loc_l);
        ...
    } while (!node && steal != first_steal);
    ...
    if (node) {
        ...
        __local_list_add_pending(lru, loc_l, cpu, node, hash);
        ...
    }

    return node;
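When its own lists are exhausted, the common LRU steals in round-robin order starting from where the last successful steal left off (first_steal), and the loop terminates once it comes full circle without finding a node. A toy model of just the iteration order; the real code walks the CPU mask rather than a dense 0..N range, so next_cpu() below is an assumption:

    #include <stdio.h>

    static int next_cpu(int cpu, int nr_cpus)
    {
        return (cpu + 1) % nr_cpus; /* stand-in for cpumask iteration */
    }

    int main(void)
    {
        int nr_cpus = 4, first_steal = 1;
        int steal = first_steal;

        do {
            printf("try stealing from CPU %d\n", steal);
            steal = next_cpu(steal, nr_cpus);
        } while (steal != first_steal);
        return 0;
    }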
In bpf_common_lru_push_free(), lines 503-532:

    static void bpf_common_lru_push_free(struct bpf_lru *lru,
                                         struct bpf_lru_node *node)
    {
        ...
        if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
            WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
            return;

        if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
            ...
            loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
            ...
            if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
                ...
            }

            node->type = BPF_LRU_LOCAL_LIST_T_FREE;
            node->ref = 0;
            list_move(&node->list, local_free_list(loc_l));
            ...
        }
        ...
        bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
    }
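Note the double check of node->type here: the PENDING test at line 511 runs before the owning CPU's local-list lock is taken, so by the time the lock is held another CPU may have stolen the node and changed its type. The unlikely() recheck at line 518 catches that race and falls back to pushing the node onto the global free list instead.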
In bpf_percpu_lru_push_free(), lines 536-545:

    static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
                                         struct bpf_lru_node *node)
    {
        ...
        l = per_cpu_ptr(lru->percpu_lru, node->cpu);
        ...
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
        ...
    }
In bpf_lru_push_free(), lines 550-555:

    void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
    {
        if (lru->percpu)
            bpf_percpu_lru_push_free(lru, node);
        else
            bpf_common_lru_push_free(lru, node);
    }
In bpf_common_lru_populate(), lines 566-571:

    struct bpf_lru_node *node;
    ...
    node = (struct bpf_lru_node *)(buf + node_offset);
    node->type = BPF_LRU_LIST_T_FREE;
    node->ref = 0;
    list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
In bpf_percpu_lru_populate(), lines 589-597:

    struct bpf_lru_node *node;
    ...
    node = (struct bpf_lru_node *)(buf + node_offset);
    node->cpu = cpu;
    node->type = BPF_LRU_LIST_T_FREE;
    node->ref = 0;
    list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
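Both populate loops stamp preallocated storage: the map allocates one flat buffer and carves it into elements, each embedding its bpf_lru_node at a fixed node_offset, and advances buf by elem_size per iteration. A userspace sketch of that carving arithmetic (struct elem and all sizes below are made up for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct elem {                /* hypothetical map element layout */
        long key;
        char lru_node[16];       /* placeholder for the embedded LRU node */
        long value;
    };

    int main(void)
    {
        char buf[3 * sizeof(struct elem)];
        size_t node_offset = offsetof(struct elem, lru_node);
        size_t elem_size = sizeof(struct elem);
        char *p = buf;

        for (int i = 0; i < 3; i++, p += elem_size)
            printf("elem %d: node at buf+%zu\n",
                   i, (size_t)(p + node_offset - buf));
        return 0;
    }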