mm/page_counter.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>
/*
 * Propagate a counter's protected usage into the parent's
 * children_min_usage/children_low_usage aggregates.
 */
static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}
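/*
 * Worked example (not from the original source): consider a child counter
 * with min = 100 pages whose usage grows from 60 to 80 pages.  The first
 * propagation records min(60, 100) = 60 in min_usage; the next one swaps
 * in min(80, 100) = 80 and adds the delta of 20 to the parent's
 * children_min_usage, so each parent tracks the sum of its children's
 * currently protected usage without any locking.
 */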
/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	/* More uncharges than charges? */
	if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
		      new, nr_pages)) {
		new = 0;
		atomic_long_set(&counter->usage, new);
	}
	propagate_protected_usage(counter, new);
}
/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/* Racy, but we can live with some inaccuracy in the watermark. */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}
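/*
 * Example sketch (not part of this file): page_counter_charge() is the
 * "cannot fail" path, e.g. for memory that has already been allocated and
 * only needs to be accounted after the fact.  The foo_* name below is made
 * up for illustration.
 */
static void foo_account_allocated(struct page_counter *counter,
				  unsigned long nr_pages)
{
	/* Bumps usage on @counter and every ancestor, ignoring limits. */
	page_counter_charge(counter, nr_pages);
}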
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS.  The
		 * atomic_long_add_return() implies a full memory barrier
		 * between incrementing the count and reading the limit,
		 * so when racing with page_counter_set_max(), we either
		 * see the new limit or the setter sees the counter has
		 * changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			propagate_protected_usage(c, new);
			/*
			 * Racy, but the failcnt is only used to report
			 * stats, so some inaccuracy is acceptable.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/* Likewise, inaccuracy in the watermark is tolerable. */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
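/*
 * Example sketch (not part of this file): the usual try-charge pattern,
 * loosely modelled on how callers such as the memory controller use this
 * API.  The foo_* name is made up for illustration.
 */
static int foo_try_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *over_limit;

	if (page_counter_try_charge(counter, nr_pages, &over_limit))
		return 0;
	/*
	 * @over_limit now points at the first ancestor that hit its max;
	 * a real caller would typically reclaim against it and retry
	 * before giving up with -ENOMEM as done here.
	 */
	return -ENOMEM;
}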
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}
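/*
 * Example sketch (not part of this file): charges and uncharges must be
 * balanced against the same counter, otherwise the underflow warning in
 * page_counter_cancel() will eventually fire.  The foo_* name is made up.
 */
static void foo_release(struct page_counter *counter, unsigned long nr_pages)
{
	/* Undo a previously successful (try_)charge of the same amount. */
	page_counter_uncharge(counter, nr_pages);
}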
/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
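/*
 * Example sketch (not part of this file): shrinking a limit below current
 * usage fails with -EBUSY, so callers usually reclaim and retry.  The
 * foo_reclaim_some() helper is hypothetical and stands in for that step.
 */
static bool foo_reclaim_some(struct page_counter *counter);	/* hypothetical */

static int foo_shrink_limit(struct page_counter *counter, unsigned long limit)
{
	int err;

	while ((err = page_counter_set_max(counter, limit)) == -EBUSY) {
		if (!foo_reclaim_some(counter))
			break;	/* no more progress, give up with -EBUSY */
	}
	return err;
}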
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}
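/*
 * Example sketch (not part of this file): cgroup-style memory.min and
 * memory.low protections map directly onto these setters; an update just
 * stores the value and re-propagates current usage.  foo_set_protection()
 * is illustrative only.
 */
static void foo_set_protection(struct page_counter *counter,
			       unsigned long min_pages,
			       unsigned long low_pages)
{
	page_counter_set_min(counter, min_pages);	/* hard protection */
	page_counter_set_low(counter, low_pages);	/* best-effort protection */
}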
/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
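/*
 * Example sketch (not part of this file): parsing a cgroup-style limit
 * string.  Writing "max" yields PAGE_COUNTER_MAX, anything else is parsed
 * by memparse() (accepting suffixes like K, M, G) and converted from bytes
 * to pages.  The foo_* name is made up for illustration.
 */
static int foo_parse_limit(const char *buf, unsigned long *nr_pages)
{
	return page_counter_memparse(buf, "max", nr_pages);
}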