Lines matching references to plr (a struct pseudo_lock_region pointer). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the lines where plr is declared as a function parameter or local variable rather than merely used.

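All of the matches below dereference fields of struct pseudo_lock_region. As a reading aid, here is a minimal sketch of that structure reconstructed purely from the member accesses in this listing; the declaration order and exact types are assumptions inferred from usage, not copied from the kernel headers:

	struct pseudo_lock_region {
		struct rdt_resource	*r;		/* cache resource (lines 293, 798) */
		struct rdt_domain	*d;		/* cache domain; d->plr back-pointer (lines 250-252) */
		u32			cbm;		/* capacity bitmask of the locked region (line 253) */
		wait_queue_head_t	lock_thread_wq;	/* caller waits here for the kthread (line 328) */
		int			thread_done;	/* completion flag set before wake-up (line 516) */
		int			cpu;		/* CPU running the locking/measurement thread (line 282) */
		unsigned int		line_size;	/* cache coherency line size (line 297) */
		unsigned int		size;		/* region size in bytes (line 293) */
		void			*kmem;		/* contiguous kernel memory to pseudo-lock (line 362) */
		unsigned int		minor;		/* character-device minor number (line 156) */
		struct dentry		*debugfs_dir;	/* per-region debugfs directory (line 1336) */
		struct list_head	pm_reqs;	/* PM QoS requests limiting C-states (line 329) */
	};
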
156 		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {  in region_find_by_minor()
174 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) in pseudo_lock_cstates_relax() argument
178 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
200 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) in pseudo_lock_cstates_constrain() argument
206 for_each_cpu(cpu, &plr->d->cpu_mask) { in pseudo_lock_cstates_constrain()
224 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
230 pseudo_lock_cstates_relax(plr); in pseudo_lock_cstates_constrain()
243 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) in pseudo_lock_region_clear() argument
245 plr->size = 0; in pseudo_lock_region_clear()
246 plr->line_size = 0; in pseudo_lock_region_clear()
247 kfree(plr->kmem); in pseudo_lock_region_clear()
248 plr->kmem = NULL; in pseudo_lock_region_clear()
249 plr->r = NULL; in pseudo_lock_region_clear()
250 if (plr->d) in pseudo_lock_region_clear()
251 plr->d->plr = NULL; in pseudo_lock_region_clear()
252 plr->d = NULL; in pseudo_lock_region_clear()
253 plr->cbm = 0; in pseudo_lock_region_clear()
254 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
275 static int pseudo_lock_region_init(struct pseudo_lock_region *plr) in pseudo_lock_region_init() argument
282 plr->cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_region_init()
284 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
286 plr->cpu); in pseudo_lock_region_init()
291 ci = get_cpu_cacheinfo(plr->cpu); in pseudo_lock_region_init()
293 plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm); in pseudo_lock_region_init()
296 if (ci->info_list[i].level == plr->r->cache_level) { in pseudo_lock_region_init()
297 plr->line_size = ci->info_list[i].coherency_line_size; in pseudo_lock_region_init()
305 pseudo_lock_region_clear(plr); in pseudo_lock_region_init()
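pseudo_lock_region_init() (lines 275-305) derives the region's size and cache line size from the first online CPU in the domain. A sketch of the cacheinfo walk around lines 291-305, with the error handling condensed:

	ci = get_cpu_cacheinfo(plr->cpu);
	plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);	/* line 293 */
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == plr->r->cache_level) {
			plr->line_size = ci->info_list[i].coherency_line_size;
			return 0;
		}
	}
	/* no leaf at the resource's cache level: give up and undo */
	pseudo_lock_region_clear(plr);					/* line 305 */
	return -1;
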
322 struct pseudo_lock_region *plr; in pseudo_lock_init() local
324 plr = kzalloc(sizeof(*plr), GFP_KERNEL); in pseudo_lock_init()
325 if (!plr) in pseudo_lock_init()
328 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
329 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
330 rdtgrp->plr = plr; in pseudo_lock_init()
344 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) in pseudo_lock_region_alloc() argument
348 ret = pseudo_lock_region_init(plr); in pseudo_lock_region_alloc()
356 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
362 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
363 if (!plr->kmem) { in pseudo_lock_region_alloc()
372 pseudo_lock_region_clear(plr); in pseudo_lock_region_alloc()
389 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
390 kfree(rdtgrp->plr); in pseudo_lock_free()
391 rdtgrp->plr = NULL; in pseudo_lock_free()
416 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_fn() local
465 mem_r = plr->kmem; in pseudo_lock_fn()
466 size = plr->size; in pseudo_lock_fn()
467 line_size = plr->line_size; in pseudo_lock_fn()
516 plr->thread_done = 1; in pseudo_lock_fn()
517 wake_up_interruptible(&plr->lock_thread_wq); in pseudo_lock_fn()
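pseudo_lock_fn() and the measurement threads all report completion with the same handshake; a minimal sketch of its two sides as they appear at lines 516-517 and 1303-1304:

	/* kthread side: publish completion, then wake the waiter */
	plr->thread_done = 1;
	wake_up_interruptible(&plr->lock_thread_wq);

	/* caller side: sleep until the kthread reports done */
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);
	if (ret < 0)
		goto unwind;	/* interrupted by a signal; illustrative label */
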
798 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
799 cbm_len = d->plr->r->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
800 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
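rdtgroup_cbm_overlaps_pseudo_locked() (lines 798-800) answers whether a candidate bitmask would collide with a pseudo-locked region in the same domain. A sketch of the complete test; the bitmap_intersects() call is an assumption consistent with the cbm_len/cbm_b variables fetched above:

	unsigned long cbm_b;

	if (d->plr) {
		cbm_len = d->plr->r->cache.cbm_len;
		cbm_b = d->plr->cbm;
		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
			return true;	/* candidate shares bits with the locked CBM */
	}
	return false;
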
836 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
869 struct pseudo_lock_region *plr = _plr; in measure_cycles_lat_fn() local
879 mem_r = READ_ONCE(plr->kmem); in measure_cycles_lat_fn()
885 for (i = 0; i < plr->size; i += 32) { in measure_cycles_lat_fn()
896 plr->thread_done = 1; in measure_cycles_lat_fn()
897 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_lat_fn()
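measure_cycles_lat_fn() (lines 869-897) times a single load per 32-byte stride across the locked buffer. A hedged sketch of the measurement loop around line 885, assuming x86 with interrupts disabled for the duration; the inline assembly and trace event follow that pattern:

	for (i = 0; i < plr->size; i += 32) {
		start = rdtsc_ordered();
		/* one load from mem_r + i; eax is clobbered, value discarded */
		asm volatile("mov (%0,%1,1), %%eax\n\t"
			     :
			     : "r" (mem_r), "r" (i)
			     : "%eax", "memory");
		end = rdtsc_ordered();
		trace_pseudo_lock_mem_latency((u32)(end - start));
	}
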
933 struct pseudo_lock_region *plr, in measure_residency_fn() argument
945 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, in measure_residency_fn()
950 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, in measure_residency_fn()
981 line_size = READ_ONCE(plr->line_size); in measure_residency_fn()
982 mem_r = READ_ONCE(plr->kmem); in measure_residency_fn()
983 size = READ_ONCE(plr->size); in measure_residency_fn()
1049 struct pseudo_lock_region *plr = _plr; in measure_l2_residency() local
1072 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l2_residency()
1080 plr->thread_done = 1; in measure_l2_residency()
1081 wake_up_interruptible(&plr->lock_thread_wq); in measure_l2_residency()
1087 struct pseudo_lock_region *plr = _plr; in measure_l3_residency() local
1111 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l3_residency()
1136 plr->thread_done = 1; in measure_l3_residency()
1137 wake_up_interruptible(&plr->lock_thread_wq); in measure_l3_residency()
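measure_l2_residency() and measure_l3_residency() (lines 1049-1137) differ mainly in which hit/miss event encodings they plug into the shared perf attributes before calling measure_residency_fn(). A sketch of the shape of that dispatch; the model and event/umask values below are placeholders, since the real encodings are CPU-model specific:

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_ATOM_GOLDMONT:		/* placeholder model */
		perf_miss_attr.config = X86_CONFIG(.event = 0xd1, .umask = 0x10);
		perf_hit_attr.config  = X86_CONFIG(.event = 0xd1, .umask = 0x2);
		break;
	default:
		goto out;	/* unsupported model: no measurement */
	}
	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);	/* line 1072 */
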
1153 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles() local
1166 if (!plr->d) { in pseudo_lock_measure_cycles()
1171 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1172 cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_measure_cycles()
1178 plr->cpu = cpu; in pseudo_lock_measure_cycles()
1181 thread = kthread_create_on_node(measure_cycles_lat_fn, plr, in pseudo_lock_measure_cycles()
1186 thread = kthread_create_on_node(measure_l2_residency, plr, in pseudo_lock_measure_cycles()
1191 thread = kthread_create_on_node(measure_l3_residency, plr, in pseudo_lock_measure_cycles()
1205 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1206 plr->thread_done == 1); in pseudo_lock_measure_cycles()
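pseudo_lock_measure_cycles() (lines 1153-1206) runs whichever measurement was selected on a CPU inside the locked domain, using the usual create/bind/wake/wait sequence. A condensed sketch with the selector dispatch reduced to one case; the thread name format is an assumption:

	plr->thread_done = 0;
	cpu = cpumask_first(&plr->d->cpu_mask);			/* line 1172 */
	plr->cpu = cpu;						/* line 1178 */

	thread = kthread_create_on_node(measure_l3_residency, plr,
					cpu_to_node(cpu),
					"pseudo_lock_measure/%u", cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, cpu);	/* pin to a CPU sharing the locked cache */
	wake_up_process(thread);

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);	/* lines 1205-1206 */
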
1273 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create() local
1279 ret = pseudo_lock_region_alloc(plr); in rdtgroup_pseudo_lock_create()
1283 ret = pseudo_lock_cstates_constrain(plr); in rdtgroup_pseudo_lock_create()
1289 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1292 cpu_to_node(plr->cpu), in rdtgroup_pseudo_lock_create()
1293 "pseudo_lock/%u", plr->cpu); in rdtgroup_pseudo_lock_create()
1300 kthread_bind(thread, plr->cpu); in rdtgroup_pseudo_lock_create()
1303 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1304 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1336 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, in rdtgroup_pseudo_lock_create()
1338 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1340 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1363 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1376 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1379 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_create()
1381 pseudo_lock_region_clear(plr); in rdtgroup_pseudo_lock_create()
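In rdtgroup_pseudo_lock_create() (lines 1273-1381), once the locking kthread has finished and debugfs is set up, the region is exposed to user space as a character device before the minor number is recorded at line 1363. A hedged sketch of that step; the device_create() call and the out_debugfs unwind label are assumptions consistent with the pseudo_lock_major/minor usage elsewhere in this listing:

	dev = device_create(pseudo_lock_class, NULL,
			    MKDEV(pseudo_lock_major, new_minor),
			    rdtgrp, "%s", rdtgrp->kn->name);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out_debugfs;
	}
	plr->minor = new_minor;					/* line 1363 */

The error path at lines 1376-1381 then unwinds in reverse order of setup: debugfs directory, C-state constraints, and finally the region itself.
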
1402 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove() local
1413 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_remove()
1414 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1415 device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1416 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
1475 struct pseudo_lock_region *plr; in pseudo_lock_dev_mmap() local
1489 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1491 if (!plr->d) { in pseudo_lock_dev_mmap()
1502 if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
1507 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1508 psize = plr->size - off; in pseudo_lock_dev_mmap()
1510 if (off > plr->size) { in pseudo_lock_dev_mmap()
1529 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()
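
pseudo_lock_dev_mmap() (lines 1475-1529) maps the pseudo-locked kernel buffer into the caller's address space, refusing callers whose CPU affinity extends beyond the locked domain (line 1502). A sketch of the final mapping step; the remap_pfn_range() call is an assumption consistent with the page-frame number computed at line 1507:

	memset(plr->kmem + off, 0, vsize);	/* line 1529: zero before exposing */

	if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
			    vsize, vma->vm_page_prot))
		return -EAGAIN;
	return 0;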