Lines Matching refs:plr

157 		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {  in region_find_by_minor()
175 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) in pseudo_lock_cstates_relax() argument
179 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
204 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) in pseudo_lock_cstates_constrain() argument
210 for_each_cpu(cpu, &plr->d->cpu_mask) { in pseudo_lock_cstates_constrain()
228 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
234 pseudo_lock_cstates_relax(plr); in pseudo_lock_cstates_constrain()
247 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) in pseudo_lock_region_clear() argument
249 plr->size = 0; in pseudo_lock_region_clear()
250 plr->line_size = 0; in pseudo_lock_region_clear()
251 kfree(plr->kmem); in pseudo_lock_region_clear()
252 plr->kmem = NULL; in pseudo_lock_region_clear()
253 plr->s = NULL; in pseudo_lock_region_clear()
254 if (plr->d) in pseudo_lock_region_clear()
255 plr->d->plr = NULL; in pseudo_lock_region_clear()
256 plr->d = NULL; in pseudo_lock_region_clear()
257 plr->cbm = 0; in pseudo_lock_region_clear()
258 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
279 static int pseudo_lock_region_init(struct pseudo_lock_region *plr) in pseudo_lock_region_init() argument
286 plr->cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_region_init()
288 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
290 plr->cpu); in pseudo_lock_region_init()
295 ci = get_cpu_cacheinfo(plr->cpu); in pseudo_lock_region_init()
297 plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); in pseudo_lock_region_init()
300 if (ci->info_list[i].level == plr->s->res->cache_level) { in pseudo_lock_region_init()
301 plr->line_size = ci->info_list[i].coherency_line_size; in pseudo_lock_region_init()
309 pseudo_lock_region_clear(plr); in pseudo_lock_region_init()
326 struct pseudo_lock_region *plr; in pseudo_lock_init() local
328 plr = kzalloc(sizeof(*plr), GFP_KERNEL); in pseudo_lock_init()
329 if (!plr) in pseudo_lock_init()
332 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
333 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
334 rdtgrp->plr = plr; in pseudo_lock_init()
348 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) in pseudo_lock_region_alloc() argument
352 ret = pseudo_lock_region_init(plr); in pseudo_lock_region_alloc()
360 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
366 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
367 if (!plr->kmem) { in pseudo_lock_region_alloc()
376 pseudo_lock_region_clear(plr); in pseudo_lock_region_alloc()
393 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
394 kfree(rdtgrp->plr); in pseudo_lock_free()
395 rdtgrp->plr = NULL; in pseudo_lock_free()
420 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_fn() local
471 mem_r = plr->kmem; in pseudo_lock_fn()
472 size = plr->size; in pseudo_lock_fn()
473 line_size = plr->line_size; in pseudo_lock_fn()
522 plr->thread_done = 1; in pseudo_lock_fn()
523 wake_up_interruptible(&plr->lock_thread_wq); in pseudo_lock_fn()
804 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
805 cbm_len = d->plr->s->res->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
806 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
842 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
875 struct pseudo_lock_region *plr = _plr; in measure_cycles_lat_fn() local
887 mem_r = READ_ONCE(plr->kmem); in measure_cycles_lat_fn()
893 for (i = 0; i < plr->size; i += 32) { in measure_cycles_lat_fn()
904 plr->thread_done = 1; in measure_cycles_lat_fn()
905 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_lat_fn()
941 struct pseudo_lock_region *plr, in measure_residency_fn() argument
954 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu, in measure_residency_fn()
959 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu, in measure_residency_fn()
991 line_size = READ_ONCE(plr->line_size); in measure_residency_fn()
992 mem_r = READ_ONCE(plr->kmem); in measure_residency_fn()
993 size = READ_ONCE(plr->size); in measure_residency_fn()
1059 struct pseudo_lock_region *plr = _plr; in measure_l2_residency() local
1082 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l2_residency()
1090 plr->thread_done = 1; in measure_l2_residency()
1091 wake_up_interruptible(&plr->lock_thread_wq); in measure_l2_residency()
1097 struct pseudo_lock_region *plr = _plr; in measure_l3_residency() local
1121 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts); in measure_l3_residency()
1146 plr->thread_done = 1; in measure_l3_residency()
1147 wake_up_interruptible(&plr->lock_thread_wq); in measure_l3_residency()
1165 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles() local
1178 if (!plr->d) { in pseudo_lock_measure_cycles()
1183 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1184 cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_measure_cycles()
1190 plr->cpu = cpu; in pseudo_lock_measure_cycles()
1193 thread = kthread_create_on_node(measure_cycles_lat_fn, plr, in pseudo_lock_measure_cycles()
1198 thread = kthread_create_on_node(measure_l2_residency, plr, in pseudo_lock_measure_cycles()
1203 thread = kthread_create_on_node(measure_l3_residency, plr, in pseudo_lock_measure_cycles()
1217 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1218 plr->thread_done == 1); in pseudo_lock_measure_cycles()
1285 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create() local
1291 ret = pseudo_lock_region_alloc(plr); in rdtgroup_pseudo_lock_create()
1295 ret = pseudo_lock_cstates_constrain(plr); in rdtgroup_pseudo_lock_create()
1301 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1304 cpu_to_node(plr->cpu), in rdtgroup_pseudo_lock_create()
1305 "pseudo_lock/%u", plr->cpu); in rdtgroup_pseudo_lock_create()
1312 kthread_bind(thread, plr->cpu); in rdtgroup_pseudo_lock_create()
1315 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1316 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1348 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, in rdtgroup_pseudo_lock_create()
1350 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1352 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1375 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1388 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1391 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_create()
1393 pseudo_lock_region_clear(plr); in rdtgroup_pseudo_lock_create()
1414 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove() local
1425 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_remove()
1426 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1427 device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1428 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
1487 struct pseudo_lock_region *plr; in pseudo_lock_dev_mmap() local
1501 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1503 if (!plr->d) { in pseudo_lock_dev_mmap()
1514 if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
1519 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1520 psize = plr->size - off; in pseudo_lock_dev_mmap()
1522 if (off > plr->size) { in pseudo_lock_dev_mmap()
1541 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()
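
The references above touch nearly every field of struct pseudo_lock_region. For orientation, here is a minimal sketch of that structure reconstructed from the accesses in this listing; it is not copied from the resctrl headers, so field types and ordering are assumptions and the authoritative definition is the one in the kernel tree.

/*
 * Sketch of struct pseudo_lock_region as implied by the references
 * listed above (reconstructed for orientation; types and order assumed).
 */
struct pseudo_lock_region {
	struct resctrl_schema	*s;		/* plr->s->res: resource backing the locked region */
	struct rdt_domain	*d;		/* plr->d->cpu_mask, plr->d->plr back-pointer */
	u32			cbm;		/* capacity bitmask of the pseudo-locked portion */
	wait_queue_head_t	lock_thread_wq;	/* locking/measurement threads signal completion here */
	int			thread_done;	/* set to 1 before wake_up_interruptible() */
	int			cpu;		/* CPU the locking/measurement thread runs on */
	unsigned int		line_size;	/* coherency_line_size of the target cache */
	unsigned int		size;		/* bytes to lock, from rdtgroup_cbm_to_size() */
	void			*kmem;		/* kzalloc()ed buffer that gets pseudo-locked and mmap()ed */
	unsigned int		minor;		/* character device minor used by pseudo_lock_dev_mmap() */
	struct dentry		*debugfs_dir;	/* per-region debugfs directory */
	struct list_head	pm_reqs;	/* PM QoS requests from pseudo_lock_cstates_constrain() */
};

Several of the call sites (pseudo_lock_fn from line 420, the measure_* functions, and rdtgroup_pseudo_lock_create around lines 1301-1316) share one handshake: a kthread is created, bound to plr->cpu, and the caller sleeps on lock_thread_wq until the worker sets thread_done. A condensed sketch of that flow, using a hypothetical helper name and with error handling trimmed:

/*
 * Hypothetical helper mirroring the create/bind/wait pattern used by
 * the locking and measurement paths above; not a function from the
 * kernel source.
 */
static int pseudo_lock_run_on_cpu(struct pseudo_lock_region *plr,
				  int (*fn)(void *data))
{
	struct task_struct *thread;

	plr->thread_done = 0;

	thread = kthread_create_on_node(fn, plr, cpu_to_node(plr->cpu),
					"pseudo_lock/%u", plr->cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, plr->cpu);
	wake_up_process(thread);

	/* The worker ends with: plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); */
	return wait_event_interruptible(plr->lock_thread_wq,
					plr->thread_done == 1);
}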