Lines Matching refs:plr
171 if (rdtgrp->plr && rdtgrp->plr->minor == minor) { in region_find_by_minor()
189 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) in pseudo_lock_cstates_relax() argument
193 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { in pseudo_lock_cstates_relax()
215 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) in pseudo_lock_cstates_constrain() argument
221 for_each_cpu(cpu, &plr->d->cpu_mask) { in pseudo_lock_cstates_constrain()
239 list_add(&pm_req->list, &plr->pm_reqs); in pseudo_lock_cstates_constrain()
245 pseudo_lock_cstates_relax(plr); in pseudo_lock_cstates_constrain()
258 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) in pseudo_lock_region_clear() argument
260 plr->size = 0; in pseudo_lock_region_clear()
261 plr->line_size = 0; in pseudo_lock_region_clear()
262 kfree(plr->kmem); in pseudo_lock_region_clear()
263 plr->kmem = NULL; in pseudo_lock_region_clear()
264 plr->r = NULL; in pseudo_lock_region_clear()
265 if (plr->d) in pseudo_lock_region_clear()
266 plr->d->plr = NULL; in pseudo_lock_region_clear()
267 plr->d = NULL; in pseudo_lock_region_clear()
268 plr->cbm = 0; in pseudo_lock_region_clear()
269 plr->debugfs_dir = NULL; in pseudo_lock_region_clear()
290 static int pseudo_lock_region_init(struct pseudo_lock_region *plr) in pseudo_lock_region_init() argument
297 plr->cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_region_init()
299 if (!cpu_online(plr->cpu)) { in pseudo_lock_region_init()
301 plr->cpu); in pseudo_lock_region_init()
306 ci = get_cpu_cacheinfo(plr->cpu); in pseudo_lock_region_init()
308 plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm); in pseudo_lock_region_init()
311 if (ci->info_list[i].level == plr->r->cache_level) { in pseudo_lock_region_init()
312 plr->line_size = ci->info_list[i].coherency_line_size; in pseudo_lock_region_init()
320 pseudo_lock_region_clear(plr); in pseudo_lock_region_init()
337 struct pseudo_lock_region *plr; in pseudo_lock_init() local
339 plr = kzalloc(sizeof(*plr), GFP_KERNEL); in pseudo_lock_init()
340 if (!plr) in pseudo_lock_init()
343 init_waitqueue_head(&plr->lock_thread_wq); in pseudo_lock_init()
344 INIT_LIST_HEAD(&plr->pm_reqs); in pseudo_lock_init()
345 rdtgrp->plr = plr; in pseudo_lock_init()
359 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) in pseudo_lock_region_alloc() argument
363 ret = pseudo_lock_region_init(plr); in pseudo_lock_region_alloc()
371 if (plr->size > KMALLOC_MAX_SIZE) { in pseudo_lock_region_alloc()
377 plr->kmem = kzalloc(plr->size, GFP_KERNEL); in pseudo_lock_region_alloc()
378 if (!plr->kmem) { in pseudo_lock_region_alloc()
387 pseudo_lock_region_clear(plr); in pseudo_lock_region_alloc()
404 pseudo_lock_region_clear(rdtgrp->plr); in pseudo_lock_free()
405 kfree(rdtgrp->plr); in pseudo_lock_free()
406 rdtgrp->plr = NULL; in pseudo_lock_free()
431 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_fn() local
484 mem_r = plr->kmem; in pseudo_lock_fn()
485 size = plr->size; in pseudo_lock_fn()
486 line_size = plr->line_size; in pseudo_lock_fn()
535 plr->thread_done = 1; in pseudo_lock_fn()
536 wake_up_interruptible(&plr->lock_thread_wq); in pseudo_lock_fn()
817 if (d->plr) { in rdtgroup_cbm_overlaps_pseudo_locked()
818 cbm_len = d->plr->r->cache.cbm_len; in rdtgroup_cbm_overlaps_pseudo_locked()
819 cbm_b = d->plr->cbm; in rdtgroup_cbm_overlaps_pseudo_locked()
855 if (d_i->plr) in rdtgroup_pseudo_locked_in_hierarchy()
888 struct pseudo_lock_region *plr = _plr; in measure_cycles_lat_fn() local
915 mem_r = plr->kmem; in measure_cycles_lat_fn()
921 for (i = 0; i < plr->size; i += 32) { in measure_cycles_lat_fn()
932 plr->thread_done = 1; in measure_cycles_lat_fn()
933 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_lat_fn()
941 struct pseudo_lock_region *plr = _plr; in measure_cycles_perf_fn() local
1041 mem_r = plr->kmem; in measure_cycles_perf_fn()
1042 size = plr->size; in measure_cycles_perf_fn()
1043 line_size = plr->line_size; in measure_cycles_perf_fn()
1089 plr->thread_done = 1; in measure_cycles_perf_fn()
1090 wake_up_interruptible(&plr->lock_thread_wq); in measure_cycles_perf_fn()
1106 struct pseudo_lock_region *plr = rdtgrp->plr; in pseudo_lock_measure_cycles() local
1119 plr->thread_done = 0; in pseudo_lock_measure_cycles()
1120 cpu = cpumask_first(&plr->d->cpu_mask); in pseudo_lock_measure_cycles()
1127 thread = kthread_create_on_node(measure_cycles_lat_fn, plr, in pseudo_lock_measure_cycles()
1132 thread = kthread_create_on_node(measure_cycles_perf_fn, plr, in pseudo_lock_measure_cycles()
1146 ret = wait_event_interruptible(plr->lock_thread_wq, in pseudo_lock_measure_cycles()
1147 plr->thread_done == 1); in pseudo_lock_measure_cycles()
1214 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_create() local
1220 ret = pseudo_lock_region_alloc(plr); in rdtgroup_pseudo_lock_create()
1224 ret = pseudo_lock_cstates_constrain(plr); in rdtgroup_pseudo_lock_create()
1230 plr->thread_done = 0; in rdtgroup_pseudo_lock_create()
1233 cpu_to_node(plr->cpu), in rdtgroup_pseudo_lock_create()
1234 "pseudo_lock/%u", plr->cpu); in rdtgroup_pseudo_lock_create()
1241 kthread_bind(thread, plr->cpu); in rdtgroup_pseudo_lock_create()
1244 ret = wait_event_interruptible(plr->lock_thread_wq, in rdtgroup_pseudo_lock_create()
1245 plr->thread_done == 1); in rdtgroup_pseudo_lock_create()
1277 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, in rdtgroup_pseudo_lock_create()
1279 if (!IS_ERR_OR_NULL(plr->debugfs_dir)) in rdtgroup_pseudo_lock_create()
1281 plr->debugfs_dir, rdtgrp, in rdtgroup_pseudo_lock_create()
1304 plr->minor = new_minor; in rdtgroup_pseudo_lock_create()
1317 debugfs_remove_recursive(plr->debugfs_dir); in rdtgroup_pseudo_lock_create()
1320 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_create()
1322 pseudo_lock_region_clear(plr); in rdtgroup_pseudo_lock_create()
1343 struct pseudo_lock_region *plr = rdtgrp->plr; in rdtgroup_pseudo_lock_remove() local
1354 pseudo_lock_cstates_relax(plr); in rdtgroup_pseudo_lock_remove()
1355 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); in rdtgroup_pseudo_lock_remove()
1356 device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); in rdtgroup_pseudo_lock_remove()
1357 pseudo_lock_minor_release(plr->minor); in rdtgroup_pseudo_lock_remove()
1416 struct pseudo_lock_region *plr; in pseudo_lock_dev_mmap() local
1430 plr = rdtgrp->plr; in pseudo_lock_dev_mmap()
1438 if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) { in pseudo_lock_dev_mmap()
1443 physical = __pa(plr->kmem) >> PAGE_SHIFT; in pseudo_lock_dev_mmap()
1444 psize = plr->size - off; in pseudo_lock_dev_mmap()
1446 if (off > plr->size) { in pseudo_lock_dev_mmap()
1465 memset(plr->kmem + off, 0, vsize); in pseudo_lock_dev_mmap()