/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

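/* Count the performance monitoring domains attached to the PM engine. */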
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;
	return domain_nr;
}

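/* Count the named signals exposed by a performance domain. */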
static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

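/* Look up a performance domain by its index in the PM engine's domain list. */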
static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}

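/* Look up signal 'si' in domain 'di', caching the domain in *pdom for reuse. */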
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

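/* Count the sources that can be multiplexed onto a signal. */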
static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}

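/* Resolve a (1-based) source ID into its descriptor, after verifying that the
 * source is actually valid for the given signal. */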
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* source IDs start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}

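/* Program the source multiplexers used by a counter: for each signal/source
 * pair, write the source value into its mux field and set the enable bit if
 * the source has one. */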
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}

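/* Clear the mux field (and enable bit, if any) of every source used by a
 * counter. */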
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
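/* NVIF_PERFDOM_V0_INIT: program and enable each allocated counter, then latch
 * the first batch of samples. */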
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}

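/* NVIF_PERFDOM_V0_SAMPLE: bump the global sequence number and latch the next
 * batch of counters on every domain. */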
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}

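/* NVIF_PERFDOM_V0_READ: read back this domain's counters; -EAGAIN is returned
 * until the domain clock counter is valid. */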
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}

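/* Allocate a counter in 'slot' of a domain and bind the requested signals and
 * sources to it. */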
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain = domain;
	ctr->logic_op = logic_op;
	ctr->slot = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};

static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
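/* NVIF_PERFMON_V0_QUERY_DOMAIN: iterator-style query of the available domains;
 * args->v0.iter is a 1-based cursor and 0xff marks the end of iteration. */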
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id = di;
		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

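/* NVIF_PERFMON_V0_QUERY_SIGNAL: iterate over the signals of a domain; unnamed
 * signals are skipped unless NvPmShowAll is set, and raw "/dom/xx" names are
 * reported when NvPmUnnamed is set. */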
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret = -ENOSYS, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name) - 1);
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}

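/* NVIF_PERFMON_V0_QUERY_SOURCE: iterate over the sources that can drive a
 * given signal, reporting each source's mask and name. */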
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_CLASS_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;
	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/

static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_CLASS_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}

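/* Register the sources described by a specsrc/specmux table with the PM
 * engine (deduplicated by register address and shift) and record their
 * 1-based IDs in sig->source[]. */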
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr = ssrc->addr;
				src->mask = smux->mask;
				src->shift = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}

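/* Instantiate performance domains from a specdom table: one set per unit bit
 * set in 'mask' (plus the base instance), named "<name>[/unit]/domain", along
 * with their signal and source descriptors. */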
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
				      GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}

static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}

static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};

int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}