/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
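 * As a rough usage sketch (not a verified example; the names are taken from
 * the uapi headers, while metrics_set_id and period_exponent are placeholder
 * values chosen by the caller), a stream is opened by passing an array of
 * u64 (property, value) pairs to DRM_IOCTL_I915_PERF_OPEN::
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4, /* number of (property, value) pairs */
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Each read() on the returned fd then yields a sequence of records, each
 * beginning with a struct drm_i915_perf_record_header.
 *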
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we expect the
 * interface to be used by platform specific userspace such as OpenGL or tools.
 * This is to say: we aren't inherently missing out on having a standard
 * vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we periodically forwarded data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
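
/*
 * A worked example of the modular arithmetic above (illustrative values, not
 * taken from the PRM): with the 16M buffer the mask is 0xffffff, so for
 * head = 0xffffc0 and a tail that has wrapped around to 0x40,
 * OA_TAKEN(0x40, 0xffffc0) = (0x40 - 0xffffc0) & 0xffffff = 0x80 bytes of
 * reports available to read.
 */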

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
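 *
 * A sketch of the arithmetic (assuming Haswell's 12.5MHz timestamp frequency,
 * i.e. an 80ns tick, implied by the 6.25MHz figure below): an exponent of n
 * gives a sampling period of 2^(n + 1) timestamp ticks, so the maximum
 * exponent of 63 would sample once per 2^64 * 80ns, roughly once per 47
 * thousand years, while an exponent of 31 gives 2^32 * 80ns ~= 344 seconds,
 * around the longest period for which overflow of the 32bit report
 * timestamps can still be accounted for.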
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
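 *
 * For example (illustrative arithmetic, assuming Haswell's 80ns timestamp
 * tick): the minimum exponent of 0 gives a period of 2 ticks, the 160ns
 * mentioned above, i.e. a 6.25MHz sample rate, while the 100000Hz default
 * cap corresponds to sampling no more than once every 10us.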
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT	(1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics, the configuration is built up in this
 * structure, which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and
 * determine if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies a single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context, it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering that we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow does anyway likely indicate that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GEN_RANGE(uncore->i915, 8, 10) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		} else {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		/*
		 * Pick an unused context id
		 * 0 - BITS_PER_LONG are used by other contexts
		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
		 */
		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);
}

alloc_oa_buffer(struct i915_perf_stream * stream)1557 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1558 {
1559 struct drm_i915_private *i915 = stream->perf->i915;
1560 struct drm_i915_gem_object *bo;
1561 struct i915_vma *vma;
1562 int ret;
1563
1564 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1565 return -ENODEV;
1566
1567 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1568 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1569
1570 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1571 if (IS_ERR(bo)) {
1572 drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1573 return PTR_ERR(bo);
1574 }
1575
1576 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1577
1578 /* PreHSW required 512K alignment, HSW requires 16M */
1579 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1580 if (IS_ERR(vma)) {
1581 ret = PTR_ERR(vma);
1582 goto err_unref;
1583 }
1584 stream->oa_buffer.vma = vma;
1585
1586 stream->oa_buffer.vaddr =
1587 i915_gem_object_pin_map(bo, I915_MAP_WB);
1588 if (IS_ERR(stream->oa_buffer.vaddr)) {
1589 ret = PTR_ERR(stream->oa_buffer.vaddr);
1590 goto err_unpin;
1591 }
1592
1593 return 0;
1594
1595 err_unpin:
1596 __i915_vma_unpin(vma);
1597
1598 err_unref:
1599 i915_gem_object_put(bo);
1600
1601 stream->oa_buffer.vaddr = NULL;
1602 stream->oa_buffer.vma = NULL;
1603
1604 return ret;
1605 }
1606
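/*
 * Emit one MI_STORE_REGISTER_MEM (save) or MI_LOAD_REGISTER_MEM (restore)
 * command per dword, copying @dword_count dwords of register @reg to/from
 * the GT scratch area at @offset. Used below to preserve the CS GPRs and
 * the predicate register around the NOA wait batch.
 */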
static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	cmd |= MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
						offset) + 4 * d;
		*cs++ = 0;
	}

	return cs;
}

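/*
 * Build a batchbuffer implementing a busy wait on the GPU: snapshot
 * RING_TIMESTAMP into a CS GPR, then loop via predicated
 * MI_BATCH_BUFFER_START jumps (driven by MI_MATH results loaded into the
 * predicate register) until noa_programming_delay has elapsed. OA config
 * batches jump in here after programming the NOA mux, so the wait happens
 * on the GPU without stalling the CPU.
 */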
static int alloc_noa_wait(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	const u64 delay_ticks = 0xffffffffffffffff -
		i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
	const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
	u32 *batch, *ts0, *cs, *jump;
	int ret, i;
	enum {
		START_TS,
		NOW_TS,
		DELTA_TS,
		JUMP_PREDICATE,
		DELTA_TARGET,
		N_CS_GPR
	};

	bo = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm,
			"Failed to allocate NOA wait batchbuffer\n");
		return PTR_ERR(bo);
	}

	/*
	 * We pin in GGTT because multiple OA config BOs will jump to this
	 * address, so it needs to stay fixed for the lifetime of the
	 * i915/perf stream.
	 */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(batch)) {
		ret = PTR_ERR(batch);
		goto err_unpin;
	}

	/* Save registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, true /* save */, CS_GPR(i),
			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);

	/* First timestamp snapshot location. */
	ts0 = cs;

	/*
	 * Initial snapshot of the timestamp register to implement the wait.
	 * We work with 32b values, so clear the top 32 bits of the register
	 * because the ALU operates on 64-bit values.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));

	/*
	 * This is the location we're going to jump back into until the
	 * required amount of time has passed.
	 */
	jump = cs;

	/*
	 * Take another snapshot of the timestamp register. Take care to
	 * clear the top 32 bits of CS_GPR(1) as we're using it for other
	 * operations below.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));

	/*
	 * Do a diff between the two timestamps and store the result back
	 * into CS_GPR(1).
	 */
	*cs++ = MI_MATH(5);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
	*cs++ = MI_MATH_SUB;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	/*
	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
	 * timestamp has rolled over the 32 bits) into the predicate
	 * register to be used for the predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);

	/* Restart from the beginning if we had timestamps roll over. */
	*cs++ = (INTEL_GEN(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
	*cs++ = 0;

	/*
	 * Now add the delta of the two timestamps to delay_ticks, which
	 * holds ((1 << 64) - 1) - expected delay (in timestamp ticks).
	 *
	 * When the carry flag contains 1 this means the elapsed time is
	 * longer than the expected delay, and we can exit the wait loop.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
	*cs++ = lower_32_bits(delay_ticks);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
	*cs++ = upper_32_bits(delay_ticks);

	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	*cs++ = MI_ARB_CHECK;

	/*
	 * Transfer the result into the predicate register to be used for the
	 * predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);

	/* Predicate the jump. */
	*cs++ = (INTEL_GEN(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
	*cs++ = 0;

	/* Restore registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, false /* restore */, CS_GPR(i),
			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);

	/* And return to the ring. */
	*cs++ = MI_BATCH_BUFFER_END;

	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));

	i915_gem_object_flush_map(bo);
	__i915_gem_object_release_map(bo);

	stream->noa_wait = vma;
	return 0;

err_unpin:
	i915_vma_unpin_and_release(&vma, 0);
err_unref:
	i915_gem_object_put(bo);
	return ret;
}

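/*
 * Emit the given (register, value) pairs as MI_LOAD_REGISTER_IMM packets,
 * starting a fresh packet every MI_LOAD_REGISTER_IMM_MAX_REGS registers
 * since a single MI_LRI can only carry that many writes.
 */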
static u32 *write_cs_mi_lri(u32 *cs,
			    const struct i915_oa_reg *reg_data,
			    u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
			u32 n_lri = min_t(u32,
					  n_regs - i,
					  MI_LOAD_REGISTER_IMM_MAX_REGS);

			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
		}
		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
		*cs++ = reg_data[i].value;
	}

	return cs;
}

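/*
 * Dword budget for write_cs_mi_lri(): one MI_LRI header per packet of up
 * to MI_LOAD_REGISTER_IMM_MAX_REGS registers, plus an (offset, value)
 * dword pair per register.
 */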
static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
		count += num_regs * 2;
	}

	return count;
}

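/*
 * Write out @oa_config (mux, boolean counter and flex EU registers) as a
 * batch of MI_LRIs terminated by a jump into the shared NOA wait batch,
 * and track the resulting BO on the stream's oa_config_bos list so it can
 * be reused by later reconfigurations.
 */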
static struct i915_oa_config_bo *
alloc_oa_config_buffer(struct i915_perf_stream *stream,
		       struct i915_oa_config *oa_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_oa_config_bo *oa_bo;
	size_t config_length = 0;
	u32 *cs;
	int err;

	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
	if (!oa_bo)
		return ERR_PTR(-ENOMEM);

	config_length += num_lri_dwords(oa_config->mux_regs_len);
	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
	config_length += num_lri_dwords(oa_config->flex_regs_len);
	config_length += 3; /* MI_BATCH_BUFFER_START */
	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_free;
	}

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_oa_bo;
	}

	cs = write_cs_mi_lri(cs,
			     oa_config->mux_regs,
			     oa_config->mux_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->b_counter_regs,
			     oa_config->b_counter_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->flex_regs,
			     oa_config->flex_regs_len);

	/* Jump into the active wait. */
	*cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8);
	*cs++ = i915_ggtt_offset(stream->noa_wait);
	*cs++ = 0;

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	oa_bo->vma = i915_vma_instance(obj,
				       &stream->engine->gt->ggtt->vm,
				       NULL);
	if (IS_ERR(oa_bo->vma)) {
		err = PTR_ERR(oa_bo->vma);
		goto err_oa_bo;
	}

	oa_bo->oa_config = i915_oa_config_get(oa_config);
	llist_add(&oa_bo->node, &stream->oa_config_bos);

	return oa_bo;

err_oa_bo:
	i915_gem_object_put(obj);
err_free:
	kfree(oa_bo);
	return ERR_PTR(err);
}

static struct i915_vma *
get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
{
	struct i915_oa_config_bo *oa_bo;

	/*
	 * Look for the buffer in the already allocated BOs attached
	 * to the stream.
	 */
	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
		if (oa_bo->oa_config == oa_config &&
		    memcmp(oa_bo->oa_config->uuid,
			   oa_config->uuid,
			   sizeof(oa_config->uuid)) == 0)
			goto out;
	}

	oa_bo = alloc_oa_config_buffer(stream, oa_config);
	if (IS_ERR(oa_bo))
		return ERR_CAST(oa_bo);

out:
	return i915_vma_get(oa_bo->vma);
}

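/*
 * Pin the config batch for @oa_config in the GGTT and submit it from @ce.
 * When @active is provided, the request is ordered after any outstanding
 * per-context modifications tracked there, and is itself added to @active
 * so callers can wait for the configuration to land on the HW.
 */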
static int
emit_oa_config(struct i915_perf_stream *stream,
	       struct i915_oa_config *oa_config,
	       struct intel_context *ce,
	       struct i915_active *active)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int err;

	vma = get_oa_vma(stream, oa_config);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto err;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma_unpin;
	}

	if (!IS_ERR_OR_NULL(active)) {
		/* After all individual context modifications */
		err = i915_request_await_active(rq, active,
						I915_ACTIVE_AWAIT_ACTIVE);
		if (err)
			goto err_add_request;

		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = i915_request_await_object(rq, vma->obj, 0);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto err_add_request;

	err = rq->engine->emit_bb_start(rq,
					vma->node.start, 0,
					I915_DISPATCH_SECURE);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
err_vma_unpin:
	i915_vma_unpin(vma);
err:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}

	i915_gem_ww_ctx_fini(&ww);
	i915_vma_put(vma);
	return err;
}

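/*
 * The context used to emit OA configs: the pinned target context when
 * filtering on a single context, otherwise the engine's kernel context.
 */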
static struct intel_context *oa_context(struct i915_perf_stream *stream)
{
	return stream->pinned_ctx ?: stream->engine->kernel_context;
}

static int
hsw_enable_metric_set(struct i915_perf_stream *stream,
		      struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;

	/*
	 * PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 0, GEN7_DOP_CLOCK_GATE_ENABLE);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
			      i915_reg_t reg)
{
	u32 mmio = i915_mmio_reg_offset(reg);
	int i;

	/*
	 * This arbitrary default will select the 'EU FPU0 Pipeline
	 * Active' event. In the future it's anticipated that there
	 * will be an explicit 'No Event' we can select, but not yet...
	 */
	if (!oa_config)
		return 0;

	for (i = 0; i < oa_config->flex_regs_len; i++) {
		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
			return oa_config->flex_regs[i].value;
	}

	return 0;
}

/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void
gen8_update_reg_state_unlocked(const struct intel_context *ce,
			       const struct i915_perf_stream *stream)
{
	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	i915_reg_t flex_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	u32 *reg_state = ce->lrc_reg_state;
	int i;

	reg_state[ctx_oactxctrl + 1] =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
		reg_state[ctx_flexeu0 + i * 2 + 1] =
			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
}

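/*
 * A register write expressed both ways: @reg for emitting via MI_LRI, and
 * @offset as a dword index into the logical ring context image for poking
 * the saved context state directly.
 */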
struct flex {
	i915_reg_t reg;
	u32 offset;
	u32 value;
};

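/*
 * Update flex register values stored in @ce's context image using
 * MI_STORE_DWORD_IMM writes through the GGTT, so the new values take
 * effect the next time the context is scheduled in.
 */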
static int
gen8_store_flex(struct i915_request *rq,
		struct intel_context *ce,
		const struct flex *flex, unsigned int count)
{
	u32 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
	do {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = offset + flex->offset * sizeof(u32);
		*cs++ = 0;
		*cs++ = flex->value;
	} while (flex++, --count);

	intel_ring_advance(rq, cs);

	return 0;
}

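/*
 * Program the flex registers live with a single MI_LOAD_REGISTER_IMM
 * emitted from the target context itself, for contexts we can submit to
 * directly.
 */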
static int
gen8_load_flex(struct i915_request *rq,
	       struct intel_context *ce,
	       const struct flex *flex, unsigned int count)
{
	u32 *cs;

	GEM_BUG_ON(!count || count > 63);

	cs = intel_ring_begin(rq, 2 * count + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*cs++ = i915_mmio_reg_offset(flex->reg);
		*cs++ = flex->value;
	} while (flex++, --count);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

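/*
 * Rewrite another context's image from a kernel-context request,
 * serialised against the remote context so the image isn't rewritten
 * while the GPU may still be saving state into it.
 */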
static int gen8_modify_context(struct intel_context *ce,
			       const struct flex *flex, unsigned int count)
{
	struct i915_request *rq;
	int err;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Serialise with the remote context */
	err = intel_context_prepare_remote_request(ce, rq);
	if (err == 0)
		err = gen8_store_flex(rq, ce, flex, count);

	i915_request_add(rq);
	return err;
}

static int
gen8_modify_self(struct intel_context *ce,
		 const struct flex *flex, unsigned int count,
		 struct i915_active *active)
{
	struct i915_request *rq;
	int err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (!IS_ERR_OR_NULL(active)) {
		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = gen8_load_flex(rq, ce, flex, count);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
	return err;
}

static int gen8_configure_context(struct i915_gem_context *ctx,
				  struct flex *flex, unsigned int count)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		GEM_BUG_ON(ce == ce->engine->kernel_context);

		if (ce->engine->class != RENDER_CLASS)
			continue;

		/* Otherwise OA settings will be set upon first use */
		if (!intel_context_pin_if_active(ce))
			continue;

		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
		err = gen8_modify_context(ce, flex, count);

		intel_context_unpin(ce);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

static int gen12_configure_oar_context(struct i915_perf_stream *stream,
				       struct i915_active *active)
{
	int err;
	struct intel_context *ce = stream->pinned_ctx;
	u32 format = stream->oa_buffer.format;
	struct flex regs_context[] = {
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
			active ? GEN8_OA_COUNTER_RESUME : 0,
		},
	};
	/* Offsets in regs_lri are not used since this configuration is only
	 * applied using LRI. Initialize the correct offsets for posterity.
	 */
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
	struct flex regs_lri[] = {
		{
			GEN12_OAR_OACONTROL,
			GEN12_OAR_OACONTROL_OFFSET + 1,
			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
		},
		{
			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
			CTX_CONTEXT_CONTROL,
			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
				      active ?
				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
				      0)
		},
	};

	/* Modify the context image of pinned context with regs_context */
	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
	intel_context_unlock_pinned(ce);
	if (err)
		return err;

	/* Apply regs_lri using LRI with pinned context */
	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 * Note: the first flex register passed must always be R_PWR_CLK_STATE
 */
static int
oa_configure_all_contexts(struct i915_perf_stream *stream,
			  struct flex *regs,
			  size_t num_regs,
			  struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx, *cn;
	int err;

	lockdep_assert_held(&stream->perf->lock);

	/*
	 * The OA register config is set up through the context image. This
	 * image might be written to by the GPU on context switch (in
	 * particular on lite-restore). This means we can't safely update a
	 * context's image, if this context is scheduled/submitted to run on
	 * the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * Note that since we emit all requests from a single ring, there
	 * is still an implicit global barrier here that may cause a high
	 * priority context to wait for an otherwise independent low priority
	 * context. Contexts idle at the time of reconfiguration are not
	 * trapped behind the barrier.
	 */
	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		err = gen8_configure_context(ctx, regs, num_regs);
		if (err) {
			i915_gem_context_put(ctx);
			return err;
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	/*
	 * After updating all other contexts, we need to modify ourselves.
	 * If we don't modify the kernel_context, we do not get events while
	 * idle.
	 */
	for_each_uabi_engine(engine, i915) {
		struct intel_context *ce = engine->kernel_context;

		if (engine->class != RENDER_CLASS)
			continue;

		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		err = gen8_modify_self(ce, regs, num_regs, active);
		if (err)
			return err;
	}

	return 0;
}

static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
			     const struct i915_oa_config *oa_config,
			     struct i915_active *active)
{
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
	};

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
			   const struct i915_oa_config *oa_config,
			   struct i915_active *active)
{
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
		},
		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
	};
#undef ctx_flexeuN
	int i;

	regs[1].value =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 2; i < ARRAY_SIZE(regs); i++)
		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
gen8_enable_metric_set(struct i915_perf_stream *stream,
		       struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed, causing a lot of redundant
	 * work for processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature, it's worth noting that clock ratio reports have to be
	 * disabled before considering use of that feature since the HW
	 * doesn't correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
		intel_uncore_write(uncore, GEN8_OA_DEBUG,
				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to
	 * NOA registers.
	 */
	ret = lrc_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

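/*
 * Compute the masked-write value telling the OAG unit whether to emit
 * context-switch reports: they are only kept when userspace is actually
 * sampling OA reports.
 */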
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}

static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
			struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	int ret;

	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
			   /* Disable clk ratio reports, like previous Gens. */
			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
			   /*
			    * If the user didn't require OA reports, instruct
			    * the hardware not to emit ctx switch reports.
			    */
			   oag_report_ctx_switches(stream));

	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
			    : 0);

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to
	 * NOA registers.
	 */
	ret = gen12_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	/*
	 * For Gen12, performance counters are context
	 * saved/restored. Only enable it for the context that
	 * requested this.
	 */
	if (stream->ctx) {
		ret = gen12_configure_oar_context(stream, active);
		if (ret)
			return ret;
	}

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	gen12_configure_all_contexts(stream, NULL, NULL);

	/* Disable the context save/restore of OAR counters. */
	if (stream->ctx)
		gen12_configure_oar_context(stream, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen7_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_gem_context *ctx = stream->ctx;
	u32 ctx_id = stream->specific_ctx_id;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen7_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
}

static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory which this helps maintain.
	 */
	gen8_init_oa_buffer(stream);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	intel_uncore_write(uncore, GEN8_OACONTROL,
			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
			   GEN8_OA_COUNTER_ENABLE);
}

static void gen12_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * If we don't want OA reports from the OA buffer, then we don't even
	 * need to program the OAG unit.
	 */
	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
		return;

	gen12_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	stream->pollin = false;

	stream->perf->ops.oa_enable(stream);

	if (stream->periodic)
		hrtimer_start(&stream->poll_check_timer,
			      ns_to_ktime(stream->poll_oa_period),
			      HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen8_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen12_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN12_OAG_OACONTROL,
				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");

	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
	if (intel_wait_for_register(uncore,
				    GEN12_OA_TLB_INV_CR,
				    1, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA tlb invalidate timed out\n");
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	stream->perf->ops.oa_disable(stream);

	if (stream->periodic)
		hrtimer_cancel(&stream->poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};

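/*
 * Apply the stream's metric set and synchronously wait for all of the
 * resulting configuration requests, tracked via a temporary i915_active,
 * to complete before returning.
 */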
static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
	struct i915_active *active;
	int err;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);

	i915_active_put(active);
	return err;
}

static void
get_default_sseu_config(struct intel_sseu *out_sseu,
			struct intel_engine_cs *engine)
{
	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;

	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);

	if (IS_GEN(engine->i915, 11)) {
		/*
		 * We only need the subslice count, so it doesn't matter
		 * which subslices we select - just keep the low half of
		 * all available subslices per slice.
		 */
		out_sseu->subslice_mask =
			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
		out_sseu->slice_mask = 0x1;
	}
}

static int
get_sseu_config(struct intel_sseu *out_sseu,
		struct intel_engine_cs *engine,
		const struct drm_i915_gem_context_param_sseu *drm_sseu)
{
	if (drm_sseu->engine.engine_class != engine->uabi_class ||
	    drm_sseu->engine.engine_instance != engine->uabi_instance)
		return -EINVAL;

	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}

/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct i915_perf *perf = stream->perf;
	int format_size;
	int ret;

	if (!props->engine) {
		DRM_DEBUG("OA engine not specified\n");
		return -EINVAL;
	}

	/*
	 * If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!perf->metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
	    (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
		DRM_DEBUG("Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!perf->ops.enable_metric_set) {
		DRM_DEBUG("OA unit not supported\n");
		return -ENODEV;
	}

	/*
	 * To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (perf->exclusive_stream) {
		DRM_DEBUG("OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		DRM_DEBUG("OA report format not specified\n");
		return -EINVAL;
	}

	stream->engine = props->engine;
	stream->uncore = stream->engine->gt->uncore;

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = perf->oa_formats[props->oa_format].size;

	stream->sample_flags = props->sample_flags;
	stream->sample_size += format_size;

	stream->oa_buffer.format_size = format_size;
	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
		return -EINVAL;

	stream->hold_preemption = props->hold_preemption;

	stream->oa_buffer.format =
		perf->oa_formats[props->oa_format].format;

	stream->periodic = props->oa_periodic;
	if (stream->periodic)
		stream->period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret) {
			DRM_DEBUG("Invalid context id to filter with\n");
			return ret;
		}
	}

	ret = alloc_noa_wait(stream);
	if (ret) {
		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
		goto err_noa_wait_alloc;
	}

	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
	if (!stream->oa_config) {
		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
		ret = -EINVAL;
		goto err_config;
	}

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 *   In our case we are expecting that taking pm + FORCEWAKE
	 *   references will effectively disable RC6.
	 */
	intel_engine_pm_get(stream->engine);
	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(stream);
	if (ret)
		goto err_oa_buf_alloc;

	stream->ops = &i915_oa_stream_ops;

	perf->sseu = props->sseu;
	WRITE_ONCE(perf->exclusive_stream, stream);

	ret = i915_perf_stream_enable_sync(stream);
	if (ret) {
		DRM_DEBUG("Unable to enable metric set\n");
		goto err_enable;
	}

	DRM_DEBUG("opening stream oa config uuid=%s\n",
		  stream->oa_config->uuid);

	hrtimer_init(&stream->poll_check_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stream->poll_check_timer.function = oa_poll_check_timer_cb;
	init_waitqueue_head(&stream->poll_wq);
	spin_lock_init(&stream->oa_buffer.ptr_lock);

	return 0;

err_enable:
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

err_oa_buf_alloc:
	free_oa_configs(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

err_config:
	free_noa_wait(stream);

err_noa_wait_alloc:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}

void i915_oa_init_reg_state(const struct intel_context *ce,
			    const struct intel_engine_cs *engine)
{
	struct i915_perf_stream *stream;

	if (engine->class != RENDER_CLASS)
		return;

	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
	if (stream && INTEL_GEN(stream->perf->i915) < 12)
		gen8_update_reg_state_unlocked(ce, stream);
}

/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to i915_perf_read_locked() and
 * &i915_perf_stream_ops->read but to save having stream implementations (of
 * which we might have multiple later) we handle blocking read here.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	size_t offset = 0;
	int ret;

	/* To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled)
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&perf->lock);
			ret = stream->ops->read(stream, buf, count, &offset);
			mutex_unlock(&perf->lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&perf->lock);
		ret = stream->ops->read(stream, buf, count, &offset);
		mutex_unlock(&perf->lock);
	}

	/* We allow the poll checking to sometimes report false positive EPOLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a EPOLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another EPOLLIN event.
	 * The exception to this is if ops->read() returned -ENOSPC which means
	 * that more OA data is available than could fit in the user provided
	 * buffer. In this case we want the next poll() call to not block.
	 */
	if (ret != -ENOSPC)
		stream->pollin = false;

	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
	return offset ?: (ret ?: -EAGAIN);
}

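/*
 * hrtimer callback armed while a periodic stream is enabled: checks the
 * OA buffer for new data, flags the stream pollable and wakes any
 * read()/poll() waiters, then re-arms itself at poll_oa_period.
 */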
oa_poll_check_timer_cb(struct hrtimer * hrtimer)3069 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3070 {
3071 struct i915_perf_stream *stream =
3072 container_of(hrtimer, typeof(*stream), poll_check_timer);
3073
3074 if (oa_buffer_check_unlocked(stream)) {
3075 stream->pollin = true;
3076 wake_up(&stream->poll_wq);
3077 }
3078
3079 hrtimer_forward_now(hrtimer,
3080 ns_to_ktime(stream->poll_oa_period));
3081
3082 return HRTIMER_RESTART;
3083 }
3084
3085 /**
3086 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3087 * @stream: An i915 perf stream
3088 * @file: An i915 perf stream file
3089 * @wait: poll() state table
3090 *
3091 * For handling userspace polling on an i915 perf stream, this calls through to
3092 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3093 * will be woken for new stream data.
3094 *
3095 * Note: The &perf->lock mutex has been taken to serialize
3096 * with any non-file-operation driver hooks.
3097 *
3098 * Returns: any poll events that are ready without sleeping
3099 */
i915_perf_poll_locked(struct i915_perf_stream * stream,struct file * file,poll_table * wait)3100 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3101 struct file *file,
3102 poll_table *wait)
3103 {
3104 __poll_t events = 0;
3105
3106 stream->ops->poll_wait(stream, file, wait);
3107
3108 /* Note: we don't explicitly check whether there's something to read
3109 * here since this path may be very hot depending on what else
3110 * userspace is polling, or on the timeout in use. We rely solely on
3111 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3112 * samples to read.
3113 */
3114 if (stream->pollin)
3115 events |= EPOLLIN;
3116
3117 return events;
3118 }
3119
3120 /**
3121 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3122 * @file: An i915 perf stream file
3123 * @wait: poll() state table
3124 *
3125 * For handling userspace polling on an i915 perf stream, this ensures
3126 * poll_wait() gets called with a wait queue that will be woken for new stream
3127 * data.
3128 *
3129 * Note: Implementation deferred to i915_perf_poll_locked()
3130 *
3131 * Returns: any poll events that are ready without sleeping
3132 */
i915_perf_poll(struct file * file,poll_table * wait)3133 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3134 {
3135 struct i915_perf_stream *stream = file->private_data;
3136 struct i915_perf *perf = stream->perf;
3137 __poll_t ret;
3138
3139 mutex_lock(&perf->lock);
3140 ret = i915_perf_poll_locked(stream, file, wait);
3141 mutex_unlock(&perf->lock);
3142
3143 return ret;
3144 }
3145
3146 /**
3147 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3148 * @stream: A disabled i915 perf stream
3149 *
3150 * [Re]enables the associated capture of data for this stream.
3151 *
3152 * If a stream was previously enabled then there's currently no intention
3153 * to provide userspace any guarantee about the preservation of previously
3154 * buffered data.
3155 */
3156 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3157 {
3158 if (stream->enabled)
3159 return;
3160
3161 /* Allow stream->ops->enable() to refer to this */
3162 stream->enabled = true;
3163
3164 if (stream->ops->enable)
3165 stream->ops->enable(stream);
3166
3167 if (stream->hold_preemption)
3168 intel_context_set_nopreempt(stream->pinned_ctx);
3169 }
3170
3171 /**
3172 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3173 * @stream: An enabled i915 perf stream
3174 *
3175 * Disables the associated capture of data for this stream.
3176 *
3177 * The intention is that disabling and re-enabling a stream will ideally be
3178 * cheaper than destroying and re-opening a stream with the same configuration,
3179 * though there are no formal guarantees about what state or buffered data
3180 * must be retained between disabling and re-enabling a stream.
3181 *
3182 * Note: while a stream is disabled it's considered an error for userspace
3183 * to attempt to read from the stream (-EIO).
3184 */
3185 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3186 {
3187 if (!stream->enabled)
3188 return;
3189
3190 /* Allow stream->ops->disable() to refer to this */
3191 stream->enabled = false;
3192
3193 if (stream->hold_preemption)
3194 intel_context_clear_nopreempt(stream->pinned_ctx);
3195
3196 if (stream->ops->disable)
3197 stream->ops->disable(stream);
3198 }
3199
3200 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3201 unsigned long metrics_set)
3202 {
3203 struct i915_oa_config *config;
3204 long ret = stream->oa_config->id;
3205
3206 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3207 if (!config)
3208 return -EINVAL;
3209
3210 if (config != stream->oa_config) {
3211 int err;
3212
3213 /*
3214 * If OA is bound to a specific context, emit the
3215 * reconfiguration inline from that context. The update
3216 * will then be ordered with respect to submission on that
3217 * context.
3218 *
3219 * When set globally, we use a low priority kernel context,
3220 * so it will effectively take effect when idle.
3221 */
3222 err = emit_oa_config(stream, config, oa_context(stream), NULL);
3223 if (!err)
3224 config = xchg(&stream->oa_config, config);
3225 else
3226 ret = err;
3227 }
3228
3229 i915_oa_config_put(config);
3230
3231 return ret;
3232 }
3233
3234 /**
3235 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3236 * @stream: An i915 perf stream
3237 * @cmd: the ioctl request
3238 * @arg: the ioctl data
3239 *
3240 * Note: The &perf->lock mutex has been taken to serialize
3241 * with any non-file-operation driver hooks.
3242 *
3243 * Returns: zero on success or a negative error code. Returns -EINVAL for
3244 * an unknown ioctl request.
3245 */
3246 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3247 unsigned int cmd,
3248 unsigned long arg)
3249 {
3250 switch (cmd) {
3251 case I915_PERF_IOCTL_ENABLE:
3252 i915_perf_enable_locked(stream);
3253 return 0;
3254 case I915_PERF_IOCTL_DISABLE:
3255 i915_perf_disable_locked(stream);
3256 return 0;
3257 case I915_PERF_IOCTL_CONFIG:
3258 return i915_perf_config_locked(stream, arg);
3259 }
3260
3261 return -EINVAL;
3262 }
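
/*
 * Illustrative userspace sketch (not part of the driver): none of the
 * stream ioctls take an argument struct; only CONFIG passes a value, a
 * metrics set ID such as one read back from the sysfs metrics/<uuid>/id
 * file or returned by DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, (unsigned long)metrics_set);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 */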
3263
3264 /**
3265 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3266 * @file: An i915 perf stream file
3267 * @cmd: the ioctl request
3268 * @arg: the ioctl data
3269 *
3270 * Implementation deferred to i915_perf_ioctl_locked().
3271 *
3272 * Returns: zero on success or a negative error code. Returns -EINVAL for
3273 * an unknown ioctl request.
3274 */
3275 static long i915_perf_ioctl(struct file *file,
3276 unsigned int cmd,
3277 unsigned long arg)
3278 {
3279 struct i915_perf_stream *stream = file->private_data;
3280 struct i915_perf *perf = stream->perf;
3281 long ret;
3282
3283 mutex_lock(&perf->lock);
3284 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3285 mutex_unlock(&perf->lock);
3286
3287 return ret;
3288 }
3289
3290 /**
3291 * i915_perf_destroy_locked - destroy an i915 perf stream
3292 * @stream: An i915 perf stream
3293 *
3294 * Frees all resources associated with the given i915 perf @stream, disabling
3295 * any associated data capture in the process.
3296 *
3297 * Note: The &perf->lock mutex has been taken to serialize
3298 * with any non-file-operation driver hooks.
3299 */
3300 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3301 {
3302 if (stream->enabled)
3303 i915_perf_disable_locked(stream);
3304
3305 if (stream->ops->destroy)
3306 stream->ops->destroy(stream);
3307
3308 if (stream->ctx)
3309 i915_gem_context_put(stream->ctx);
3310
3311 kfree(stream);
3312 }
3313
3314 /**
3315 * i915_perf_release - handles userspace close() of a stream file
3316 * @inode: anonymous inode associated with file
3317 * @file: An i915 perf stream file
3318 *
3319 * Cleans up any resources associated with an open i915 perf stream file.
3320 *
3321 * NB: close() can't really fail from the userspace point of view.
3322 *
3323 * Returns: zero on success or a negative error code.
3324 */
3325 static int i915_perf_release(struct inode *inode, struct file *file)
3326 {
3327 struct i915_perf_stream *stream = file->private_data;
3328 struct i915_perf *perf = stream->perf;
3329
3330 mutex_lock(&perf->lock);
3331 i915_perf_destroy_locked(stream);
3332 mutex_unlock(&perf->lock);
3333
3334 /* Release the reference the perf stream kept on the driver. */
3335 drm_dev_put(&perf->i915->drm);
3336
3337 return 0;
3338 }
3339
3340
3341 static const struct file_operations fops = {
3342 .owner = THIS_MODULE,
3343 .llseek = no_llseek,
3344 .release = i915_perf_release,
3345 .poll = i915_perf_poll,
3346 .read = i915_perf_read,
3347 .unlocked_ioctl = i915_perf_ioctl,
3348 /* Our ioctls have no arguments, so it's safe to use the same function
3349 * to handle 32-bit compatibility.
3350 */
3351 .compat_ioctl = i915_perf_ioctl,
3352 };
3353
3354
3355 /**
3356 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3357 * @perf: i915 perf instance
3358 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3359 * @props: individually validated u64 property value pairs
3360 * @file: drm file
3361 *
3362 * See i915_perf_ioctl_open() for interface details.
3363 *
3364 * Implements further stream config validation and stream initialization on
3365 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3366 * taken to serialize with any non-file-operation driver hooks.
3367 *
3368 * Note: at this point the @props have only been validated in isolation and
3369 * it's still necessary to validate that the combination of properties makes
3370 * sense.
3371 *
3372 * In the case where userspace is interested in OA unit metrics then further
3373 * config validation and stream initialization details will be handled by
3374 * i915_oa_stream_init(). The code here should only validate config state that
3375 * will be relevant to all stream types / backends.
3376 *
3377 * Returns: zero on success or a negative error code.
3378 */
3379 static int
3380 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3381 struct drm_i915_perf_open_param *param,
3382 struct perf_open_properties *props,
3383 struct drm_file *file)
3384 {
3385 struct i915_gem_context *specific_ctx = NULL;
3386 struct i915_perf_stream *stream = NULL;
3387 unsigned long f_flags = 0;
3388 bool privileged_op = true;
3389 int stream_fd;
3390 int ret;
3391
3392 if (props->single_context) {
3393 u32 ctx_handle = props->ctx_handle;
3394 struct drm_i915_file_private *file_priv = file->driver_priv;
3395
3396 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3397 if (!specific_ctx) {
3398 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3399 ctx_handle);
3400 ret = -ENOENT;
3401 goto err;
3402 }
3403 }
3404
3405 /*
3406 * On Haswell the OA unit supports clock gating off for a specific
3407 * context and in this mode there's no visibility of metrics for the
3408 * rest of the system, which we consider acceptable for a
3409 * non-privileged client.
3410 *
3411 * For Gen8->11 the OA unit no longer supports clock gating off for a
3412 * specific context and the kernel can't securely stop the counters
3413 * from updating as system-wide / global values. Even though we can
3414 * filter reports based on the included context ID we can't block
3415 * clients from seeing the raw / global counter values via
3416 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3417 * enable the OA unit by default.
3418 *
3419 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3420 * per context basis. So we can relax requirements there if the user
3421 * doesn't request global stream access (i.e. query based sampling
3422 * using MI_REPORT_PERF_COUNT).
3423 */
3424 if (IS_HASWELL(perf->i915) && specific_ctx)
3425 privileged_op = false;
3426 else if (IS_GEN(perf->i915, 12) && specific_ctx &&
3427 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3428 privileged_op = false;
3429
3430 if (props->hold_preemption) {
3431 if (!props->single_context) {
3432 DRM_DEBUG("preemption disable with no context\n");
3433 ret = -EINVAL;
3434 goto err;
3435 }
3436 privileged_op = true;
3437 }
3438
3439 /*
3440 * Asking for SSEU configuration is a privileged operation.
3441 */
3442 if (props->has_sseu)
3443 privileged_op = true;
3444 else
3445 get_default_sseu_config(&props->sseu, props->engine);
3446
3447 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3448 * we check a dev.i915.perf_stream_paranoid sysctl option
3449 * to determine if it's ok to access system wide OA counters
3450 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3451 */
3452 if (privileged_op &&
3453 i915_perf_stream_paranoid && !perfmon_capable()) {
3454 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3455 ret = -EACCES;
3456 goto err_ctx;
3457 }
3458
3459 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3460 if (!stream) {
3461 ret = -ENOMEM;
3462 goto err_ctx;
3463 }
3464
3465 stream->perf = perf;
3466 stream->ctx = specific_ctx;
3467 stream->poll_oa_period = props->poll_oa_period;
3468
3469 ret = i915_oa_stream_init(stream, param, props);
3470 if (ret)
3471 goto err_alloc;
3472
3473 /* we avoid simply assigning stream->sample_flags = props->sample_flags
3474 * to have _stream_init check the combination of sample flags more
3475 * thoroughly, but still this is the expected result at this point.
3476 */
3477 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3478 ret = -ENODEV;
3479 goto err_flags;
3480 }
3481
3482 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3483 f_flags |= O_CLOEXEC;
3484 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3485 f_flags |= O_NONBLOCK;
3486
3487 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3488 if (stream_fd < 0) {
3489 ret = stream_fd;
3490 goto err_flags;
3491 }
3492
3493 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3494 i915_perf_enable_locked(stream);
3495
3496 /* Take a reference on the driver that will be kept with stream_fd
3497 * until its release.
3498 */
3499 drm_dev_get(&perf->i915->drm);
3500
3501 return stream_fd;
3502
3503 err_flags:
3504 if (stream->ops->destroy)
3505 stream->ops->destroy(stream);
3506 err_alloc:
3507 kfree(stream);
3508 err_ctx:
3509 if (specific_ctx)
3510 i915_gem_context_put(specific_ctx);
3511 err:
3512 return ret;
3513 }
3514
3515 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3516 {
3517 return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
3518 }
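
/*
 * As a worked example (assuming the 12.5MHz / 80ns Haswell command
 * streamer timestamp): exponent 0 selects 2 << 0 = 2 ticks, i.e. a 160ns
 * period, matching the minimum HSW period quoted below in
 * read_properties_unlocked(); each exponent increment doubles the period.
 */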
3519
3520 /**
3521 * read_properties_unlocked - validate + copy userspace stream open properties
3522 * @perf: i915 perf instance
3523 * @uprops: The array of u64 key value pairs given by userspace
3524 * @n_props: The number of key value pairs expected in @uprops
3525 * @props: The stream configuration built up while validating properties
3526 *
3527 * Note this function only validates properties in isolation; it doesn't
3528 * validate that the combination of properties makes sense or that all
3529 * properties necessary for a particular kind of stream have been set.
3530 *
3531 * Note that there currently aren't any ordering requirements for properties so
3532 * we shouldn't validate or assume anything about ordering here. This doesn't
3533 * rule out defining new properties with ordering requirements in the future.
3534 */
3535 static int read_properties_unlocked(struct i915_perf *perf,
3536 u64 __user *uprops,
3537 u32 n_props,
3538 struct perf_open_properties *props)
3539 {
3540 u64 __user *uprop = uprops;
3541 u32 i;
3542 int ret;
3543
3544 memset(props, 0, sizeof(struct perf_open_properties));
3545 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3546
3547 if (!n_props) {
3548 DRM_DEBUG("No i915 perf properties given\n");
3549 return -EINVAL;
3550 }
3551
3552 /* At the moment we only support using i915-perf on the RCS. */
3553 props->engine = intel_engine_lookup_user(perf->i915,
3554 I915_ENGINE_CLASS_RENDER,
3555 0);
3556 if (!props->engine) {
3557 DRM_DEBUG("No RENDER-capable engines\n");
3558 return -EINVAL;
3559 }
3560
3561 /* Considering that ID = 0 is reserved and assuming that we don't
3562 * (currently) expect any configurations to ever specify duplicate
3563 * values for a particular property ID, then the last _PROP_MAX value is
3564 * one greater than the maximum number of properties we expect to get
3565 * from userspace.
3566 */
3567 if (n_props >= DRM_I915_PERF_PROP_MAX) {
3568 DRM_DEBUG("More i915 perf properties specified than exist\n");
3569 return -EINVAL;
3570 }
3571
3572 for (i = 0; i < n_props; i++) {
3573 u64 oa_period, oa_freq_hz;
3574 u64 id, value;
3575
3576 ret = get_user(id, uprop);
3577 if (ret)
3578 return ret;
3579
3580 ret = get_user(value, uprop + 1);
3581 if (ret)
3582 return ret;
3583
3584 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3585 DRM_DEBUG("Unknown i915 perf property ID\n");
3586 return -EINVAL;
3587 }
3588
3589 switch ((enum drm_i915_perf_property_id)id) {
3590 case DRM_I915_PERF_PROP_CTX_HANDLE:
3591 props->single_context = 1;
3592 props->ctx_handle = value;
3593 break;
3594 case DRM_I915_PERF_PROP_SAMPLE_OA:
3595 if (value)
3596 props->sample_flags |= SAMPLE_OA_REPORT;
3597 break;
3598 case DRM_I915_PERF_PROP_OA_METRICS_SET:
3599 if (value == 0) {
3600 DRM_DEBUG("Unknown OA metric set ID\n");
3601 return -EINVAL;
3602 }
3603 props->metrics_set = value;
3604 break;
3605 case DRM_I915_PERF_PROP_OA_FORMAT:
3606 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3607 DRM_DEBUG("Out-of-range OA report format %llu\n",
3608 value);
3609 return -EINVAL;
3610 }
3611 if (!perf->oa_formats[value].size) {
3612 DRM_DEBUG("Unsupported OA report format %llu\n",
3613 value);
3614 return -EINVAL;
3615 }
3616 props->oa_format = value;
3617 break;
3618 case DRM_I915_PERF_PROP_OA_EXPONENT:
3619 if (value > OA_EXPONENT_MAX) {
3620 DRM_DEBUG("OA timer exponent too high (> %u)\n",
3621 OA_EXPONENT_MAX);
3622 return -EINVAL;
3623 }
3624
3625 /* Theoretically we can program the OA unit to sample
3626 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3627 * for BXT. We don't allow such high sampling
3628 * frequencies by default without CAP_PERFMON or CAP_SYS_ADMIN.
3629 */
3630
3631 BUILD_BUG_ON(sizeof(oa_period) != 8);
3632 oa_period = oa_exponent_to_ns(perf, value);
3633
3634 /* This check is primarily to ensure that oa_period <=
3635 * UINT32_MAX (before passing to do_div which only
3636 * accepts a u32 denominator), but we can also skip
3637 * checking anything < 1Hz which implicitly can't be
3638 * limited via an integer oa_max_sample_rate.
3639 */
3640 if (oa_period <= NSEC_PER_SEC) {
3641 u64 tmp = NSEC_PER_SEC;
3642 do_div(tmp, oa_period);
3643 oa_freq_hz = tmp;
3644 } else
3645 oa_freq_hz = 0;
3646
3647 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3648 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3649 i915_oa_max_sample_rate);
3650 return -EACCES;
3651 }
3652
3653 props->oa_periodic = true;
3654 props->oa_period_exponent = value;
3655 break;
3656 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3657 props->hold_preemption = !!value;
3658 break;
3659 case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3660 struct drm_i915_gem_context_param_sseu user_sseu;
3661
3662 if (copy_from_user(&user_sseu,
3663 u64_to_user_ptr(value),
3664 sizeof(user_sseu))) {
3665 DRM_DEBUG("Unable to copy global sseu parameter\n");
3666 return -EFAULT;
3667 }
3668
3669 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3670 if (ret) {
3671 DRM_DEBUG("Invalid SSEU configuration\n");
3672 return ret;
3673 }
3674 props->has_sseu = true;
3675 break;
3676 }
3677 case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3678 if (value < 100000 /* 100us */) {
3679 DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3680 value);
3681 return -EINVAL;
3682 }
3683 props->poll_oa_period = value;
3684 break;
3685 case DRM_I915_PERF_PROP_MAX:
3686 MISSING_CASE(id);
3687 return -EINVAL;
3688 }
3689
3690 uprop += 2;
3691 }
3692
3693 return 0;
3694 }
3695
3696 /**
3697 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3698 * @dev: drm device
3699 * @data: ioctl data copied from userspace (unvalidated)
3700 * @file: drm file
3701 *
3702 * Validates the stream open parameters given by userspace including flags
3703 * and an array of u64 key, value pair properties.
3704 *
3705 * Very little is assumed up front about the nature of the stream being
3706 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3707 * i915-perf stream is expected to be a suitable interface for other forms of
3708 * buffered data written by the GPU besides periodic OA metrics.
3709 *
3710 * Note we copy the properties from userspace outside of the i915 perf
3711 * mutex to avoid an awkward lockdep with mmap_lock.
3712 *
3713 * Most of the implementation details are handled by
3714 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3715 * mutex for serializing with any non-file-operation driver hooks.
3716 *
3717 * Return: A newly opened i915 Perf stream file descriptor or negative
3718 * error code on failure.
3719 */
3720 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3721 struct drm_file *file)
3722 {
3723 struct i915_perf *perf = &to_i915(dev)->perf;
3724 struct drm_i915_perf_open_param *param = data;
3725 struct perf_open_properties props;
3726 u32 known_open_flags;
3727 int ret;
3728
3729 if (!perf->i915) {
3730 DRM_DEBUG("i915 perf interface not available for this system\n");
3731 return -ENOTSUPP;
3732 }
3733
3734 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3735 I915_PERF_FLAG_FD_NONBLOCK |
3736 I915_PERF_FLAG_DISABLED;
3737 if (param->flags & ~known_open_flags) {
3738 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3739 return -EINVAL;
3740 }
3741
3742 ret = read_properties_unlocked(perf,
3743 u64_to_user_ptr(param->properties_ptr),
3744 param->num_properties,
3745 &props);
3746 if (ret)
3747 return ret;
3748
3749 mutex_lock(&perf->lock);
3750 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3751 mutex_unlock(&perf->lock);
3752
3753 return ret;
3754 }
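
/*
 * Illustrative userspace sketch (not part of the driver) of the (key,
 * value) property array parsed by read_properties_unlocked() above;
 * metrics_set_id is assumed to come from sysfs or
 * DRM_IOCTL_I915_PERF_ADD_CONFIG, and drm_fd from an open i915 node:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */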
3755
3756 /**
3757 * i915_perf_register - exposes i915-perf to userspace
3758 * @i915: i915 device instance
3759 *
3760 * In particular OA metric sets are advertised under a sysfs metrics/
3761 * directory allowing userspace to enumerate valid IDs that can be
3762 * used to open an i915-perf stream.
3763 */
3764 void i915_perf_register(struct drm_i915_private *i915)
3765 {
3766 struct i915_perf *perf = &i915->perf;
3767
3768 if (!perf->i915)
3769 return;
3770
3771 /* To be sure we're synchronized with an attempted
3772 * i915_perf_open_ioctl(), considering that we register after
3773 * being exposed to userspace.
3774 */
3775 mutex_lock(&perf->lock);
3776
3777 perf->metrics_kobj =
3778 kobject_create_and_add("metrics",
3779 &i915->drm.primary->kdev->kobj);
3780
3781 mutex_unlock(&perf->lock);
3782 }
3783
3784 /**
3785 * i915_perf_unregister - hide i915-perf from userspace
3786 * @i915: i915 device instance
3787 *
3788 * i915-perf state cleanup is split up into an 'unregister' and
3789 * 'deinit' phase where the interface is first hidden from
3790 * userspace by i915_perf_unregister() before cleaning up
3791 * remaining state in i915_perf_fini().
3792 */
3793 void i915_perf_unregister(struct drm_i915_private *i915)
3794 {
3795 struct i915_perf *perf = &i915->perf;
3796
3797 if (!perf->metrics_kobj)
3798 return;
3799
3800 kobject_put(perf->metrics_kobj);
3801 perf->metrics_kobj = NULL;
3802 }
3803
3804 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3805 {
3806 static const i915_reg_t flex_eu_regs[] = {
3807 EU_PERF_CNTL0,
3808 EU_PERF_CNTL1,
3809 EU_PERF_CNTL2,
3810 EU_PERF_CNTL3,
3811 EU_PERF_CNTL4,
3812 EU_PERF_CNTL5,
3813 EU_PERF_CNTL6,
3814 };
3815 int i;
3816
3817 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3818 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3819 return true;
3820 }
3821 return false;
3822 }
3823
3824 #define ADDR_IN_RANGE(addr, start, end) \
3825 ((addr) >= (start) && \
3826 (addr) <= (end))
3827
3828 #define REG_IN_RANGE(addr, start, end) \
3829 ((addr) >= i915_mmio_reg_offset(start) && \
3830 (addr) <= i915_mmio_reg_offset(end))
3831
3832 #define REG_EQUAL(addr, mmio) \
3833 ((addr) == i915_mmio_reg_offset(mmio))
3834
3835 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3836 {
3837 return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3838 REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3839 REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3840 }
3841
3842 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3843 {
3844 return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3845 REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3846 REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3847 REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3848 }
3849
3850 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3851 {
3852 return gen7_is_valid_mux_addr(perf, addr) ||
3853 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3854 REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3855 }
3856
3857 static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3858 {
3859 return gen8_is_valid_mux_addr(perf, addr) ||
3860 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3861 REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3862 }
3863
3864 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3865 {
3866 return gen7_is_valid_mux_addr(perf, addr) ||
3867 ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3868 REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3869 REG_EQUAL(addr, HSW_MBVID2_MISR0);
3870 }
3871
3872 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3873 {
3874 return gen7_is_valid_mux_addr(perf, addr) ||
3875 ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3876 }
3877
3878 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3879 {
3880 return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3881 REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3882 REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3883 REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3884 REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3885 REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3886 REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3887 }
3888
3889 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3890 {
3891 return REG_EQUAL(addr, NOA_WRITE) ||
3892 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3893 REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3894 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3895 REG_EQUAL(addr, RPM_CONFIG0) ||
3896 REG_EQUAL(addr, RPM_CONFIG1) ||
3897 REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3898 }
3899
3900 static u32 mask_reg_value(u32 reg, u32 val)
3901 {
3902 /* HALF_SLICE_CHICKEN2 is programmed with the
3903 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3904 * programmed by userspace doesn't change this.
3905 */
3906 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3907 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3908
3909 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3910 * indicated by its name and a bunch of selection fields used by OA
3911 * configs.
3912 */
3913 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3914 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3915
3916 return val;
3917 }
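
/*
 * Both registers masked above follow the usual Gen masked-register
 * convention: the upper 16 bits of a write select which of the lower 16
 * bits actually update, so clearing both halves with
 * ~_MASKED_BIT_ENABLE() leaves the protected bit untouched by whatever
 * value userspace supplied.
 */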
3918
3919 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3920 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3921 u32 __user *regs,
3922 u32 n_regs)
3923 {
3924 struct i915_oa_reg *oa_regs;
3925 int err;
3926 u32 i;
3927
3928 if (!n_regs)
3929 return NULL;
3930
3931 /* No is_valid function means we're not allowing any register to be programmed. */
3932 GEM_BUG_ON(!is_valid);
3933 if (!is_valid)
3934 return ERR_PTR(-EINVAL);
3935
3936 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3937 if (!oa_regs)
3938 return ERR_PTR(-ENOMEM);
3939
3940 for (i = 0; i < n_regs; i++) {
3941 u32 addr, value;
3942
3943 err = get_user(addr, regs);
3944 if (err)
3945 goto addr_err;
3946
3947 if (!is_valid(perf, addr)) {
3948 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3949 err = -EINVAL;
3950 goto addr_err;
3951 }
3952
3953 err = get_user(value, regs + 1);
3954 if (err)
3955 goto addr_err;
3956
3957 oa_regs[i].addr = _MMIO(addr);
3958 oa_regs[i].value = mask_reg_value(addr, value);
3959
3960 regs += 2;
3961 }
3962
3963 return oa_regs;
3964
3965 addr_err:
3966 kfree(oa_regs);
3967 return ERR_PTR(err);
3968 }
3969
3970 static ssize_t show_dynamic_id(struct device *dev,
3971 struct device_attribute *attr,
3972 char *buf)
3973 {
3974 struct i915_oa_config *oa_config =
3975 container_of(attr, typeof(*oa_config), sysfs_metric_id);
3976
3977 return sprintf(buf, "%d\n", oa_config->id);
3978 }
3979
3980 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
3981 struct i915_oa_config *oa_config)
3982 {
3983 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
3984 oa_config->sysfs_metric_id.attr.name = "id";
3985 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3986 oa_config->sysfs_metric_id.show = show_dynamic_id;
3987 oa_config->sysfs_metric_id.store = NULL;
3988
3989 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3990 oa_config->attrs[1] = NULL;
3991
3992 oa_config->sysfs_metric.name = oa_config->uuid;
3993 oa_config->sysfs_metric.attrs = oa_config->attrs;
3994
3995 return sysfs_create_group(perf->metrics_kobj,
3996 &oa_config->sysfs_metric);
3997 }
3998
3999 /**
4000 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4001 * @dev: drm device
4002 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4003 * userspace (unvalidated)
4004 * @file: drm file
4005 *
4006 * Validates the submitted OA registers to be saved into a new OA config that
4007 * can then be used for programming the OA unit and its NOA network.
4008 *
4009 * Returns: A new allocated config number to be used with the perf open ioctl
4010 * or a negative error code on failure.
4011 */
4012 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4013 struct drm_file *file)
4014 {
4015 struct i915_perf *perf = &to_i915(dev)->perf;
4016 struct drm_i915_perf_oa_config *args = data;
4017 struct i915_oa_config *oa_config, *tmp;
4018 struct i915_oa_reg *regs;
4019 int err, id;
4020
4021 if (!perf->i915) {
4022 DRM_DEBUG("i915 perf interface not available for this system\n");
4023 return -ENOTSUPP;
4024 }
4025
4026 if (!perf->metrics_kobj) {
4027 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4028 return -EINVAL;
4029 }
4030
4031 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4032 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4033 return -EACCES;
4034 }
4035
4036 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4037 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4038 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4039 DRM_DEBUG("No OA registers given\n");
4040 return -EINVAL;
4041 }
4042
4043 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4044 if (!oa_config) {
4045 DRM_DEBUG("Failed to allocate memory for the OA config\n");
4046 return -ENOMEM;
4047 }
4048
4049 oa_config->perf = perf;
4050 kref_init(&oa_config->ref);
4051
4052 if (!uuid_is_valid(args->uuid)) {
4053 DRM_DEBUG("Invalid uuid format for OA config\n");
4054 err = -EINVAL;
4055 goto reg_err;
4056 }
4057
4058 /* Last character in oa_config->uuid will be 0 because oa_config was
4059 * allocated with kzalloc().
4060 */
4061 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4062
4063 oa_config->mux_regs_len = args->n_mux_regs;
4064 regs = alloc_oa_regs(perf,
4065 perf->ops.is_valid_mux_reg,
4066 u64_to_user_ptr(args->mux_regs_ptr),
4067 args->n_mux_regs);
4068
4069 if (IS_ERR(regs)) {
4070 DRM_DEBUG("Failed to create OA config for mux_regs\n");
4071 err = PTR_ERR(regs);
4072 goto reg_err;
4073 }
4074 oa_config->mux_regs = regs;
4075
4076 oa_config->b_counter_regs_len = args->n_boolean_regs;
4077 regs = alloc_oa_regs(perf,
4078 perf->ops.is_valid_b_counter_reg,
4079 u64_to_user_ptr(args->boolean_regs_ptr),
4080 args->n_boolean_regs);
4081
4082 if (IS_ERR(regs)) {
4083 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4084 err = PTR_ERR(regs);
4085 goto reg_err;
4086 }
4087 oa_config->b_counter_regs = regs;
4088
4089 if (INTEL_GEN(perf->i915) < 8) {
4090 if (args->n_flex_regs != 0) {
4091 err = -EINVAL;
4092 goto reg_err;
4093 }
4094 } else {
4095 oa_config->flex_regs_len = args->n_flex_regs;
4096 regs = alloc_oa_regs(perf,
4097 perf->ops.is_valid_flex_reg,
4098 u64_to_user_ptr(args->flex_regs_ptr),
4099 args->n_flex_regs);
4100
4101 if (IS_ERR(regs)) {
4102 DRM_DEBUG("Failed to create OA config for flex_regs\n");
4103 err = PTR_ERR(regs);
4104 goto reg_err;
4105 }
4106 oa_config->flex_regs = regs;
4107 }
4108
4109 err = mutex_lock_interruptible(&perf->metrics_lock);
4110 if (err)
4111 goto reg_err;
4112
4113 /* We shouldn't have too many configs, so this iteration shouldn't be
4114 * too costly.
4115 */
4116 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4117 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4118 DRM_DEBUG("OA config already exists with this uuid\n");
4119 err = -EADDRINUSE;
4120 goto sysfs_err;
4121 }
4122 }
4123
4124 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4125 if (err) {
4126 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4127 goto sysfs_err;
4128 }
4129
4130 /* Config id 0 is invalid, id 1 is reserved for the kernel stored test config. */
4131 oa_config->id = idr_alloc(&perf->metrics_idr,
4132 oa_config, 2,
4133 0, GFP_KERNEL);
4134 if (oa_config->id < 0) {
4135 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4136 err = oa_config->id;
4137 goto sysfs_err;
4138 }
4139
4140 mutex_unlock(&perf->metrics_lock);
4141
4142 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4143
4144 return oa_config->id;
4145
4146 sysfs_err:
4147 mutex_unlock(&perf->metrics_lock);
4148 reg_err:
4149 i915_oa_config_put(oa_config);
4150 DRM_DEBUG("Failed to add new OA config\n");
4151 return err;
4152 }
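
/*
 * Illustrative userspace sketch (not part of the driver): each register
 * list is an array of (u32 address, u32 value) pairs as consumed by
 * alloc_oa_regs() above, and the uuid must be 36 characters in canonical
 * form; the single mux pair here is only a placeholder:
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x0 };
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int id;
 *
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab", 36);
 *	id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */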
4153
4154 /**
4155 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4156 * @dev: drm device
4157 * @data: ioctl data (pointer to u64 integer) copied from userspace
4158 * @file: drm file
4159 *
4160 * Configs can be removed while being used; they will stop appearing in sysfs
4161 * and their content will be freed when the stream using the config is closed.
4162 *
4163 * Returns: 0 on success or a negative error code on failure.
4164 */
4165 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4166 struct drm_file *file)
4167 {
4168 struct i915_perf *perf = &to_i915(dev)->perf;
4169 u64 *arg = data;
4170 struct i915_oa_config *oa_config;
4171 int ret;
4172
4173 if (!perf->i915) {
4174 DRM_DEBUG("i915 perf interface not available for this system\n");
4175 return -ENOTSUPP;
4176 }
4177
4178 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4179 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4180 return -EACCES;
4181 }
4182
4183 ret = mutex_lock_interruptible(&perf->metrics_lock);
4184 if (ret)
4185 return ret;
4186
4187 oa_config = idr_find(&perf->metrics_idr, *arg);
4188 if (!oa_config) {
4189 DRM_DEBUG("Failed to remove unknown OA config\n");
4190 ret = -ENOENT;
4191 goto err_unlock;
4192 }
4193
4194 GEM_BUG_ON(*arg != oa_config->id);
4195
4196 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4197
4198 idr_remove(&perf->metrics_idr, *arg);
4199
4200 mutex_unlock(&perf->metrics_lock);
4201
4202 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4203
4204 i915_oa_config_put(oa_config);
4205
4206 return 0;
4207
4208 err_unlock:
4209 mutex_unlock(&perf->metrics_lock);
4210 return ret;
4211 }
4212
4213 static struct ctl_table oa_table[] = {
4214 {
4215 .procname = "perf_stream_paranoid",
4216 .data = &i915_perf_stream_paranoid,
4217 .maxlen = sizeof(i915_perf_stream_paranoid),
4218 .mode = 0644,
4219 .proc_handler = proc_dointvec_minmax,
4220 .extra1 = SYSCTL_ZERO,
4221 .extra2 = SYSCTL_ONE,
4222 },
4223 {
4224 .procname = "oa_max_sample_rate",
4225 .data = &i915_oa_max_sample_rate,
4226 .maxlen = sizeof(i915_oa_max_sample_rate),
4227 .mode = 0644,
4228 .proc_handler = proc_dointvec_minmax,
4229 .extra1 = SYSCTL_ZERO,
4230 .extra2 = &oa_sample_rate_hard_limit,
4231 },
4232 {}
4233 };
4234
4235 static struct ctl_table i915_root[] = {
4236 {
4237 .procname = "i915",
4238 .maxlen = 0,
4239 .mode = 0555,
4240 .child = oa_table,
4241 },
4242 {}
4243 };
4244
4245 static struct ctl_table dev_root[] = {
4246 {
4247 .procname = "dev",
4248 .maxlen = 0,
4249 .mode = 0555,
4250 .child = i915_root,
4251 },
4252 {}
4253 };
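
/*
 * Together these tables expose the module's two tunables as
 * /proc/sys/dev/i915/perf_stream_paranoid (0 or 1) and
 * /proc/sys/dev/i915/oa_max_sample_rate, the latter clamped to
 * oa_sample_rate_hard_limit (half the CS timestamp frequency, set in
 * i915_perf_init() below).
 */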
4254
4255 /**
4256 * i915_perf_init - initialize i915-perf state on module bind
4257 * @i915: i915 device instance
4258 *
4259 * Initializes i915-perf state without exposing anything to userspace.
4260 *
4261 * Note: i915-perf initialization is split into an 'init' and 'register'
4262 * phase with the i915_perf_register() exposing state to userspace.
4263 */
4264 void i915_perf_init(struct drm_i915_private *i915)
4265 {
4266 struct i915_perf *perf = &i915->perf;
4267
4268 /* XXX const struct i915_perf_ops! */
4269
4270 if (IS_HASWELL(i915)) {
4271 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4272 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4273 perf->ops.is_valid_flex_reg = NULL;
4274 perf->ops.enable_metric_set = hsw_enable_metric_set;
4275 perf->ops.disable_metric_set = hsw_disable_metric_set;
4276 perf->ops.oa_enable = gen7_oa_enable;
4277 perf->ops.oa_disable = gen7_oa_disable;
4278 perf->ops.read = gen7_oa_read;
4279 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4280
4281 perf->oa_formats = hsw_oa_formats;
4282 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4283 /* Note that although we could theoretically also support the
4284 * legacy ringbuffer mode on BDW (and earlier iterations of
4285 * this driver, before upstreaming did this) it didn't seem
4286 * worth the complexity to maintain now that BDW+ enable
4287 * execlist mode by default.
4288 */
4289 perf->ops.read = gen8_oa_read;
4290
4291 if (IS_GEN_RANGE(i915, 8, 9)) {
4292 perf->oa_formats = gen8_plus_oa_formats;
4293
4294 perf->ops.is_valid_b_counter_reg =
4295 gen7_is_valid_b_counter_addr;
4296 perf->ops.is_valid_mux_reg =
4297 gen8_is_valid_mux_addr;
4298 perf->ops.is_valid_flex_reg =
4299 gen8_is_valid_flex_addr;
4300
4301 if (IS_CHERRYVIEW(i915)) {
4302 perf->ops.is_valid_mux_reg =
4303 chv_is_valid_mux_addr;
4304 }
4305
4306 perf->ops.oa_enable = gen8_oa_enable;
4307 perf->ops.oa_disable = gen8_oa_disable;
4308 perf->ops.enable_metric_set = gen8_enable_metric_set;
4309 perf->ops.disable_metric_set = gen8_disable_metric_set;
4310 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4311
4312 if (IS_GEN(i915, 8)) {
4313 perf->ctx_oactxctrl_offset = 0x120;
4314 perf->ctx_flexeu0_offset = 0x2ce;
4315
4316 perf->gen8_valid_ctx_bit = BIT(25);
4317 } else {
4318 perf->ctx_oactxctrl_offset = 0x128;
4319 perf->ctx_flexeu0_offset = 0x3de;
4320
4321 perf->gen8_valid_ctx_bit = BIT(16);
4322 }
4323 } else if (IS_GEN_RANGE(i915, 10, 11)) {
4324 perf->oa_formats = gen8_plus_oa_formats;
4325
4326 perf->ops.is_valid_b_counter_reg =
4327 gen7_is_valid_b_counter_addr;
4328 perf->ops.is_valid_mux_reg =
4329 gen10_is_valid_mux_addr;
4330 perf->ops.is_valid_flex_reg =
4331 gen8_is_valid_flex_addr;
4332
4333 perf->ops.oa_enable = gen8_oa_enable;
4334 perf->ops.oa_disable = gen8_oa_disable;
4335 perf->ops.enable_metric_set = gen8_enable_metric_set;
4336 perf->ops.disable_metric_set = gen10_disable_metric_set;
4337 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4338
4339 if (IS_GEN(i915, 10)) {
4340 perf->ctx_oactxctrl_offset = 0x128;
4341 perf->ctx_flexeu0_offset = 0x3de;
4342 } else {
4343 perf->ctx_oactxctrl_offset = 0x124;
4344 perf->ctx_flexeu0_offset = 0x78e;
4345 }
4346 perf->gen8_valid_ctx_bit = BIT(16);
4347 } else if (IS_GEN(i915, 12)) {
4348 perf->oa_formats = gen12_oa_formats;
4349
4350 perf->ops.is_valid_b_counter_reg =
4351 gen12_is_valid_b_counter_addr;
4352 perf->ops.is_valid_mux_reg =
4353 gen12_is_valid_mux_addr;
4354 perf->ops.is_valid_flex_reg =
4355 gen8_is_valid_flex_addr;
4356
4357 perf->ops.oa_enable = gen12_oa_enable;
4358 perf->ops.oa_disable = gen12_oa_disable;
4359 perf->ops.enable_metric_set = gen12_enable_metric_set;
4360 perf->ops.disable_metric_set = gen12_disable_metric_set;
4361 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4362
4363 perf->ctx_flexeu0_offset = 0;
4364 perf->ctx_oactxctrl_offset = 0x144;
4365 }
4366 }
4367
4368 if (perf->ops.enable_metric_set) {
4369 mutex_init(&perf->lock);
4370
4371 oa_sample_rate_hard_limit =
4372 RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
4373
4374 mutex_init(&perf->metrics_lock);
4375 idr_init(&perf->metrics_idr);
4376
4377 /* We set up some ratelimit state to potentially throttle any
4378 * _NOTES about spurious, invalid OA reports which we don't
4379 * forward to userspace.
4380 *
4381 * We print a _NOTE about any throttling when closing the
4382 * stream instead of waiting until driver _fini which no one
4383 * would ever see.
4384 *
4385 * Using the same limiting factors as printk_ratelimit()
4386 */
4387 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4388 /* Since we use a DRM_NOTE for spurious reports it would be
4389 * inconsistent to let __ratelimit() automatically print a
4390 * warning for throttling.
4391 */
4392 ratelimit_set_flags(&perf->spurious_report_rs,
4393 RATELIMIT_MSG_ON_RELEASE);
4394
4395 ratelimit_state_init(&perf->tail_pointer_race,
4396 5 * HZ, 10);
4397 ratelimit_set_flags(&perf->tail_pointer_race,
4398 RATELIMIT_MSG_ON_RELEASE);
4399
4400 atomic64_set(&perf->noa_programming_delay,
4401 500 * 1000 /* 500us */);
4402
4403 perf->i915 = i915;
4404 }
4405 }
4406
4407 static int destroy_config(int id, void *p, void *data)
4408 {
4409 i915_oa_config_put(p);
4410 return 0;
4411 }
4412
4413 void i915_perf_sysctl_register(void)
4414 {
4415 sysctl_header = register_sysctl_table(dev_root);
4416 }
4417
4418 void i915_perf_sysctl_unregister(void)
4419 {
4420 unregister_sysctl_table(sysctl_header);
4421 }
4422
4423 /**
4424 * i915_perf_fini - Counter part to i915_perf_init()
4425 * @i915: i915 device instance
4426 */
4427 void i915_perf_fini(struct drm_i915_private *i915)
4428 {
4429 struct i915_perf *perf = &i915->perf;
4430
4431 if (!perf->i915)
4432 return;
4433
4434 idr_for_each(&perf->metrics_idr, destroy_config, perf);
4435 idr_destroy(&perf->metrics_idr);
4436
4437 memset(&perf->ops, 0, sizeof(perf->ops));
4438 perf->i915 = NULL;
4439 }
4440
4441 /**
4442 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4443 *
4444 * This version number is used by userspace to detect available features.
4445 */
4446 int i915_perf_ioctl_version(void)
4447 {
4448 /*
4449 * 1: Initial version
4450 * I915_PERF_IOCTL_ENABLE
4451 * I915_PERF_IOCTL_DISABLE
4452 *
4453 * 2: Added runtime modification of OA config.
4454 * I915_PERF_IOCTL_CONFIG
4455 *
4456 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4457 * preemption on a particular context so that performance data is
4458 * accessible from a delta of MI_RPC reports without looking at the
4459 * OA buffer.
4460 *
4461 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4462 * be run for the duration of the performance recording based on
4463 * their SSEU configuration.
4464 *
4465 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4466 * interval for the hrtimer used to check for OA data.
4467 */
4468 return 5;
4469 }
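
/*
 * Illustrative userspace sketch (not part of the driver): this revision
 * is reported via the I915_PARAM_PERF_REVISION getparam, so features can
 * be probed before opening a stream; poll_period_supported stands in for
 * a caller-side feature flag:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &version,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && version >= 5)
 *		poll_period_supported = true;
 */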
4470
4471 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4472 #include "selftests/i915_perf.c"
4473 #endif
4474