/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
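
/*
 * Illustrative sketch, not compiled as part of the driver: one way userspace
 * might open an OA metrics stream using the u64 (key, value) property pairs
 * described above. open_oa_stream() is a hypothetical helper; the metrics set
 * ID is assumed to have been read from sysfs beforehand, error handling is
 * elided, and on success the ioctl returns a new stream fd.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int open_oa_stream(int drm_fd, uint64_t metrics_set_id)
 *	{
 *		uint64_t properties[] = {
 *			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *			DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *			DRM_I915_PERF_PROP_OA_FORMAT,
 *				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *		};
 *		struct drm_i915_perf_open_param param = {
 *			.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *				 I915_PERF_FLAG_FD_NONBLOCK,
 *			.num_properties = sizeof(properties) /
 *					  (2 * sizeof(uint64_t)),
 *			.properties_ptr = (uintptr_t)properties,
 *		};
 *
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	}
 */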

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based, the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature, there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value, giving 16 bytes per counter.
 *
 *   Related to counter orthogonality, we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seems to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
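
/*
 * For example, with OA_BUFFER_SIZE = 16M the mask is 0xffffff, so
 * OA_TAKEN(0x100, 0xfffff80) = 0x180: the unsigned subtraction and mask
 * naturally handle the tail having wrapped past the end of the circular
 * buffer back below the head.
 */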

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
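
/* For example, at Haswell's 12.5MHz timestamp frequency (80ns per tick) an
 * exponent of 0 gives the minimum sampling period of 2 ticks = 160ns, while
 * OA_EXPONENT_MAX gives a period of 2^32 ticks, i.e. roughly 5.7 minutes
 * between periodic reports.
 */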

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats: the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]		    = { 0, 64 },
	[I915_OA_FORMAT_A29]		    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]	    = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]		    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]	    = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]	    = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT	(1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
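
/*
 * Illustrative sketch, not compiled as part of the driver: how userspace
 * might walk the variable-size records returned by a read() of the stream
 * fd. Each DRM_I915_PERF_RECORD_SAMPLE record here is a header immediately
 * followed by the raw OA report. process_oa_report() is a hypothetical
 * consumer and error handling is elided.
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t offset = 0;
 *
 *	while (len > 0 &&
 *	       offset + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(header + 1));
 *
 *		offset += header->size;
 *	}
 */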

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow anyway likely indicates that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}
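
/*
 * Illustrative sketch, not compiled as part of the driver: from userspace
 * this maps onto a conventional poll() loop on the stream fd, blocking until
 * the hrtimer callback sees data in the OA buffer and wakes the queue.
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		len = read(stream_fd, buf, sizeof(buf));
 */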

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
			stream->specific_ctx_id_mask =
				((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
			stream->specific_ctx_id =
				(XEHP_MAX_CONTEXT_HW_ID - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
		} else {
			stream->specific_ctx_id_mask =
				((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
			/*
			 * Pick an unused context id
			 * 0 - BITS_PER_LONG are used by other contexts
			 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
			 */
			stream->specific_ctx_id =
				(GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		}
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}
1341
1342 static void
free_oa_buffer(struct i915_perf_stream * stream)1343 free_oa_buffer(struct i915_perf_stream *stream)
1344 {
1345 i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1346 I915_VMA_RELEASE_MAP);
1347
1348 stream->oa_buffer.vaddr = NULL;
1349 }
1350
1351 static void
free_oa_configs(struct i915_perf_stream * stream)1352 free_oa_configs(struct i915_perf_stream *stream)
1353 {
1354 struct i915_oa_config_bo *oa_bo, *tmp;
1355
1356 i915_oa_config_put(stream->oa_config);
1357 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1358 free_oa_config_bo(oa_bo);
1359 }
1360
1361 static void
free_noa_wait(struct i915_perf_stream * stream)1362 free_noa_wait(struct i915_perf_stream *stream)
1363 {
1364 i915_vma_unpin_and_release(&stream->noa_wait, 0);
1365 }
1366
i915_oa_stream_destroy(struct i915_perf_stream * stream)1367 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1368 {
1369 struct i915_perf *perf = stream->perf;
1370
1371 BUG_ON(stream != perf->exclusive_stream);
1372
1373 /*
1374 * Unset exclusive_stream first, it will be checked while disabling
1375 * the metric set on gen8+.
1376 *
1377 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1378 */
1379 WRITE_ONCE(perf->exclusive_stream, NULL);
1380 perf->ops.disable_metric_set(stream);
1381
1382 free_oa_buffer(stream);
1383
1384 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1385 intel_engine_pm_put(stream->engine);
1386
1387 if (stream->ctx)
1388 oa_put_render_ctx_id(stream);
1389
1390 free_oa_configs(stream);
1391 free_noa_wait(stream);
1392
1393 if (perf->spurious_report_rs.missed) {
1394 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1395 perf->spurious_report_rs.missed);
1396 }
1397 }
1398
gen7_init_oa_buffer(struct i915_perf_stream * stream)1399 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1400 {
1401 struct intel_uncore *uncore = stream->uncore;
1402 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1403 unsigned long flags;
1404
1405 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1406
1407 /* Pre-DevBDW: OABUFFER must be set with counters off,
1408 * before OASTATUS1, but after OASTATUS2
1409 */
1410 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1411 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1412 stream->oa_buffer.head = gtt_offset;
1413
1414 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1415
1416 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1417 gtt_offset | OABUFFER_SIZE_16M);
1418
1419 /* Mark that we need updated tail pointers to read from... */
1420 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1421 stream->oa_buffer.tail = gtt_offset;
1422
1423 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1424
1425 /* On Haswell we have to track which OASTATUS1 flags we've
1426 * already seen since they can't be cleared while periodic
1427 * sampling is enabled.
1428 */
1429 stream->perf->gen7_latched_oastatus1 = 0;
1430
1431 /* NB: although the OA buffer will initially be allocated
1432 * zeroed via shmfs (and so this memset is redundant when
1433 * first allocating), we may re-init the OA buffer, either
1434 * when re-enabling a stream or in error/reset paths.
1435 *
1436 * The reason we clear the buffer for each re-init is for the
1437 * sanity check in gen7_append_oa_reports() that looks at the
1438 * report-id field to make sure it's non-zero which relies on
1439 * the assumption that new reports are being written to zeroed
1440 * memory...
1441 */
1442 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1443 }
1444
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);
}

static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
		return -ENODEV;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	stream->oa_buffer.vma = vma;

	stream->oa_buffer.vaddr =
		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
	if (IS_ERR(stream->oa_buffer.vaddr)) {
		ret = PTR_ERR(stream->oa_buffer.vaddr);
		goto err_unpin;
	}

	return 0;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	stream->oa_buffer.vaddr = NULL;
	stream->oa_buffer.vma = NULL;

	return ret;
}

static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	cmd |= MI_SRM_LRM_GLOBAL_GTT;
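	/*
	 * On gen8+ the SRM/LRM commands carry a 64bit address, making the
	 * command one dword longer; incrementing the opcode dword bumps its
	 * embedded length field to match.
	 */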
	if (GRAPHICS_VER(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
						offset) + 4 * d;
		*cs++ = 0;
	}

	return cs;
}

static int alloc_noa_wait(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	const u64 delay_ticks = 0xffffffffffffffff -
		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
		atomic64_read(&stream->perf->noa_programming_delay));
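	/*
	 * delay_ticks is (2^64 - 1) - delay, so that adding the measured
	 * timestamp delta to it sets the ALU carry flag exactly when the
	 * delta exceeds the requested programming delay.
	 */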
	const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
	u32 *batch, *ts0, *cs, *jump;
	struct i915_gem_ww_ctx ww;
	int ret, i;
	enum {
		START_TS,
		NOW_TS,
		DELTA_TS,
		JUMP_PREDICATE,
		DELTA_TARGET,
		N_CS_GPR
	};

	bo = i915_gem_object_create_internal(i915, 4096);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm,
			"Failed to allocate NOA wait batchbuffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(bo, &ww);
	if (ret)
		goto out_ww;

	/*
	 * We pin in GGTT because multiple OA config BOs will jump to this
	 * address, so it needs to stay fixed for the lifetime of the
	 * i915/perf stream.
	 */
	vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_ww;
	}

	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(batch)) {
		ret = PTR_ERR(batch);
		goto err_unpin;
	}

	/* Save registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, true /* save */, CS_GPR(i),
			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);

	/* First timestamp snapshot location. */
	ts0 = cs;

	/*
	 * Initial snapshot of the timestamp register to implement the wait.
	 * We work with 32bit values, so clear out the top 32 bits of the
	 * register because the ALU works on 64 bits.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));

	/*
	 * This is the location we're going to jump back into until the
	 * required amount of time has passed.
	 */
	jump = cs;

	/*
	 * Take another snapshot of the timestamp register. Take care to
	 * clear out the top 32 bits of CS_GPR(NOW_TS) as we're using it for
	 * other operations below.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));

	/*
	 * Do a diff between the 2 timestamps and store the result back into
	 * CS_GPR(DELTA_TS).
	 */
	*cs++ = MI_MATH(5);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
	*cs++ = MI_MATH_SUB;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	/*
	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
	 * timestamp has rolled over the 32 bits) into the predicate
	 * register to be used for the predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);

	/* Restart from the beginning if we had timestamps roll over. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
	*cs++ = 0;

	/*
	 * Now take the diff between the two previous timestamps and add it
	 * to:
	 *
	 *   ((1 << 64) - 1) - delay_ns
	 *
	 * When the Carry Flag contains 1 this means the elapsed time is
	 * longer than the expected delay, and we can exit the wait loop.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
	*cs++ = lower_32_bits(delay_ticks);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
	*cs++ = upper_32_bits(delay_ticks);

	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

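	/*
	 * An MI_ARB_CHECK inside the loop gives the command streamer a
	 * chance to preempt the batch, so this busy wait cannot block
	 * higher priority work indefinitely.
	 */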
	*cs++ = MI_ARB_CHECK;

	/*
	 * Transfer the result into the predicate register to be used for the
	 * predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);

	/* Predicate the jump. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
	*cs++ = 0;

	/* Restore registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, false /* restore */, CS_GPR(i),
			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);

	/* And return to the ring. */
	*cs++ = MI_BATCH_BUFFER_END;

	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));

	i915_gem_object_flush_map(bo);
	__i915_gem_object_release_map(bo);

	stream->noa_wait = vma;
	goto out_ww;

err_unpin:
	i915_vma_unpin_and_release(&vma, 0);
out_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (ret)
		i915_gem_object_put(bo);
	return ret;
}

static u32 *write_cs_mi_lri(u32 *cs,
			    const struct i915_oa_reg *reg_data,
			    u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
			u32 n_lri = min_t(u32,
					  n_regs - i,
					  MI_LOAD_REGISTER_IMM_MAX_REGS);

			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
		}
		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
		*cs++ = reg_data[i].value;
	}

	return cs;
}

static int num_lri_dwords(int num_regs)
{
	int count = 0;

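	/*
	 * Each register costs two dwords (offset, value), plus one
	 * MI_LOAD_REGISTER_IMM header dword per chunk of up to
	 * MI_LOAD_REGISTER_IMM_MAX_REGS registers.
	 */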
	if (num_regs > 0) {
		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
		count += num_regs * 2;
	}

	return count;
}

static struct i915_oa_config_bo *
alloc_oa_config_buffer(struct i915_perf_stream *stream,
		       struct i915_oa_config *oa_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_oa_config_bo *oa_bo;
	struct i915_gem_ww_ctx ww;
	size_t config_length = 0;
	u32 *cs;
	int err;

	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
	if (!oa_bo)
		return ERR_PTR(-ENOMEM);

	config_length += num_lri_dwords(oa_config->mux_regs_len);
	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
	config_length += num_lri_dwords(oa_config->flex_regs_len);
	config_length += 3; /* MI_BATCH_BUFFER_START */
	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_free;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out_ww;

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_ww;
	}

	cs = write_cs_mi_lri(cs,
			     oa_config->mux_regs,
			     oa_config->mux_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->b_counter_regs,
			     oa_config->b_counter_regs_len);
	cs = write_cs_mi_lri(cs,
			     oa_config->flex_regs,
			     oa_config->flex_regs_len);

	/* Jump into the active wait. */
	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8);
	*cs++ = i915_ggtt_offset(stream->noa_wait);
	*cs++ = 0;

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	oa_bo->vma = i915_vma_instance(obj,
				       &stream->engine->gt->ggtt->vm,
				       NULL);
	if (IS_ERR(oa_bo->vma)) {
		err = PTR_ERR(oa_bo->vma);
		goto out_ww;
	}

	oa_bo->oa_config = i915_oa_config_get(oa_config);
	llist_add(&oa_bo->node, &stream->oa_config_bos);

out_ww:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		i915_gem_object_put(obj);
err_free:
	if (err) {
		kfree(oa_bo);
		return ERR_PTR(err);
	}
	return oa_bo;
}

static struct i915_vma *
get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
{
	struct i915_oa_config_bo *oa_bo;

	/*
	 * Look for the buffer in the already allocated BOs attached
	 * to the stream.
	 */
	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
		if (oa_bo->oa_config == oa_config &&
		    memcmp(oa_bo->oa_config->uuid,
			   oa_config->uuid,
			   sizeof(oa_config->uuid)) == 0)
			goto out;
	}

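	/*
	 * No match in the cache: build a fresh config BO and add it to the
	 * stream's list so subsequent reconfigurations can reuse it.
	 */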
	oa_bo = alloc_oa_config_buffer(stream, oa_config);
	if (IS_ERR(oa_bo))
		return ERR_CAST(oa_bo);

out:
	return i915_vma_get(oa_bo->vma);
}

static int
emit_oa_config(struct i915_perf_stream *stream,
	       struct i915_oa_config *oa_config,
	       struct intel_context *ce,
	       struct i915_active *active)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int err;

	vma = get_oa_vma(stream, oa_config);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err)
		goto err;

	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

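	/*
	 * An engine pm reference is only needed while creating the request;
	 * once created, the request itself keeps the engine awake until it
	 * is retired.
	 */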
	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma_unpin;
	}

	if (!IS_ERR_OR_NULL(active)) {
		/* After all individual context modifications */
		err = i915_request_await_active(rq, active,
						I915_ACTIVE_AWAIT_ACTIVE);
		if (err)
			goto err_add_request;

		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = i915_request_await_object(rq, vma->obj, 0);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto err_add_request;

	err = rq->engine->emit_bb_start(rq,
					vma->node.start, 0,
					I915_DISPATCH_SECURE);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
err_vma_unpin:
	i915_vma_unpin(vma);
err:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}

	i915_gem_ww_ctx_fini(&ww);
	i915_vma_put(vma);
	return err;
}
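/*
 * Pick the context used to emit an OA config: the stream's pinned context
 * when filtering for a single context, otherwise the engine's kernel
 * context.
 */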
static struct intel_context *oa_context(struct i915_perf_stream *stream)
{
	return stream->pinned_ctx ?: stream->engine->kernel_context;
}

static int
hsw_enable_metric_set(struct i915_perf_stream *stream,
		      struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;

	/*
	 * PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
			 0, GEN7_DOP_CLOCK_GATE_ENABLE);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
			      i915_reg_t reg)
{
	u32 mmio = i915_mmio_reg_offset(reg);
	int i;

	/*
	 * This arbitrary default will select the 'EU FPU0 Pipeline
	 * Active' event. In the future it's anticipated that there
	 * will be an explicit 'No Event' we can select, but not yet...
	 */
	if (!oa_config)
		return 0;

	for (i = 0; i < oa_config->flex_regs_len; i++) {
		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
			return oa_config->flex_regs[i].value;
	}

	return 0;
}

/*
 * NB: It must always remain pointer safe to run this even if the OA unit
 * has been disabled.
 *
 * It's fine to put out-of-date values into these per-context registers
 * in the case that the OA unit has been disabled.
 */
static void
gen8_update_reg_state_unlocked(const struct intel_context *ce,
			       const struct i915_perf_stream *stream)
{
	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	i915_reg_t flex_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	u32 *reg_state = ce->lrc_reg_state;
	int i;

	reg_state[ctx_oactxctrl + 1] =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
		reg_state[ctx_flexeu0 + i * 2 + 1] =
			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
}
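/*
 * A single register update: @reg is the MMIO register, @offset its dword
 * position within the context image, and @value the new contents.
 */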
struct flex {
	i915_reg_t reg;
	u32 offset;
	u32 value;
};

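/*
 * gen8_store_flex() pokes new values directly into a context's saved
 * register state image via the GGTT, while gen8_load_flex() below uses
 * MI_LOAD_REGISTER_IMM to update the live registers from the submitting
 * context itself.
 */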
static int
gen8_store_flex(struct i915_request *rq,
		struct intel_context *ce,
		const struct flex *flex, unsigned int count)
{
	u32 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
	do {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = offset + flex->offset * sizeof(u32);
		*cs++ = 0;
		*cs++ = flex->value;
	} while (flex++, --count);

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen8_load_flex(struct i915_request *rq,
	       struct intel_context *ce,
	       const struct flex *flex, unsigned int count)
{
	u32 *cs;

	GEM_BUG_ON(!count || count > 63);

	cs = intel_ring_begin(rq, 2 * count + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*cs++ = i915_mmio_reg_offset(flex->reg);
		*cs++ = flex->value;
	} while (flex++, --count);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen8_modify_context(struct intel_context *ce,
			       const struct flex *flex, unsigned int count)
{
	struct i915_request *rq;
	int err;

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Serialise with the remote context */
	err = intel_context_prepare_remote_request(ce, rq);
	if (err == 0)
		err = gen8_store_flex(rq, ce, flex, count);

	i915_request_add(rq);
	return err;
}

static int
gen8_modify_self(struct intel_context *ce,
		 const struct flex *flex, unsigned int count,
		 struct i915_active *active)
{
	struct i915_request *rq;
	int err;

	intel_engine_pm_get(ce->engine);
	rq = i915_request_create(ce);
	intel_engine_pm_put(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (!IS_ERR_OR_NULL(active)) {
		err = i915_active_add_request(active, rq);
		if (err)
			goto err_add_request;
	}

	err = gen8_load_flex(rq, ce, flex, count);
	if (err)
		goto err_add_request;

err_add_request:
	i915_request_add(rq);
	return err;
}

static int gen8_configure_context(struct i915_gem_context *ctx,
				  struct flex *flex, unsigned int count)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		GEM_BUG_ON(ce == ce->engine->kernel_context);

		if (ce->engine->class != RENDER_CLASS)
			continue;

		/* Otherwise OA settings will be set upon first use */
		if (!intel_context_pin_if_active(ce))
			continue;

		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
		err = gen8_modify_context(ce, flex, count);

		intel_context_unpin(ce);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	return err;
}

static int gen12_configure_oar_context(struct i915_perf_stream *stream,
				       struct i915_active *active)
{
	int err;
	struct intel_context *ce = stream->pinned_ctx;
	u32 format = stream->oa_buffer.format;
	struct flex regs_context[] = {
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
			active ? GEN8_OA_COUNTER_RESUME : 0,
		},
	};
	/*
	 * Offsets in regs_lri are not used since this configuration is only
	 * applied using LRI. Initialize the correct offsets for posterity.
	 */
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
	struct flex regs_lri[] = {
		{
			GEN12_OAR_OACONTROL,
			GEN12_OAR_OACONTROL_OFFSET + 1,
			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
		},
		{
			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
			CTX_CONTEXT_CONTROL,
			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
				      active ?
				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
				      0)
		},
	};

	/* Modify the context image of pinned context with regs_context */
	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
	intel_context_unlock_pinned(ce);
	if (err)
		return err;

	/* Apply regs_lri using LRI with pinned context */
	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 * Note: the first flex register passed must always be R_PWR_CLK_STATE
 */
static int
oa_configure_all_contexts(struct i915_perf_stream *stream,
			  struct flex *regs,
			  size_t num_regs,
			  struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx, *cn;
	int err;

	lockdep_assert_held(&stream->perf->lock);

	/*
	 * The OA register config is setup through the context image. This
	 * image might be written to by the GPU on context switch (in
	 * particular on lite-restore). This means we can't safely update a
	 * context's image if this context is scheduled/submitted to run on
	 * the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
	 *
	 * Note that since we emit all requests from a single ring, there
	 * is still an implicit global barrier here that may cause a high
	 * priority context to wait for an otherwise independent low priority
	 * context. Contexts idle at the time of reconfiguration are not
	 * trapped behind the barrier.
	 */
	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		err = gen8_configure_context(ctx, regs, num_regs);
		if (err) {
			i915_gem_context_put(ctx);
			return err;
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	/*
	 * After updating all other contexts, we need to modify ourselves.
	 * If we don't modify the kernel_context, we do not get events while
	 * idle.
	 */
	for_each_uabi_engine(engine, i915) {
		struct intel_context *ce = engine->kernel_context;

		if (engine->class != RENDER_CLASS)
			continue;

		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);

		err = gen8_modify_self(ce, regs, num_regs, active);
		if (err)
			return err;
	}

	return 0;
}

static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
			     const struct i915_oa_config *oa_config,
			     struct i915_active *active)
{
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
	};

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
			   const struct i915_oa_config *oa_config,
			   struct i915_active *active)
{
	/* The MMIO offsets for Flex EU registers aren't contiguous */
	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
	struct flex regs[] = {
		{
			GEN8_R_PWR_CLK_STATE,
			CTX_R_PWR_CLK_STATE,
		},
		{
			GEN8_OACTXCONTROL,
			stream->perf->ctx_oactxctrl_offset + 1,
		},
		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
	};
#undef ctx_flexeuN
	int i;

	regs[1].value =
		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
		GEN8_OA_COUNTER_RESUME;

	for (i = 2; i < ARRAY_SIZE(regs); i++)
		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);

	return oa_configure_all_contexts(stream,
					 regs, ARRAY_SIZE(regs),
					 active);
}

static int
gen8_enable_metric_set(struct i915_perf_stream *stream,
		       struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	int ret;

	/*
	 * We disable slice/unslice clock ratio change reports on SKL since
	 * they are too noisy. The HW generates a lot of redundant reports
	 * where the ratio hasn't really changed, causing a lot of redundant
	 * work for processes and increasing the chances we'll hit buffer
	 * overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature, it's worth noting that clock ratio reports have to be
	 * disabled before considering use of that feature since the HW
	 * doesn't correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
		intel_uncore_write(uncore, GEN8_OA_DEBUG,
				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to
	 * NOA registers.
	 */
	ret = lrc_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}
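/*
 * Build the masked-field write telling the OAG unit whether to emit
 * context switch reports: they are only useful when userspace actually
 * samples OA reports.
 */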
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}

static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
			struct i915_active *active)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	int ret;

	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
			   /* Disable clk ratio reports, like previous Gens. */
			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
			   /*
			    * If the user didn't require OA reports, instruct
			    * the hardware not to emit ctx switch reports.
			    */
			   oag_report_ctx_switches(stream));

	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
			    : 0);

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to
	 * NOA registers.
	 */
	ret = gen12_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	/*
	 * For Gen12, performance counters are context
	 * saved/restored. Only enable it for the context that
	 * requested this.
	 */
	if (stream->ctx) {
		ret = gen12_configure_oar_context(stream, active);
		if (ret)
			return ret;
	}

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static void gen11_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	gen12_configure_all_contexts(stream, NULL, NULL);

	/* disable the context save/restore or OAR counters */
	if (stream->ctx)
		gen12_configure_oar_context(stream, NULL);

	/* Make sure we disable noa to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen7_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_gem_context *ctx = stream->ctx;
	u32 ctx_id = stream->specific_ctx_id;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen7_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
}

static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen8_init_oa_buffer(stream);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports
	 */
	intel_uncore_write(uncore, GEN8_OACONTROL,
			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
			   GEN8_OA_COUNTER_ENABLE);
}

static void gen12_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format;

	/*
	 * If we don't want OA reports from the OA buffer, then we don't even
	 * need to program the OAG unit.
	 */
	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
		return;

	gen12_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
 */
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
	stream->pollin = false;

	stream->perf->ops.oa_enable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_start(&stream->poll_check_timer,
			      ns_to_ktime(stream->poll_oa_period),
			      HRTIMER_MODE_REL_PINNED);
}

static void gen7_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen8_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");
}

static void gen12_oa_disable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
	if (intel_wait_for_register(uncore,
				    GEN12_OAG_OACONTROL,
				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA to be disabled timed out\n");

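	/*
	 * Invalidate the OA unit's TLB once disabled, so no stale
	 * translations are left behind for the next stream.
	 */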
	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
	if (intel_wait_for_register(uncore,
				    GEN12_OA_TLB_INV_CR,
				    1, 0,
				    50))
		drm_err(&stream->perf->i915->drm,
			"wait for OA tlb invalidate timed out\n");
}

/**
 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * Stops the OA unit from periodically writing counter reports into the
 * circular OA buffer. This also stops the hrtimer that periodically checks for
 * data in the circular OA buffer, for notifying userspace.
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	stream->perf->ops.oa_disable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_cancel(&stream->poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};

static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
	struct i915_active *active;
	int err;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

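	/*
	 * The i915_active collects every request emitted while applying the
	 * metric set, letting us wait for the whole reconfiguration to land
	 * before returning to userspace.
	 */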
	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);

	i915_active_put(active);
	return err;
}

static void
get_default_sseu_config(struct intel_sseu *out_sseu,
			struct intel_engine_cs *engine)
{
	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;

	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);

	if (GRAPHICS_VER(engine->i915) == 11) {
		/*
		 * We only need the subslice count, so it doesn't matter
		 * which subslices we select - just turn off the low bits,
		 * leaving half of all available subslices per slice.
		 */
		out_sseu->subslice_mask =
			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
		out_sseu->slice_mask = 0x1;
	}
}

static int
get_sseu_config(struct intel_sseu *out_sseu,
		struct intel_engine_cs *engine,
		const struct drm_i915_gem_context_param_sseu *drm_sseu)
{
	if (drm_sseu->engine.engine_class != engine->uabi_class ||
	    drm_sseu->engine.engine_instance != engine->uabi_instance)
		return -EINVAL;

	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}

/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
 *
 * If the configuration makes sense then we can allocate memory for
 * a circular OA buffer and apply the requested metric set configuration.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_oa_stream_init(struct i915_perf_stream *stream,
			       struct drm_i915_perf_open_param *param,
			       struct perf_open_properties *props)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct i915_perf *perf = stream->perf;
	int format_size;
	int ret;

	if (!props->engine) {
		DRM_DEBUG("OA engine not specified\n");
		return -EINVAL;
	}

	/*
	 * If the sysfs metrics/ directory wasn't registered for some
	 * reason then don't let userspace try their luck with config
	 * IDs
	 */
	if (!perf->metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
		DRM_DEBUG("Only OA report sampling supported\n");
		return -EINVAL;
	}

	if (!perf->ops.enable_metric_set) {
		DRM_DEBUG("OA unit not supported\n");
		return -ENODEV;
	}

	/*
	 * To avoid the complexity of having to accurately filter
	 * counter reports and marshal to the appropriate client
	 * we currently only allow exclusive access
	 */
	if (perf->exclusive_stream) {
		DRM_DEBUG("OA unit already in use\n");
		return -EBUSY;
	}

	if (!props->oa_format) {
		DRM_DEBUG("OA report format not specified\n");
		return -EINVAL;
	}

	stream->engine = props->engine;
	stream->uncore = stream->engine->gt->uncore;

	stream->sample_size = sizeof(struct drm_i915_perf_record_header);

	format_size = perf->oa_formats[props->oa_format].size;

	stream->sample_flags = props->sample_flags;
	stream->sample_size += format_size;

	stream->oa_buffer.format_size = format_size;
	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
		return -EINVAL;

	stream->hold_preemption = props->hold_preemption;

	stream->oa_buffer.format =
		perf->oa_formats[props->oa_format].format;

	stream->periodic = props->oa_periodic;
	if (stream->periodic)
		stream->period_exponent = props->oa_period_exponent;

	if (stream->ctx) {
		ret = oa_get_render_ctx_id(stream);
		if (ret) {
			DRM_DEBUG("Invalid context id to filter with\n");
			return ret;
		}
	}

	ret = alloc_noa_wait(stream);
	if (ret) {
		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
		goto err_noa_wait_alloc;
	}

	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
	if (!stream->oa_config) {
		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
		ret = -EINVAL;
		goto err_config;
	}

	/* PRM - observability performance counters:
	 *
	 *   OACONTROL, performance counter enable, note:
	 *
	 *   "When this bit is set, in order to have coherent counts,
	 *   RC6 power state and trunk clock gating must be disabled.
	 *   This can be achieved by programming MMIO registers as
	 *   0xA094=0 and 0xA090[31]=1"
	 *
	 *   In our case we are expecting that taking pm + FORCEWAKE
	 *   references will effectively disable RC6.
	 */
	intel_engine_pm_get(stream->engine);
	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);

	ret = alloc_oa_buffer(stream);
	if (ret)
		goto err_oa_buf_alloc;

	stream->ops = &i915_oa_stream_ops;

	perf->sseu = props->sseu;
	WRITE_ONCE(perf->exclusive_stream, stream);

	ret = i915_perf_stream_enable_sync(stream);
	if (ret) {
		DRM_DEBUG("Unable to enable metric set\n");
		goto err_enable;
	}

	DRM_DEBUG("opening stream oa config uuid=%s\n",
		  stream->oa_config->uuid);

	hrtimer_init(&stream->poll_check_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stream->poll_check_timer.function = oa_poll_check_timer_cb;
	init_waitqueue_head(&stream->poll_wq);
	spin_lock_init(&stream->oa_buffer.ptr_lock);

	return 0;

err_enable:
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

err_oa_buf_alloc:
	free_oa_configs(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

err_config:
	free_noa_wait(stream);

err_noa_wait_alloc:
	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	return ret;
}

void i915_oa_init_reg_state(const struct intel_context *ce,
			    const struct intel_engine_cs *engine)
{
	struct i915_perf_stream *stream;

	if (engine->class != RENDER_CLASS)
		return;

	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
		gen8_update_reg_state_unlocked(ce, stream);
}

/**
 * i915_perf_read - handles read() FOP for i915 perf stream FDs
 * @file: An i915 perf stream file
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @ppos: (inout) file seek position (unused)
 *
 * The entry point for handling a read() on a stream file descriptor from
 * userspace. Most of the work is left to i915_perf_read_locked() and
 * &i915_perf_stream_ops->read, but we handle blocking reads here to save
 * stream implementations (of which we might have multiple later) from
 * each having to do so.
 *
 * We can also consistently treat trying to read from a disabled stream
 * as an IO error so implementations can assume the stream is enabled
 * while reading.
 *
 * Returns: The number of bytes copied or a negative error code on failure.
 */
static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	size_t offset = 0;
	int ret;

	/*
	 * To ensure it's handled consistently we simply treat all reads of a
	 * disabled stream as an error. In particular it might otherwise lead
	 * to a deadlock for blocking file descriptors...
	 */
	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/*
		 * There's the small chance of false positives from
		 * stream->ops->wait_unlocked.
		 *
		 * E.g. with single context filtering since we only wait until
		 * oabuffer has >= 1 report we don't immediately know whether
		 * any reports really belong to the current context
		 */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&perf->lock);
			ret = stream->ops->read(stream, buf, count, &offset);
			mutex_unlock(&perf->lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&perf->lock);
		ret = stream->ops->read(stream, buf, count, &offset);
		mutex_unlock(&perf->lock);
	}

	/*
	 * We allow the poll checking to sometimes report false positive EPOLLIN
	 * events where we might actually report EAGAIN on read() if there's
	 * not really any data available. In this situation though we don't
	 * want to enter a busy loop between poll() reporting a EPOLLIN event
	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
	 * effectively ensures we back off until the next hrtimer callback
	 * before reporting another EPOLLIN event.
	 * The exception to this is if ops->read() returned -ENOSPC which means
	 * that more OA data is available than could fit in the user provided
	 * buffer. In this case we want the next poll() call to not block.
	 */
	if (ret != -ENOSPC)
		stream->pollin = false;

	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
	return offset ?: (ret ?: -EAGAIN);
}
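/*
 * hrtimer callback: check the OA buffer for new reports and, if any have
 * landed, mark the stream readable and wake any poll() waiters, then
 * re-arm for the next poll period.
 */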
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct i915_perf_stream *stream =
		container_of(hrtimer, typeof(*stream), poll_check_timer);

	if (oa_buffer_check_unlocked(stream)) {
		stream->pollin = true;
		wake_up(&stream->poll_wq);
	}

	hrtimer_forward_now(hrtimer,
			    ns_to_ktime(stream->poll_oa_period));

	return HRTIMER_RESTART;
}

/**
 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
 * @stream: An i915 perf stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this calls through to
 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
 * will be woken for new stream data.
 *
 * Note: The &perf->lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
				      struct file *file,
				      poll_table *wait)
{
	__poll_t events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/*
	 * Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (stream->pollin)
		events |= EPOLLIN;

	return events;
}

/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	__poll_t ret;

	mutex_lock(&perf->lock);
	ret = i915_perf_poll_locked(stream, file, wait);
	mutex_unlock(&perf->lock);

	return ret;
}

3174 /**
3175 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3176 * @stream: A disabled i915 perf stream
3177 *
3178 * [Re]enables the associated capture of data for this stream.
3179 *
3180 * If a stream was previously enabled then there's currently no intention
3181 * to provide userspace any guarantee about the preservation of previously
3182 * buffered data.
3183 */
3184 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3185 {
3186 if (stream->enabled)
3187 return;
3188
3189 /* Allow stream->ops->enable() to refer to this */
3190 stream->enabled = true;
3191
3192 if (stream->ops->enable)
3193 stream->ops->enable(stream);
3194
3195 if (stream->hold_preemption)
3196 intel_context_set_nopreempt(stream->pinned_ctx);
3197 }
3198
3199 /**
3200 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3201 * @stream: An enabled i915 perf stream
3202 *
3203 * Disables the associated capture of data for this stream.
3204 *
3205 * The intention is that disabling and re-enabling a stream will ideally be
3206 * cheaper than destroying and re-opening a stream with the same configuration,
3207 * though there are no formal guarantees about what state or buffered data
3208 * must be retained between disabling and re-enabling a stream.
3209 *
3210 * Note: while a stream is disabled it's considered an error for userspace
3211 * to attempt to read from the stream (-EIO).
3212 */
3213 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3214 {
3215 if (!stream->enabled)
3216 return;
3217
3218 /* Allow stream->ops->disable() to refer to this */
3219 stream->enabled = false;
3220
3221 if (stream->hold_preemption)
3222 intel_context_clear_nopreempt(stream->pinned_ctx);
3223
3224 if (stream->ops->disable)
3225 stream->ops->disable(stream);
3226 }
3227
3228 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3229 unsigned long metrics_set)
3230 {
3231 struct i915_oa_config *config;
3232 long ret = stream->oa_config->id;
3233
3234 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3235 if (!config)
3236 return -EINVAL;
3237
3238 if (config != stream->oa_config) {
3239 int err;
3240
3241 /*
3242 * If OA is bound to a specific context, emit the
3243 * reconfiguration inline from that context. The update
3244 * will then be ordered with respect to submission on that
3245 * context.
3246 *
3247 * When set globally, we use a low priority kernel context,
3248 * so it will effectively take effect when idle.
3249 */
3250 err = emit_oa_config(stream, config, oa_context(stream), NULL);
3251 if (!err)
3252 config = xchg(&stream->oa_config, config);
3253 else
3254 ret = err;
3255 }
3256
3257 i915_oa_config_put(config);
3258
3259 return ret;
3260 }
3261
3262 /**
3263 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3264 * @stream: An i915 perf stream
3265 * @cmd: the ioctl request
3266 * @arg: the ioctl data
3267 *
3268 * Note: The &perf->lock mutex has been taken to serialize
3269 * with any non-file-operation driver hooks.
3270 *
3271 * Returns: zero on success or a negative error code. Returns -EINVAL for
3272 * an unknown ioctl request.
3273 */
3274 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3275 unsigned int cmd,
3276 unsigned long arg)
3277 {
3278 switch (cmd) {
3279 case I915_PERF_IOCTL_ENABLE:
3280 i915_perf_enable_locked(stream);
3281 return 0;
3282 case I915_PERF_IOCTL_DISABLE:
3283 i915_perf_disable_locked(stream);
3284 return 0;
3285 case I915_PERF_IOCTL_CONFIG:
3286 return i915_perf_config_locked(stream, arg);
3287 }
3288
3289 return -EINVAL;
3290 }
3291
3292 /**
3293 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3294 * @file: An i915 perf stream file
3295 * @cmd: the ioctl request
3296 * @arg: the ioctl data
3297 *
3298 * Implementation deferred to i915_perf_ioctl_locked().
3299 *
3300 * Returns: zero on success or a negative error code. Returns -EINVAL for
3301 * an unknown ioctl request.
3302 */
3303 static long i915_perf_ioctl(struct file *file,
3304 unsigned int cmd,
3305 unsigned long arg)
3306 {
3307 struct i915_perf_stream *stream = file->private_data;
3308 struct i915_perf *perf = stream->perf;
3309 long ret;
3310
3311 mutex_lock(&perf->lock);
3312 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3313 mutex_unlock(&perf->lock);
3314
3315 return ret;
3316 }
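
/*
 * Example (illustrative userspace sketch): driving a stream fd with the
 * ioctls above. A stream opened without I915_PERF_FLAG_DISABLED starts
 * out enabled, so the explicit enable is only needed for streams opened
 * disabled; metrics_set_id is an assumption for illustration.
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	// ... read() sample records ...
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id);
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */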
3317
3318 /**
3319 * i915_perf_destroy_locked - destroy an i915 perf stream
3320 * @stream: An i915 perf stream
3321 *
3322 * Frees all resources associated with the given i915 perf @stream, disabling
3323 * any associated data capture in the process.
3324 *
3325 * Note: The &perf->lock mutex has been taken to serialize
3326 * with any non-file-operation driver hooks.
3327 */
3328 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3329 {
3330 if (stream->enabled)
3331 i915_perf_disable_locked(stream);
3332
3333 if (stream->ops->destroy)
3334 stream->ops->destroy(stream);
3335
3336 if (stream->ctx)
3337 i915_gem_context_put(stream->ctx);
3338
3339 kfree(stream);
3340 }
3341
3342 /**
3343 * i915_perf_release - handles userspace close() of a stream file
3344 * @inode: anonymous inode associated with file
3345 * @file: An i915 perf stream file
3346 *
3347 * Cleans up any resources associated with an open i915 perf stream file.
3348 *
3349 * NB: close() can't really fail from the userspace point of view.
3350 *
3351 * Returns: zero on success or a negative error code.
3352 */
3353 static int i915_perf_release(struct inode *inode, struct file *file)
3354 {
3355 struct i915_perf_stream *stream = file->private_data;
3356 struct i915_perf *perf = stream->perf;
3357
3358 mutex_lock(&perf->lock);
3359 i915_perf_destroy_locked(stream);
3360 mutex_unlock(&perf->lock);
3361
3362 /* Release the reference the perf stream kept on the driver. */
3363 drm_dev_put(&perf->i915->drm);
3364
3365 return 0;
3366 }
3367
3368
3369 static const struct file_operations fops = {
3370 .owner = THIS_MODULE,
3371 .llseek = no_llseek,
3372 .release = i915_perf_release,
3373 .poll = i915_perf_poll,
3374 .read = i915_perf_read,
3375 .unlocked_ioctl = i915_perf_ioctl,
3376 /* Our ioctls have no arguments, so it's safe to use the same function
3377 * to handle 32-bit compatibility.
3378 */
3379 .compat_ioctl = i915_perf_ioctl,
3380 };
3381
3382
3383 /**
3384 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3385 * @perf: i915 perf instance
3386 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3387 * @props: individually validated u64 property value pairs
3388 * @file: drm file
3389 *
3390 * See i915_perf_ioctl_open() for interface details.
3391 *
3392 * Implements further stream config validation and stream initialization on
3393 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3394 * taken to serialize with any non-file-operation driver hooks.
3395 *
3396 * Note: at this point the @props have only been validated in isolation and
3397 * it's still necessary to validate that the combination of properties makes
3398 * sense.
3399 *
3400 * In the case where userspace is interested in OA unit metrics then further
3401 * config validation and stream initialization details will be handled by
3402 * i915_oa_stream_init(). The code here should only validate config state that
3403 * will be relevant to all stream types / backends.
3404 *
3405 * Returns: zero on success or a negative error code.
3406 */
3407 static int
3408 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3409 struct drm_i915_perf_open_param *param,
3410 struct perf_open_properties *props,
3411 struct drm_file *file)
3412 {
3413 struct i915_gem_context *specific_ctx = NULL;
3414 struct i915_perf_stream *stream = NULL;
3415 unsigned long f_flags = 0;
3416 bool privileged_op = true;
3417 int stream_fd;
3418 int ret;
3419
3420 if (props->single_context) {
3421 u32 ctx_handle = props->ctx_handle;
3422 struct drm_i915_file_private *file_priv = file->driver_priv;
3423
3424 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3425 if (IS_ERR(specific_ctx)) {
3426 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3427 ctx_handle);
3428 ret = PTR_ERR(specific_ctx);
3429 goto err;
3430 }
3431 }
3432
3433 /*
3434 * On Haswell the OA unit supports clock gating off for a specific
3435 * context and in this mode there's no visibility of metrics for the
3436 * rest of the system, which we consider acceptable for a
3437 * non-privileged client.
3438 *
3439 * For Gen8->11 the OA unit no longer supports clock gating off for a
3440 * specific context and the kernel can't securely stop the counters
3441 * from updating as system-wide / global values. Even though we can
3442 * filter reports based on the included context ID we can't block
3443 * clients from seeing the raw / global counter values via
3444 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3445 * enable the OA unit by default.
3446 *
3447 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3448 * per context basis. So we can relax requirements there if the user
3449 * doesn't request global stream access (i.e. query-based sampling
3450 * using MI_REPORT_PERF_COUNT).
3451 */
3452 if (IS_HASWELL(perf->i915) && specific_ctx)
3453 privileged_op = false;
3454 else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3455 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3456 privileged_op = false;
3457
3458 if (props->hold_preemption) {
3459 if (!props->single_context) {
3460 DRM_DEBUG("preemption disable with no context\n");
3461 ret = -EINVAL;
3462 goto err;
3463 }
3464 privileged_op = true;
3465 }
3466
3467 /*
3468 * Asking for SSEU configuration is a privileged operation.
3469 */
3470 if (props->has_sseu)
3471 privileged_op = true;
3472 else
3473 get_default_sseu_config(&props->sseu, props->engine);
3474
3475 /* Similar to perf's kernel.perf_event_paranoid sysctl option
3476 * we check a dev.i915.perf_stream_paranoid sysctl option
3477 * to determine if it's ok to access system wide OA counters
3478 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3479 */
3480 if (privileged_op &&
3481 i915_perf_stream_paranoid && !perfmon_capable()) {
3482 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3483 ret = -EACCES;
3484 goto err_ctx;
3485 }
3486
3487 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3488 if (!stream) {
3489 ret = -ENOMEM;
3490 goto err_ctx;
3491 }
3492
3493 stream->perf = perf;
3494 stream->ctx = specific_ctx;
3495 stream->poll_oa_period = props->poll_oa_period;
3496
3497 ret = i915_oa_stream_init(stream, param, props);
3498 if (ret)
3499 goto err_alloc;
3500
3501 /* We avoid simply assigning stream->sample_flags = props->sample_flags
3502 * so that _stream_init can check the combination of sample flags more
3503 * thoroughly; still, this is the expected result at this point.
3504 */
3505 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3506 ret = -ENODEV;
3507 goto err_flags;
3508 }
3509
3510 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3511 f_flags |= O_CLOEXEC;
3512 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3513 f_flags |= O_NONBLOCK;
3514
3515 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3516 if (stream_fd < 0) {
3517 ret = stream_fd;
3518 goto err_flags;
3519 }
3520
3521 if (!(param->flags & I915_PERF_FLAG_DISABLED))
3522 i915_perf_enable_locked(stream);
3523
3524 /* Take a reference on the driver that will be kept with stream_fd
3525 * until its release.
3526 */
3527 drm_dev_get(&perf->i915->drm);
3528
3529 return stream_fd;
3530
3531 err_flags:
3532 if (stream->ops->destroy)
3533 stream->ops->destroy(stream);
3534 err_alloc:
3535 kfree(stream);
3536 err_ctx:
3537 if (specific_ctx)
3538 i915_gem_context_put(specific_ctx);
3539 err:
3540 return ret;
3541 }
3542
3543 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3544 {
3545 return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
3546 2ULL << exponent);
3547 }
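
/*
 * For reference, the sampling period is (2 << exponent) timestamp-clock
 * ticks, i.e. 2^(exponent + 1) ticks. A sketch of the same arithmetic
 * from userspace, assuming Haswell's 12.5MHz timestamp clock (80ns
 * tick):
 *
 *	uint64_t ticks = 2ull << exponent;
 *	uint64_t period_ns = ticks * 80; // exponent 0 -> 160ns on HSW
 */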
3548
3549 static __always_inline bool
3550 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3551 {
3552 return test_bit(format, perf->format_mask);
3553 }
3554
3555 static __always_inline void
3556 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3557 {
3558 __set_bit(format, perf->format_mask);
3559 }
3560
3561 /**
3562 * read_properties_unlocked - validate + copy userspace stream open properties
3563 * @perf: i915 perf instance
3564 * @uprops: The array of u64 key value pairs given by userspace
3565 * @n_props: The number of key value pairs expected in @uprops
3566 * @props: The stream configuration built up while validating properties
3567 *
3568 * Note this function only validates properties in isolation; it doesn't
3569 * validate that the combination of properties makes sense or that all
3570 * properties necessary for a particular kind of stream have been set.
3571 *
3572 * Note that there currently aren't any ordering requirements for properties so
3573 * we shouldn't validate or assume anything about ordering here. This doesn't
3574 * rule out defining new properties with ordering requirements in the future.
3575 */
3576 static int read_properties_unlocked(struct i915_perf *perf,
3577 u64 __user *uprops,
3578 u32 n_props,
3579 struct perf_open_properties *props)
3580 {
3581 u64 __user *uprop = uprops;
3582 u32 i;
3583 int ret;
3584
3585 memset(props, 0, sizeof(struct perf_open_properties));
3586 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3587
3588 if (!n_props) {
3589 DRM_DEBUG("No i915 perf properties given\n");
3590 return -EINVAL;
3591 }
3592
3593 /* At the moment we only support using i915-perf on the RCS. */
3594 props->engine = intel_engine_lookup_user(perf->i915,
3595 I915_ENGINE_CLASS_RENDER,
3596 0);
3597 if (!props->engine) {
3598 DRM_DEBUG("No RENDER-capable engines\n");
3599 return -EINVAL;
3600 }
3601
3602 /* Considering that ID = 0 is reserved, and assuming we don't
3603 * (currently) expect any configuration to ever specify duplicate
3604 * values for a particular property ID, the last _PROP_MAX value is
3605 * one greater than the maximum number of properties we expect to get
3606 * from userspace.
3607 */
3608 if (n_props >= DRM_I915_PERF_PROP_MAX) {
3609 DRM_DEBUG("More i915 perf properties specified than exist\n");
3610 return -EINVAL;
3611 }
3612
3613 for (i = 0; i < n_props; i++) {
3614 u64 oa_period, oa_freq_hz;
3615 u64 id, value;
3616
3617 ret = get_user(id, uprop);
3618 if (ret)
3619 return ret;
3620
3621 ret = get_user(value, uprop + 1);
3622 if (ret)
3623 return ret;
3624
3625 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3626 DRM_DEBUG("Unknown i915 perf property ID\n");
3627 return -EINVAL;
3628 }
3629
3630 switch ((enum drm_i915_perf_property_id)id) {
3631 case DRM_I915_PERF_PROP_CTX_HANDLE:
3632 props->single_context = 1;
3633 props->ctx_handle = value;
3634 break;
3635 case DRM_I915_PERF_PROP_SAMPLE_OA:
3636 if (value)
3637 props->sample_flags |= SAMPLE_OA_REPORT;
3638 break;
3639 case DRM_I915_PERF_PROP_OA_METRICS_SET:
3640 if (value == 0) {
3641 DRM_DEBUG("Unknown OA metric set ID\n");
3642 return -EINVAL;
3643 }
3644 props->metrics_set = value;
3645 break;
3646 case DRM_I915_PERF_PROP_OA_FORMAT:
3647 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3648 DRM_DEBUG("Out-of-range OA report format %llu\n",
3649 value);
3650 return -EINVAL;
3651 }
3652 if (!oa_format_valid(perf, value)) {
3653 DRM_DEBUG("Unsupported OA report format %llu\n",
3654 value);
3655 return -EINVAL;
3656 }
3657 props->oa_format = value;
3658 break;
3659 case DRM_I915_PERF_PROP_OA_EXPONENT:
3660 if (value > OA_EXPONENT_MAX) {
3661 DRM_DEBUG("OA timer exponent too high (> %u)\n",
3662 OA_EXPONENT_MAX);
3663 return -EINVAL;
3664 }
3665
3666 /* Theoretically we can program the OA unit to sample
3667 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3668 * for BXT. We don't allow such high sampling
3669 * frequencies by default without CAP_PERFMON or CAP_SYS_ADMIN.
3670 */
3671
3672 BUILD_BUG_ON(sizeof(oa_period) != 8);
3673 oa_period = oa_exponent_to_ns(perf, value);
3674
3675 /* This check is primarily to ensure that oa_period <=
3676 * UINT32_MAX (before passing to do_div which only
3677 * accepts a u32 denominator), but we can also skip
3678 * checking anything < 1Hz which implicitly can't be
3679 * limited via an integer oa_max_sample_rate.
3680 */
3681 if (oa_period <= NSEC_PER_SEC) {
3682 u64 tmp = NSEC_PER_SEC;
3683 do_div(tmp, oa_period);
3684 oa_freq_hz = tmp;
3685 } else
3686 oa_freq_hz = 0;
3687
3688 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3689 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3690 i915_oa_max_sample_rate);
3691 return -EACCES;
3692 }
3693
3694 props->oa_periodic = true;
3695 props->oa_period_exponent = value;
3696 break;
3697 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3698 props->hold_preemption = !!value;
3699 break;
3700 case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3701 struct drm_i915_gem_context_param_sseu user_sseu;
3702
3703 if (copy_from_user(&user_sseu,
3704 u64_to_user_ptr(value),
3705 sizeof(user_sseu))) {
3706 DRM_DEBUG("Unable to copy global sseu parameter\n");
3707 return -EFAULT;
3708 }
3709
3710 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3711 if (ret) {
3712 DRM_DEBUG("Invalid SSEU configuration\n");
3713 return ret;
3714 }
3715 props->has_sseu = true;
3716 break;
3717 }
3718 case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3719 if (value < 100000 /* 100us */) {
3720 DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3721 value);
3722 return -EINVAL;
3723 }
3724 props->poll_oa_period = value;
3725 break;
3726 case DRM_I915_PERF_PROP_MAX:
3727 MISSING_CASE(id);
3728 return -EINVAL;
3729 }
3730
3731 uprop += 2;
3732 }
3733
3734 return 0;
3735 }
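
/*
 * Example (illustrative userspace layout): properties are consecutive
 * (key, value) u64 pairs with no ordering requirement. A single-context
 * stream that also holds preemption might look like this (ctx_handle
 * and metrics_set_id are assumptions for illustration):
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
 *		DRM_I915_PERF_PROP_HOLD_PREEMPTION, 1,
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 13,
 *	};
 */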
3736
3737 /**
3738 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3739 * @dev: drm device
3740 * @data: ioctl data copied from userspace (unvalidated)
3741 * @file: drm file
3742 *
3743 * Validates the stream open parameters given by userspace including flags
3744 * and an array of u64 key, value pair properties.
3745 *
3746 * Very little is assumed up front about the nature of the stream being
3747 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3748 * i915-perf stream is expected to be a suitable interface for other forms of
3749 * buffered data written by the GPU besides periodic OA metrics.
3750 *
3751 * Note we copy the properties from userspace outside of the i915 perf
3752 * mutex to avoid an awkward lockdep interaction with mmap_lock.
3753 *
3754 * Most of the implementation details are handled by
3755 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3756 * mutex for serializing with any non-file-operation driver hooks.
3757 *
3758 * Return: A newly opened i915 Perf stream file descriptor or negative
3759 * error code on failure.
3760 */
3761 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3762 struct drm_file *file)
3763 {
3764 struct i915_perf *perf = &to_i915(dev)->perf;
3765 struct drm_i915_perf_open_param *param = data;
3766 struct perf_open_properties props;
3767 u32 known_open_flags;
3768 int ret;
3769
3770 if (!perf->i915) {
3771 DRM_DEBUG("i915 perf interface not available for this system\n");
3772 return -ENOTSUPP;
3773 }
3774
3775 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3776 I915_PERF_FLAG_FD_NONBLOCK |
3777 I915_PERF_FLAG_DISABLED;
3778 if (param->flags & ~known_open_flags) {
3779 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3780 return -EINVAL;
3781 }
3782
3783 ret = read_properties_unlocked(perf,
3784 u64_to_user_ptr(param->properties_ptr),
3785 param->num_properties,
3786 &props);
3787 if (ret)
3788 return ret;
3789
3790 mutex_lock(&perf->lock);
3791 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3792 mutex_unlock(&perf->lock);
3793
3794 return ret;
3795 }
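
/*
 * Example (illustrative userspace sketch): opening an OA stream with a
 * gen8+ report format. The metrics set ID would normally be looked up
 * via sysfs (see i915_perf_register() below); drm_fd and the ID value 1
 * are assumptions for illustration.
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */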
3796
3797 /**
3798 * i915_perf_register - exposes i915-perf to userspace
3799 * @i915: i915 device instance
3800 *
3801 * In particular OA metric sets are advertised under a sysfs metrics/
3802 * directory allowing userspace to enumerate valid IDs that can be
3803 * used to open an i915-perf stream.
3804 */
3805 void i915_perf_register(struct drm_i915_private *i915)
3806 {
3807 struct i915_perf *perf = &i915->perf;
3808
3809 if (!perf->i915)
3810 return;
3811
3812 /* Take the lock to be sure we're synchronized with any attempted
3813 * i915_perf_open_ioctl(), considering that we register after
3814 * the driver has already been exposed to userspace.
3815 */
3816 mutex_lock(&perf->lock);
3817
3818 perf->metrics_kobj =
3819 kobject_create_and_add("metrics",
3820 &i915->drm.primary->kdev->kobj);
3821
3822 mutex_unlock(&perf->lock);
3823 }
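
/*
 * Example (illustrative userspace sketch): resolving a metric set UUID
 * advertised above to the ID expected by
 * DRM_I915_PERF_PROP_OA_METRICS_SET. The card index and the uuid
 * variable are assumptions for illustration.
 *
 *	char path[128];
 *	uint64_t id = 0;
 *	FILE *f;
 *
 *	snprintf(path, sizeof(path),
 *		 "/sys/class/drm/card0/metrics/%s/id", uuid);
 *	f = fopen(path, "r");
 *	if (f) {
 *		fscanf(f, "%" SCNu64, &id);
 *		fclose(f);
 *	}
 */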
3824
3825 /**
3826 * i915_perf_unregister - hide i915-perf from userspace
3827 * @i915: i915 device instance
3828 *
3829 * i915-perf state cleanup is split up into an 'unregister' and
3830 * 'deinit' phase where the interface is first hidden from
3831 * userspace by i915_perf_unregister() before cleaning up
3832 * remaining state in i915_perf_fini().
3833 */
3834 void i915_perf_unregister(struct drm_i915_private *i915)
3835 {
3836 struct i915_perf *perf = &i915->perf;
3837
3838 if (!perf->metrics_kobj)
3839 return;
3840
3841 kobject_put(perf->metrics_kobj);
3842 perf->metrics_kobj = NULL;
3843 }
3844
3845 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3846 {
3847 static const i915_reg_t flex_eu_regs[] = {
3848 EU_PERF_CNTL0,
3849 EU_PERF_CNTL1,
3850 EU_PERF_CNTL2,
3851 EU_PERF_CNTL3,
3852 EU_PERF_CNTL4,
3853 EU_PERF_CNTL5,
3854 EU_PERF_CNTL6,
3855 };
3856 int i;
3857
3858 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3859 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3860 return true;
3861 }
3862 return false;
3863 }
3864
3865 #define ADDR_IN_RANGE(addr, start, end) \
3866 ((addr) >= (start) && \
3867 (addr) <= (end))
3868
3869 #define REG_IN_RANGE(addr, start, end) \
3870 ((addr) >= i915_mmio_reg_offset(start) && \
3871 (addr) <= i915_mmio_reg_offset(end))
3872
3873 #define REG_EQUAL(addr, mmio) \
3874 ((addr) == i915_mmio_reg_offset(mmio))
3875
3876 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3877 {
3878 return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3879 REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3880 REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3881 }
3882
3883 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3884 {
3885 return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3886 REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3887 REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3888 REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3889 }
3890
3891 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3892 {
3893 return gen7_is_valid_mux_addr(perf, addr) ||
3894 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3895 REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3896 }
3897
3898 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3899 {
3900 return gen8_is_valid_mux_addr(perf, addr) ||
3901 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3902 REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3903 }
3904
3905 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3906 {
3907 return gen7_is_valid_mux_addr(perf, addr) ||
3908 ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3909 REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3910 REG_EQUAL(addr, HSW_MBVID2_MISR0);
3911 }
3912
3913 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3914 {
3915 return gen7_is_valid_mux_addr(perf, addr) ||
3916 ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3917 }
3918
3919 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3920 {
3921 return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3922 REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3923 REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3924 REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3925 REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3926 REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3927 REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3928 }
3929
3930 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3931 {
3932 return REG_EQUAL(addr, NOA_WRITE) ||
3933 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3934 REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3935 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3936 REG_EQUAL(addr, RPM_CONFIG0) ||
3937 REG_EQUAL(addr, RPM_CONFIG1) ||
3938 REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3939 }
3940
3941 static u32 mask_reg_value(u32 reg, u32 val)
3942 {
3943 /* HALF_SLICE_CHICKEN2 is programmed with the
3944 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3945 * programmed by userspace doesn't change this.
3946 */
3947 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3948 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3949
3950 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3951 * indicated by its name and a bunch of selection fields used by OA
3952 * configs.
3953 */
3954 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3955 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3956
3957 return val;
3958 }
3959
3960 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3961 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3962 u32 __user *regs,
3963 u32 n_regs)
3964 {
3965 struct i915_oa_reg *oa_regs;
3966 int err;
3967 u32 i;
3968
3969 if (!n_regs)
3970 return NULL;
3971
3972 /* No is_valid function means we're not allowing any register to be programmed. */
3973 GEM_BUG_ON(!is_valid);
3974 if (!is_valid)
3975 return ERR_PTR(-EINVAL);
3976
3977 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3978 if (!oa_regs)
3979 return ERR_PTR(-ENOMEM);
3980
3981 for (i = 0; i < n_regs; i++) {
3982 u32 addr, value;
3983
3984 err = get_user(addr, regs);
3985 if (err)
3986 goto addr_err;
3987
3988 if (!is_valid(perf, addr)) {
3989 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3990 err = -EINVAL;
3991 goto addr_err;
3992 }
3993
3994 err = get_user(value, regs + 1);
3995 if (err)
3996 goto addr_err;
3997
3998 oa_regs[i].addr = _MMIO(addr);
3999 oa_regs[i].value = mask_reg_value(addr, value);
4000
4001 regs += 2;
4002 }
4003
4004 return oa_regs;
4005
4006 addr_err:
4007 kfree(oa_regs);
4008 return ERR_PTR(err);
4009 }
4010
4011 static ssize_t show_dynamic_id(struct device *dev,
4012 struct device_attribute *attr,
4013 char *buf)
4014 {
4015 struct i915_oa_config *oa_config =
4016 container_of(attr, typeof(*oa_config), sysfs_metric_id);
4017
4018 return sprintf(buf, "%d\n", oa_config->id);
4019 }
4020
4021 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4022 struct i915_oa_config *oa_config)
4023 {
4024 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4025 oa_config->sysfs_metric_id.attr.name = "id";
4026 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4027 oa_config->sysfs_metric_id.show = show_dynamic_id;
4028 oa_config->sysfs_metric_id.store = NULL;
4029
4030 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4031 oa_config->attrs[1] = NULL;
4032
4033 oa_config->sysfs_metric.name = oa_config->uuid;
4034 oa_config->sysfs_metric.attrs = oa_config->attrs;
4035
4036 return sysfs_create_group(perf->metrics_kobj,
4037 &oa_config->sysfs_metric);
4038 }
4039
4040 /**
4041 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4042 * @dev: drm device
4043 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4044 * userspace (unvalidated)
4045 * @file: drm file
4046 *
4047 * Validates the submitted OA registers to be saved into a new OA config that
4048 * can then be used for programming the OA unit and its NOA network.
4049 *
4050 * Returns: A newly allocated config number to be used with the perf open ioctl
4051 * or a negative error code on failure.
4052 */
4053 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4054 struct drm_file *file)
4055 {
4056 struct i915_perf *perf = &to_i915(dev)->perf;
4057 struct drm_i915_perf_oa_config *args = data;
4058 struct i915_oa_config *oa_config, *tmp;
4059 struct i915_oa_reg *regs;
4060 int err, id;
4061
4062 if (!perf->i915) {
4063 DRM_DEBUG("i915 perf interface not available for this system\n");
4064 return -ENOTSUPP;
4065 }
4066
4067 if (!perf->metrics_kobj) {
4068 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4069 return -EINVAL;
4070 }
4071
4072 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4073 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4074 return -EACCES;
4075 }
4076
4077 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4078 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4079 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4080 DRM_DEBUG("No OA registers given\n");
4081 return -EINVAL;
4082 }
4083
4084 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4085 if (!oa_config) {
4086 DRM_DEBUG("Failed to allocate memory for the OA config\n");
4087 return -ENOMEM;
4088 }
4089
4090 oa_config->perf = perf;
4091 kref_init(&oa_config->ref);
4092
4093 if (!uuid_is_valid(args->uuid)) {
4094 DRM_DEBUG("Invalid uuid format for OA config\n");
4095 err = -EINVAL;
4096 goto reg_err;
4097 }
4098
4099 /* Last character in oa_config->uuid will be 0 because oa_config was
4100 * allocated with kzalloc().
4101 */
4102 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4103
4104 oa_config->mux_regs_len = args->n_mux_regs;
4105 regs = alloc_oa_regs(perf,
4106 perf->ops.is_valid_mux_reg,
4107 u64_to_user_ptr(args->mux_regs_ptr),
4108 args->n_mux_regs);
4109
4110 if (IS_ERR(regs)) {
4111 DRM_DEBUG("Failed to create OA config for mux_regs\n");
4112 err = PTR_ERR(regs);
4113 goto reg_err;
4114 }
4115 oa_config->mux_regs = regs;
4116
4117 oa_config->b_counter_regs_len = args->n_boolean_regs;
4118 regs = alloc_oa_regs(perf,
4119 perf->ops.is_valid_b_counter_reg,
4120 u64_to_user_ptr(args->boolean_regs_ptr),
4121 args->n_boolean_regs);
4122
4123 if (IS_ERR(regs)) {
4124 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4125 err = PTR_ERR(regs);
4126 goto reg_err;
4127 }
4128 oa_config->b_counter_regs = regs;
4129
4130 if (GRAPHICS_VER(perf->i915) < 8) {
4131 if (args->n_flex_regs != 0) {
4132 err = -EINVAL;
4133 goto reg_err;
4134 }
4135 } else {
4136 oa_config->flex_regs_len = args->n_flex_regs;
4137 regs = alloc_oa_regs(perf,
4138 perf->ops.is_valid_flex_reg,
4139 u64_to_user_ptr(args->flex_regs_ptr),
4140 args->n_flex_regs);
4141
4142 if (IS_ERR(regs)) {
4143 DRM_DEBUG("Failed to create OA config for flex_regs\n");
4144 err = PTR_ERR(regs);
4145 goto reg_err;
4146 }
4147 oa_config->flex_regs = regs;
4148 }
4149
4150 err = mutex_lock_interruptible(&perf->metrics_lock);
4151 if (err)
4152 goto reg_err;
4153
4154 /* We shouldn't have too many configs, so this iteration shouldn't be
4155 * too costly.
4156 */
4157 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4158 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4159 DRM_DEBUG("OA config already exists with this uuid\n");
4160 err = -EADDRINUSE;
4161 goto sysfs_err;
4162 }
4163 }
4164
4165 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4166 if (err) {
4167 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4168 goto sysfs_err;
4169 }
4170
4171 /* Config id 0 is invalid, id 1 is reserved for the kernel's built-in test config. */
4172 oa_config->id = idr_alloc(&perf->metrics_idr,
4173 oa_config, 2,
4174 0, GFP_KERNEL);
4175 if (oa_config->id < 0) {
4176 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4177 err = oa_config->id;
4178 goto sysfs_err;
4179 }
4180
4181 mutex_unlock(&perf->metrics_lock);
4182
4183 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4184
4185 return oa_config->id;
4186
4187 sysfs_err:
4188 mutex_unlock(&perf->metrics_lock);
4189 reg_err:
4190 i915_oa_config_put(oa_config);
4191 DRM_DEBUG("Failed to add new OA config\n");
4192 return err;
4193 }
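
/*
 * Example (illustrative userspace sketch): registering a dynamic OA
 * config. The uuid and register arrays here are assumptions; real
 * values normally come from generated metrics descriptions.
 *
 *	struct drm_i915_perf_oa_config config = { 0 };
 *
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(config.uuid));
 *	config.n_mux_regs = n_mux_regs;
 *	config.mux_regs_ptr = (uintptr_t)mux_regs;
 *	config.n_boolean_regs = n_boolean_regs;
 *	config.boolean_regs_ptr = (uintptr_t)boolean_regs;
 *
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */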
4194
4195 /**
4196 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4197 * @dev: drm device
4198 * @data: ioctl data (pointer to u64 integer) copied from userspace
4199 * @file: drm file
4200 *
4201 * Configs can be removed while being used; they will stop appearing in sysfs
4202 * and their content will be freed when the stream using the config is closed.
4203 *
4204 * Returns: 0 on success or a negative error code on failure.
4205 */
4206 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4207 struct drm_file *file)
4208 {
4209 struct i915_perf *perf = &to_i915(dev)->perf;
4210 u64 *arg = data;
4211 struct i915_oa_config *oa_config;
4212 int ret;
4213
4214 if (!perf->i915) {
4215 DRM_DEBUG("i915 perf interface not available for this system\n");
4216 return -ENOTSUPP;
4217 }
4218
4219 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4220 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4221 return -EACCES;
4222 }
4223
4224 ret = mutex_lock_interruptible(&perf->metrics_lock);
4225 if (ret)
4226 return ret;
4227
4228 oa_config = idr_find(&perf->metrics_idr, *arg);
4229 if (!oa_config) {
4230 DRM_DEBUG("Failed to remove unknown OA config\n");
4231 ret = -ENOENT;
4232 goto err_unlock;
4233 }
4234
4235 GEM_BUG_ON(*arg != oa_config->id);
4236
4237 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4238
4239 idr_remove(&perf->metrics_idr, *arg);
4240
4241 mutex_unlock(&perf->metrics_lock);
4242
4243 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4244
4245 i915_oa_config_put(oa_config);
4246
4247 return 0;
4248
4249 err_unlock:
4250 mutex_unlock(&perf->metrics_lock);
4251 return ret;
4252 }
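
/*
 * Example (illustrative userspace sketch): removal takes a pointer to
 * the u64 config ID previously returned by the add ioctl.
 *
 *	uint64_t config_id = id; // value returned by ADD_CONFIG above
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
 */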
4253
4254 static struct ctl_table oa_table[] = {
4255 {
4256 .procname = "perf_stream_paranoid",
4257 .data = &i915_perf_stream_paranoid,
4258 .maxlen = sizeof(i915_perf_stream_paranoid),
4259 .mode = 0644,
4260 .proc_handler = proc_dointvec_minmax,
4261 .extra1 = SYSCTL_ZERO,
4262 .extra2 = SYSCTL_ONE,
4263 },
4264 {
4265 .procname = "oa_max_sample_rate",
4266 .data = &i915_oa_max_sample_rate,
4267 .maxlen = sizeof(i915_oa_max_sample_rate),
4268 .mode = 0644,
4269 .proc_handler = proc_dointvec_minmax,
4270 .extra1 = SYSCTL_ZERO,
4271 .extra2 = &oa_sample_rate_hard_limit,
4272 },
4273 {}
4274 };
4275
4276 static struct ctl_table i915_root[] = {
4277 {
4278 .procname = "i915",
4279 .maxlen = 0,
4280 .mode = 0555,
4281 .child = oa_table,
4282 },
4283 {}
4284 };
4285
4286 static struct ctl_table dev_root[] = {
4287 {
4288 .procname = "dev",
4289 .maxlen = 0,
4290 .mode = 0555,
4291 .child = i915_root,
4292 },
4293 {}
4294 };
4295
4296 static void oa_init_supported_formats(struct i915_perf *perf)
4297 {
4298 struct drm_i915_private *i915 = perf->i915;
4299 enum intel_platform platform = INTEL_INFO(i915)->platform;
4300
4301 switch (platform) {
4302 case INTEL_HASWELL:
4303 oa_format_add(perf, I915_OA_FORMAT_A13);
4305 oa_format_add(perf, I915_OA_FORMAT_A29);
4306 oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4307 oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4308 oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4309 oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4310 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4311 break;
4312
4313 case INTEL_BROADWELL:
4314 case INTEL_CHERRYVIEW:
4315 case INTEL_SKYLAKE:
4316 case INTEL_BROXTON:
4317 case INTEL_KABYLAKE:
4318 case INTEL_GEMINILAKE:
4319 case INTEL_COFFEELAKE:
4320 case INTEL_COMETLAKE:
4321 case INTEL_ICELAKE:
4322 case INTEL_ELKHARTLAKE:
4323 case INTEL_JASPERLAKE:
4324 case INTEL_TIGERLAKE:
4325 case INTEL_ROCKETLAKE:
4326 case INTEL_DG1:
4327 case INTEL_ALDERLAKE_S:
4328 case INTEL_ALDERLAKE_P:
4329 oa_format_add(perf, I915_OA_FORMAT_A12);
4330 oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4331 oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4332 oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4333 break;
4334
4335 default:
4336 MISSING_CASE(platform);
4337 }
4338 }
4339
4340 /**
4341 * i915_perf_init - initialize i915-perf state on module bind
4342 * @i915: i915 device instance
4343 *
4344 * Initializes i915-perf state without exposing anything to userspace.
4345 *
4346 * Note: i915-perf initialization is split into an 'init' and 'register'
4347 * phase with the i915_perf_register() exposing state to userspace.
4348 */
4349 void i915_perf_init(struct drm_i915_private *i915)
4350 {
4351 struct i915_perf *perf = &i915->perf;
4352
4353 /* XXX const struct i915_perf_ops! */
4354
4355 perf->oa_formats = oa_formats;
4356 if (IS_HASWELL(i915)) {
4357 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4358 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4359 perf->ops.is_valid_flex_reg = NULL;
4360 perf->ops.enable_metric_set = hsw_enable_metric_set;
4361 perf->ops.disable_metric_set = hsw_disable_metric_set;
4362 perf->ops.oa_enable = gen7_oa_enable;
4363 perf->ops.oa_disable = gen7_oa_disable;
4364 perf->ops.read = gen7_oa_read;
4365 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4366 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4367 /* Note that although we could theoretically also support the
4368 * legacy ringbuffer mode on BDW (and earlier iterations of
4369 * this driver, before upstreaming did this) it didn't seem
4370 * worth the complexity to maintain now that BDW+ enable
4371 * execlist mode by default.
4372 */
4373 perf->ops.read = gen8_oa_read;
4374
4375 if (IS_GRAPHICS_VER(i915, 8, 9)) {
4376 perf->ops.is_valid_b_counter_reg =
4377 gen7_is_valid_b_counter_addr;
4378 perf->ops.is_valid_mux_reg =
4379 gen8_is_valid_mux_addr;
4380 perf->ops.is_valid_flex_reg =
4381 gen8_is_valid_flex_addr;
4382
4383 if (IS_CHERRYVIEW(i915)) {
4384 perf->ops.is_valid_mux_reg =
4385 chv_is_valid_mux_addr;
4386 }
4387
4388 perf->ops.oa_enable = gen8_oa_enable;
4389 perf->ops.oa_disable = gen8_oa_disable;
4390 perf->ops.enable_metric_set = gen8_enable_metric_set;
4391 perf->ops.disable_metric_set = gen8_disable_metric_set;
4392 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4393
4394 if (GRAPHICS_VER(i915) == 8) {
4395 perf->ctx_oactxctrl_offset = 0x120;
4396 perf->ctx_flexeu0_offset = 0x2ce;
4397
4398 perf->gen8_valid_ctx_bit = BIT(25);
4399 } else {
4400 perf->ctx_oactxctrl_offset = 0x128;
4401 perf->ctx_flexeu0_offset = 0x3de;
4402
4403 perf->gen8_valid_ctx_bit = BIT(16);
4404 }
4405 } else if (GRAPHICS_VER(i915) == 11) {
4406 perf->ops.is_valid_b_counter_reg =
4407 gen7_is_valid_b_counter_addr;
4408 perf->ops.is_valid_mux_reg =
4409 gen11_is_valid_mux_addr;
4410 perf->ops.is_valid_flex_reg =
4411 gen8_is_valid_flex_addr;
4412
4413 perf->ops.oa_enable = gen8_oa_enable;
4414 perf->ops.oa_disable = gen8_oa_disable;
4415 perf->ops.enable_metric_set = gen8_enable_metric_set;
4416 perf->ops.disable_metric_set = gen11_disable_metric_set;
4417 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4418
4419 perf->ctx_oactxctrl_offset = 0x124;
4420 perf->ctx_flexeu0_offset = 0x78e;
4421
4422 perf->gen8_valid_ctx_bit = BIT(16);
4423 } else if (GRAPHICS_VER(i915) == 12) {
4424 perf->ops.is_valid_b_counter_reg =
4425 gen12_is_valid_b_counter_addr;
4426 perf->ops.is_valid_mux_reg =
4427 gen12_is_valid_mux_addr;
4428 perf->ops.is_valid_flex_reg =
4429 gen8_is_valid_flex_addr;
4430
4431 perf->ops.oa_enable = gen12_oa_enable;
4432 perf->ops.oa_disable = gen12_oa_disable;
4433 perf->ops.enable_metric_set = gen12_enable_metric_set;
4434 perf->ops.disable_metric_set = gen12_disable_metric_set;
4435 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4436
4437 perf->ctx_flexeu0_offset = 0;
4438 perf->ctx_oactxctrl_offset = 0x144;
4439 }
4440 }
4441
4442 if (perf->ops.enable_metric_set) {
4443 mutex_init(&perf->lock);
4444
4445 /* Choose a representative limit */
4446 oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
4447
4448 mutex_init(&perf->metrics_lock);
4449 idr_init_base(&perf->metrics_idr, 1);
4450
4451 /* We set up some ratelimit state to potentially throttle any
4452 * _NOTES about spurious, invalid OA reports which we don't
4453 * forward to userspace.
4454 *
4455 * We print a _NOTE about any throttling when closing the
4456 * stream instead of waiting until driver _fini which no one
4457 * would ever see.
4458 *
4459 * Using the same limiting factors as printk_ratelimit()
4460 */
4461 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4462 /* Since we use a DRM_NOTE for spurious reports it would be
4463 * inconsistent to let __ratelimit() automatically print a
4464 * warning for throttling.
4465 */
4466 ratelimit_set_flags(&perf->spurious_report_rs,
4467 RATELIMIT_MSG_ON_RELEASE);
4468
4469 ratelimit_state_init(&perf->tail_pointer_race,
4470 5 * HZ, 10);
4471 ratelimit_set_flags(&perf->tail_pointer_race,
4472 RATELIMIT_MSG_ON_RELEASE);
4473
4474 atomic64_set(&perf->noa_programming_delay,
4475 500 * 1000 /* 500us */);
4476
4477 perf->i915 = i915;
4478
4479 oa_init_supported_formats(perf);
4480 }
4481 }
4482
4483 static int destroy_config(int id, void *p, void *data)
4484 {
4485 i915_oa_config_put(p);
4486 return 0;
4487 }
4488
4489 int i915_perf_sysctl_register(void)
4490 {
4491 sysctl_header = register_sysctl_table(dev_root);
4492 return 0;
4493 }
4494
4495 void i915_perf_sysctl_unregister(void)
4496 {
4497 unregister_sysctl_table(sysctl_header);
4498 }
4499
4500 /**
4501 * i915_perf_fini - Counter part to i915_perf_init()
4502 * @i915: i915 device instance
4503 */
4504 void i915_perf_fini(struct drm_i915_private *i915)
4505 {
4506 struct i915_perf *perf = &i915->perf;
4507
4508 if (!perf->i915)
4509 return;
4510
4511 idr_for_each(&perf->metrics_idr, destroy_config, perf);
4512 idr_destroy(&perf->metrics_idr);
4513
4514 memset(&perf->ops, 0, sizeof(perf->ops));
4515 perf->i915 = NULL;
4516 }
4517
4518 /**
4519 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4520 *
4521 * This version number is used by userspace to detect available features.
4522 */
4523 int i915_perf_ioctl_version(void)
4524 {
4525 /*
4526 * 1: Initial version
4527 * I915_PERF_IOCTL_ENABLE
4528 * I915_PERF_IOCTL_DISABLE
4529 *
4530 * 2: Added runtime modification of OA config.
4531 * I915_PERF_IOCTL_CONFIG
4532 *
4533 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4534 * preemption on a particular context so that performance data is
4535 * accessible from a delta of MI_RPC reports without looking at the
4536 * OA buffer.
4537 *
4538 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4539 * be run for the duration of the performance recording based on
4540 * their SSEU configuration.
4541 *
4542 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4543 * interval for the hrtimer used to check for OA data.
4544 */
4545 return 5;
4546 }
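
/*
 * Example (illustrative userspace sketch): the revision can be queried
 * through the getparam ioctl before relying on newer features such as
 * DRM_I915_PERF_PROP_POLL_OA_PERIOD (version 5); drm_fd is an
 * assumption for illustration.
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &version,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */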
4547
4548 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4549 #include "selftests/i915_perf.c"
4550 #endif
4551