/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
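 * As a rough sketch (not a complete, validated example), userspace might open
 * and read a system-wide OA stream roughly as follows, where the metric set ID
 * would previously have been read from a metric set's sysfs 'id' file (names
 * and property values here are illustrative)::
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, true,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * read()s of the returned fd then yield a sequence of records, each starting
 * with a struct drm_i915_perf_record_header giving the record's type and size.
 *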
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event": where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also, considering the Gen specific nature of the Observability hardware, and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we expect
 * the interface to be used by a platform specific userspace such as OpenGL
 * or tools. This is to say: we aren't inherently missing out on having a
 * standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based: the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature: there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality: we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu relates to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used, such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
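
/* For illustration (hypothetical values): with the 16M buffer the mask is
 * 0xffffff, so with a wrapped tail of 0x000100 and a head of 0xffff00,
 * OA_TAKEN(0x000100, 0xffff00) = (0x100 - 0xffff00) & 0xffffff = 0x200,
 * i.e. 512 bytes are available between head and tail.
 */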

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked() call, to avoid
 * lots of redundant read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff
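
/* A sketch (not normative) of how the two tails evolve across hrtimer
 * callbacks, assuming new reports keep arriving:
 *
 *   callback N:   aging tail invalid -> start aging the current hw tail,
 *                 recording a timestamp T
 *   callback N+1: (now - T) > OA_TAIL_MARGIN_NSEC -> promote the aging tail
 *                 to the aged tail (read()s may now consume up to it) and
 *                 begin aging a new hw tail once it has advanced by at
 *                 least one report
 */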

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
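
/* For reference (an informal sketch, not from the PRM): the period selected
 * by an exponent e corresponds to 2^(e + 1) timestamp ticks, so on Haswell
 * with its 12.5MHz timestamp frequency:
 *
 *   e = 0        -> 2 ticks     = 160ns (the fastest possible period)
 *   e = 16       -> 2^17 ticks ~= 10.5ms
 *   e = 31 (max) -> 2^32 ticks ~= 343s, which is also the point at which
 *                   the 32bit timestamps in OA reports wrap around
 */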

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics, the configuration is built up in the structure,
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register tables may be ERR_PTR() placeholders left over from a
	 * failed config load; only kfree() tables that are valid pointers.
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and whether
 * there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset if there is currently
	 * a read() in progress).
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
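
/* Note (illustrative): a status record is just a bare header, so e.g. a
 * report-lost notification reads back to userspace as
 * { DRM_I915_PERF_RECORD_OA_REPORT_LOST, 0, 8 } with no payload after it.
 */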

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
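
/* For illustration: with SAMPLE_OA_REPORT set and the 256 byte A45_B8_C8
 * format, each sample record seen by userspace is 264 bytes - an 8 byte
 * drm_i915_perf_record_header (with header.size = 264, since the size field
 * covers the whole record) followed by the raw 256 byte OA report.
 */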

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    GEN7_OASTATUS2_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
					    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine = i915->engine[RCS];
	struct intel_context *ce;
	int ret;

	ret = i915_mutex_lock_interruptible(&i915->drm);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 *
	 * NB: implied RCS engine...
	 */
	ce = intel_context_pin(ctx, engine);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ce))
		return ce;

	i915->perf.oa.pinned_ctx = ce;

	return ce;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->dev_priv;
	struct intel_context *ce;

	ce = oa_pin_context(i915, stream->ctx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
		i915->perf.oa.specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (USES_GUC_SUBMISSION(i915)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			i915->perf.oa.specific_ctx_id =
				lower_32_bits(ce->lrc_desc) >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			i915->perf.oa.specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			i915->perf.oa.specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			i915->perf.oa.specific_ctx_id =
				upper_32_bits(ce->lrc_desc);
			i915->perf.oa.specific_ctx_id &=
				i915->perf.oa.specific_ctx_id_mask;
		}
		break;

	case 11: {
		i915->perf.oa.specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
			((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
			((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
		i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
		i915->perf.oa.specific_ctx_id &=
			i915->perf.oa.specific_ctx_id_mask;
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(i915));
	}

	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
			 i915->perf.oa.specific_ctx_id,
			 i915->perf.oa.specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	struct intel_context *ce;

	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	dev_priv->perf.oa.specific_ctx_id_mask = 0;

	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
	if (ce) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_context_unpin(ce);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2,
		   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
1430
gen8_init_oa_buffer(struct drm_i915_private * dev_priv)1431 static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
1432 {
1433 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
1434 unsigned long flags;
1435
1436 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1437
1438 I915_WRITE(GEN8_OASTATUS, 0);
1439 I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
1440 dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1441
1442 I915_WRITE(GEN8_OABUFFER_UDW, 0);
1443
1444 /*
1445 * PRM says:
1446 *
1447 * "This MMIO must be set before the OATAILPTR
1448 * register and after the OAHEADPTR register. This is
1449 * to enable proper functionality of the overflow
1450 * bit."
1451 */
1452 I915_WRITE(GEN8_OABUFFER, gtt_offset |
1453 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1454 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1455
1456 /* Mark that we need updated tail pointers to read from... */
1457 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1458 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1459
1460 /*
1461 * Reset state used to recognise context switches, affecting which
1462 * reports we will forward to userspace while filtering for a single
1463 * context.
1464 */
1465 dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
1466
1467 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1468
1469 /*
1470 * NB: although the OA buffer will initially be allocated
1471 * zeroed via shmfs (and so this memset is redundant when
1472 * first allocating), we may re-init the OA buffer, either
1473 * when re-enabling a stream or in error/reset paths.
1474 *
1475 * The reason we clear the buffer for each re-init is for the
1476 * sanity check in gen8_append_oa_reports() that looks at the
1477 * reason field to make sure it's non-zero, which relies on
1478 * the assumption that new reports are being written to zeroed
1479 * memory...
1480 */
1481 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1482
1483 /*
1484 * Maybe make ->pollin per-stream state if we support multiple
1485 * concurrent streams in the future.
1486 */
1487 dev_priv->perf.oa.pollin = false;
1488 }
1489
1490 static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
1491 {
1492 struct drm_i915_gem_object *bo;
1493 struct i915_vma *vma;
1494 int ret;
1495
1496 if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
1497 return -ENODEV;
1498
1499 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1500 if (ret)
1501 return ret;
1502
1503 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1504 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1505
1506 bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
1507 if (IS_ERR(bo)) {
1508 DRM_ERROR("Failed to allocate OA buffer\n");
1509 ret = PTR_ERR(bo);
1510 goto unlock;
1511 }
1512
1513 ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
1514 if (ret)
1515 goto err_unref;
1516
1517 /* PreHSW required 512K alignment, HSW requires 16M */
1518 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1519 if (IS_ERR(vma)) {
1520 ret = PTR_ERR(vma);
1521 goto err_unref;
1522 }
1523 dev_priv->perf.oa.oa_buffer.vma = vma;
1524
1525 dev_priv->perf.oa.oa_buffer.vaddr =
1526 i915_gem_object_pin_map(bo, I915_MAP_WB);
1527 if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
1528 ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
1529 goto err_unpin;
1530 }
1531
1532 dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
1533
1534 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
1535 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
1536 dev_priv->perf.oa.oa_buffer.vaddr);
1537
1538 goto unlock;
1539
1540 err_unpin:
1541 __i915_vma_unpin(vma);
1542
1543 err_unref:
1544 i915_gem_object_put(bo);
1545
1546 dev_priv->perf.oa.oa_buffer.vaddr = NULL;
1547 dev_priv->perf.oa.oa_buffer.vma = NULL;
1548
1549 unlock:
1550 mutex_unlock(&dev_priv->drm.struct_mutex);
1551 return ret;
1552 }
1553
1554 static void config_oa_regs(struct drm_i915_private *dev_priv,
1555 const struct i915_oa_reg *regs,
1556 u32 n_regs)
1557 {
1558 u32 i;
1559
1560 for (i = 0; i < n_regs; i++) {
1561 const struct i915_oa_reg *reg = regs + i;
1562
1563 I915_WRITE(reg->addr, reg->value);
1564 }
1565 }
1566
1567 static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
1568 const struct i915_oa_config *oa_config)
1569 {
1570 /* PRM:
1571 *
1572 * OA unit is using “crclk” for its functionality. When trunk
1573 * level clock gating takes place, OA clock would be gated,
1574 * unable to count the events from non-render clock domain.
1575 * Render clock gating must be disabled when OA is enabled to
1576 * count the events from non-render domain. Unit level clock
1577 * gating for RCS should also be disabled.
1578 */
1579 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1580 ~GEN7_DOP_CLOCK_GATE_ENABLE));
1581 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1582 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1583
1584 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1585
1586 /* It apparently takes a fairly long time for a new MUX
1587 * configuration to be applied after these register writes.
1588 * This delay duration was derived empirically based on the
1589 * render_basic config but hopefully it covers the maximum
1590 * configuration latency.
1591 *
1592 * As a fallback, the checks in _append_oa_reports() that skip
1593 * invalid OA reports also seem to discard reports generated
1594 * before this config has completed - albeit not
1595 * silently.
1596 *
1597 * Unfortunately this is essentially a magic number, since we
1598 * don't currently know of a reliable mechanism for predicting
1599 * how long the MUX config will take to apply and besides
1600 * seeing invalid reports we don't know of a reliable way to
1601 * explicitly check that the MUX config has landed.
1602 *
1603 * It's even possible we've mischaracterized the underlying
1604 * problem - it just seems like the simplest explanation for why
1605 * a delay at this location would mitigate any invalid reports.
1606 */
1607 usleep_range(15000, 20000);
1608
1609 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1610 oa_config->b_counter_regs_len);
1611
1612 return 0;
1613 }
1614
1615 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1616 {
1617 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1618 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1619 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1620 GEN7_DOP_CLOCK_GATE_ENABLE));
1621
1622 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1623 ~GT_NOA_ENABLE));
1624 }
1625
1626 /*
1627 * NB: It must always remain pointer safe to run this even if the OA unit
1628 * has been disabled.
1629 *
1630 * It's fine to put out-of-date values into these per-context registers
1631 * in the case that the OA unit has been disabled.
1632 */
1633 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
1634 u32 *reg_state,
1635 const struct i915_oa_config *oa_config)
1636 {
1637 struct drm_i915_private *dev_priv = ctx->i915;
1638 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
1639 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
1640 /* The MMIO offsets for Flex EU registers aren't contiguous */
1641 u32 flex_mmio[] = {
1642 i915_mmio_reg_offset(EU_PERF_CNTL0),
1643 i915_mmio_reg_offset(EU_PERF_CNTL1),
1644 i915_mmio_reg_offset(EU_PERF_CNTL2),
1645 i915_mmio_reg_offset(EU_PERF_CNTL3),
1646 i915_mmio_reg_offset(EU_PERF_CNTL4),
1647 i915_mmio_reg_offset(EU_PERF_CNTL5),
1648 i915_mmio_reg_offset(EU_PERF_CNTL6),
1649 };
1650 int i;
1651
1652 reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1653 reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
1654 GEN8_OA_TIMER_PERIOD_SHIFT) |
1655 (dev_priv->perf.oa.periodic ?
1656 GEN8_OA_TIMER_ENABLE : 0) |
1657 GEN8_OA_COUNTER_RESUME;
1658
1659 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1660 u32 state_offset = ctx_flexeu0 + i * 2;
1661 u32 mmio = flex_mmio[i];
1662
1663 /*
1664 * This arbitrary default will select the 'EU FPU0 Pipeline
1665 * Active' event. In the future it's anticipated that there
1666 * will be an explicit 'No Event' we can select, but not yet...
1667 */
1668 u32 value = 0;
1669
1670 if (oa_config) {
1671 u32 j;
1672
1673 for (j = 0; j < oa_config->flex_regs_len; j++) {
1674 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1675 value = oa_config->flex_regs[j].value;
1676 break;
1677 }
1678 }
1679 }
1680
1681 reg_state[state_offset] = mmio;
1682 reg_state[state_offset+1] = value;
1683 }
1684 }
1685
1686 /*
1687 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
1688 * is only used by the kernel context.
1689 */
1690 static int gen8_emit_oa_config(struct i915_request *rq,
1691 const struct i915_oa_config *oa_config)
1692 {
1693 struct drm_i915_private *dev_priv = rq->i915;
1694 /* The MMIO offsets for Flex EU registers aren't contiguous */
1695 u32 flex_mmio[] = {
1696 i915_mmio_reg_offset(EU_PERF_CNTL0),
1697 i915_mmio_reg_offset(EU_PERF_CNTL1),
1698 i915_mmio_reg_offset(EU_PERF_CNTL2),
1699 i915_mmio_reg_offset(EU_PERF_CNTL3),
1700 i915_mmio_reg_offset(EU_PERF_CNTL4),
1701 i915_mmio_reg_offset(EU_PERF_CNTL5),
1702 i915_mmio_reg_offset(EU_PERF_CNTL6),
1703 };
1704 u32 *cs;
1705 int i;
1706
1707 cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4);
1708 if (IS_ERR(cs))
1709 return PTR_ERR(cs);
1710
1711 *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
1712
1713 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1714 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1715 (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
1716 GEN8_OA_COUNTER_RESUME;
1717
1718 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1719 u32 mmio = flex_mmio[i];
1720
1721 /*
1722 * This arbitrary default will select the 'EU FPU0 Pipeline
1723 * Active' event. In the future it's anticipated that there
1724 * will be an explicit 'No Event' we can select, but not
1725 * yet...
1726 */
1727 u32 value = 0;
1728
1729 if (oa_config) {
1730 u32 j;
1731
1732 for (j = 0; j < oa_config->flex_regs_len; j++) {
1733 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1734 value = oa_config->flex_regs[j].value;
1735 break;
1736 }
1737 }
1738 }
1739
1740 *cs++ = mmio;
1741 *cs++ = value;
1742 }
1743
1744 *cs++ = MI_NOOP;
1745 intel_ring_advance(rq, cs);
1746
1747 return 0;
1748 }
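/*
 * For reference, a sketch of the command stream emitted above (the
 * flex register values depend on the given oa_config):
 *
 *   MI_LOAD_REGISTER_IMM(8)   <- 1 OACTXCONTROL write + 7 flex EU regs
 *   <GEN8_OACTXCONTROL>  <period exponent | periodic enable | resume>
 *   <EU_PERF_CNTL0>      <value>
 *   ...
 *   <EU_PERF_CNTL6>      <value>
 *   MI_NOOP
 */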
1749
1750 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
1751 const struct i915_oa_config *oa_config)
1752 {
1753 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1754 struct i915_timeline *timeline;
1755 struct i915_request *rq;
1756 int ret;
1757
1758 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1759
1760 i915_retire_requests(dev_priv);
1761
1762 rq = i915_request_alloc(engine, dev_priv->kernel_context);
1763 if (IS_ERR(rq))
1764 return PTR_ERR(rq);
1765
1766 ret = gen8_emit_oa_config(rq, oa_config);
1767 if (ret) {
1768 i915_request_add(rq);
1769 return ret;
1770 }
1771
1772 /* Queue this switch after all other activity */
1773 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
1774 struct i915_request *prev;
1775
1776 prev = i915_gem_active_raw(&timeline->last_request,
1777 &dev_priv->drm.struct_mutex);
1778 if (prev)
1779 i915_request_await_dma_fence(rq, &prev->fence);
1780 }
1781
1782 i915_request_add(rq);
1783
1784 return 0;
1785 }
1786
1787 /*
1788 * Manages updating the per-context aspects of the OA stream
1789 * configuration across all contexts.
1790 *
1791 * The awkward consideration here is that OACTXCONTROL controls the
1792 * exponent for periodic sampling, which is primarily used for
1793 * system-wide profiling where we'd like a consistent sampling period even in
1794 * the face of context switches.
1795 *
1796 * Our approach of updating the register state context (as opposed to
1797 * say using a workaround batch buffer) ensures that the hardware
1798 * won't automatically reload an out-of-date timer exponent even
1799 * transiently before a WA BB could be parsed.
1800 *
1801 * This function needs to:
1802 * - Ensure the currently running context's per-context OA state is
1803 * updated
1804 * - Ensure that all existing contexts will have the correct per-context
1805 * OA state if they are scheduled for use.
1806 * - Ensure any new contexts will be initialized with the correct
1807 * per-context OA state.
1808 *
1809 * Note: it's only the RCS/Render context that has any OA state.
1810 */
1811 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1812 const struct i915_oa_config *oa_config)
1813 {
1814 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1815 struct i915_gem_context *ctx;
1816 int ret;
1817 unsigned int wait_flags = I915_WAIT_LOCKED;
1818
1819 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1820
1821 /* Switch away from any user context. */
1822 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
1823 if (ret)
1824 goto out;
1825
1826 /*
1827 * The OA register config is set up through the context image. This image
1828 * might be written to by the GPU on context switch (in particular on
1829 * lite-restore). This means we can't safely update a context's image
1830 * if this context is scheduled/submitted to run on the GPU.
1831 *
1832 * We could emit the OA register config through the batch buffer but
1833 * this might leave a small interval of time where the OA unit is
1834 * configured at an invalid sampling period.
1835 *
1836 * So far the best way to work around this issue seems to be draining
1837 * the GPU of any submitted work.
1838 */
1839 ret = i915_gem_wait_for_idle(dev_priv,
1840 wait_flags,
1841 MAX_SCHEDULE_TIMEOUT);
1842 if (ret)
1843 goto out;
1844
1845 /* Update all contexts now that we've stalled the submission. */
1846 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1847 struct intel_context *ce = to_intel_context(ctx, engine);
1848 u32 *regs;
1849
1850 /* OA settings will be set upon first use */
1851 if (!ce->state)
1852 continue;
1853
1854 regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1855 if (IS_ERR(regs)) {
1856 ret = PTR_ERR(regs);
1857 goto out;
1858 }
1859
1860 ce->state->obj->mm.dirty = true;
1861 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1862
1863 gen8_update_reg_state_unlocked(ctx, regs, oa_config);
1864
1865 i915_gem_object_unpin_map(ce->state->obj);
1866 }
1867
1868 out:
1869 return ret;
1870 }
1871
1872 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1873 const struct i915_oa_config *oa_config)
1874 {
1875 int ret;
1876
1877 /*
1878 * We disable slice/unslice clock ratio change reports on SKL since
1879 * they are too noisy. The HW generates a lot of redundant reports
1880 * where the ratio hasn't really changed, causing a lot of redundant
1881 * work for userspace and increasing the chances we'll hit buffer
1882 * overruns.
1883 *
1884 * Although we don't currently use the 'disable overrun' OABUFFER
1885 * feature it's worth noting that clock ratio reports have to be
1886 * disabled before considering use of that feature, since the HW doesn't
1887 * correctly block these reports.
1888 *
1889 * Currently none of the high-level metrics we have depend on knowing
1890 * this ratio to normalize.
1891 *
1892 * Note: This register is not power context saved and restored, but
1893 * that's OK considering that we disable RC6 while the OA unit is
1894 * enabled.
1895 *
1896 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1897 * be read back from automatically triggered reports, as part of the
1898 * RPT_ID field.
1899 */
1900 if (IS_GEN(dev_priv, 9, 11)) {
1901 I915_WRITE(GEN8_OA_DEBUG,
1902 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1903 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1904 }
1905
1906 /*
1907 * Update all contexts prior to writing the mux configurations, as we need
1908 * to make sure all slices/subslices are ON before writing to NOA
1909 * registers.
1910 */
1911 ret = gen8_configure_all_contexts(dev_priv, oa_config);
1912 if (ret)
1913 return ret;
1914
1915 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1916
1917 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1918 oa_config->b_counter_regs_len);
1919
1920 return 0;
1921 }
1922
1923 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1924 {
1925 /* Reset all contexts' slices/subslices configurations. */
1926 gen8_configure_all_contexts(dev_priv, NULL);
1927
1928 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1929 ~GT_NOA_ENABLE));
1930 }
1931
1932 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
1933 {
1934 /* Reset all contexts' slices/subslices configurations. */
1935 gen8_configure_all_contexts(dev_priv, NULL);
1936
1937 /* Make sure we disable noa to save power. */
1938 I915_WRITE(RPM_CONFIG1,
1939 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
1940 }
1941
1942 static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1943 {
1944 struct i915_gem_context *ctx =
1945 dev_priv->perf.oa.exclusive_stream->ctx;
1946 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
1947 bool periodic = dev_priv->perf.oa.periodic;
1948 u32 period_exponent = dev_priv->perf.oa.period_exponent;
1949 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1950
1951 /*
1952 * Reset buf pointers so we don't forward reports from before now.
1953 *
1954 * Think carefully if considering trying to avoid this, since it
1955 * also ensures status flags and the buffer itself are cleared
1956 * in error paths, and we have checks for invalid reports based
1957 * on the assumption that certain fields are written to zeroed
1958 * memory, which this helps maintain.
1959 */
1960 gen7_init_oa_buffer(dev_priv);
1961
1962 I915_WRITE(GEN7_OACONTROL,
1963 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
1964 (period_exponent <<
1965 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
1966 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
1967 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
1968 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
1969 GEN7_OACONTROL_ENABLE);
1970 }
1971
1972 static void gen8_oa_enable(struct drm_i915_private *dev_priv)
1973 {
1974 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1975
1976 /*
1977 * Reset buf pointers so we don't forward reports from before now.
1978 *
1979 * Think carefully if considering trying to avoid this, since it
1980 * also ensures status flags and the buffer itself are cleared
1981 * in error paths, and we have checks for invalid reports based
1982 * on the assumption that certain fields are written to zeroed
1983 * memory, which this helps maintain.
1984 */
1985 gen8_init_oa_buffer(dev_priv);
1986
1987 /*
1988 * Note: we don't rely on the hardware to perform single context
1989 * filtering and instead filter on the CPU based on the context-id
1990 * field of reports.
1991 */
1992 I915_WRITE(GEN8_OACONTROL, (report_format <<
1993 GEN8_OA_REPORT_FORMAT_SHIFT) |
1994 GEN8_OA_COUNTER_ENABLE);
1995 }
1996
1997 /**
1998 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
1999 * @stream: An i915 perf stream opened for OA metrics
2000 *
2001 * [Re]enables hardware periodic sampling according to the period configured
2002 * when opening the stream. This also starts a hrtimer that will periodically
2003 * check for data in the circular OA buffer for notifying userspace (e.g.
2004 * during a read() or poll()).
2005 */
2006 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2007 {
2008 struct drm_i915_private *dev_priv = stream->dev_priv;
2009
2010 dev_priv->perf.oa.ops.oa_enable(dev_priv);
2011
2012 if (dev_priv->perf.oa.periodic)
2013 hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
2014 ns_to_ktime(POLL_PERIOD),
2015 HRTIMER_MODE_REL_PINNED);
2016 }
2017
2018 static void gen7_oa_disable(struct drm_i915_private *dev_priv)
2019 {
2020 I915_WRITE(GEN7_OACONTROL, 0);
2021 if (intel_wait_for_register(dev_priv,
2022 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2023 50))
2024 DRM_ERROR("wait for OA to be disabled timed out\n");
2025 }
2026
2027 static void gen8_oa_disable(struct drm_i915_private *dev_priv)
2028 {
2029 I915_WRITE(GEN8_OACONTROL, 0);
2030 if (intel_wait_for_register(dev_priv,
2031 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2032 50))
2033 DRM_ERROR("wait for OA to be disabled timed out\n");
2034 }
2035
2036 /**
2037 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2038 * @stream: An i915 perf stream opened for OA metrics
2039 *
2040 * Stops the OA unit from periodically writing counter reports into the
2041 * circular OA buffer. This also stops the hrtimer that periodically checks for
2042 * data in the circular OA buffer, for notifying userspace.
2043 */
2044 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2045 {
2046 struct drm_i915_private *dev_priv = stream->dev_priv;
2047
2048 dev_priv->perf.oa.ops.oa_disable(dev_priv);
2049
2050 if (dev_priv->perf.oa.periodic)
2051 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
2052 }
2053
2054 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2055 .destroy = i915_oa_stream_destroy,
2056 .enable = i915_oa_stream_enable,
2057 .disable = i915_oa_stream_disable,
2058 .wait_unlocked = i915_oa_wait_unlocked,
2059 .poll_wait = i915_oa_poll_wait,
2060 .read = i915_oa_read,
2061 };
2062
2063 /**
2064 * i915_oa_stream_init - validate combined props for OA stream and init
2065 * @stream: An i915 perf stream
2066 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2067 * @props: The property state that configures stream (individually validated)
2068 *
2069 * While read_properties_unlocked() validates properties in isolation it
2070 * doesn't ensure that the combination necessarily makes sense.
2071 *
2072 * At this point it has been determined that userspace wants a stream of
2073 * OA metrics, but still we need to further validate the combined
2074 * properties are OK.
2075 *
2076 * If the configuration makes sense then we can allocate memory for
2077 * a circular OA buffer and apply the requested metric set configuration.
2078 *
2079 * Returns: zero on success or a negative error code.
2080 */
2081 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2082 struct drm_i915_perf_open_param *param,
2083 struct perf_open_properties *props)
2084 {
2085 struct drm_i915_private *dev_priv = stream->dev_priv;
2086 int format_size;
2087 int ret;
2088
2089 /* If the sysfs metrics/ directory wasn't registered for some
2090 * reason then don't let userspace try their luck with config
2091 * IDs
2092 */
2093 if (!dev_priv->perf.metrics_kobj) {
2094 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2095 return -EINVAL;
2096 }
2097
2098 if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
2099 DRM_DEBUG("Only OA report sampling supported\n");
2100 return -EINVAL;
2101 }
2102
2103 if (!dev_priv->perf.oa.ops.init_oa_buffer) {
2104 DRM_DEBUG("OA unit not supported\n");
2105 return -ENODEV;
2106 }
2107
2108 /* To avoid the complexity of having to accurately filter
2109 * counter reports and marshal to the appropriate client,
2110 * we currently only allow exclusive access.
2111 */
2112 if (dev_priv->perf.oa.exclusive_stream) {
2113 DRM_DEBUG("OA unit already in use\n");
2114 return -EBUSY;
2115 }
2116
2117 if (!props->oa_format) {
2118 DRM_DEBUG("OA report format not specified\n");
2119 return -EINVAL;
2120 }
2121
2122 /* We set up some ratelimit state to potentially throttle any _NOTES
2123 * about spurious, invalid OA reports which we don't forward to
2124 * userspace.
2125 *
2126 * The initialization is associated with opening the stream (not driver
2127 * init), considering we print a _NOTE about any throttling when closing
2128 * the stream instead of waiting until driver _fini, which no one would
2129 * ever see.
2130 *
2131 * Using the same limiting factors as printk_ratelimit()
2132 */
2133 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
2134 5 * HZ, 10);
2135 /* Since we use a DRM_NOTE for spurious reports it would be
2136 * inconsistent to let __ratelimit() automatically print a warning for
2137 * throttling.
2138 */
2139 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
2140 RATELIMIT_MSG_ON_RELEASE);
2141
2142 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2143
2144 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
2145
2146 stream->sample_flags |= SAMPLE_OA_REPORT;
2147 stream->sample_size += format_size;
2148
2149 dev_priv->perf.oa.oa_buffer.format_size = format_size;
2150 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
2151 return -EINVAL;
2152
2153 dev_priv->perf.oa.oa_buffer.format =
2154 dev_priv->perf.oa.oa_formats[props->oa_format].format;
2155
2156 dev_priv->perf.oa.periodic = props->oa_periodic;
2157 if (dev_priv->perf.oa.periodic)
2158 dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
2159
2160 if (stream->ctx) {
2161 ret = oa_get_render_ctx_id(stream);
2162 if (ret) {
2163 DRM_DEBUG("Invalid context id to filter with\n");
2164 return ret;
2165 }
2166 }
2167
2168 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
2169 if (ret) {
2170 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2171 goto err_config;
2172 }
2173
2174 /* PRM - observability performance counters:
2175 *
2176 * OACONTROL, performance counter enable, note:
2177 *
2178 * "When this bit is set, in order to have coherent counts,
2179 * RC6 power state and trunk clock gating must be disabled.
2180 * This can be achieved by programming MMIO registers as
2181 * 0xA094=0 and 0xA090[31]=1"
2182 *
2183 * In our case we are expecting that taking pm + FORCEWAKE
2184 * references will effectively disable RC6.
2185 */
2186 intel_runtime_pm_get(dev_priv);
2187 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2188
2189 ret = alloc_oa_buffer(dev_priv);
2190 if (ret)
2191 goto err_oa_buf_alloc;
2192
2193 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2194 if (ret)
2195 goto err_lock;
2196
2197 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2198 stream->oa_config);
2199 if (ret) {
2200 DRM_DEBUG("Unable to enable metric set\n");
2201 goto err_enable;
2202 }
2203
2204 stream->ops = &i915_oa_stream_ops;
2205
2206 dev_priv->perf.oa.exclusive_stream = stream;
2207
2208 mutex_unlock(&dev_priv->drm.struct_mutex);
2209
2210 return 0;
2211
2212 err_enable:
2213 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2214 mutex_unlock(&dev_priv->drm.struct_mutex);
2215
2216 err_lock:
2217 free_oa_buffer(dev_priv);
2218
2219 err_oa_buf_alloc:
2220 put_oa_config(dev_priv, stream->oa_config);
2221
2222 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2223 intel_runtime_pm_put(dev_priv);
2224
2225 err_config:
2226 if (stream->ctx)
2227 oa_put_render_ctx_id(stream);
2228
2229 return ret;
2230 }
2231
2232 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
2233 struct i915_gem_context *ctx,
2234 u32 *reg_state)
2235 {
2236 struct i915_perf_stream *stream;
2237
2238 if (engine->id != RCS)
2239 return;
2240
2241 stream = engine->i915->perf.oa.exclusive_stream;
2242 if (stream)
2243 gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
2244 }
2245
2246 /**
2247 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2248 * @stream: An i915 perf stream
2249 * @file: An i915 perf stream file
2250 * @buf: destination buffer given by userspace
2251 * @count: the number of bytes userspace wants to read
2252 * @ppos: (inout) file seek position (unused)
2253 *
2254 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2255 * ensure that if we've successfully copied any data then reporting that takes
2256 * precedence over any internal error status, so the data isn't lost.
2257 *
2258 * For example ret will be -ENOSPC whenever there is more buffered data than
2259 * can be copied to userspace, but that's only interesting if we weren't able
2260 * to copy some data because it implies the userspace buffer is too small to
2261 * receive a single record (and we never split records).
2262 *
2263 * Another case with ret == -EFAULT is more of a grey area since it would seem
2264 * like bad form for userspace to ask us to overrun its buffer, but the user
2265 * knows best:
2266 *
2267 * http://yarchive.net/comp/linux/partial_reads_writes.html
2268 *
2269 * Returns: The number of bytes copied or a negative error code on failure.
2270 */
2271 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2272 struct file *file,
2273 char __user *buf,
2274 size_t count,
2275 loff_t *ppos)
2276 {
2277 /* Note we keep the offset (aka bytes read) separate from any
2278 * error status so that the final check for whether we return
2279 * the bytes read with a higher precedence than any error (see
2280 * comment below) doesn't need to be handled/duplicated in
2281 * stream->ops->read() implementations.
2282 */
2283 size_t offset = 0;
2284 int ret = stream->ops->read(stream, buf, count, &offset);
2285
2286 return offset ?: (ret ?: -EAGAIN);
2287 }
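/*
 * For example, if stream->ops->read() copied 64 bytes before failing
 * with -ENOSPC, we return 64 so userspace consumes that data and simply
 * retries; the error is only reported when nothing could be copied.
 */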
2288
2289 /**
2290 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2291 * @file: An i915 perf stream file
2292 * @buf: destination buffer given by userspace
2293 * @count: the number of bytes userspace wants to read
2294 * @ppos: (inout) file seek position (unused)
2295 *
2296 * The entry point for handling a read() on a stream file descriptor from
2297 * userspace. Most of the work is left to the i915_perf_read_locked() and
2298 * &i915_perf_stream_ops->read but to save having stream implementations (of
2299 * which we might have multiple later), we handle blocking reads here.
2300 *
2301 * We can also consistently treat trying to read from a disabled stream
2302 * as an IO error so implementations can assume the stream is enabled
2303 * while reading.
2304 *
2305 * Returns: The number of bytes copied or a negative error code on failure.
2306 */
2307 static ssize_t i915_perf_read(struct file *file,
2308 char __user *buf,
2309 size_t count,
2310 loff_t *ppos)
2311 {
2312 struct i915_perf_stream *stream = file->private_data;
2313 struct drm_i915_private *dev_priv = stream->dev_priv;
2314 ssize_t ret;
2315
2316 /* To ensure it's handled consistently we simply treat all reads of a
2317 * disabled stream as an error. In particular it might otherwise lead
2318 * to a deadlock for blocking file descriptors...
2319 */
2320 if (!stream->enabled)
2321 return -EIO;
2322
2323 if (!(file->f_flags & O_NONBLOCK)) {
2324 /* There's the small chance of false positives from
2325 * stream->ops->wait_unlocked.
2326 *
2327 * E.g. with single context filtering, since we only wait until
2328 * the OA buffer has >= 1 report, we don't immediately know whether
2329 * any reports really belong to the current context.
2330 */
2331 do {
2332 ret = stream->ops->wait_unlocked(stream);
2333 if (ret)
2334 return ret;
2335
2336 mutex_lock(&dev_priv->perf.lock);
2337 ret = i915_perf_read_locked(stream, file,
2338 buf, count, ppos);
2339 mutex_unlock(&dev_priv->perf.lock);
2340 } while (ret == -EAGAIN);
2341 } else {
2342 mutex_lock(&dev_priv->perf.lock);
2343 ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2344 mutex_unlock(&dev_priv->perf.lock);
2345 }
2346
2347 /* We allow the poll checking to sometimes report false positive EPOLLIN
2348 * events where we might actually report EAGAIN on read() if there's
2349 * not really any data available. In this situation though we don't
2350 * want to enter a busy loop between poll() reporting a EPOLLIN event
2351 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2352 * effectively ensures we back off until the next hrtimer callback
2353 * before reporting another EPOLLIN event.
2354 */
2355 if (ret >= 0 || ret == -EAGAIN) {
2356 /* Maybe make ->pollin per-stream state if we support multiple
2357 * concurrent streams in the future.
2358 */
2359 dev_priv->perf.oa.pollin = false;
2360 }
2361
2362 return ret;
2363 }
2364
2365 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
2366 {
2367 struct drm_i915_private *dev_priv =
2368 container_of(hrtimer, typeof(*dev_priv),
2369 perf.oa.poll_check_timer);
2370
2371 if (oa_buffer_check_unlocked(dev_priv)) {
2372 dev_priv->perf.oa.pollin = true;
2373 wake_up(&dev_priv->perf.oa.poll_wq);
2374 }
2375
2376 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
2377
2378 return HRTIMER_RESTART;
2379 }
2380
2381 /**
2382 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
2383 * @dev_priv: i915 device instance
2384 * @stream: An i915 perf stream
2385 * @file: An i915 perf stream file
2386 * @wait: poll() state table
2387 *
2388 * For handling userspace polling on an i915 perf stream, this calls through to
2389 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
2390 * will be woken for new stream data.
2391 *
2392 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2393 * with any non-file-operation driver hooks.
2394 *
2395 * Returns: any poll events that are ready without sleeping
2396 */
2397 static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
2398 struct i915_perf_stream *stream,
2399 struct file *file,
2400 poll_table *wait)
2401 {
2402 __poll_t events = 0;
2403
2404 stream->ops->poll_wait(stream, file, wait);
2405
2406 /* Note: we don't explicitly check whether there's something to read
2407 * here since this path may be very hot depending on what else
2408 * userspace is polling, or on the timeout in use. We rely solely on
2409 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
2410 * samples to read.
2411 */
2412 if (dev_priv->perf.oa.pollin)
2413 events |= EPOLLIN;
2414
2415 return events;
2416 }
2417
2418 /**
2419 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
2420 * @file: An i915 perf stream file
2421 * @wait: poll() state table
2422 *
2423 * For handling userspace polling on an i915 perf stream, this ensures
2424 * poll_wait() gets called with a wait queue that will be woken for new stream
2425 * data.
2426 *
2427 * Note: Implementation deferred to i915_perf_poll_locked()
2428 *
2429 * Returns: any poll events that are ready without sleeping
2430 */
2431 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
2432 {
2433 struct i915_perf_stream *stream = file->private_data;
2434 struct drm_i915_private *dev_priv = stream->dev_priv;
2435 __poll_t ret;
2436
2437 mutex_lock(&dev_priv->perf.lock);
2438 ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
2439 mutex_unlock(&dev_priv->perf.lock);
2440
2441 return ret;
2442 }
2443
2444 /**
2445 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
2446 * @stream: A disabled i915 perf stream
2447 *
2448 * [Re]enables the associated capture of data for this stream.
2449 *
2450 * If a stream was previously enabled then there's currently no intention
2451 * to provide userspace any guarantee about the preservation of previously
2452 * buffered data.
2453 */
2454 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2455 {
2456 if (stream->enabled)
2457 return;
2458
2459 /* Allow stream->ops->enable() to refer to this */
2460 stream->enabled = true;
2461
2462 if (stream->ops->enable)
2463 stream->ops->enable(stream);
2464 }
2465
2466 /**
2467 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2468 * @stream: An enabled i915 perf stream
2469 *
2470 * Disables the associated capture of data for this stream.
2471 *
2472 * The intention is that disabling and re-enabling a stream will ideally be
2473 * cheaper than destroying and re-opening a stream with the same configuration,
2474 * though there are no formal guarantees about what state or buffered data
2475 * must be retained between disabling and re-enabling a stream.
2476 *
2477 * Note: while a stream is disabled it's considered an error for userspace
2478 * to attempt to read from the stream (-EIO).
2479 */
2480 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2481 {
2482 if (!stream->enabled)
2483 return;
2484
2485 /* Allow stream->ops->disable() to refer to this */
2486 stream->enabled = false;
2487
2488 if (stream->ops->disable)
2489 stream->ops->disable(stream);
2490 }
2491
2492 /**
2493 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
2494 * @stream: An i915 perf stream
2495 * @cmd: the ioctl request
2496 * @arg: the ioctl data
2497 *
2498 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2499 * with any non-file-operation driver hooks.
2500 *
2501 * Returns: zero on success or a negative error code. Returns -EINVAL for
2502 * an unknown ioctl request.
2503 */
2504 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2505 unsigned int cmd,
2506 unsigned long arg)
2507 {
2508 switch (cmd) {
2509 case I915_PERF_IOCTL_ENABLE:
2510 i915_perf_enable_locked(stream);
2511 return 0;
2512 case I915_PERF_IOCTL_DISABLE:
2513 i915_perf_disable_locked(stream);
2514 return 0;
2515 }
2516
2517 return -EINVAL;
2518 }
2519
2520 /**
2521 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2522 * @file: An i915 perf stream file
2523 * @cmd: the ioctl request
2524 * @arg: the ioctl data
2525 *
2526 * Implementation deferred to i915_perf_ioctl_locked().
2527 *
2528 * Returns: zero on success or a negative error code. Returns -EINVAL for
2529 * an unknown ioctl request.
2530 */
2531 static long i915_perf_ioctl(struct file *file,
2532 unsigned int cmd,
2533 unsigned long arg)
2534 {
2535 struct i915_perf_stream *stream = file->private_data;
2536 struct drm_i915_private *dev_priv = stream->dev_priv;
2537 long ret;
2538
2539 mutex_lock(&dev_priv->perf.lock);
2540 ret = i915_perf_ioctl_locked(stream, cmd, arg);
2541 mutex_unlock(&dev_priv->perf.lock);
2542
2543 return ret;
2544 }
2545
2546 /**
2547 * i915_perf_destroy_locked - destroy an i915 perf stream
2548 * @stream: An i915 perf stream
2549 *
2550 * Frees all resources associated with the given i915 perf @stream, disabling
2551 * any associated data capture in the process.
2552 *
2553 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2554 * with any non-file-operation driver hooks.
2555 */
2556 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
2557 {
2558 if (stream->enabled)
2559 i915_perf_disable_locked(stream);
2560
2561 if (stream->ops->destroy)
2562 stream->ops->destroy(stream);
2563
2564 list_del(&stream->link);
2565
2566 if (stream->ctx)
2567 i915_gem_context_put(stream->ctx);
2568
2569 kfree(stream);
2570 }
2571
2572 /**
2573 * i915_perf_release - handles userspace close() of a stream file
2574 * @inode: anonymous inode associated with file
2575 * @file: An i915 perf stream file
2576 *
2577 * Cleans up any resources associated with an open i915 perf stream file.
2578 *
2579 * NB: close() can't really fail from the userspace point of view.
2580 *
2581 * Returns: zero on success or a negative error code.
2582 */
2583 static int i915_perf_release(struct inode *inode, struct file *file)
2584 {
2585 struct i915_perf_stream *stream = file->private_data;
2586 struct drm_i915_private *dev_priv = stream->dev_priv;
2587
2588 mutex_lock(&dev_priv->perf.lock);
2589 i915_perf_destroy_locked(stream);
2590 mutex_unlock(&dev_priv->perf.lock);
2591
2592 return 0;
2593 }
2594
2595
2596 static const struct file_operations fops = {
2597 .owner = THIS_MODULE,
2598 .llseek = no_llseek,
2599 .release = i915_perf_release,
2600 .poll = i915_perf_poll,
2601 .read = i915_perf_read,
2602 .unlocked_ioctl = i915_perf_ioctl,
2603 /* Our ioctls have no arguments, so it's safe to use the same function
2604 * to handle 32-bit compatibility.
2605 */
2606 .compat_ioctl = i915_perf_ioctl,
2607 };
2608
2609
2610 /**
2611 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
2612 * @dev_priv: i915 device instance
2613 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2614 * @props: individually validated u64 property value pairs
2615 * @file: drm file
2616 *
2617 * See i915_perf_ioctl_open() for interface details.
2618 *
2619 * Implements further stream config validation and stream initialization on
2620 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
2621 * taken to serialize with any non-file-operation driver hooks.
2622 *
2623 * Note: at this point the @props have only been validated in isolation and
2624 * it's still necessary to validate that the combination of properties makes
2625 * sense.
2626 *
2627 * In the case where userspace is interested in OA unit metrics then further
2628 * config validation and stream initialization details will be handled by
2629 * i915_oa_stream_init(). The code here should only validate config state that
2630 * will be relevant to all stream types / backends.
2631 *
2632 * Returns: zero on success or a negative error code.
2633 */
2634 static int
2635 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
2636 struct drm_i915_perf_open_param *param,
2637 struct perf_open_properties *props,
2638 struct drm_file *file)
2639 {
2640 struct i915_gem_context *specific_ctx = NULL;
2641 struct i915_perf_stream *stream = NULL;
2642 unsigned long f_flags = 0;
2643 bool privileged_op = true;
2644 int stream_fd;
2645 int ret;
2646
2647 if (props->single_context) {
2648 u32 ctx_handle = props->ctx_handle;
2649 struct drm_i915_file_private *file_priv = file->driver_priv;
2650
2651 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
2652 if (!specific_ctx) {
2653 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
2654 ctx_handle);
2655 ret = -ENOENT;
2656 goto err;
2657 }
2658 }
2659
2660 /*
2661 * On Haswell the OA unit supports clock gating off for a specific
2662 * context and in this mode there's no visibility of metrics for the
2663 * rest of the system, which we consider acceptable for a
2664 * non-privileged client.
2665 *
2666 * For Gen8+ the OA unit no longer supports clock gating off for a
2667 * specific context and the kernel can't securely stop the counters
2668 * from updating as system-wide / global values. Even though we can
2669 * filter reports based on the included context ID we can't block
2670 * clients from seeing the raw / global counter values via
2671 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
2672 * enable the OA unit by default.
2673 */
2674 if (IS_HASWELL(dev_priv) && specific_ctx)
2675 privileged_op = false;
2676
2677 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
2678 * we check a dev.i915.perf_stream_paranoid sysctl option
2679 * to determine if it's ok to access system wide OA counters
2680 * without CAP_SYS_ADMIN privileges.
2681 */
2682 if (privileged_op &&
2683 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
2684 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
2685 ret = -EACCES;
2686 goto err_ctx;
2687 }
2688
2689 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
2690 if (!stream) {
2691 ret = -ENOMEM;
2692 goto err_ctx;
2693 }
2694
2695 stream->dev_priv = dev_priv;
2696 stream->ctx = specific_ctx;
2697
2698 ret = i915_oa_stream_init(stream, param, props);
2699 if (ret)
2700 goto err_alloc;
2701
2702 /* we avoid simply assigning stream->sample_flags = props->sample_flags
2703 * to have _stream_init check the combination of sample flags more
2704 * thoroughly, but still this is the expected result at this point.
2705 */
2706 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
2707 ret = -ENODEV;
2708 goto err_flags;
2709 }
2710
2711 list_add(&stream->link, &dev_priv->perf.streams);
2712
2713 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
2714 f_flags |= O_CLOEXEC;
2715 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
2716 f_flags |= O_NONBLOCK;
2717
2718 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
2719 if (stream_fd < 0) {
2720 ret = stream_fd;
2721 goto err_open;
2722 }
2723
2724 if (!(param->flags & I915_PERF_FLAG_DISABLED))
2725 i915_perf_enable_locked(stream);
2726
2727 return stream_fd;
2728
2729 err_open:
2730 list_del(&stream->link);
2731 err_flags:
2732 if (stream->ops->destroy)
2733 stream->ops->destroy(stream);
2734 err_alloc:
2735 kfree(stream);
2736 err_ctx:
2737 if (specific_ctx)
2738 i915_gem_context_put(specific_ctx);
2739 err:
2740 return ret;
2741 }
2742
2743 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2744 {
2745 return div64_u64(1000000000ULL * (2ULL << exponent),
2746 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
2747 }
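/*
 * I.e. period_ns = 2^(exponent + 1) * 1e9 / cs_timestamp_frequency_hz.
 * E.g. assuming the 12.5MHz (12500KHz) command streamer timestamp
 * frequency of Haswell, exponent 0 gives a 2 * 80ns = 160ns period and
 * each increment of the exponent doubles it.
 */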
2748
2749 /**
2750 * read_properties_unlocked - validate + copy userspace stream open properties
2751 * @dev_priv: i915 device instance
2752 * @uprops: The array of u64 key value pairs given by userspace
2753 * @n_props: The number of key value pairs expected in @uprops
2754 * @props: The stream configuration built up while validating properties
2755 *
2756 * Note this function only validates properties in isolation; it doesn't
2757 * validate that the combination of properties makes sense or that all
2758 * properties necessary for a particular kind of stream have been set.
2759 *
2760 * Note that there currently aren't any ordering requirements for properties so
2761 * we shouldn't validate or assume anything about ordering here. This doesn't
2762 * rule out defining new properties with ordering requirements in the future.
2763 */
2764 static int read_properties_unlocked(struct drm_i915_private *dev_priv,
2765 u64 __user *uprops,
2766 u32 n_props,
2767 struct perf_open_properties *props)
2768 {
2769 u64 __user *uprop = uprops;
2770 u32 i;
2771
2772 memset(props, 0, sizeof(struct perf_open_properties));
2773
2774 if (!n_props) {
2775 DRM_DEBUG("No i915 perf properties given\n");
2776 return -EINVAL;
2777 }
2778
2779 /* Considering that ID = 0 is reserved and assuming that we don't
2780 * (currently) expect any configurations to ever specify duplicate
2781 * values for a particular property ID then the last _PROP_MAX value is
2782 * one greater than the maximum number of properties we expect to get
2783 * from userspace.
2784 */
2785 if (n_props >= DRM_I915_PERF_PROP_MAX) {
2786 DRM_DEBUG("More i915 perf properties specified than exist\n");
2787 return -EINVAL;
2788 }
2789
2790 for (i = 0; i < n_props; i++) {
2791 u64 oa_period, oa_freq_hz;
2792 u64 id, value;
2793 int ret;
2794
2795 ret = get_user(id, uprop);
2796 if (ret)
2797 return ret;
2798
2799 ret = get_user(value, uprop + 1);
2800 if (ret)
2801 return ret;
2802
2803 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
2804 DRM_DEBUG("Unknown i915 perf property ID\n");
2805 return -EINVAL;
2806 }
2807
2808 switch ((enum drm_i915_perf_property_id)id) {
2809 case DRM_I915_PERF_PROP_CTX_HANDLE:
2810 props->single_context = 1;
2811 props->ctx_handle = value;
2812 break;
2813 case DRM_I915_PERF_PROP_SAMPLE_OA:
2814 if (value)
2815 props->sample_flags |= SAMPLE_OA_REPORT;
2816 break;
2817 case DRM_I915_PERF_PROP_OA_METRICS_SET:
2818 if (value == 0) {
2819 DRM_DEBUG("Unknown OA metric set ID\n");
2820 return -EINVAL;
2821 }
2822 props->metrics_set = value;
2823 break;
2824 case DRM_I915_PERF_PROP_OA_FORMAT:
2825 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
2826 DRM_DEBUG("Out-of-range OA report format %llu\n",
2827 value);
2828 return -EINVAL;
2829 }
2830 if (!dev_priv->perf.oa.oa_formats[value].size) {
2831 DRM_DEBUG("Unsupported OA report format %llu\n",
2832 value);
2833 return -EINVAL;
2834 }
2835 props->oa_format = value;
2836 break;
2837 case DRM_I915_PERF_PROP_OA_EXPONENT:
2838 if (value > OA_EXPONENT_MAX) {
2839 DRM_DEBUG("OA timer exponent too high (> %u)\n",
2840 OA_EXPONENT_MAX);
2841 return -EINVAL;
2842 }
2843
2844 /* Theoretically we can program the OA unit to sample
2845 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
2846 * for BXT. We don't allow such high sampling
2847 * frequencies by default unless root.
2848 */
2849
2850 BUILD_BUG_ON(sizeof(oa_period) != 8);
2851 oa_period = oa_exponent_to_ns(dev_priv, value);
2852
2853 /* This check is primarily to ensure that oa_period <=
2854 * UINT32_MAX (before passing to do_div which only
2855 * accepts a u32 denominator), but we can also skip
2856 * checking anything < 1Hz which implicitly can't be
2857 * limited via an integer oa_max_sample_rate.
2858 */
2859 if (oa_period <= NSEC_PER_SEC) {
2860 u64 tmp = NSEC_PER_SEC;
2861 do_div(tmp, oa_period);
2862 oa_freq_hz = tmp;
2863 } else
2864 oa_freq_hz = 0;
2865
2866 if (oa_freq_hz > i915_oa_max_sample_rate &&
2867 !capable(CAP_SYS_ADMIN)) {
2868 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
2869 i915_oa_max_sample_rate);
2870 return -EACCES;
2871 }
2872
2873 props->oa_periodic = true;
2874 props->oa_period_exponent = value;
2875 break;
2876 case DRM_I915_PERF_PROP_MAX:
2877 MISSING_CASE(id);
2878 return -EINVAL;
2879 }
2880
2881 uprop += 2;
2882 }
2883
2884 return 0;
2885 }
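/*
 * As an illustration (the metrics set ID here is hypothetical), a
 * property list requesting OA report samples from metrics set 1, using
 * the A32u40_A4u32_B8_C8 report format with a ~164us sampling period
 * (exponent 10, e.g. at a 12.5MHz timestamp frequency), might look like:
 *
 *   u64 props[] = {
 *       DRM_I915_PERF_PROP_SAMPLE_OA,      1,
 *       DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *       DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *       DRM_I915_PERF_PROP_OA_EXPONENT,    10,
 *   };
 */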
2886
2887 /**
2888 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
2889 * @dev: drm device
2890 * @data: ioctl data copied from userspace (unvalidated)
2891 * @file: drm file
2892 *
2893 * Validates the stream open parameters given by userspace including flags
2894 * and an array of u64 key, value pair properties.
2895 *
2896 * Very little is assumed up front about the nature of the stream being
2897 * opened (for instance we don't assume it's for periodic OA unit metrics). An
2898 * i915-perf stream is expected to be a suitable interface for other forms of
2899 * buffered data written by the GPU besides periodic OA metrics.
2900 *
2901 * Note we copy the properties from userspace outside of the i915 perf
2902 * mutex to avoid an awkward lockdep with mmap_sem.
2903 *
2904 * Most of the implementation details are handled by
2905 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
2906 * mutex for serializing with any non-file-operation driver hooks.
2907 *
2908 * Return: A newly opened i915 Perf stream file descriptor or negative
2909 * error code on failure.
2910 */
2911 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2912 struct drm_file *file)
2913 {
2914 struct drm_i915_private *dev_priv = dev->dev_private;
2915 struct drm_i915_perf_open_param *param = data;
2916 struct perf_open_properties props;
2917 u32 known_open_flags;
2918 int ret;
2919
2920 if (!dev_priv->perf.initialized) {
2921 DRM_DEBUG("i915 perf interface not available for this system\n");
2922 return -ENOTSUPP;
2923 }
2924
2925 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
2926 I915_PERF_FLAG_FD_NONBLOCK |
2927 I915_PERF_FLAG_DISABLED;
2928 if (param->flags & ~known_open_flags) {
2929 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
2930 return -EINVAL;
2931 }
2932
2933 ret = read_properties_unlocked(dev_priv,
2934 u64_to_user_ptr(param->properties_ptr),
2935 param->num_properties,
2936 &props);
2937 if (ret)
2938 return ret;
2939
2940 mutex_lock(&dev_priv->perf.lock);
2941 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
2942 mutex_unlock(&dev_priv->perf.lock);
2943
2944 return ret;
2945 }
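/*
 * A minimal userspace sketch of opening a stream with a property list
 * like the one illustrated above (error handling elided; drm_fd is
 * assumed to be an open DRM device fd):
 *
 *   struct drm_i915_perf_open_param param = {
 *       .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *       .num_properties = sizeof(props) / (2 * sizeof(u64)),
 *       .properties_ptr = (uintptr_t)props,
 *   };
 *   int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be enabled/disabled with
 * I915_PERF_IOCTL_ENABLE/I915_PERF_IOCTL_DISABLE and read()/poll()ed
 * for sample records; close() destroys the stream.
 */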
2946
2947 /**
2948 * i915_perf_register - exposes i915-perf to userspace
2949 * @dev_priv: i915 device instance
2950 *
2951 * In particular OA metric sets are advertised under a sysfs metrics/
2952 * directory allowing userspace to enumerate valid IDs that can be
2953 * used to open an i915-perf stream.
2954 */
2955 void i915_perf_register(struct drm_i915_private *dev_priv)
2956 {
2957 int ret;
2958
2959 if (!dev_priv->perf.initialized)
2960 return;
2961
2962 /* Take the perf lock to be sure we're synchronized with an attempted
2963 * i915_perf_open_ioctl(), considering that we register after being
2964 * exposed to userspace.
2965 */
2966 mutex_lock(&dev_priv->perf.lock);
2967
2968 dev_priv->perf.metrics_kobj =
2969 kobject_create_and_add("metrics",
2970 &dev_priv->drm.primary->kdev->kobj);
2971 if (!dev_priv->perf.metrics_kobj)
2972 goto exit;
2973
2974 sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
2975
2976 if (IS_HASWELL(dev_priv)) {
2977 i915_perf_load_test_config_hsw(dev_priv);
2978 } else if (IS_BROADWELL(dev_priv)) {
2979 i915_perf_load_test_config_bdw(dev_priv);
2980 } else if (IS_CHERRYVIEW(dev_priv)) {
2981 i915_perf_load_test_config_chv(dev_priv);
2982 } else if (IS_SKYLAKE(dev_priv)) {
2983 if (IS_SKL_GT2(dev_priv))
2984 i915_perf_load_test_config_sklgt2(dev_priv);
2985 else if (IS_SKL_GT3(dev_priv))
2986 i915_perf_load_test_config_sklgt3(dev_priv);
2987 else if (IS_SKL_GT4(dev_priv))
2988 i915_perf_load_test_config_sklgt4(dev_priv);
2989 } else if (IS_BROXTON(dev_priv)) {
2990 i915_perf_load_test_config_bxt(dev_priv);
2991 } else if (IS_KABYLAKE(dev_priv)) {
2992 if (IS_KBL_GT2(dev_priv))
2993 i915_perf_load_test_config_kblgt2(dev_priv);
2994 else if (IS_KBL_GT3(dev_priv))
2995 i915_perf_load_test_config_kblgt3(dev_priv);
2996 } else if (IS_GEMINILAKE(dev_priv)) {
2997 i915_perf_load_test_config_glk(dev_priv);
2998 } else if (IS_COFFEELAKE(dev_priv)) {
2999 if (IS_CFL_GT2(dev_priv))
3000 i915_perf_load_test_config_cflgt2(dev_priv);
3001 if (IS_CFL_GT3(dev_priv))
3002 i915_perf_load_test_config_cflgt3(dev_priv);
3003 } else if (IS_CANNONLAKE(dev_priv)) {
3004 i915_perf_load_test_config_cnl(dev_priv);
3005 } else if (IS_ICELAKE(dev_priv)) {
3006 i915_perf_load_test_config_icl(dev_priv);
3007 }
3008
3009 if (dev_priv->perf.oa.test_config.id == 0)
3010 goto sysfs_error;
3011
3012 ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
3013 &dev_priv->perf.oa.test_config.sysfs_metric);
3014 if (ret)
3015 goto sysfs_error;
3016
3017 atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
3018
3019 goto exit;
3020
3021 sysfs_error:
3022 kobject_put(dev_priv->perf.metrics_kobj);
3023 dev_priv->perf.metrics_kobj = NULL;
3024
3025 exit:
3026 mutex_unlock(&dev_priv->perf.lock);
3027 }

/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.metrics_kobj)
		return;

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &dev_priv->perf.oa.test_config.sysfs_metric);

	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
		addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
		addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OACEC0_0) &&
		addr <= i915_mmio_reg_offset(OACEC7_1));
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
	       (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
		addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
	       (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
		addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
}

static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen8_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x25100 && addr <= 0x2FF90) ||
	       (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
		addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
	       addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x182300 && addr <= 0x1823A4);
}

static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
					 bool (*is_valid)(struct drm_i915_private *dev_priv,
							  u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be
	 * programmed.
	 */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(dev_priv, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}
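
/*
 * For illustration only: alloc_oa_regs() expects the userspace buffer to
 * hold n_regs (address, value) u32 pairs packed back to back, e.g.:
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x00000000,	// hypothetical (addr, value) pair
 *		0x9888, 0x12300000,	// hypothetical (addr, value) pair
 *	};
 *	// n_mux_regs would be 2 here: pairs, not individual u32 elements.
 *
 * Each address is checked against the per-platform is_valid() callback
 * before being accepted.
 */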

static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(dev_priv->perf.metrics_kobj,
				  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open
 * ioctl or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	int err, id;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	atomic_set(&oa_config->ref_count, 1);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config was
	 * kzalloc'd.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	oa_config->mux_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_mux_reg,
			      u64_to_user_ptr(args->mux_regs_ptr),
			      args->n_mux_regs);

	if (IS_ERR(oa_config->mux_regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(oa_config->mux_regs);
		goto reg_err;
	}

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	oa_config->b_counter_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
			      u64_to_user_ptr(args->boolean_regs_ptr),
			      args->n_boolean_regs);

	if (IS_ERR(oa_config->b_counter_regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(oa_config->b_counter_regs);
		goto reg_err;
	}

	if (INTEL_GEN(dev_priv) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		oa_config->flex_regs =
			alloc_oa_regs(dev_priv,
				      dev_priv->perf.oa.ops.is_valid_flex_reg,
				      u64_to_user_ptr(args->flex_regs_ptr),
				      args->n_flex_regs);

		if (IS_ERR(oa_config->flex_regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(oa_config->flex_regs);
			goto reg_err;
		}
	}

	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is reserved for the kernel-stored
	 * test config.
	 */
	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate id for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&dev_priv->perf.metrics_lock);

	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
	put_oa_config(dev_priv, oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}
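
/*
 * For illustration only (not part of the driver): a minimal userspace
 * sketch of adding a config through this ioctl, using only uapi names
 * from <drm/i915_drm.h>. The uuid and register values are placeholders
 * and error handling is elided.
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x12300000 };	// hypothetical pair
 *	struct drm_i915_perf_oa_config config = { 0 };
 *
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(config.uuid));
 *	config.n_mux_regs = 1;
 *	config.mux_regs_ptr = (uintptr_t)mux_regs;
 *
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * On success the returned id can be passed as
 * DRM_I915_PERF_PROP_OA_METRICS_SET when opening a stream.
 */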

/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);

	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
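
/*
 * For illustration only: the matching userspace call takes the config id
 * returned by the add ioctl (error handling elided):
 *
 *	uint64_t config_id = id;	// from DRM_IOCTL_I915_PERF_ADD_CONFIG
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
 */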

static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &one,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	 },
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	 },
	{}
};
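
/*
 * For illustration only: once registered via register_sysctl_table() in
 * i915_perf_init(), the tables above surface as:
 *
 *	/proc/sys/dev/i915/perf_stream_paranoid
 *	/proc/sys/dev/i915/oa_max_sample_rate
 *
 * e.g. "sysctl dev.i915.perf_stream_paranoid=0" lets unprivileged users
 * open system-wide streams, subject to the 0/1 bounds enforced by
 * proc_dointvec_minmax.
 */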

/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

			if (IS_GEN8(dev_priv)) {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
			} else {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
			}
		} else if (IS_GEN(dev_priv, 10, 11)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
		}
	}

	if (dev_priv->perf.oa.ops.enable_metric_set) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit = 1000 *
			(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}

static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}