/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice pair that is valid for the 'subslice'
 * class of multicast registers. If another type of steering has no overlap
 * in valid steering targets with 'subslice' style registers, we will need
 * to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here; an illustrative table-walk sketch appears after the end
 * of this header.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,

	NUM_STEERING_TYPES
};

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		/**
		 * @active: Is the engine currently busy executing requests?
		 */
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 * An illustrative accounting sketch appears after the end
		 * of this header.
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;
	} info;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};

#endif /* __INTEL_GT_TYPES__ */
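
/*
 * Illustrative sketch only -- not part of the header above. It shows how a
 * per-type steering table such as intel_gt.steering_table[] can be walked
 * to decide whether a multicast register offset needs explicit re-steering.
 * This assumes, as is conventional for such range tables, that the array is
 * terminated by a zeroed sentinel entry; the helper name
 * example_reg_needs_steering() is hypothetical, not a driver API.
 */
static inline bool
example_reg_needs_steering(const struct intel_mmio_range *table, u32 offset)
{
	const struct intel_mmio_range *entry;

	/* Walk until the zeroed sentinel entry (entry->end == 0). */
	for (entry = table; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}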
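
/*
 * Illustrative sketch only -- not part of the header above. It models the
 * busyness accounting described by the stats block: @total accumulates the
 * closed busy periods, while @start timestamps the currently open one, so
 * a reader adds the open period on top whenever the engine is active. The
 * helper name example_sample_busy_time() is hypothetical; a real reader
 * would also serialize against updates using stats.lock.
 */
static inline ktime_t
example_sample_busy_time(bool active, ktime_t total, ktime_t start,
			 ktime_t now)
{
	/* Closed busy blocks accumulated so far. */
	ktime_t busy = total;

	/* If a busy block is open, add the time elapsed since @start. */
	if (active)
		busy = ktime_add(busy, ktime_sub(now, start));

	return busy;
}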