/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
22 * 23 */ 24 25 #include "i915_drv.h" 26 #include "intel_ringbuffer.h" 27 #include "intel_lrc.h" 28 29 static const struct engine_info { 30 const char *name; 31 unsigned exec_id; 32 unsigned guc_id; 33 u32 mmio_base; 34 unsigned irq_shift; 35 int (*init_legacy)(struct intel_engine_cs *engine); 36 int (*init_execlists)(struct intel_engine_cs *engine); 37 } intel_engines[] = { 38 [RCS] = { 39 .name = "render ring", 40 .exec_id = I915_EXEC_RENDER, 41 .guc_id = GUC_RENDER_ENGINE, 42 .mmio_base = RENDER_RING_BASE, 43 .irq_shift = GEN8_RCS_IRQ_SHIFT, 44 .init_execlists = logical_render_ring_init, 45 .init_legacy = intel_init_render_ring_buffer, 46 }, 47 [BCS] = { 48 .name = "blitter ring", 49 .exec_id = I915_EXEC_BLT, 50 .guc_id = GUC_BLITTER_ENGINE, 51 .mmio_base = BLT_RING_BASE, 52 .irq_shift = GEN8_BCS_IRQ_SHIFT, 53 .init_execlists = logical_xcs_ring_init, 54 .init_legacy = intel_init_blt_ring_buffer, 55 }, 56 [VCS] = { 57 .name = "bsd ring", 58 .exec_id = I915_EXEC_BSD, 59 .guc_id = GUC_VIDEO_ENGINE, 60 .mmio_base = GEN6_BSD_RING_BASE, 61 .irq_shift = GEN8_VCS1_IRQ_SHIFT, 62 .init_execlists = logical_xcs_ring_init, 63 .init_legacy = intel_init_bsd_ring_buffer, 64 }, 65 [VCS2] = { 66 .name = "bsd2 ring", 67 .exec_id = I915_EXEC_BSD, 68 .guc_id = GUC_VIDEO_ENGINE2, 69 .mmio_base = GEN8_BSD2_RING_BASE, 70 .irq_shift = GEN8_VCS2_IRQ_SHIFT, 71 .init_execlists = logical_xcs_ring_init, 72 .init_legacy = intel_init_bsd2_ring_buffer, 73 }, 74 [VECS] = { 75 .name = "video enhancement ring", 76 .exec_id = I915_EXEC_VEBOX, 77 .guc_id = GUC_VIDEOENHANCE_ENGINE, 78 .mmio_base = VEBOX_RING_BASE, 79 .irq_shift = GEN8_VECS_IRQ_SHIFT, 80 .init_execlists = logical_xcs_ring_init, 81 .init_legacy = intel_init_vebox_ring_buffer, 82 }, 83 }; 84 85 static struct intel_engine_cs * 86 intel_engine_setup(struct drm_i915_private *dev_priv, 87 enum intel_engine_id id) 88 { 89 const struct engine_info *info = &intel_engines[id]; 90 struct intel_engine_cs *engine = &dev_priv->engine[id]; 91 92 
engine->id = id; 93 engine->i915 = dev_priv; 94 engine->name = info->name; 95 engine->exec_id = info->exec_id; 96 engine->hw_id = engine->guc_id = info->guc_id; 97 engine->mmio_base = info->mmio_base; 98 engine->irq_shift = info->irq_shift; 99 100 return engine; 101 } 102 103 /** 104 * intel_engines_init() - allocate, populate and init the Engine Command Streamers 105 * @dev: DRM device. 106 * 107 * Return: non-zero if the initialization failed. 108 */ 109 int intel_engines_init(struct drm_device *dev) 110 { 111 struct drm_i915_private *dev_priv = to_i915(dev); 112 unsigned int mask = 0; 113 int (*init)(struct intel_engine_cs *engine); 114 unsigned int i; 115 int ret; 116 117 WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0); 118 WARN_ON(INTEL_INFO(dev_priv)->ring_mask & 119 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); 120 121 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { 122 if (!HAS_ENGINE(dev_priv, i)) 123 continue; 124 125 if (i915.enable_execlists) 126 init = intel_engines[i].init_execlists; 127 else 128 init = intel_engines[i].init_legacy; 129 130 if (!init) 131 continue; 132 133 ret = init(intel_engine_setup(dev_priv, i)); 134 if (ret) 135 goto cleanup; 136 137 mask |= ENGINE_MASK(i); 138 } 139 140 /* 141 * Catch failures to update intel_engines table when the new engines 142 * are added to the driver by a warning and disabling the forgotten 143 * engines. 
144 */ 145 if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) { 146 struct intel_device_info *info = 147 (struct intel_device_info *)&dev_priv->info; 148 info->ring_mask = mask; 149 } 150 151 return 0; 152 153 cleanup: 154 for (i = 0; i < I915_NUM_ENGINES; i++) { 155 if (i915.enable_execlists) 156 intel_logical_ring_cleanup(&dev_priv->engine[i]); 157 else 158 intel_engine_cleanup(&dev_priv->engine[i]); 159 } 160 161 return ret; 162 } 163 164 void intel_engine_init_hangcheck(struct intel_engine_cs *engine) 165 { 166 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); 167 } 168 169 static void intel_engine_init_requests(struct intel_engine_cs *engine) 170 { 171 init_request_active(&engine->last_request, NULL); 172 INIT_LIST_HEAD(&engine->request_list); 173 } 174 175 /** 176 * intel_engines_setup_common - setup engine state not requiring hw access 177 * @engine: Engine to setup. 178 * 179 * Initializes @engine@ structure members shared between legacy and execlists 180 * submission modes which do not require hardware access. 181 * 182 * Typically done early in the submission mode specific engine setup stage. 183 */ 184 void intel_engine_setup_common(struct intel_engine_cs *engine) 185 { 186 INIT_LIST_HEAD(&engine->buffers); 187 INIT_LIST_HEAD(&engine->execlist_queue); 188 lockinit(&engine->execlist_lock, "i915el", 0, LK_CANRECURSE); 189 190 engine->fence_context = fence_context_alloc(1); 191 192 intel_engine_init_requests(engine); 193 intel_engine_init_hangcheck(engine); 194 i915_gem_batch_pool_init(engine, &engine->batch_pool); 195 } 196 197 /** 198 * intel_engines_init_common - initialize cengine state which might require hw access 199 * @engine: Engine to initialize. 200 * 201 * Initializes @engine@ structure members shared between legacy and execlists 202 * submission modes which do require hardware access. 203 * 204 * Typcally done at later stages of submission mode specific engine setup. 205 * 206 * Returns zero on success or an error code on failure. 
207 */ 208 int intel_engine_init_common(struct intel_engine_cs *engine) 209 { 210 int ret; 211 212 ret = intel_engine_init_breadcrumbs(engine); 213 if (ret) 214 return ret; 215 216 return intel_engine_init_cmd_parser(engine); 217 } 218 219 /** 220 * intel_engines_cleanup_common - cleans up the engine state created by 221 * the common initiailizers. 222 * @engine: Engine to cleanup. 223 * 224 * This cleans up everything created by the common helpers. 225 */ 226 void intel_engine_cleanup_common(struct intel_engine_cs *engine) 227 { 228 intel_engine_cleanup_cmd_parser(engine); 229 intel_engine_fini_breadcrumbs(engine); 230 i915_gem_batch_pool_fini(&engine->batch_pool); 231 } 232