1 /* 2 * Copyright © 2016-2017 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

/* Indices into the channel's ctbs[] array: one buffer per direction. */
enum { CTB_SEND = 0, CTB_RECV = 1 };

/* Owner id written into each buffer descriptor. */
enum { CTB_OWNER_HOST = 0 };

/*
 * Early (no hardware access) initialization of the CT state embedded in
 * the GuC: just assigns the static owner id to the host channel.
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	/* we're using static channel owners */
	ct->host_channel.owner = CTB_OWNER_HOST;
}

/* Printable name of a CT buffer type, used in log messages only. */
static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

/*
 * Fully (re)initialize a CT buffer descriptor: zero it, then record the
 * commands buffer address (GGTT offset), its size in bytes and the owner id.
 * head/tail are implicitly reset to 0 by the memset.
 */
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size, u32 owner)
{
	DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
			 desc, cmds_addr, size, owner);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = owner;
}

/*
 * Reset only the ring state (head/tail) and the error flag of a descriptor,
 * keeping addr/size/owner intact. Used to recover after a protocol error.
 */
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			 desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

/*
 * Ask the GuC firmware to register the CT buffer whose descriptor lives at
 * GGTT offset @desc_addr, as buffer @type (SEND or RECV).
 * Returns 0 on success, negative error code from the MMIO send otherwise.
 */
static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

/*
 * Counterpart of guc_action_register_ct_buffer(): ask the GuC firmware to
 * drop the buffer of @type belonging to @owner.
 */
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 owner,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		owner,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
			  guc_ct_buffer_type_to_str(type), owner, err);
	return err;
}

/* A channel counts as open once its backing vma has been allocated. */
static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
{
	return ctch->vma != NULL;
}

/*
 * One-time channel setup: allocate and map the single backing page and
 * record the desc/cmds pointers for both directions. On failure the channel
 * is left unallocated (ctch->vma == NULL) and a negative error is returned.
 */
static int ctch_init(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	struct i915_vma *vma;
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ctch->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 * ___________.....................
	 * |desc (SEND)|                   :
	 * |___________|         PAGE/4
	 * :___________....................:
	 * |desc (RECV)|                   :
	 * |___________|         PAGE/4
	 * :_______________________________:
	 * |cmds (SEND)                    |
	 * |                      PAGE/4
	 * |_______________________________|
	 * |cmds (RECV)                    |
	 * |                      PAGE/4
	 * |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	/* allocate vma */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_out;
	}
	ctch->vma = vma;

	/* map first page */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		err = PTR_ERR(blob);
		goto err_vma;
	}
	DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;

err_vma:
	i915_vma_unpin_and_release(&ctch->vma);
err_out:
	DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
			 ctch->owner, err);
	return err;
}

/* Tear down ctch_init(): unmap and release the backing page. */
static void ctch_fini(struct intel_guc *guc,
		      struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch->vma);

	i915_gem_object_unpin_map(ctch->vma->obj);
	i915_vma_unpin_and_release(&ctch->vma);
}

/*
 * (Re)open the channel: allocate backing storage if not done yet,
 * (re)initialize both descriptors and register both buffers with the GuC.
 * On any failure the channel is fully torn down again before returning.
 */
static int ctch_open(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch)
{
	u32 base;
	int err;
	int i;

	DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
			 ctch->owner, yesno(ctch_is_open(ctch)));

	if (!ctch->vma) {
		err = ctch_init(guc, ctch);
		if (unlikely(err))
			goto err_out;
	}

	/* vma should be already allocated and map'ed */
	base = guc_ggtt_offset(ctch->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4,
					ctch->owner);
	}

	/* register buffers, starting with RECV buffer
	 * descriptors are in first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_fini;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	return 0;

err_deregister:
	/* SEND registration failed; undo the already-registered RECV buffer */
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_fini:
	ctch_fini(guc, ctch);
err_out:
	DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
	return err;
}

/*
 * Close the channel: deregister both buffers from the GuC (best effort,
 * errors are only logged by the helpers) and release the backing storage.
 */
static void ctch_close(struct intel_guc *guc,
		       struct intel_guc_ct_channel *ctch)
{
	GEM_BUG_ON(!ctch_is_open(ctch));

	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_SEND);
	guc_action_deregister_ct_buffer(guc,
					ctch->owner,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
	ctch_fini(guc, ctch);
}

/* Next fence to tag an outgoing message with (simple counter for now). */
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
{
	/* For now it's trivial */
	return ++ctch->next_fence;
}

/*
 * Append one message to the SEND ring buffer.
 * @action: action code followed by action data (dwords)
 * @len:    length of @action in dwords (must be >= 1)
 * @fence:  fence dword the GuC will echo back on completion
 * Returns 0 on success, -ENOSPC if the message does not fit.
 * Note: descriptor head/tail/size are in bytes, local copies are in dwords.
 */
static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/* Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	/* action[0] went into the header; copy the remaining data dwords */
	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/* Wait for the response from the GuC.
 * @fence: response fence
 * @status: placeholder for status
 * return: 0 response received (status is valid)
 *	-ETIMEDOUT no response within hardcoded timeout
 *	-EPROTO no response, ct buffer was in error
 */
static int wait_for_response(struct guc_ct_buffer_desc *desc,
			     u32 fence,
			     u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	/* status is read unconditionally; only meaningful when err == 0 */
	*status = desc->status;
	return err;
}

/*
 * Send one action over the channel's SEND buffer, ring the GuC doorbell,
 * then wait for the matching fence and check the reported status.
 * Returns 0 on success, -EIO when the GuC reports a non-success status,
 * or a negative error from ctb_write()/wait_for_response().
 */
static int ctch_send(struct intel_guc *guc,
		     struct intel_guc_ct_channel *ctch,
		     const u32 *action,
		     u32 len,
		     u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 fence;
	int err;

	GEM_BUG_ON(!ctch_is_open(ctch));
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);

	fence = ctch_get_next_fence(ctch);
	err = ctb_write(ctb, action, len, fence);
	if (unlikely(err))
		return err;

	intel_guc_notify(guc);

	err = wait_for_response(desc, fence, status);
	if (unlikely(err))
		return err;
	if (*status != INTEL_GUC_STATUS_SUCCESS)
		return -EIO;
	return 0;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 * Serialized with guc->send_mutex, like the other guc->send backends.
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
	u32 status = ~0; /* undefined */
	int err;

	mutex_lock(&guc->send_mutex);

	err = ctch_send(guc, ctch, action, len, &status);
	if (unlikely(err)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], err, status);
	}

	mutex_unlock(&guc->send_mutex);
	return err;
}

/**
 * Enable buffer based command transport
 * Shall only be called for platforms with HAS_GUC_CT.
 * @guc: the guc
 * return: 0 on success
 *	non-zero on failure
 */
int intel_guc_enable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
	int err;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	err = ctch_open(guc, ctch);
	if (unlikely(err))
		return err;

	/* Switch into cmd transport buffer based send() */
	guc->send = intel_guc_send_ct;
	DRM_INFO("CT: %s\n", enableddisabled(true));
	return 0;
}

/**
 * Disable buffer based command transport.
 * Shall only be called for platforms with HAS_GUC_CT.
 * @guc: the guc
 */
void intel_guc_disable_ct(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;

	GEM_BUG_ON(!HAS_GUC_CT(dev_priv));

	if (!ctch_is_open(ctch))
		return;

	ctch_close(guc, ctch);

	/* Disable send */
	guc->send = intel_guc_send_nop;
	DRM_INFO("CT: %s\n", enableddisabled(false));
}