/*==============================================================================
Copyright(c) 2017 Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
22
23
24 #include "Internal/Common/GmmLibInc.h"
25
/////////////////////////////////////////////////////////////////////////////////////
/// Returns indication of whether resource is eligible for 64KB pages or not.
/// On Windows, UMD must call this api after GmmResCreate()
/// @return 1/0
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::Is64KBPageSuitable()
{
    bool Ignore64KBPadding = false;
    //!!!! DO NOT USE GetSizeSurface() as it returns the padded size and not natural size.
    // Natural footprint = main surface plus both aux surfaces.
    GMM_GFX_SIZE_T Size = Surf.Size + AuxSurf.Size + AuxSecSurf.Size;

    __GMM_ASSERT(Size);

    // All ESM resources and VirtualPadding are exempt from 64KB paging
    if(Surf.Flags.Info.ExistingSysMem ||
       Surf.Flags.Info.XAdapter ||
       Surf.Flags.Gpu.CameraCapture ||
       Surf.Flags.Info.KernelModeMapped ||
       (Surf.Flags.Gpu.S3d && !Surf.Flags.Gpu.S3dDx &&
        !GetGmmLibContext()->GetSkuTable().FtrDisplayEngineS3d)
#if(LHDM)
       || (Surf.Flags.Info.AllowVirtualPadding &&
           ExistingSysMem.hParentAllocation)
#endif
       )
    {
        Ignore64KBPadding = true;
    }

    if(GetGmmLibContext()->GetSkuTable().FtrLocalMemory)
    {
        // Local-memory platforms: skip 64KB padding for system-memory-only
        // resources and for shared, CPU-lockable resources.
        Ignore64KBPadding |= (Surf.Flags.Info.NonLocalOnly || (Surf.Flags.Info.Shared && !Surf.Flags.Info.NotLockable));
        // When 4KB pages are allowed in local memory, honor the client's
        // explicit request to avoid optimization padding...
        Ignore64KBPadding |= (GetGmmLibContext()->GetSkuTable().FtrLocalMemoryAllows4KB && Surf.Flags.Info.NoOptimizationPadding);
        // ...and also skip padding when rounding up to 64KB would grow the
        // allocation beyond the allowed percentage.
        Ignore64KBPadding |= ((GetGmmLibContext()->GetSkuTable().FtrLocalMemoryAllows4KB) && (((Size * (100 + (GMM_GFX_SIZE_T)GetGmmLibContext()->GetAllowedPaddingFor64KbPagesPercentage())) / 100) < GFX_ALIGN(Size, GMM_KBYTE(64))));
    }
    else
    {
        // The final padded size cannot be larger than a set percentage of the original size
        if((Surf.Flags.Info.NoOptimizationPadding && !GFX_IS_ALIGNED(Size, GMM_KBYTE(64))) /*Surface is not 64kb aligned*/ ||
           (!Surf.Flags.Info.NoOptimizationPadding && (((Size * (100 + GetGmmLibContext()->GetAllowedPaddingFor64KbPagesPercentage())) / 100) < GFX_ALIGN(Size, GMM_KBYTE(64)))) /*10% padding TBC */)
        {
            Ignore64KBPadding |= true;
        }
    }

    // If 64KB paging is enabled pad out the resource to 64KB alignment
    if(GetGmmLibContext()->GetSkuTable().FtrWddm2_1_64kbPages &&
       // Ignore the padding for the above VirtualPadding or ESM cases
       (!Ignore64KBPadding) &&
       // Resource must be 64KB aligned
       (GFX_IS_ALIGNED(Surf.Alignment.BaseAlignment, GMM_KBYTE(64)) ||
        // Or must be aligned to a factor of 64KB
        (Surf.Alignment.BaseAlignment == GMM_KBYTE(32)) ||
        (Surf.Alignment.BaseAlignment == GMM_KBYTE(16)) ||
        (Surf.Alignment.BaseAlignment == GMM_KBYTE(8)) ||
        (Surf.Alignment.BaseAlignment == GMM_KBYTE(4))))
    {
        return 1;
    }

    return 0;
}
88
89 /////////////////////////////////////////////////////////////////////////////////////
90 /// Allows clients to "create" any type of resource. This function does not
91 /// allocate any memory for the resource. It just calculates the various parameters
92 /// which are useful for the client and can be queried using other functions.
93 ///
94 /// @param[in] GmmLib Context: Reference to ::GmmLibContext
95 /// @param[in] CreateParams: Flags which specify what sort of resource to create
96 ///
97 /// @return ::GMM_STATUS
98 /////////////////////////////////////////////////////////////////////////////////////
Create(GMM_RESCREATE_PARAMS & CreateParams)99 GMM_STATUS GMM_STDCALL GmmLib::GmmResourceInfoCommon::Create(GMM_RESCREATE_PARAMS &CreateParams)
100 {
101 GMM_STATUS Status = GMM_ERROR;
102 // ToDo: Only Vk is using this Create API directly. Derive the GmmLibCOntext from the ClientContext stored in
103 // ResInfo object.
104 Status = Create(*(reinterpret_cast<GMM_CLIENT_CONTEXT *>(pClientContext)->GetLibContext()), CreateParams);
105
106 return Status;
107 }
108
/////////////////////////////////////////////////////////////////////////////////////
/// Allows clients to "create" Custom memory layout received from the App as user pointer or DMABUF
/// This function does not allocate any memory for the resource. It just calculates/ populates the various parameters
/// which are useful for the client and can be queried using other functions.
///
/// @param[in] GmmLib Context: Reference to ::GmmLibContext
/// @param[in] CreateParams: Describes the client-provided layout (format, pitch,
///            size, alignment, per-plane offsets, etc.)
///
/// @return ::GMM_STATUS
/////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GMM_STDCALL GmmLib::GmmResourceInfoCommon::CreateCustomRes(Context &GmmLibContext, GMM_RESCREATE_CUSTOM_PARAMS &CreateParams)
{
    const GMM_PLATFORM_INFO *pPlatform;
    GMM_STATUS               Status       = GMM_ERROR;
    GMM_TEXTURE_CALC *       pTextureCalc = NULL;
    uint32_t                 BitsPerPixel, i;


    GMM_DPF_ENTER;

    // Resolve the client type and stash the lib context so later
    // GetGmmLibContext() calls work on this ResInfo object.
    GET_GMM_CLIENT_TYPE(pClientContext, ClientType);
    pGmmUmdLibContext = reinterpret_cast<uint64_t>(&GmmLibContext);


    // Validate the format and look up its bits-per-pixel from the platform format table.
    if((CreateParams.Format > GMM_FORMAT_INVALID) &&
       (CreateParams.Format < GMM_RESOURCE_FORMATS))
    {
        BitsPerPixel = GetGmmLibContext()->GetPlatformInfo().FormatTable[CreateParams.Format].Element.BitsPer;
    }
    else
    {
        GMM_ASSERTDPF(0, "Format Error");
        Status = GMM_INVALIDPARAM;
        goto ERROR_CASE;
    }

    pPlatform    = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());

    // Adopt the client-described layout verbatim — nothing is computed or padded here.
    Surf.Type                    = CreateParams.Type;
    Surf.Format                  = CreateParams.Format;
    Surf.BaseWidth               = CreateParams.BaseWidth64;
    Surf.BaseHeight              = CreateParams.BaseHeight;
    Surf.Flags                   = CreateParams.Flags;
    Surf.CachePolicy.Usage       = CreateParams.Usage;
    Surf.Pitch                   = CreateParams.Pitch;
    Surf.Size                    = CreateParams.Size;
    Surf.Alignment.BaseAlignment = CreateParams.BaseAlignment;
    // Custom resources are single-subresource; mips/arrays are not supported by
    // this interface. NOTE(review): MaxLod is set to 1 (not 0) — confirm intended.
    Surf.MaxLod                  = 1;
    Surf.ArraySize               = 1;
    Surf.CpTag                   = CreateParams.CpTag;

#if(_DEBUG || _RELEASE_INTERNAL)
    Surf.Platform = GetGmmLibContext()->GetPlatformInfo().Platform;
#endif
    Surf.BitsPerPixel     = BitsPerPixel;
    // QPitch spans the whole client-provided surface (single subresource).
    Surf.Alignment.QPitch = (GMM_GLOBAL_GFX_SIZE_T)(Surf.Pitch * Surf.BaseHeight);

    pTextureCalc->SetTileMode(&Surf);

    if(GmmIsPlanar(Surf.Format))
    {
        if(GMM_IS_TILED(pPlatform->TileInfo[Surf.TileMode]))
        {
            Surf.OffsetInfo.Plane.IsTileAlignedPlanes = true;
        }
        // Copy the client's per-plane offsets; plane arrays are 1-based.
        for(i = 1; i <= CreateParams.NoOfPlanes; i++)
        {
            Surf.OffsetInfo.Plane.X[i] = CreateParams.PlaneOffset.X[i];
            Surf.OffsetInfo.Plane.Y[i] = CreateParams.PlaneOffset.Y[i];
        }
        Surf.OffsetInfo.Plane.NoOfPlanes = CreateParams.NoOfPlanes;

        if (Surf.ArraySize > 1)
        {
            //Surf.OffsetInfo.Plane.ArrayQPitch = Surf.Size; //Not required as this new interface doesn't support arrayed surfaces.
        }

        // Derive the un-aligned plane heights and sanity-check the offsets.
        UpdateUnAlignedParams();
    }

    switch(Surf.Type)
    {
        case RESOURCE_1D:
        case RESOURCE_2D:
        case RESOURCE_PRIMARY:
        case RESOURCE_SHADOW:
        case RESOURCE_STAGING:
        case RESOURCE_GDI:
        case RESOURCE_NNDI:
        case RESOURCE_HARDWARE_MBM:
        case RESOURCE_OVERLAY_INTERMEDIATE_SURFACE:
        case RESOURCE_IFFS_MAPTOGTT:
#if _WIN32
        case RESOURCE_WGBOX_ENCODE_DISPLAY:
        case RESOURCE_WGBOX_ENCODE_REFERENCE:
#endif
        {

            if (Surf.ArraySize > 1)
            {
                // Surf.OffsetInfo.Texture2DOffsetInfo.ArrayQPitchRender =
                // Surf.OffsetInfo.Texture2DOffsetInfo.ArrayQPitchLock = Surf.Pitch * Surf.BaseHeight; //Not required as this new interface doesn't support arrayed surfaces.
            }

            // Single subresource starts at offset 0 for every (unused) LOD slot.
            for(i = 0; i <= Surf.MaxLod; i++)
            {
                Surf.OffsetInfo.Texture2DOffsetInfo.Offset[i] = 0;
            }

            break;
        }
        default:
        {
            GMM_ASSERTDPF(0, "GmmTexAlloc: Unknown surface type!");
            Status = GMM_INVALIDPARAM;
            goto ERROR_CASE;
            ;
        }
    };

    GMM_DPF_EXIT;
    return GMM_SUCCESS;

ERROR_CASE:
    //Zero out all the members (placement-new re-runs the default ctor in place)
    new(this) GmmResourceInfoCommon();

    GMM_DPF_EXIT;
    return Status;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Allows clients to "create" any type of resource. This function does not
/// allocate any memory for the resource. It just calculates the various parameters
/// which are useful for the client and can be queried using other functions.
///
/// @param[in] GmmLib Context: Reference to ::GmmLibContext
/// @param[in] CreateParams: Flags which specify what sort of resource to create
///
/// @return ::GMM_STATUS
/////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GMM_STDCALL GmmLib::GmmResourceInfoCommon::Create(Context &GmmLibContext, GMM_RESCREATE_PARAMS &CreateParams)
{
    const GMM_PLATFORM_INFO *pPlatform;
    GMM_STATUS               Status       = GMM_ERROR;
    GMM_TEXTURE_CALC *       pTextureCalc = NULL;

    GMM_DPF_ENTER;

    GET_GMM_CLIENT_TYPE(pClientContext, ClientType);
    pGmmUmdLibContext = reinterpret_cast<uint64_t>(&GmmLibContext);
    __GMM_ASSERTPTR(pGmmUmdLibContext, GMM_ERROR);

    // Tiled layouts cannot be honored on client-provided (existing) system memory.
    if(CreateParams.Flags.Info.ExistingSysMem &&
       (CreateParams.Flags.Info.TiledW ||
        CreateParams.Flags.Info.TiledX ||
        GMM_IS_4KB_TILE(CreateParams.Flags) ||
        GMM_IS_64KB_TILE(CreateParams.Flags)))
    {
        GMM_ASSERTDPF(0, "Tiled System Accelerated Memory not supported.");
        Status = GMM_INVALIDPARAM;
        goto ERROR_CASE;
    }

    // Copy CreateParams into Surf/AuxSurf and derive the basic state.
    if(!CopyClientParams(CreateParams))
    {
        Status = GMM_INVALIDPARAM;
        goto ERROR_CASE;
    }

    pPlatform    = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());

#if defined(__GMM_KMD__) || !defined(_WIN32)
    if(!CreateParams.Flags.Info.ExistingSysMem)
#else
    // TiledResource uses a private gfx alloc, which doesn't receive a WDDM CreateAllocation
    if(!CreateParams.Flags.Info.ExistingSysMem &&
       (CreateParams.NoGfxMemory || CreateParams.Flags.Gpu.TiledResource))
#endif
    {
        if(!ValidateParams())
        {
            GMM_ASSERTDPF(0, "Invalid parameter!");
            Status = GMM_INVALIDPARAM;
            goto ERROR_CASE;
        }

        // Compute the main surface layout (pitch, size, alignments, offsets).
        if(GMM_SUCCESS != pTextureCalc->AllocateTexture(&Surf))
        {
            GMM_ASSERTDPF(0, "GmmTexAlloc failed!");
            goto ERROR_CASE;
        }

        if(Surf.Flags.Gpu.UnifiedAuxSurface)
        {
            GMM_GFX_SIZE_T TotalSize;
            uint32_t       Alignment;

            // Fill CCS info into the appropriate aux surface (secondary when present).
            if(GMM_SUCCESS != pTextureCalc->FillTexCCS(&Surf, (AuxSecSurf.Type != RESOURCE_INVALID ? &AuxSecSurf : &AuxSurf)))
            {
                GMM_ASSERTDPF(0, "GmmTexAlloc failed!");
                goto ERROR_CASE;
            }

            // Lay out the aux surface unless FillTexCCS already sized it.
            if(AuxSurf.Size == 0 && AuxSurf.Type != RESOURCE_INVALID && GMM_SUCCESS != pTextureCalc->AllocateTexture(&AuxSurf))
            {
                GMM_ASSERTDPF(0, "GmmTexAlloc failed!");
                goto ERROR_CASE;
            }

            AuxSurf.UnpaddedSize = AuxSurf.Size;

            if(Surf.Flags.Gpu.IndirectClearColor ||
               Surf.Flags.Gpu.ColorDiscard)
            {
                if(GetGmmLibContext()->GetSkuTable().FtrFlatPhysCCS && AuxSurf.Type == RESOURCE_INVALID)
                {
                    //ie only AuxType is CCS, doesn't exist with FlatCCS, enable it for CC
                    AuxSurf.Type = Surf.Type;
                }
                // Reserve space at the end of the aux surface for the clear-color value.
                if(!Surf.Flags.Gpu.TiledResource)
                {
                    AuxSurf.CCSize = PAGE_SIZE; // 128bit Float Value + 32bit RT Native Value + Padding.
                    AuxSurf.Size += PAGE_SIZE;
                }
                else
                {
                    AuxSurf.CCSize = GMM_KBYTE(64); // 128bit Float Value + 32bit RT Native Value + Padding.
                    AuxSurf.Size += GMM_KBYTE(64);
                }
            }

            if(Surf.Flags.Gpu.ProceduralTexture)
            {
                //Do not require main surface access either in GPUVA/physical space.
                Surf.Size = 0;
            }

            TotalSize = Surf.Size + AuxSurf.Size; //Not including AuxSecSurf size, multi-Aux surface isn't supported for displayables
            Alignment = GFX_ULONG_CAST(Surf.Pitch * pPlatform->TileInfo[Surf.TileMode].LogicalTileHeight);

            // We need to pad the aux size to the size of the paired surface's tile row (i.e. Pitch * TileHeight) to
            // ensure the entire surface can be described with a constant pitch (for GGTT aliasing, clean FENCE'ing and
            // AcquireSwizzlingRange, even though the aux isn't intentionally part of such fencing).
            if(Surf.Flags.Gpu.FlipChain &&
               !__GMM_IS_ALIGN(TotalSize, Alignment))
            {
                AuxSurf.Size += (GFX_ALIGN_NP2(TotalSize, Alignment) - TotalSize);
            }

            if((Surf.Size + AuxSurf.Size + AuxSecSurf.Size) > (GMM_GFX_SIZE_T)(pPlatform->SurfaceMaxSize))
            {
                GMM_ASSERTDPF(0, "Surface too large!");
                goto ERROR_CASE;
            }
        }
    }

    if(Surf.Flags.Info.ExistingSysMem)
    {
        // GMM "allocates" (i.e. owns) the memory itself only when the client
        // did NOT hand in a user pointer + size.
        Surf.ExistingSysMem.IsGmmAllocated =
        (CreateParams.pExistingSysMem &&
         CreateParams.ExistingSysMemSize) ?
        false :
        true;

        if(!Surf.ExistingSysMem.IsGmmAllocated)
        {
            // Client memory qualifies as page-aligned only if both the base
            // address and the end address fall on page boundaries.
            Surf.ExistingSysMem.IsPageAligned =
            (((CreateParams.pExistingSysMem & (PAGE_SIZE - 1)) == 0) &&
             (((CreateParams.pExistingSysMem + CreateParams.ExistingSysMemSize) & (PAGE_SIZE - 1)) == 0)) ?
            true :
            false;
        }

        if(!ValidateParams())
        {
            GMM_ASSERTDPF(0, "Invalid parameter!");
            goto ERROR_CASE;
        }

        // Get surface Gfx memory size required.
        if(GMM_SUCCESS != pTextureCalc->AllocateTexture(&Surf))
        {
            GMM_ASSERTDPF(0, "GmmTexAlloc failed!");
            goto ERROR_CASE;
        }

        if(CreateParams.pExistingSysMem &&
           CreateParams.ExistingSysMemSize)
        {
            // Client provided own memory and is not assumed to be Gfx aligned
            ExistingSysMem.IsGmmAllocated = 0;

            ExistingSysMem.pExistingSysMem = CreateParams.pExistingSysMem;
            ExistingSysMem.Size            = CreateParams.ExistingSysMemSize;

            // An upper dword of 0xffffffff is invalid and may mean the address
            // was sign extended or came from a rogue UMD. In either case
            // we can truncate the address down to 32 bits prevent attempts
            // to access an invalid address range.
            if((ExistingSysMem.pExistingSysMem & (0xffffffff00000000ull)) == (0xffffffff00000000ull))
            {
                ExistingSysMem.pExistingSysMem &= 0xffffffff;
            }

            //Align the base address to new ESM requirements.
            if(!Surf.ExistingSysMem.IsPageAligned)
            {
                if(GMM_SUCCESS != ApplyExistingSysMemRestrictions())
                {
                    GMM_ASSERTDPF(0, "Malloc'ed Sys Mem too small for gfx surface!");
                    goto ERROR_CASE;
                }
            }
            else
            {
                ExistingSysMem.pVirtAddress =
                ExistingSysMem.pGfxAlignedVirtAddress = CreateParams.pExistingSysMem;
            }

            // The (possibly realigned) surface must still fit inside the client's buffer.
            if((ExistingSysMem.pVirtAddress + Surf.Size) >
               (CreateParams.pExistingSysMem + ExistingSysMem.Size))
            {
                GMM_ASSERTDPF(0, "Malloc'ed Sys Mem too small for gfx surface");
                goto ERROR_CASE;
            }
        }
        else
        {
            __GMM_BUFFER_TYPE Restrictions = {0};

            ExistingSysMem.IsGmmAllocated     = 1;
            Surf.ExistingSysMem.IsPageAligned = 1;

            // Adjust memory size to compensate for Gfx alignment.
            pTextureCalc->GetResRestrictions(&Surf, Restrictions);
            ExistingSysMem.Size = Restrictions.Alignment + Surf.Size;

            ExistingSysMem.pVirtAddress = (uint64_t)GMM_MALLOC(GFX_ULONG_CAST(ExistingSysMem.Size));
            if(!ExistingSysMem.pVirtAddress)
            {
                GMM_ASSERTDPF(0, "Failed to allocate System Accelerated Memory.");
                goto ERROR_CASE;
            }
            else
            {
                ExistingSysMem.pGfxAlignedVirtAddress = (uint64_t)GFX_ALIGN(ExistingSysMem.pVirtAddress, Restrictions.Alignment);
            }
        }
    }

    if(Is64KBPageSuitable() && GetGmmLibContext()->GetSkuTable().FtrLocalMemory)
    {
        // BaseAlignment can be greater than 64KB and needs to be aligned to 64KB
        Surf.Alignment.BaseAlignment = GFX_MAX(GFX_ALIGN(Surf.Alignment.BaseAlignment, GMM_KBYTE(64)), GMM_KBYTE(64));
    }

    GMM_DPF_EXIT;
    return GMM_SUCCESS;

ERROR_CASE:
    //Zero out all the members (placement-new re-runs the default ctor in place)
    new(this) GmmResourceInfoCommon();

    if(CreateParams.pPreallocatedResInfo)
    {
        this->GetResFlags().Info.__PreallocatedResInfo = 1; // Set flag if PreAllocated ResInfo has been set by the Client.
    }

    GMM_DPF_EXIT;
    return Status;
}
483
/////////////////////////////////////////////////////////////////////////////////////
/// Derives the un-aligned (natural) per-plane heights for planar formats and
/// records them in Surf.OffsetInfo.Plane.UnAligned.Height[]. Called from
/// CreateCustomRes() after the client supplies its own plane offsets; asserts
/// at the end cross-check those offsets against the heights derived here.
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmResourceInfoCommon::UpdateUnAlignedParams()
{
    uint32_t YHeight = 0, VHeight = 0;
    uint32_t Height = 0, UmdUHeight = 0, UmdVHeight = 0;
    // Row width in bytes, derived from the natural (un-padded) width.
    uint32_t WidthBytesPhysical = GFX_ULONG_CAST(Surf.BaseWidth) * Surf.BitsPerPixel >> 3;

    // NOTE(review): __GMM_ASSERTPTR used on a boolean condition (not a pointer);
    // matches usage elsewhere in this function.
    __GMM_ASSERTPTR(((Surf.TileMode < GMM_TILE_MODES) && (Surf.TileMode >= TILE_NONE)), VOIDRETURN);
    GMM_DPF_ENTER;

    Height = Surf.BaseHeight;

    switch(Surf.Format)
    {
        case GMM_FORMAT_IMC1: // IMC1 = IMC3 with Swapped U/V
        case GMM_FORMAT_IMC3:
        case GMM_FORMAT_MFX_JPEG_YUV420: // Same as IMC3.
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // UUUU
        // UUUU
        // VVVV
        // VVVV
        case GMM_FORMAT_MFX_JPEG_YUV422V: // Similar to IMC3 but U/V are full width.
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // UUUUUUUU
            // UUUUUUUU
            // VVVVVVVV
            // VVVVVVVV
            {
                // U/V planes are half height, each row-aligned.
                YHeight = GFX_ALIGN(Surf.BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);

                VHeight = GFX_ALIGN(GFX_CEIL_DIV(Surf.BaseHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);

                break;
            }
        case GMM_FORMAT_MFX_JPEG_YUV411R_TYPE: //Similar to IMC3 but U/V are quarther height and full width.
            //YYYYYYYY
            //YYYYYYYY
            //YYYYYYYY
            //YYYYYYYY
            //UUUUUUUU
            //VVVVVVVV
            {
                // U/V planes are quarter height.
                YHeight = GFX_ALIGN(Surf.BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);

                VHeight = GFX_ALIGN(GFX_CEIL_DIV(Surf.BaseHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);

                break;
            }
        case GMM_FORMAT_MFX_JPEG_YUV411: // Similar to IMC3 but U/V are quarter width and full height.
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // UU
        // UU
        // UU
        // UU
        // VV
        // VV
        // VV
        // VV
        case GMM_FORMAT_MFX_JPEG_YUV422H: // Similar to IMC3 but U/V are full height.
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // YYYYYYYY
        // UUUU
        // UUUU
        // UUUU
        // UUUU
        // VVVV
        // VVVV
        // VVVV
        // VVVV
        case GMM_FORMAT_BGRP:
        case GMM_FORMAT_RGBP:
        case GMM_FORMAT_MFX_JPEG_YUV444: // Similar to IMC3 but U/V are full size.
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // UUUUUUUU
            // UUUUUUUU
            // UUUUUUUU
            // UUUUUUUU
            // VVVVVVVV
            // VVVVVVVV
            // VVVVVVVV
            // VVVVVVVV
            {
                // U/V planes are full height.
                YHeight = GFX_ALIGN(Surf.BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);

                VHeight = GFX_ALIGN(Surf.BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);

                break;
            }
        case GMM_FORMAT_IMC2: // IMC2 = IMC4 with Swapped U/V
        case GMM_FORMAT_IMC4:
        {
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // UUUUVVVV
            // UUUUVVVV

            // U and V share rows side-by-side, so the pitch must split evenly.
            __GMM_ASSERT((Surf.Pitch & 1) == 0);

            YHeight = GFX_ALIGN(Surf.BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);

            VHeight = GFX_CEIL_DIV(YHeight, 2);

            break;
        }
        case GMM_FORMAT_I420: // I420 = IYUV
        case GMM_FORMAT_IYUV: // I420/IYUV = YV12 with Swapped U/V
        case GMM_FORMAT_YV12:
        case GMM_FORMAT_YVU9:
        {
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // VVVVVV..  <-- V and U planes follow the Y plane, as linear
            // ..UUUUUU      arrays--without respect to pitch.

            uint32_t YSize, YVSizeRShift, VSize, UOffset;
            uint32_t YSizeForUVPurposes, YSizeForUVPurposesDimensionalAlignment;

            YSize = GFX_ULONG_CAST(Surf.Pitch) * Surf.BaseHeight;

            // YVU9 has one U/V pixel for each 4x4 Y block.
            // The others have one U/V pixel for each 2x2 Y block.

            // YVU9 has a Y:V size ratio of 16 (4x4 --> 1).
            // The others have a ratio of 4 (2x2 --> 1).
            YVSizeRShift = (Surf.Format != GMM_FORMAT_YVU9) ? 2 : 4;

            // If a Y plane isn't fully-aligned to its Y-->U/V block size, the
            // extra/unaligned Y pixels still need corresponding U/V pixels--So
            // for the purpose of computing the UVSize, we must consider a
            // dimensionally "rounded-up" YSize. (E.g. a 13x5 YVU9 Y plane would
            // require 4x2 U/V planes--the same UVSize as a fully-aligned 16x8 Y.)
            YSizeForUVPurposesDimensionalAlignment = (Surf.Format != GMM_FORMAT_YVU9) ? 2 : 4;
            YSizeForUVPurposes =
            GFX_ALIGN(GFX_ULONG_CAST(Surf.Pitch), YSizeForUVPurposesDimensionalAlignment) *
            GFX_ALIGN(Surf.BaseHeight, YSizeForUVPurposesDimensionalAlignment);

            VSize = (YSizeForUVPurposes >> YVSizeRShift);

            // Express the total (Y + U + V) byte size as a row count of the Y plane.
            YHeight = GFX_CEIL_DIV(YSize + 2 * VSize, WidthBytesPhysical);

            break;
        }
        case GMM_FORMAT_NV12:
        case GMM_FORMAT_NV21:
        case GMM_FORMAT_NV11:
        case GMM_FORMAT_P010:
        case GMM_FORMAT_P012:
        case GMM_FORMAT_P016:
        case GMM_FORMAT_P208:
        {
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // YYYYYYYY
            // [UV-Packing]
            YHeight = GFX_ALIGN(Height, __GMM_EVEN_ROW);

            // 4:2:0 variants have half-height interleaved UV; 4:1:1 / 4:2:2
            // variants (NV11/P208) have a full-height UV plane.
            if((Surf.Format == GMM_FORMAT_NV12) ||
               (Surf.Format == GMM_FORMAT_NV21) ||
               (Surf.Format == GMM_FORMAT_P010) ||
               (Surf.Format == GMM_FORMAT_P012) ||
               (Surf.Format == GMM_FORMAT_P016))
            {
                VHeight = GFX_CEIL_DIV(Height, 2);
            }
            else
            {
                VHeight = YHeight; // U/V plane is same as Y
            }

            break;
        }
        default:
        {
            GMM_ASSERTDPF(0, "Unknown Video Format U\n");
            break;
        }
    }

    // Publish the derived heights and validate the client-supplied Y-offsets.
    Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y] = YHeight;
    if(Surf.OffsetInfo.Plane.NoOfPlanes == 2)
    {
        Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] = VHeight;
        // UMD's view of the U-plane height: rows from the U offset to surface end.
        UmdUHeight = (GMM_GLOBAL_GFX_SIZE_T)((Surf.Size / Surf.Pitch) - Surf.OffsetInfo.Plane.Y[GMM_PLANE_U]);
    }
    else if(Surf.OffsetInfo.Plane.NoOfPlanes == 3)
    {
        Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] =
        Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_V] = VHeight;
        UmdUHeight = (GMM_GLOBAL_GFX_SIZE_T)(Surf.OffsetInfo.Plane.Y[GMM_PLANE_V] - Surf.OffsetInfo.Plane.Y[GMM_PLANE_U]);
        UmdVHeight = (GMM_GLOBAL_GFX_SIZE_T)(((Surf.Size / Surf.Pitch) - Surf.OffsetInfo.Plane.Y[GMM_PLANE_U]) / 2);
        // U and V planes must be the same height.
        __GMM_ASSERTPTR((UmdUHeight == UmdVHeight), VOIDRETURN);
    }

    // Y offset of the U plane must equal the Y-plane height, and the client's
    // U-plane extent must match the derived V height.
    __GMM_ASSERTPTR(((Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] == YHeight) && (UmdUHeight == VHeight)), VOIDRETURN);
}
698 /////////////////////////////////////////////////////////////////////////////////////
699 /// Returns downscaled width for fast clear of given subresource
700 /// @param[in] uint32_t : MipLevel
701 /// @return Width
702 /////////////////////////////////////////////////////////////////////////////////////
GetFastClearWidth(uint32_t MipLevel)703 uint64_t GmmLib::GmmResourceInfoCommon::GetFastClearWidth(uint32_t MipLevel)
704 {
705 uint64_t width = 0;
706 uint64_t mipWidth = GetMipWidth(MipLevel);
707 uint32_t numSamples = GetNumSamples();
708
709 GMM_TEXTURE_CALC *pTextureCalc;
710 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
711
712 if(numSamples == 1)
713 {
714 width = pTextureCalc->ScaleFCRectWidth(&Surf, mipWidth);
715 }
716 else if(numSamples == 2 || numSamples == 4)
717 {
718 width = GFX_ALIGN(mipWidth, 8) / 8;
719 }
720 else if(numSamples == 8)
721 {
722 width = GFX_ALIGN(mipWidth, 2) / 2;
723 }
724 else // numSamples == 16
725 {
726 width = mipWidth;
727 }
728
729 return width;
730 }
731
732
733 /////////////////////////////////////////////////////////////////////////////////////
734 /// Returns downscaled height for fast clear of given subresource
735 /// @param[in] uint32_t : MipLevel
736 /// @return height
737 /////////////////////////////////////////////////////////////////////////////////////
GetFastClearHeight(uint32_t MipLevel)738 uint32_t GmmLib::GmmResourceInfoCommon::GetFastClearHeight(uint32_t MipLevel)
739 {
740 uint32_t height = 0;
741 uint32_t mipHeight = GetMipHeight(MipLevel);
742 uint32_t numSamples = GetNumSamples();
743
744 GMM_TEXTURE_CALC *pTextureCalc;
745 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
746
747 if(numSamples == 1)
748 {
749 height = pTextureCalc->ScaleFCRectHeight(&Surf, mipHeight);
750 }
751 else
752 {
753 height = GFX_ALIGN(mipHeight, 2) / 2;
754 }
755
756 return height;
757 }
758
/////////////////////////////////////////////////////////////////////////////////////
/// Returns the Platform info. If Platform has been overriden by the clients, then
/// it returns the overriden Platform Info struct.
/// @return Reference to the relevent ::GMM_PLATFORM_INFO
/////////////////////////////////////////////////////////////////////////////////////
const GMM_PLATFORM_INFO &GmmLib::GmmResourceInfoCommon::GetPlatformInfo()
{
#if(defined(__GMM_KMD__) && (_DEBUG || _RELEASE_INTERNAL))
    // KMD debug/internal builds: if this surface was created for a different
    // render core than the context's current platform, return the override info.
    if(GFX_GET_CURRENT_RENDERCORE(Surf.Platform) != GFX_GET_CURRENT_RENDERCORE(((Context *)pGmmKmdLibContext)->GetPlatformInfo().Platform))
    {
        return ((Context *)pGmmKmdLibContext)->GetOverridePlatformInfo();
    }
    else
    {
        return ((Context *)pGmmKmdLibContext)->GetPlatformInfo();
    }
#else
    // UMD (and release KMD) path: always the context's platform info.
    return ((Context *)pGmmUmdLibContext)->GetPlatformInfo();
#endif
}
779
/////////////////////////////////////////////////////////////////////////////////////
/// Returns width padded to HAlign. Only called for special flags. See asserts in
/// function for which surfaces are supported.
///
/// @param[in] MipLevel Mip level for which the width is requested
/// @return Padded Width
/////////////////////////////////////////////////////////////////////////////////////
uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetPaddedWidth(uint32_t MipLevel)
{
    GMM_TEXTURE_CALC *pTextureCalc;
    uint32_t          AlignedWidth;
    GMM_GFX_SIZE_T    MipWidth;
    uint32_t          HAlign;

    __GMM_ASSERT(MipLevel <= Surf.MaxLod);

    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());

    // This shall be called for Depth and Separate Stencil main surface resource
    // This shall be called for the Aux surfaces (MCS, CCS and Hiz) too.
    // MCS will have Surf.Flags.Gpu.CCS set
    // Hiz will have Surf.Flags.Gpu.HiZ set
    __GMM_ASSERT(Surf.Flags.Gpu.Depth || Surf.Flags.Gpu.SeparateStencil ||
                 Surf.Flags.Gpu.CCS || Surf.Flags.Gpu.HiZ ||
                 AuxSurf.Flags.Gpu.__MsaaTileMcs ||
                 AuxSurf.Flags.Gpu.CCS || AuxSurf.Flags.Gpu.__NonMsaaTileYCcs);

    MipWidth = pTextureCalc->GmmTexGetMipWidth(&Surf, MipLevel);

    // Non-MSAA CCS aux surfaces use their own horizontal alignment.
    HAlign = Surf.Alignment.HAlign;
    if(AuxSurf.Flags.Gpu.CCS && AuxSurf.Flags.Gpu.__NonMsaaTileYCcs)
    {
        HAlign = AuxSurf.Alignment.HAlign;
    }

    AlignedWidth = __GMM_EXPAND_WIDTH(pTextureCalc,
                                      GFX_ULONG_CAST(MipWidth),
                                      HAlign,
                                      &Surf);

    if(Surf.Flags.Gpu.SeparateStencil)
    {
        // TileW stencil is stored at double width.
        if(Surf.Flags.Info.TiledW)
        {
            AlignedWidth *= 2;
        }

        // Reverse MSAA Expansion ////////////////////////////////////////////////
        // It might seem strange that we ExpandWidth (with consideration for MSAA)
        // only to "reverse" the MSAA portion of the expansion...It's an order-of-
        // operations thing--The intention of the reversal isn't to have
        // disregarded the original MSAA expansion but to produce a width, that
        // when MSAA'ed will match the true physical width (which requires MSAA
        // consideration to compute).
        switch(Surf.MSAA.NumSamples)
        {
            case 1:
                break;
            case 2: // Same as 4x...
            case 4:
                AlignedWidth /= 2;
                break;
            case 8: // Same as 16x...
            case 16:
                AlignedWidth /= 4;
                break;
            default:
                __GMM_ASSERT(0);
        }
    }

    // CCS Aux surface, Aligned width needs to be scaled based on main surface bpp
    if(AuxSurf.Flags.Gpu.CCS && AuxSurf.Flags.Gpu.__NonMsaaTileYCcs)
    {
        AlignedWidth = pTextureCalc->ScaleTextureWidth(&AuxSurf, AlignedWidth);
    }

    return AlignedWidth;
}
859
860 /////////////////////////////////////////////////////////////////////////////////////
861 /// Returns height padded to VAlign. Only called for special flags. See asserts in
862 /// function for which surfaces are supported.
863 ///
864 /// @param[in] MipLevel Mip level for which the height is requested
865 /// @return Padded height
866 /////////////////////////////////////////////////////////////////////////////////////
GetPaddedHeight(uint32_t MipLevel)867 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetPaddedHeight(uint32_t MipLevel)
868 {
869 GMM_TEXTURE_CALC *pTextureCalc;
870 uint32_t AlignedHeight, MipHeight;
871 uint32_t VAlign;
872
873 __GMM_ASSERT(MipLevel <= Surf.MaxLod);
874
875 // See note in GmmResGetPaddedWidth.
876 __GMM_ASSERT(Surf.Flags.Gpu.Depth || Surf.Flags.Gpu.SeparateStencil ||
877 Surf.Flags.Gpu.CCS || Surf.Flags.Gpu.HiZ ||
878 AuxSurf.Flags.Gpu.__MsaaTileMcs ||
879 AuxSurf.Flags.Gpu.CCS || AuxSurf.Flags.Gpu.__NonMsaaTileYCcs);
880
881 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
882
883 MipHeight = pTextureCalc->GmmTexGetMipHeight(&Surf, MipLevel);
884
885 VAlign = Surf.Alignment.VAlign;
886 if(AuxSurf.Flags.Gpu.CCS && AuxSurf.Flags.Gpu.__NonMsaaTileYCcs)
887 {
888 VAlign = AuxSurf.Alignment.VAlign;
889 }
890
891 AlignedHeight = __GMM_EXPAND_HEIGHT(pTextureCalc,
892 MipHeight,
893 VAlign,
894 &Surf);
895
896 if(Surf.Flags.Gpu.SeparateStencil)
897 {
898 if(Surf.Flags.Info.TiledW)
899 {
900 AlignedHeight /= 2;
901 }
902
903 // Reverse MSAA Expansion ////////////////////////////////////////////////
904 // See note in GmmResGetPaddedWidth.
905 switch(Surf.MSAA.NumSamples)
906 {
907 case 1:
908 break;
909 case 2:
910 break; // No height adjustment for 2x...
911 case 4: // Same as 8x...
912 case 8:
913 AlignedHeight /= 2;
914 break;
915 case 16:
916 AlignedHeight /= 4;
917 break;
918 default:
919 __GMM_ASSERT(0);
920 }
921 }
922
923 // CCS Aux surface, AlignedHeight needs to be scaled by 16
924 if(AuxSurf.Flags.Gpu.CCS && AuxSurf.Flags.Gpu.__NonMsaaTileYCcs)
925 {
926 AlignedHeight = pTextureCalc->ScaleTextureHeight(&AuxSurf, AlignedHeight);
927 }
928
929 return AlignedHeight;
930 }
931
932 /////////////////////////////////////////////////////////////////////////////////////
933 /// Returns pitch padded to VAlign. Only called for special flags. See asserts in
934 /// function for which surfaces are supported.
935 ///
936 /// @param[in] MipLevel Mip level for which the pitch is requested
937 /// @return Padded pitch
938 /////////////////////////////////////////////////////////////////////////////////////
GetPaddedPitch(uint32_t MipLevel)939 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetPaddedPitch(uint32_t MipLevel)
940 {
941 uint32_t AlignedWidth;
942 uint32_t AlignedPitch;
943 uint32_t BitsPerPixel;
944
945 __GMM_ASSERT(MipLevel <= Surf.MaxLod);
946
947 // See note in GetPaddedWidth.
948 AlignedWidth = GetPaddedWidth(MipLevel);
949
950 BitsPerPixel = Surf.BitsPerPixel;
951 if(AuxSurf.Flags.Gpu.CCS && AuxSurf.Flags.Gpu.__NonMsaaTileYCcs)
952 {
953 BitsPerPixel = 8; //Aux surface are 8bpp
954 }
955
956 AlignedPitch = AlignedWidth * BitsPerPixel >> 3;
957
958 return AlignedPitch;
959 }
960
961 /////////////////////////////////////////////////////////////////////////////////////
962 /// Returns resource's QPitch.
963 ///
964 /// @return QPitch
965 /////////////////////////////////////////////////////////////////////////////////////
GetQPitch()966 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetQPitch()
967 {
968 const GMM_PLATFORM_INFO *pPlatform;
969 uint32_t QPitch;
970
971 pPlatform = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
972
973 __GMM_ASSERT(GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN8_CORE);
974 __GMM_ASSERT((Surf.Type != RESOURCE_3D) ||
975 (GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE));
976
977 // 2D/CUBE ==> distance in rows between array slices
978 // 3D ==> distance in rows between R-slices
979 // Compressed ==> one row contains a complete compression block vertically
980 // HiZ ==> HZ_PxPerByte * HZ_QPitch
981 // Stencil ==> logical, i.e. not halved
982
983 if((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE) &&
984 GmmIsCompressed(GetGmmLibContext(), Surf.Format))
985 {
986 QPitch = Surf.Alignment.QPitch / GetCompressionBlockHeight();
987
988 if((Surf.Type == RESOURCE_3D) && !Surf.Flags.Info.Linear)
989 {
990 const GMM_TILE_MODE TileMode = Surf.TileMode;
991 __GMM_ASSERT(TileMode < GMM_TILE_MODES);
992 QPitch = GFX_ALIGN(QPitch, pPlatform->TileInfo[TileMode].LogicalTileHeight);
993 }
994 }
995 else if(Surf.Flags.Gpu.HiZ)
996 {
997 QPitch = Surf.Alignment.QPitch * pPlatform->HiZPixelsPerByte;
998 }
999 else
1000 {
1001 QPitch = Surf.Alignment.QPitch;
1002 }
1003
1004 return QPitch;
1005 }
1006
1007 /////////////////////////////////////////////////////////////////////////////////////
1008 /// Returns offset information to a particular mip map or plane.
1009 ///
/// @param[in][out] ReqInfo Descriptor indicating which offset the client is
///                 requesting. The computed offset is also passed back to the
///                 client in this parameter.
1012 /// @return ::GMM_STATUS
1013 /////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetOffset(GMM_REQ_OFFSET_INFO &ReqInfo)
{
    GMM_TEXTURE_CALC *pTextureCalc;

    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());

    __GMM_ASSERT((pTextureCalc != NULL));

    if(Surf.Flags.Info.RedecribedPlanes)
    {
        // Planar surface whose Y/U/V planes were redescribed as independent
        // textures: StdLayout offsets must be computed per plane and summed.
        uint8_t RestoreReqStdLayout = ReqInfo.ReqStdLayout ? 1 : 0;

        // Lock and Render offsets do not require additional handling
        if(ReqInfo.ReqLock || ReqInfo.ReqRender)
        {
            // Temporarily drop the StdLayout request so this call computes
            // only the Lock/Render offsets on the parent surface.
            ReqInfo.ReqStdLayout = 0;
            GmmTexGetMipMapOffset(&Surf, &ReqInfo, GetGmmLibContext());
            ReqInfo.ReqStdLayout = RestoreReqStdLayout;
        }

        if(ReqInfo.ReqStdLayout)
        {
            GMM_REQ_OFFSET_INFO TempReqInfo[GMM_MAX_PLANE] = {0};
            GMM_TEXTURE_INFO    TexInfo[GMM_MAX_PLANE];
            uint32_t            Plane, TotalPlanes = GmmLib::Utility::GmmGetNumPlanes(Surf.Format);

            // Caller must specify which plane they need the offset into if not
            // getting the whole surface size
            if(ReqInfo.Plane >= GMM_MAX_PLANE ||
               (ReqInfo.StdLayout.Offset != -1 && !ReqInfo.Plane))
            {
                __GMM_ASSERT(0);
                return GMM_ERROR;
            }

            // Clone the caller's request for each plane, with Lock/Render
            // disabled and the plane field cleared, since each query below
            // runs against that plane's own redescribed texture.
            TempReqInfo[GMM_PLANE_Y]         = *&ReqInfo;
            TempReqInfo[GMM_PLANE_Y].Plane   = GMM_NO_PLANE;
            TempReqInfo[GMM_PLANE_Y].ReqLock = TempReqInfo[GMM_PLANE_Y].ReqRender = 0;

            TempReqInfo[GMM_PLANE_V] = TempReqInfo[GMM_PLANE_U] = TempReqInfo[GMM_PLANE_Y];

            pTextureCalc->GetRedescribedPlaneParams(&Surf, GMM_PLANE_Y, &TexInfo[GMM_PLANE_Y]);
            pTextureCalc->GetRedescribedPlaneParams(&Surf, GMM_PLANE_U, &TexInfo[GMM_PLANE_U]);
            pTextureCalc->GetRedescribedPlaneParams(&Surf, GMM_PLANE_V, &TexInfo[GMM_PLANE_V]);

            if(GMM_SUCCESS != GmmTexGetMipMapOffset(&TexInfo[GMM_PLANE_Y], &TempReqInfo[GMM_PLANE_Y], GetGmmLibContext()) ||
               GMM_SUCCESS != GmmTexGetMipMapOffset(&TexInfo[GMM_PLANE_U], &TempReqInfo[GMM_PLANE_U], GetGmmLibContext()) ||
               GMM_SUCCESS != GmmTexGetMipMapOffset(&TexInfo[GMM_PLANE_V], &TempReqInfo[GMM_PLANE_V], GetGmmLibContext()))
            {
                __GMM_ASSERT(0);
                return GMM_ERROR;
            }

            // Pitches come straight from the requested plane's query.
            ReqInfo.StdLayout.TileDepthPitch = TempReqInfo[ReqInfo.Plane].StdLayout.TileDepthPitch;
            ReqInfo.StdLayout.TileRowPitch   = TempReqInfo[ReqInfo.Plane].StdLayout.TileRowPitch;

            if(ReqInfo.StdLayout.Offset == -1)
            {
                // Special request to get the StdLayout size
                ReqInfo.StdLayout.Offset = TempReqInfo[ReqInfo.Plane].StdLayout.Offset;

                if(!ReqInfo.Plane)
                {
                    // No specific plane requested: total size is the sum of
                    // all planes' StdLayout sizes.
                    for(Plane = GMM_PLANE_Y; Plane <= TotalPlanes; Plane++)
                    {
                        ReqInfo.StdLayout.Offset += TempReqInfo[Plane].StdLayout.Offset;
                    }
                }
            }
            else
            {
                // Offset within the requested plane...
                ReqInfo.StdLayout.Offset = TempReqInfo[ReqInfo.Plane].StdLayout.Offset;

                // ...plus the full StdLayout size of every preceding plane.
                for(Plane = GMM_PLANE_Y; Plane < (uint32_t)ReqInfo.Plane; Plane++)
                {
                    // Find the size of the previous planes and add it to the offset
                    TempReqInfo[Plane].StdLayout.Offset = -1; // -1 requests the plane's total size

                    if(GMM_SUCCESS != GmmTexGetMipMapOffset(&TexInfo[Plane], &TempReqInfo[Plane], GetGmmLibContext()))
                    {

                        __GMM_ASSERT(0);
                        return GMM_ERROR;
                    }

                    ReqInfo.StdLayout.Offset += TempReqInfo[Plane].StdLayout.Offset;
                }
            }
        }

        return GMM_SUCCESS;
    }
    else
    {
        // Non-redescribed surface: delegate directly to the texture helper.
        return GmmTexGetMipMapOffset(&Surf, &ReqInfo, GetGmmLibContext());
    }
}
1111
1112 /////////////////////////////////////////////////////////////////////////////////////
1113 /// Performs a CPU BLT between a specified GPU resource and a system memory surface,
1114 /// as defined by the GMM_RES_COPY_BLT descriptor.
1115 ///
1116 /// @param[in] pBlt: Describes the blit operation. See ::GMM_RES_COPY_BLT for more info.
1117 /// @return 1 if succeeded, 0 otherwise
1118 /////////////////////////////////////////////////////////////////////////////////////
uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::CpuBlt(GMM_RES_COPY_BLT *pBlt)
{
// Local failure macro: assert, record failure, and jump to the common exit.
#define REQUIRE(e)       \
    if(!(e))             \
    {                    \
        __GMM_ASSERT(0); \
        Success = 0;     \
        goto EXIT;       \
    }

    const GMM_PLATFORM_INFO *pPlatform;
    uint8_t                  Success = 1;
    GMM_TEXTURE_INFO *       pTexInfo;
    GMM_TEXTURE_CALC *       pTextureCalc;
    GMM_TEXTURE_INFO         RedescribedPlaneInfo;

    __GMM_ASSERTPTR(pBlt, 0);

    pPlatform    = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());

    // Only non-MSAA 1D/2D/Primary/Cube/3D surfaces are supported here.
    __GMM_ASSERT(
    Surf.Type == RESOURCE_1D ||
    Surf.Type == RESOURCE_2D ||
    Surf.Type == RESOURCE_PRIMARY ||
    Surf.Type == RESOURCE_CUBE ||
    Surf.Type == RESOURCE_3D);
    __GMM_ASSERT(pBlt->Gpu.MipLevel <= Surf.MaxLod);
    __GMM_ASSERT(Surf.MSAA.NumSamples <= 1);                         // Supported by CpuSwizzleBlt--but not yet this function.
    __GMM_ASSERT(!Surf.Flags.Gpu.Depth || Surf.MSAA.NumSamples <= 1); // MSAA depth currently ends up with a few exchange swizzles--CpuSwizzleBlt could support with expanded XOR'ing, but probably no use case.
    __GMM_ASSERT(!(
    pBlt->Blt.Upload &&
    Surf.Flags.Gpu.Depth &&
    (Surf.BitsPerPixel == 32) &&
    (pBlt->Sys.PixelPitch == 4) &&
    (pBlt->Blt.BytesPerPixel == 3))); // When uploading D24 data from D24S8 to D24X8, no harm in copying S8 to X8 and upload will then be faster.

    pTexInfo = &(Surf);

    // YUV Planar surface: classify the BLT rect by plane, and if it spans the
    // whole surface, decompose it into per-plane recursive CpuBlt() calls.
    if(pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes && GmmIsPlanar(Surf.Format))
    {
        uint32_t PlaneId     = GMM_NO_PLANE;
        uint32_t TotalHeight = 0;

        // Sum of the unaligned (natural) plane heights = monolithic height.
        if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 2)
        {
            TotalHeight = GFX_ULONG_CAST(pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y] +
                                         pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U]);
        }
        else if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 3)
        {
            TotalHeight = GFX_ULONG_CAST(pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y] +
                                         pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] +
                                         pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_V]);
        }
        else
        {
            TotalHeight = GFX_ULONG_CAST(pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y]); //YV12 exception
        }

        // Determine if BLT rectangle is for monolithic surface or contained in specific Y/UV plane
        if(((pBlt->Gpu.OffsetY + pBlt->Blt.Height <= Surf.OffsetInfo.Plane.Y[GMM_PLANE_U]) || pTexInfo->OffsetInfo.Plane.NoOfPlanes == 1) &&
           (pBlt->Gpu.OffsetX + pBlt->Blt.Width <= Surf.BaseWidth))
        {
            PlaneId = GMM_PLANE_Y;
        }
        else if(pBlt->Gpu.OffsetY >= Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] &&
                (pBlt->Gpu.OffsetY + pBlt->Blt.Height <= (Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] + pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U])) &&
                (pBlt->Gpu.OffsetX + pBlt->Blt.Width <= Surf.BaseWidth))
        {
            PlaneId = GMM_PLANE_U;
        }
        // NOTE(review): the V-plane bound below uses the U-plane height --
        // presumably because U and V planes share dimensions in planar YUV
        // formats; confirm this holds for all supported formats.
        else if(pBlt->Gpu.OffsetY >= Surf.OffsetInfo.Plane.Y[GMM_PLANE_V] &&
                (pBlt->Gpu.OffsetY + pBlt->Blt.Height <= (Surf.OffsetInfo.Plane.Y[GMM_PLANE_V] + pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U])) &&
                (pBlt->Gpu.OffsetX + pBlt->Blt.Width <= Surf.BaseWidth))
        {
            PlaneId = GMM_PLANE_V;
        }

        // For smaller surface, BLT rect may fall in Y Plane due to tile alignment but user may have requested monolithic BLT
        if(pBlt->Gpu.OffsetX == 0 &&
           pBlt->Gpu.OffsetY == 0 &&
           pBlt->Blt.Height >= TotalHeight)
        {
            PlaneId = GMM_MAX_PLANE;
        }

        if(PlaneId == GMM_MAX_PLANE)
        {
            // TODO BLT rect should not overlap between planes.
            {
                // __GMM_ASSERT(0); // decide later, for now blt it
                //return 0;
            }

            // BLT monolithic surface per plane and remove padding due to tiling.
            for(PlaneId = GMM_PLANE_Y; PlaneId <= pTexInfo->OffsetInfo.Plane.NoOfPlanes; PlaneId++)
            {
                if(PlaneId == GMM_PLANE_Y)
                {
                    pBlt->Gpu.OffsetX = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.X[GMM_PLANE_Y]);
                    pBlt->Gpu.OffsetY = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.Y[GMM_PLANE_Y]);
                    pBlt->Blt.Height  = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y]);
                }
                else if(PlaneId == GMM_PLANE_U)
                {
                    pBlt->Gpu.OffsetX = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.X[GMM_PLANE_U]);
                    pBlt->Gpu.OffsetY = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.Y[GMM_PLANE_U]);

                    // Advance the system pointer past the rows of the previous
                    // plane (pBlt->Blt.Height still holds that plane's height).
                    pBlt->Sys.pData  = (char *)pBlt->Sys.pData + uint32_t(pBlt->Blt.Height * pBlt->Sys.RowPitch);
                    pBlt->Blt.Height = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U]);
                    if(Surf.Flags.Info.RedecribedPlanes)
                    {
                        __GMM_ASSERT(0);
                    }
                }
                else
                {
                    pBlt->Gpu.OffsetX = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.X[GMM_PLANE_V]);
                    pBlt->Gpu.OffsetY = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.Y[GMM_PLANE_V]);
                    // NOTE(review): V-plane height taken from the U plane --
                    // presumably identical for planar YUV; confirm.
                    pBlt->Blt.Height  = GFX_ULONG_CAST(Surf.OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U]);
                    pBlt->Sys.pData   = (char *)pBlt->Sys.pData + uint32_t(pBlt->Blt.Height * pBlt->Sys.RowPitch);
                }

                // Recurse to BLT just this plane's rectangle.
                CpuBlt(pBlt);
            }
        }
        // else continue below
    }

    // UV packed planar surfaces will have different tiling geometries for the
    // Y and UV planes. Blts cannot span across the tiling boundaries and we
    // must select the proper mode for each plane. Non-UV packed formats will
    // have a constant tiling mode, and so do not have the same limits
    if(Surf.Flags.Info.RedecribedPlanes &&
       GmmIsUVPacked(Surf.Format))
    {
        // Reject rects that straddle the Y/UV boundary.
        if(!((pBlt->Gpu.OffsetY >= pTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_U]) ||
             ((pBlt->Gpu.OffsetY + pBlt->Blt.Height) <= pTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_U])))
        {
            __GMM_ASSERT(0);
            return false;
        }

        if(pBlt->Gpu.OffsetY < pTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_U])
        {
            pTextureCalc->GetRedescribedPlaneParams(pTexInfo, GMM_PLANE_Y, &RedescribedPlaneInfo);
            // Y Plane
            pTexInfo = &RedescribedPlaneInfo;
        }
        else
        {
            // UV Plane
            pTextureCalc->GetRedescribedPlaneParams(pTexInfo, GMM_PLANE_U, &RedescribedPlaneInfo);
            pTexInfo = &RedescribedPlaneInfo;
        }
    }

    if(pBlt->Blt.Slices > 1)
    {
        // Multi-slice request: recurse once per slice with adjusted system
        // pointer/buffer-size, each recursion doing a single-slice BLT.
        GMM_RES_COPY_BLT SliceBlt = *pBlt;
        uint32_t         Slice;

        SliceBlt.Blt.Slices = 1;
        for(Slice = pBlt->Gpu.Slice;
            Slice < (pBlt->Gpu.Slice + pBlt->Blt.Slices);
            Slice++)
        {
            SliceBlt.Gpu.Slice      = Slice;
            SliceBlt.Sys.pData      = (void *)((char *)pBlt->Sys.pData + (Slice - pBlt->Gpu.Slice) * pBlt->Sys.SlicePitch);
            SliceBlt.Sys.BufferSize = pBlt->Sys.BufferSize - GFX_ULONG_CAST((char *)SliceBlt.Sys.pData - (char *)pBlt->Sys.pData);
            CpuBlt(&SliceBlt);
        }
    }
    else // Single Subresource...
    {
        uint32_t            ResPixelPitch = pTexInfo->BitsPerPixel / CHAR_BIT;
        uint32_t            BlockWidth, BlockHeight, BlockDepth;
        uint32_t            __CopyWidthBytes, __CopyHeight, __OffsetXBytes, __OffsetY;
        GMM_REQ_OFFSET_INFO GetOffset = {0};

        pTextureCalc->GetCompressionBlockDimensions(pTexInfo->Format, &BlockWidth, &BlockHeight, &BlockDepth);

#if(LHDM)
        // Packed 4:2:2 formats: treat a GR/GB pair as one 4-byte element.
        if(pTexInfo->MsFormat == D3DDDIFMT_G8R8_G8B8 ||
           pTexInfo->MsFormat == D3DDDIFMT_R8G8_B8G8)
        {
            BlockWidth    = 2;
            ResPixelPitch = 4;
        }
#endif

        { // __CopyWidthBytes...
            uint32_t Width;

            if(!pBlt->Blt.Width) // i.e. "Full Width"
            {
                __GMM_ASSERT(!GmmIsPlanar(pTexInfo->Format)); // Caller must set Blt.Width--GMM "auto-size on zero" not supported with planars since multiple interpretations would confuse more than help.

                Width = GFX_ULONG_CAST(pTextureCalc->GmmTexGetMipWidth(pTexInfo, pBlt->Gpu.MipLevel));

                __GMM_ASSERT(Width >= pBlt->Gpu.OffsetX);
                Width -= pBlt->Gpu.OffsetX;
                __GMM_ASSERT(Width);
            }
            else
            {
                Width = pBlt->Blt.Width;
            }

            if(((pBlt->Sys.PixelPitch == 0) ||
                (pBlt->Sys.PixelPitch == ResPixelPitch)) &&
               ((pBlt->Blt.BytesPerPixel == 0) ||
                (pBlt->Blt.BytesPerPixel == ResPixelPitch)))
            {
                // Full-Pixel BLT...
                __CopyWidthBytes =
                GFX_CEIL_DIV(Width, BlockWidth) * ResPixelPitch;
            }
            else // Partial-Pixel BLT...
            {
                __GMM_ASSERT(BlockWidth == 1); // No partial-pixel support for block-compressed formats.

                // When copying between surfaces with different pixel pitches,
                // specify CopyWidthBytes in terms of unswizzled surface
                // (convenient convention used by CpuSwizzleBlt).
                __CopyWidthBytes =
                Width *
                (pBlt->Sys.PixelPitch ?
                 pBlt->Sys.PixelPitch :
                 ResPixelPitch);
            }
        }

        { // __CopyHeight...
            if(!pBlt->Blt.Height) // i.e. "Full Height"
            {
                __GMM_ASSERT(!GmmIsPlanar(pTexInfo->Format)); // Caller must set Blt.Height--GMM "auto-size on zero" not supported with planars since multiple interpretations would confuse more than help.

                __CopyHeight = pTextureCalc->GmmTexGetMipHeight(pTexInfo, pBlt->Gpu.MipLevel);
                __GMM_ASSERT(__CopyHeight >= pBlt->Gpu.OffsetY);
                __CopyHeight -= pBlt->Gpu.OffsetY;
                __GMM_ASSERT(__CopyHeight);
            }
            else
            {
                __CopyHeight = pBlt->Blt.Height;
            }

            // Height in compression-block rows.
            __CopyHeight = GFX_CEIL_DIV(__CopyHeight, BlockHeight);
        }

        // Offsets must land on compression-block boundaries.
        __GMM_ASSERT((pBlt->Gpu.OffsetX % BlockWidth) == 0);
        __OffsetXBytes = (pBlt->Gpu.OffsetX / BlockWidth) * ResPixelPitch + pBlt->Gpu.OffsetSubpixel;

        __GMM_ASSERT((pBlt->Gpu.OffsetY % BlockHeight) == 0);
        __OffsetY = (pBlt->Gpu.OffsetY / BlockHeight);

        { // Get pResData Offsets to this subresource...
            // Choose the offset flavor matching the surface layout:
            // linear -> Lock offset, StdSwizzle -> StdLayout, else Render.
            GetOffset.ReqLock      = pTexInfo->Flags.Info.Linear;
            GetOffset.ReqStdLayout = !GetOffset.ReqLock && pTexInfo->Flags.Info.StdSwizzle;
            GetOffset.ReqRender    = !GetOffset.ReqLock && !GetOffset.ReqStdLayout;
            GetOffset.MipLevel     = pBlt->Gpu.MipLevel;
            switch(pTexInfo->Type)
            {
                case RESOURCE_1D:
                case RESOURCE_2D:
                case RESOURCE_PRIMARY:
                {
                    GetOffset.ArrayIndex = pBlt->Gpu.Slice;
                    break;
                }
                case RESOURCE_CUBE:
                {
                    // Slice encodes array index * 6 + cube face.
                    GetOffset.ArrayIndex = pBlt->Gpu.Slice / 6;
                    GetOffset.CubeFace   = (GMM_CUBE_FACE_ENUM)(pBlt->Gpu.Slice % 6);
                    break;
                }
                case RESOURCE_3D:
                {
                    // Tile64/TiledYf pack LogicalTileDepth R-slices per tile.
                    GetOffset.Slice = (GMM_IS_64KB_TILE(pTexInfo->Flags) || pTexInfo->Flags.Info.TiledYf) ?
                                      (pBlt->Gpu.Slice / pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileDepth) :
                                      pBlt->Gpu.Slice;
                    break;
                }
                default:
                    __GMM_ASSERT(0);
            }

            REQUIRE(this->GetOffset(GetOffset) == GMM_SUCCESS);
        }

        if(pTexInfo->Flags.Info.Linear)
        {
            // Linear surface: plain row-by-row memcpy.
            char *   pDest, *pSrc;
            uint32_t DestPitch, SrcPitch;
            uint32_t y;

            __GMM_ASSERT( // Linear-to-linear subpixel BLT unexpected--Not implemented.
            (!pBlt->Sys.PixelPitch || (pBlt->Sys.PixelPitch == ResPixelPitch)) &&
            (!pBlt->Blt.BytesPerPixel || (pBlt->Blt.BytesPerPixel == ResPixelPitch)));

            if(pBlt->Blt.Upload)
            {
                pDest     = (char *)pBlt->Gpu.pData;
                DestPitch = GFX_ULONG_CAST(pTexInfo->Pitch);

                pSrc     = (char *)pBlt->Sys.pData;
                SrcPitch = pBlt->Sys.RowPitch;
            }
            else
            {
                pDest     = (char *)pBlt->Sys.pData;
                DestPitch = pBlt->Sys.RowPitch;

                pSrc     = (char *)pBlt->Gpu.pData;
                SrcPitch = GFX_ULONG_CAST(pTexInfo->Pitch);
            }

            __GMM_ASSERT(GetOffset.Lock.Offset < pTexInfo->Size);
            pDest += GetOffset.Lock.Offset + (__OffsetY * DestPitch + __OffsetXBytes);
            // NOTE(review): the subresource/XY offset is applied only to pDest
            // here -- presumably the caller's pointers/pitches account for the
            // source side; confirm for download (Gpu->Sys) paths.

            for(y = 0; y < __CopyHeight; y++)
            {
                // Memcpy per row isn't optimal, but doubt this linear-to-linear path matters.

#if _WIN32
#ifdef __GMM_KMD__
                GFX_MEMCPY_S
#else
                memcpy_s
#endif
                (pDest, __CopyWidthBytes, pSrc, __CopyWidthBytes);
#else
                memcpy(pDest, pSrc, __CopyWidthBytes);
#endif
                pDest += DestPitch;
                pSrc += SrcPitch;
            }
        }
        else // Swizzled BLT...
        {
            CPU_SWIZZLE_BLT_SURFACE LinearSurface = {0}, SwizzledSurface;
            uint32_t                ZOffset = 0;

            __GMM_ASSERT(GetOffset.Render.Offset64 < pTexInfo->Size);

            // Depth offset within a tile for depth-packed tiling modes.
            ZOffset = (pTexInfo->Type == RESOURCE_3D &&
                       (GMM_IS_64KB_TILE(pTexInfo->Flags) || pTexInfo->Flags.Info.TiledYf)) ?
                      (pBlt->Gpu.Slice % pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileDepth) :
                      0;

            if(pTexInfo->Flags.Info.StdSwizzle == 1)
            {
                // StdSwizzle: base from StdLayout offset; pitch/height derived
                // from the aligned mip dimensions.
                SwizzledSurface.pBase   = (char *)pBlt->Gpu.pData + GFX_ULONG_CAST(GetOffset.StdLayout.Offset);
                SwizzledSurface.OffsetX = __OffsetXBytes;
                SwizzledSurface.OffsetY = __OffsetY;
                SwizzledSurface.OffsetZ = ZOffset;

                uint32_t MipWidth  = GFX_ULONG_CAST(pTextureCalc->GmmTexGetMipWidth(pTexInfo, pBlt->Gpu.MipLevel));
                uint32_t MipHeight = pTextureCalc->GmmTexGetMipHeight(pTexInfo, pBlt->Gpu.MipLevel);

                pTextureCalc->AlignTexHeightWidth(pTexInfo, &MipHeight, &MipWidth);
                SwizzledSurface.Height = MipHeight;
                SwizzledSurface.Pitch  = MipWidth * ResPixelPitch;
            }
            else
            {
                // Regular tiling: base from Render offset plus intra-tile X/Y/Z.
                SwizzledSurface.pBase   = (char *)pBlt->Gpu.pData + GFX_ULONG_CAST(GetOffset.Render.Offset64);
                SwizzledSurface.Pitch   = GFX_ULONG_CAST(pTexInfo->Pitch);
                SwizzledSurface.OffsetX = GetOffset.Render.XOffset + __OffsetXBytes;
                SwizzledSurface.OffsetY = GetOffset.Render.YOffset + __OffsetY;
                SwizzledSurface.OffsetZ = GetOffset.Render.ZOffset + ZOffset;
                SwizzledSurface.Height  = GFX_ULONG_CAST(pTexInfo->Size / pTexInfo->Pitch);
            }

            SwizzledSurface.Element.Pitch = ResPixelPitch;

            LinearSurface.pBase = pBlt->Sys.pData;
            LinearSurface.Pitch = pBlt->Sys.RowPitch;
            // Zero RowPitch => treat the whole buffer as one row.
            LinearSurface.Height =
            pBlt->Sys.BufferSize /
            (pBlt->Sys.RowPitch ?
             pBlt->Sys.RowPitch :
             pBlt->Sys.BufferSize);
            LinearSurface.Element.Pitch =
            pBlt->Sys.PixelPitch ?
            pBlt->Sys.PixelPitch :
            ResPixelPitch;
            LinearSurface.Element.Size =
            SwizzledSurface.Element.Size =
            pBlt->Blt.BytesPerPixel ?
            pBlt->Blt.BytesPerPixel :
            ResPixelPitch;

            SwizzledSurface.pSwizzle = NULL;

            // Pick the swizzle descriptor for the surface's tiling mode.
            if(pTexInfo->Flags.Info.TiledW)
            {
                SwizzledSurface.pSwizzle = &INTEL_TILE_W;

                // Correct for GMM's 2x Pitch handling of stencil...
                // (Unlike the HW, CpuSwizzleBlt handles TileW as a natural,
                // 64x64=4KB tile, so the pre-Gen10 "double-pitch/half-height"
                // kludging to TileY shape must be reversed.)
                __GMM_ASSERT((SwizzledSurface.Pitch % 2) == 0);
                SwizzledSurface.Pitch /= 2;
                SwizzledSurface.Height *= 2;
            }
            else if(GMM_IS_4KB_TILE(pTexInfo->Flags) &&
                    !(pTexInfo->Flags.Info.TiledYf ||
                      GMM_IS_64KB_TILE(pTexInfo->Flags)))
            {
                if(GetGmmLibContext()->GetSkuTable().FtrTileY)
                {
                    SwizzledSurface.pSwizzle = &INTEL_TILE_Y;
                }
                else
                {
                    SwizzledSurface.pSwizzle = &INTEL_TILE_4;
                }
            }
            else if(pTexInfo->Flags.Info.TiledX)
            {
                SwizzledSurface.pSwizzle = &INTEL_TILE_X;
            }
            else // Yf/s...
            {
                // Select among the generated TileYf/TileYs/Tile64 swizzle
                // tables, keyed by dimensionality, MSAA sample count, and bpp.
                // clang-format off
                #define NA

                #define CASE(Layout, Tile, msaa, xD, bpe)                            \
                    case bpe:                                                        \
                        SwizzledSurface.pSwizzle = &Layout##_##Tile##_##msaa##xD##bpe; \
                        break

                #define SWITCH_BPP(Layout, Tile, msaa, xD)  \
                    switch(pTexInfo->BitsPerPixel)          \
                    {                                       \
                        CASE(Layout, Tile, msaa, xD, 8);    \
                        CASE(Layout, Tile, msaa, xD, 16);   \
                        CASE(Layout, Tile, msaa, xD, 32);   \
                        CASE(Layout, Tile, msaa, xD, 64);   \
                        CASE(Layout, Tile, msaa, xD, 128);  \
                    }

                #define SWITCH_MSAA_TILE64(Layout, Tile, xD)     \
                {\
                    switch(pTexInfo->MSAA.NumSamples)            \
                    {                                            \
                        case 0:                                  \
                            SWITCH_BPP(Layout, Tile, , xD);      \
                            break;                               \
                        case 1:                                  \
                            SWITCH_BPP(Layout, Tile, , xD);      \
                            break;                               \
                        case 2:                                  \
                            SWITCH_BPP(Layout, Tile, MSAA2_, xD); \
                            break;                               \
                        case 4:                                  \
                        case 8:                                  \
                        case 16:                                 \
                            SWITCH_BPP(Layout, Tile, MSAA_, xD); \
                            break;                               \
                    }\
                }

                #define SWITCH_MSAA(Layout, Tile, xD)             \
                {\
                    switch(pTexInfo->MSAA.NumSamples)             \
                    {                                             \
                        case 0:                                   \
                            SWITCH_BPP(Layout, Tile, , xD);       \
                            break;                                \
                        case 1:                                   \
                            SWITCH_BPP(Layout, Tile, , xD);       \
                            break;                                \
                        case 2:                                   \
                            SWITCH_BPP(Layout, Tile, MSAA2_, xD); \
                            break;                                \
                        case 4:                                   \
                            SWITCH_BPP(Layout, Tile, MSAA4_, xD); \
                            break;                                \
                        case 8:                                   \
                            SWITCH_BPP(Layout, Tile, MSAA8_, xD); \
                            break;                                \
                        case 16:                                  \
                            SWITCH_BPP(Layout, Tile, MSAA16_, xD); \
                            break;                                \
                    }\
                }
                // clang-format on

                if(pTexInfo->Type == RESOURCE_3D)
                {
                    if(pTexInfo->Flags.Info.TiledYf)
                    {
                        SWITCH_BPP(INTEL, TILE_YF, , 3D_);
                    }
                    else if(GMM_IS_64KB_TILE(pTexInfo->Flags))
                    {
                        if(GetGmmLibContext()->GetSkuTable().FtrTileY)
                        {
                            SWITCH_BPP(INTEL, TILE_YS, , 3D_);
                        }
                        else
                        {
                            SWITCH_BPP(INTEL, TILE_64, , 3D_);
                        }
                    }
                }
                else // 2D/Cube...
                {
                    if(pTexInfo->Flags.Info.TiledYf)
                    {
                        SWITCH_MSAA(INTEL, TILE_YF, );
                    }
                    else if(GMM_IS_64KB_TILE(pTexInfo->Flags))
                    {
                        if(GetGmmLibContext()->GetSkuTable().FtrTileY)
                        {
                            SWITCH_MSAA(INTEL, TILE_YS, );
                        }
                        else
                        {
                            SWITCH_MSAA_TILE64(INTEL, TILE_64, );
                        }
                    }
                }
            }
            __GMM_ASSERT(SwizzledSurface.pSwizzle);

            // Direction: Upload = linear->swizzled, else swizzled->linear.
            if(pBlt->Blt.Upload)
            {
                CpuSwizzleBlt(&SwizzledSurface, &LinearSurface, __CopyWidthBytes, __CopyHeight);
            }
            else
            {
                CpuSwizzleBlt(&LinearSurface, &SwizzledSurface, __CopyWidthBytes, __CopyHeight);
            }
        }
    }

EXIT:

    return Success;
}
1667
1668 /////////////////////////////////////////////////////////////////////////////////////
1669 /// Helper function that helps UMDs map in the surface in a layout that
1670 /// our HW understands. Clients call this function in a loop until it
1671 /// returns failure. Clients will get back information in pMapping->Span,
1672 /// which they can use to map Span.Size bytes to Span.VirtualOffset gfx
1673 /// address with Span.PhysicalOffset physical page.
1674 ///
1675 /// @param[in] pMapping: Clients call the function with initially zero'd out GMM_GET_MAPPING.
1676 /// @return 1 if more span descriptors to report, 0 if all mapping is done
1677 /////////////////////////////////////////////////////////////////////////////////////
GetMappingSpanDesc(GMM_GET_MAPPING * pMapping)1678 uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetMappingSpanDesc(GMM_GET_MAPPING *pMapping)
1679 {
1680 const GMM_PLATFORM_INFO *pPlatform;
1681 uint8_t WasFinalSpan = 0;
1682 GMM_TEXTURE_INFO * pTexInfo;
1683 GMM_TEXTURE_CALC * pTextureCalc;
1684 GMM_TEXTURE_INFO RedescribedPlaneInfo;
1685
1686 __GMM_ASSERT(Surf.Flags.Info.StdSwizzle);
1687
1688 pPlatform = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
1689 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
1690
1691 __GMM_ASSERT(pTextureCalc != NULL);
1692 pTexInfo = &Surf;
1693
1694 if(pMapping->Type == GMM_MAPPING_GEN9_YS_TO_STDSWIZZLE)
1695 {
1696 const uint32_t TileSize = GMM_KBYTE(64);
1697
1698 __GMM_ASSERT(Surf.Flags.Info.TiledYs);
1699 __GMM_ASSERT(
1700 (Surf.Type == RESOURCE_2D) ||
1701 (Surf.Type == RESOURCE_3D) ||
1702 (Surf.Type == RESOURCE_CUBE));
1703 __GMM_ASSERT(Surf.Flags.Gpu.Depth == 0); // TODO(Minor): Proper StdSwizzle exemptions?
1704 __GMM_ASSERT(Surf.Flags.Gpu.SeparateStencil == 0);
1705
1706 __GMM_ASSERT(AuxSurf.Size == 0); // TODO(Medium): Support not yet implemented, but DX12 UMD not using yet.
1707 __GMM_ASSERT(Surf.Flags.Gpu.MMC == 0); // TODO(Medium): Support not yet implemented, but not yet needed for DX12.
1708
1709 // For planar surfaces we need to reorder the planes into what HW expects.
1710 // OS will provide planes in [Y0][Y1][U0][U1][V0][V1] order while
1711 // HW requires them to be in [Y0][U0][V0][Y1][U1][V1] order
1712 if(Surf.Flags.Info.RedecribedPlanes)
1713 {
1714 if(pMapping->Scratch.Plane == GMM_NO_PLANE)
1715 {
1716 pMapping->Scratch.Plane = GMM_PLANE_Y;
1717 if(GmmLib::Utility::GmmGetNumPlanes(Surf.Format) == GMM_PLANE_V)
1718 {
1719 pMapping->Scratch.LastPlane = GMM_PLANE_V;
1720 }
1721 else
1722 {
1723 pMapping->Scratch.LastPlane = GMM_PLANE_U;
1724 }
1725 }
1726 else if(pMapping->Scratch.Row == pMapping->Scratch.Rows)
1727 {
1728 // If we've crossed into a new plane then need to reset
1729 // the current mapping info and adjust the mapping
1730 // params accordingly
1731 GMM_REQ_OFFSET_INFO ReqInfo = {0};
1732 uint32_t Plane = pMapping->Scratch.Plane + 1;
1733 GMM_YUV_PLANE LastPlane = pMapping->Scratch.LastPlane;
1734
1735 memset(pMapping, 0, sizeof(*pMapping));
1736
1737 pMapping->Type = GMM_MAPPING_GEN9_YS_TO_STDSWIZZLE;
1738 pMapping->Scratch.Plane = GMM_YUV_PLANE(Plane);
1739 pMapping->Scratch.LastPlane = LastPlane;
1740
1741 ReqInfo.ReqRender = ReqInfo.ReqStdLayout = 1;
1742 ReqInfo.Plane = GMM_YUV_PLANE(Plane);
1743
1744 this->GetOffset(ReqInfo);
1745
1746 pMapping->__NextSpan.PhysicalOffset = ReqInfo.StdLayout.Offset;
1747 pMapping->__NextSpan.VirtualOffset = ReqInfo.Render.Offset64;
1748 }
1749
1750 pTextureCalc->GetRedescribedPlaneParams(pTexInfo, GMM_PLANE_Y, &RedescribedPlaneInfo);
1751 pTexInfo = &RedescribedPlaneInfo;
1752
1753 }
1754
1755 // Initialization of Mapping Params...
1756 if(pMapping->Scratch.Element.Width == 0) // i.e. initially zero'ed struct.
1757 {
1758 uint32_t BytesPerElement = pTexInfo->BitsPerPixel / CHAR_BIT;
1759
1760 pMapping->Scratch.EffectiveLodMax = GFX_MIN(pTexInfo->MaxLod, pTexInfo->Alignment.MipTailStartLod);
1761
1762 pTextureCalc->GetCompressionBlockDimensions(
1763 pTexInfo->Format,
1764 &pMapping->Scratch.Element.Width,
1765 &pMapping->Scratch.Element.Height,
1766 &pMapping->Scratch.Element.Depth);
1767
1768 { // Tile Dimensions...
1769 GMM_TILE_MODE TileMode = pTexInfo->TileMode;
1770 __GMM_ASSERT(TileMode < GMM_TILE_MODES);
1771
1772 // Get Tile Logical Tile Dimensions (i.e. uncompressed pixels)...
1773 pMapping->Scratch.Tile.Width =
1774 (pPlatform->TileInfo[TileMode].LogicalTileWidth / BytesPerElement) *
1775 pMapping->Scratch.Element.Width;
1776
1777 pMapping->Scratch.Tile.Height =
1778 pPlatform->TileInfo[TileMode].LogicalTileHeight *
1779 pMapping->Scratch.Element.Height;
1780
1781 pMapping->Scratch.Tile.Depth =
1782 pPlatform->TileInfo[TileMode].LogicalTileDepth *
1783 pMapping->Scratch.Element.Depth;
1784
1785 pMapping->Scratch.RowPitchVirtual =
1786 GFX_ULONG_CAST(pTexInfo->Pitch) *
1787 pPlatform->TileInfo[TileMode].LogicalTileHeight *
1788 pPlatform->TileInfo[TileMode].LogicalTileDepth;
1789 }
1790
1791 { // Slice...
1792 uint32_t Lod;
1793 uint32_t LodsPerSlice =
1794 (pTexInfo->Type != RESOURCE_3D) ?
1795 pMapping->Scratch.EffectiveLodMax + 1 :
1796 1; // 3D Std Swizzle traverses slices before MIP's.
1797
1798 if(pMapping->Scratch.Plane)
1799 {
1800 // If planar then we need the parent descriptors planar pitch
1801 pMapping->Scratch.SlicePitch.Virtual =
1802 GFX_ULONG_CAST(Surf.OffsetInfo.Plane.ArrayQPitch) *
1803 (pMapping->Scratch.Tile.Depth / pMapping->Scratch.Element.Depth);
1804 }
1805 else
1806 {
1807 pMapping->Scratch.SlicePitch.Virtual =
1808 GFX_ULONG_CAST(pTexInfo->OffsetInfo.Texture2DOffsetInfo.ArrayQPitchRender) *
1809 (pMapping->Scratch.Tile.Depth / pMapping->Scratch.Element.Depth);
1810 }
1811
1812 // SlicePitch.Physical...
1813 __GMM_ASSERT(pMapping->Scratch.SlicePitch.Physical == 0);
1814 for(Lod = 0; Lod < LodsPerSlice; Lod++)
1815 {
1816 uint32_t MipCols, MipRows;
1817 GMM_GFX_SIZE_T MipWidth;
1818 uint32_t MipHeight;
1819
1820 MipWidth = pTextureCalc->GmmTexGetMipWidth(pTexInfo, Lod);
1821 MipHeight = pTextureCalc->GmmTexGetMipHeight(pTexInfo, Lod);
1822
1823 MipCols = GFX_ULONG_CAST(
1824 GFX_CEIL_DIV(
1825 MipWidth,
1826 pMapping->Scratch.Tile.Width));
1827 MipRows =
1828 GFX_CEIL_DIV(
1829 MipHeight,
1830 pMapping->Scratch.Tile.Height);
1831
1832 pMapping->Scratch.SlicePitch.Physical +=
1833 MipCols * MipRows * TileSize;
1834 }
1835 }
1836
1837 { // Mip0...
1838 if(pTexInfo->Type != RESOURCE_3D)
1839 {
1840 pMapping->Scratch.Slices =
1841 GFX_MAX(pTexInfo->ArraySize, 1) *
1842 ((pTexInfo->Type == RESOURCE_CUBE) ? 6 : 1);
1843 }
1844 else
1845 {
1846 pMapping->Scratch.Slices =
1847 GFX_CEIL_DIV(pTexInfo->Depth, pMapping->Scratch.Tile.Depth);
1848 }
1849
1850 if(pTexInfo->Pitch ==
1851 (GFX_ALIGN(pTexInfo->BaseWidth, pMapping->Scratch.Tile.Width) /
1852 pMapping->Scratch.Element.Width * BytesPerElement))
1853 {
1854 // Treat Each LOD0 MIP as Single, Large Mapping Row...
1855 pMapping->Scratch.Rows = 1;
1856
1857 pMapping->__NextSpan.Size =
1858 GFX_CEIL_DIV(pTexInfo->BaseWidth, pMapping->Scratch.Tile.Width) *
1859 GFX_CEIL_DIV(pTexInfo->BaseHeight, pMapping->Scratch.Tile.Height) *
1860 TileSize;
1861 }
1862 else
1863 {
1864 pMapping->Scratch.Rows =
1865 GFX_CEIL_DIV(pTexInfo->BaseHeight, pMapping->Scratch.Tile.Height);
1866
1867 pMapping->__NextSpan.Size =
1868 GFX_CEIL_DIV(pTexInfo->BaseWidth, pMapping->Scratch.Tile.Width) *
1869 TileSize;
1870 }
1871 }
1872 }
1873
1874 // This iteration's span descriptor...
1875 pMapping->Span = pMapping->__NextSpan;
1876
1877 // Prepare for Next Iteration...
1878 // for(Lod = 0; Lod <= EffectiveLodMax; Lod += 1)
1879 // for(Row = 0; Row < Rows; Row += 1)
1880 // for(Slice = 0; Slice < Slices; Slice += 1)
1881 if((pMapping->Scratch.Slice += 1) < pMapping->Scratch.Slices)
1882 {
1883 pMapping->__NextSpan.PhysicalOffset += pMapping->Scratch.SlicePitch.Physical;
1884 pMapping->__NextSpan.VirtualOffset += pMapping->Scratch.SlicePitch.Virtual;
1885 }
1886 else
1887 {
1888 pMapping->Scratch.Slice = 0;
1889
1890 if((pMapping->Scratch.Row += 1) < pMapping->Scratch.Rows)
1891 {
1892 pMapping->__NextSpan.PhysicalOffset =
1893 pMapping->Scratch.Slice0MipOffset.Physical += pMapping->Span.Size;
1894
1895 pMapping->__NextSpan.VirtualOffset =
1896 pMapping->Scratch.Slice0MipOffset.Virtual += pMapping->Scratch.RowPitchVirtual;
1897 }
1898 else if((pMapping->Scratch.Lod += 1) <= pMapping->Scratch.EffectiveLodMax)
1899 {
1900 GMM_REQ_OFFSET_INFO GetOffset = {0};
1901 GMM_GFX_SIZE_T MipWidth;
1902 uint32_t MipHeight, MipCols;
1903
1904 MipWidth = pTextureCalc->GmmTexGetMipWidth(pTexInfo, pMapping->Scratch.Lod);
1905 MipHeight = pTextureCalc->GmmTexGetMipHeight(pTexInfo, pMapping->Scratch.Lod);
1906
1907 MipCols = GFX_ULONG_CAST(
1908 GFX_CEIL_DIV(
1909 MipWidth,
1910 pMapping->Scratch.Tile.Width));
1911
1912 pMapping->Scratch.Row = 0;
1913 pMapping->Scratch.Rows =
1914 GFX_CEIL_DIV(
1915 MipHeight,
1916 pMapping->Scratch.Tile.Height);
1917
1918 if(pTexInfo->Type != RESOURCE_3D)
1919 {
1920 pMapping->__NextSpan.PhysicalOffset =
1921 pMapping->Scratch.Slice0MipOffset.Physical += pMapping->Span.Size;
1922 }
1923 else
1924 {
1925 uint32_t MipDepth;
1926
1927 MipDepth = pTextureCalc->GmmTexGetMipDepth(pTexInfo, pMapping->Scratch.Lod);
1928
1929 // 3D Std Swizzle traverses slices before MIP's...
1930 pMapping->Scratch.Slice0MipOffset.Physical =
1931 pMapping->__NextSpan.PhysicalOffset += pMapping->Span.Size;
1932
1933 pMapping->Scratch.Slices =
1934 GFX_CEIL_DIV(
1935 MipDepth,
1936 pMapping->Scratch.Tile.Depth);
1937
1938 pMapping->Scratch.SlicePitch.Physical =
1939 MipCols * pMapping->Scratch.Rows * TileSize;
1940 }
1941
1942 GetOffset.ReqRender = 1;
1943 GetOffset.MipLevel = pMapping->Scratch.Lod;
1944 this->GetOffset(GetOffset);
1945
1946 pMapping->__NextSpan.VirtualOffset =
1947 pMapping->Scratch.Slice0MipOffset.Virtual =
1948 GFX_ALIGN_FLOOR(GetOffset.Render.Offset64, TileSize); // Truncate for packed MIP Tail.
1949
1950 pMapping->__NextSpan.Size = MipCols * TileSize;
1951 }
1952 else
1953 {
1954 // If the resource was a planar surface then need to iterate over the remaining planes
1955 WasFinalSpan = pMapping->Scratch.Plane == pMapping->Scratch.LastPlane;
1956 }
1957 }
1958 }
1959 else
1960 {
1961 __GMM_ASSERT(0);
1962 }
1963
1964 return !WasFinalSpan;
1965 }
1966
1967 //=============================================================================
1968 //
1969 // Function: GetTiledResourceMipPacking
1970 //
1971 // Desc: Get number of packed mips and total #tiles for packed mips
1972 //
1973 // Parameters:
1974 // See function arguments.
1975 //
1976 // Returns:
1977 // void
1978 //-----------------------------------------------------------------------------
GetTiledResourceMipPacking(uint32_t * pNumPackedMips,uint32_t * pNumTilesForPackedMips)1979 void GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetTiledResourceMipPacking(uint32_t *pNumPackedMips,
1980 uint32_t *pNumTilesForPackedMips)
1981 {
1982 if(GetMaxLod() == 0)
1983 {
1984 *pNumPackedMips = 0;
1985 *pNumTilesForPackedMips = 0;
1986 return;
1987 }
1988
1989 if(GetResFlags().Info.TiledYf ||
1990 GMM_IS_64KB_TILE(GetResFlags()))
1991 {
1992 if(Surf.Alignment.MipTailStartLod == GMM_TILED_RESOURCE_NO_MIP_TAIL)
1993 {
1994 *pNumPackedMips = 0;
1995 *pNumTilesForPackedMips = 0;
1996 }
1997 else
1998 {
1999 *pNumPackedMips = GetMaxLod() -
2000 Surf.Alignment.MipTailStartLod + 1;
2001 *pNumTilesForPackedMips = 1;
2002 }
2003 }
2004 else
2005 {
2006 // Error, unsupported format.
2007 __GMM_ASSERT(false);
2008 }
2009 }
2010
2011 //=============================================================================
2012 //
2013 // Function: GetPackedMipTailStartLod
2014 //
2015 // Desc: Get Lod of first packed Mip.
2016 //
2017 // Parameters:
2018 // See function arguments.
2019 //
2020 // Returns:
2021 // Lod of first packed Mip
2022 //-----------------------------------------------------------------------------
GetPackedMipTailStartLod()2023 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetPackedMipTailStartLod()
2024
2025 {
2026 uint32_t NumPackedMips = 0, NumTilesForPackedMips = 0;
2027
2028 const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
2029
2030 GetTiledResourceMipPacking(&NumPackedMips,
2031 &NumTilesForPackedMips);
2032
2033 return (GetMaxLod() == 0) ?
2034 pPlatform->MaxLod :
2035 GetMaxLod() - NumPackedMips + 1; //GetMaxLod srarts at index 0, while NumPackedMips is just
2036 //the number of mips. So + 1 to bring them to same units.
2037 }
2038
2039 /////////////////////////////////////////////////////////////////////////////////////
2040 /// Verifies if all mips are RCC-aligned
2041 /// @return true/false
2042 /////////////////////////////////////////////////////////////////////////////////////
IsMipRCCAligned(uint8_t & MisAlignedLod)2043 bool GMM_STDCALL GmmLib::GmmResourceInfoCommon::IsMipRCCAligned(uint8_t &MisAlignedLod)
2044 {
2045 const uint8_t RCCCachelineWidth = 32;
2046 const uint8_t RCCCachelineHeight = 4;
2047
2048 for(uint8_t lod = 0; lod <= GetMaxLod(); lod++)
2049 {
2050 if(!(GFX_IS_ALIGNED(GetMipWidth(lod), RCCCachelineWidth) &&
2051 GFX_IS_ALIGNED(GetMipHeight(lod), RCCCachelineHeight)))
2052 {
2053 MisAlignedLod = lod;
2054 return false;
2055 }
2056 }
2057 return true;
2058 }
2059
2060 /////////////////////////////////////////////////////////////////////////////////////
2061 /// Return the logical width of mip level
2062 /// @param[in] MipLevel: Mip level for which the info is needed
2063 /// @return Mip width
2064 /////////////////////////////////////////////////////////////////////////////////////
GetMipWidth(uint32_t MipLevel)2065 GMM_GFX_SIZE_T GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetMipWidth(uint32_t MipLevel)
2066 {
2067 GMM_TEXTURE_CALC *pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
2068 return pTextureCalc->GmmTexGetMipWidth(&Surf, MipLevel);
2069 }
2070
2071 /////////////////////////////////////////////////////////////////////////////////////
2072 /// Return the logical height of mip level
2073 /// @param[in] MipLevel: Mip level for which the info is needed
2074 /// @return Mip height
2075 /////////////////////////////////////////////////////////////////////////////////////
GetMipHeight(uint32_t MipLevel)2076 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetMipHeight(uint32_t MipLevel)
2077 {
2078 GMM_TEXTURE_CALC *pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
2079 return pTextureCalc->GmmTexGetMipHeight(&Surf, MipLevel);
2080 }
2081
2082 /////////////////////////////////////////////////////////////////////////////////////
2083 /// Return the logical depth of mip level
2084 /// @param[in] MipLevel Mip level for which the info is needed
2085 /// @return Mip depth
2086 /////////////////////////////////////////////////////////////////////////////////////
GetMipDepth(uint32_t MipLevel)2087 uint32_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetMipDepth(uint32_t MipLevel)
2088 {
2089 GMM_TEXTURE_CALC *pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
2090 return pTextureCalc->GmmTexGetMipDepth(&Surf, MipLevel);
2091 }
2092