/*==============================================================================
Copyright(c) 2017 Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/

#include "Internal/Common/GmmLibInc.h"
24
25 /////////////////////////////////////////////////////////////////////////////////////
26 /// Checks that clients only set Presentable flag during a resource allocation, ONLY
27 /// when a platform supported render target is selected in ::GMM_RESOURCE_FORMAT enum.
28 ///
29 /// @return true if displayable, false otherwise.
30 /////////////////////////////////////////////////////////////////////////////////////
IsPresentableformat()31 bool GmmLib::GmmResourceInfoCommon::IsPresentableformat()
32 {
33 const GMM_PLATFORM_INFO *pPlatform;
34 const GMM_FORMAT_ENTRY * FormatTable = NULL;
35
36 GMM_DPF_ENTER;
37 __GMM_ASSERTPTR(GetGmmLibContext(), false);
38
39 pPlatform = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
40 FormatTable = &(pPlatform->FormatTable[0]);
41
42 if(Surf.Flags.Gpu.Presentable == false)
43 {
44 // When Presentable flag is not set, no reason to check for valid RT
45 // platform supported format. Safe to return true.
46 return true;
47 }
48
49 if((Surf.Format > GMM_FORMAT_INVALID) &&
50 (Surf.Format < GMM_RESOURCE_FORMATS))
51 {
52 if((FormatTable[Surf.Format].RenderTarget) &&
53 (FormatTable[Surf.Format].Supported))
54 {
55 return true;
56 }
57 else
58 {
59 GMM_ASSERTDPF(0, "Present flag can only be set w/ a format!");
60 return false;
61 }
62 }
63
64 return false;
65 }
66
67 /////////////////////////////////////////////////////////////////////////////////////
68 /// Returns the restrictions that a particular resource must follow on a particular
69 /// OS or hardware.
70 ///
71 /// @param[out] Restrictions: restrictions that this resource must adhere to
72 /////////////////////////////////////////////////////////////////////////////////////
GetRestrictions(__GMM_BUFFER_TYPE & Restrictions)73 void GmmLib::GmmResourceInfoCommon::GetRestrictions(__GMM_BUFFER_TYPE &Restrictions)
74 {
75 GMM_DPF_ENTER;
76
77 GMM_TEXTURE_CALC *pTextureCalc = NULL;
78 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
79 pTextureCalc->GetResRestrictions(&Surf, Restrictions);
80
81 GMM_DPF_EXIT;
82 }
83
84
85 //=============================================================================
86 //
87 // Function: GmmResGetRestrictions
88 //
89 // Desc: This routine returns resource restrictions
90 //
91 // Parameters:
92 // pPlatform: ptr to HW_DEVICE_EXTENSION
93 // pResourceInfo: ptr to GMM_RESOURCE_INFO
94 // pRestrictions: ptr to restrictions
95 //
96 // Returns:
97 // void
98 //
99 //-----------------------------------------------------------------------------
GmmResGetRestrictions(GMM_RESOURCE_INFO * pResourceInfo,__GMM_BUFFER_TYPE * pRestrictions)100 void GMM_STDCALL GmmResGetRestrictions(GMM_RESOURCE_INFO *pResourceInfo,
101 __GMM_BUFFER_TYPE *pRestrictions)
102 {
103 pResourceInfo->GetRestrictions(*pRestrictions);
104 }
105
106 /////////////////////////////////////////////////////////////////////////////////////
107 /// Returns the best restrictions by comparing two buffer types. Each buffer type
108 /// carries alignment and size restrictions.
109 ///
110 /// @param[in] pFirstBuffer: Contains surface alignment and size restrictions
111 /// @param[in] pSecondBuffer: Contains surface alignment and size restrictions
112 ///
113 /// @return Best Restrictions based on the two parameters passed
114 /////////////////////////////////////////////////////////////////////////////////////
GetBestRestrictions(__GMM_BUFFER_TYPE * pFirstBuffer,const __GMM_BUFFER_TYPE * pSecondBuffer)115 __GMM_BUFFER_TYPE *GmmLib::GmmTextureCalc::GetBestRestrictions(__GMM_BUFFER_TYPE * pFirstBuffer,
116 const __GMM_BUFFER_TYPE *pSecondBuffer)
117 {
118 GMM_DPF_ENTER;
119
120 if(IsRestrictionInvalid(pFirstBuffer)) //default
121 {
122 *pFirstBuffer = *pSecondBuffer;
123 return pFirstBuffer;
124 }
125
126 pFirstBuffer->Alignment = GFX_MAX(pFirstBuffer->Alignment,
127 pSecondBuffer->Alignment);
128
129 pFirstBuffer->PitchAlignment = GFX_MAX(pFirstBuffer->PitchAlignment,
130 pSecondBuffer->PitchAlignment);
131
132 pFirstBuffer->RenderPitchAlignment = GFX_MAX(pFirstBuffer->RenderPitchAlignment,
133 pSecondBuffer->RenderPitchAlignment);
134
135 pFirstBuffer->LockPitchAlignment = GFX_MAX(pFirstBuffer->LockPitchAlignment,
136 pSecondBuffer->LockPitchAlignment);
137
138 pFirstBuffer->MinPitch = GFX_MAX(pFirstBuffer->MinPitch,
139 pSecondBuffer->MinPitch);
140
141 pFirstBuffer->MinAllocationSize = GFX_MAX(pFirstBuffer->MinAllocationSize,
142 pSecondBuffer->MinAllocationSize);
143
144 pFirstBuffer->MinDepth = GFX_MAX(pFirstBuffer->MinDepth,
145 pSecondBuffer->MinDepth);
146
147 pFirstBuffer->MinHeight = GFX_MAX(pFirstBuffer->MinHeight,
148 pSecondBuffer->MinHeight);
149
150 pFirstBuffer->MinWidth = GFX_MAX(pFirstBuffer->MinWidth,
151 pSecondBuffer->MinWidth);
152
153 pFirstBuffer->MaxDepth = GFX_MIN(pFirstBuffer->MaxDepth,
154 pSecondBuffer->MaxDepth);
155
156 pFirstBuffer->MaxHeight = GFX_MIN(pFirstBuffer->MaxHeight,
157 pSecondBuffer->MaxHeight);
158
159 pFirstBuffer->MaxWidth = GFX_MIN(pFirstBuffer->MaxWidth,
160 pSecondBuffer->MaxWidth);
161
162 pFirstBuffer->NeedPow2LockAlignment = pFirstBuffer->NeedPow2LockAlignment |
163 pSecondBuffer->NeedPow2LockAlignment;
164
165 GMM_DPF_EXIT;
166 return pFirstBuffer;
167 }
168
169 /////////////////////////////////////////////////////////////////////////////////////
170 /// Returns restrictions for 1D, 2D, 3D textures depending on how the surface
171 /// may possibliy be used.
172 ///
173 /// @param[out] pBuff: Restrictions filled in this struct
174 /////////////////////////////////////////////////////////////////////////////////////
GetGenericRestrictions(GMM_TEXTURE_INFO * pTexInfo,__GMM_BUFFER_TYPE * pBuff)175 void GmmLib::GmmTextureCalc::GetGenericRestrictions(GMM_TEXTURE_INFO *pTexInfo, __GMM_BUFFER_TYPE *pBuff)
176 {
177 GMM_DPF_ENTER;
178 const GMM_PLATFORM_INFO *pPlatformResource = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo, pGmmLibContext);
179
180 if(pTexInfo->Flags.Gpu.NoRestriction)
181 {
182 // Impose zero restrictions. Ignore any other GPU usage flags
183 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->NoRestriction);
184 return;
185 }
186
187 if(pTexInfo->Flags.Gpu.Texture)
188 {
189 if(pTexInfo->Type == RESOURCE_BUFFER)
190 {
191 *pBuff = pPlatformResource->BufferType;
192 }
193 else if(pTexInfo->Type == RESOURCE_CUBE)
194 {
195 *pBuff = pPlatformResource->CubeSurface;
196 }
197 else if(pTexInfo->Type == RESOURCE_3D)
198 {
199 *pBuff = pPlatformResource->Texture3DSurface;
200 }
201 else
202 {
203 *pBuff = pPlatformResource->Texture2DSurface;
204 if(pTexInfo->Flags.Info.Linear)
205 {
206 *pBuff = pPlatformResource->Texture2DLinearSurface;
207 }
208 if(GmmIsReconstructableSurface(pTexInfo->Format))
209 {
210 pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
211 pBuff->MaxWidth = pPlatformResource->ReconMaxWidth;
212 }
213 }
214 }
215 if(pTexInfo->Flags.Gpu.RenderTarget ||
216 pTexInfo->Flags.Gpu.CCS ||
217 pTexInfo->Flags.Gpu.MCS)
218 {
219 // Gen7 onwards, bound by SURFACE_STATE constraints.
220 if(pTexInfo->Type == RESOURCE_BUFFER)
221 {
222 *pBuff = pPlatformResource->BufferType;
223 }
224 else if(pTexInfo->Type == RESOURCE_CUBE)
225 {
226 *pBuff = pPlatformResource->CubeSurface;
227 }
228 else if(pTexInfo->Type == RESOURCE_3D)
229 {
230 *pBuff = pPlatformResource->Texture3DSurface;
231 }
232 else
233 {
234 *pBuff = pPlatformResource->Texture2DSurface;
235 if(pTexInfo->Flags.Info.Linear)
236 {
237 *pBuff = pPlatformResource->Texture2DLinearSurface;
238 }
239 if(GmmIsReconstructableSurface(pTexInfo->Format))
240 {
241 pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
242 pBuff->MaxWidth = pPlatformResource->ReconMaxWidth;
243 }
244 }
245 }
246 if(pTexInfo->Flags.Gpu.Depth)
247 {
248 // Z
249 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Depth);
250 }
251 if(pTexInfo->Flags.Gpu.Vertex)
252 {
253 // VertexData
254 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Vertex);
255 }
256 if(pTexInfo->Flags.Gpu.Index)
257 {
258 // Index buffer
259 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Index);
260 }
261 if(pTexInfo->Flags.Gpu.FlipChain)
262 {
263 // Async Flip
264 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->ASyncFlipSurface);
265 }
266 if(pTexInfo->Flags.Gpu.MotionComp)
267 {
268 // Media buffer
269 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->MotionComp);
270 }
271 if(pTexInfo->Flags.Gpu.State ||
272 pTexInfo->Flags.Gpu.InstructionFlat ||
273 pTexInfo->Flags.Gpu.ScratchFlat)
274 {
275 // indirect state
276 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Vertex);
277 }
278 if(pTexInfo->Flags.Gpu.Query ||
279 pTexInfo->Flags.Gpu.HistoryBuffer)
280 {
281 // Query
282 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->NoRestriction);
283 }
284 if(pTexInfo->Flags.Gpu.Constant)
285 {
286 //
287 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Constant);
288 }
289 if(pTexInfo->Flags.Gpu.Stream)
290 {
291 //
292 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Stream);
293 }
294 if(pTexInfo->Flags.Gpu.InterlacedScan)
295 {
296 //
297 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->InterlacedScan);
298 }
299 if(pTexInfo->Flags.Gpu.TextApi)
300 {
301 //
302 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->TextApi);
303 }
304 if(pTexInfo->Flags.Gpu.SeparateStencil)
305 {
306 //
307 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Stencil);
308 }
309 if(pTexInfo->Flags.Gpu.HiZ)
310 {
311 //
312 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->HiZ);
313 }
314 if(pTexInfo->Flags.Gpu.Video)
315 {
316 //
317 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Video);
318 if(GmmIsReconstructableSurface(pTexInfo->Format))
319 {
320 pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
321 pBuff->MaxWidth = pPlatformResource->ReconMaxWidth;
322 }
323 }
324 if(pTexInfo->Flags.Gpu.StateDx9ConstantBuffer)
325 {
326 //
327 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->StateDx9ConstantBuffer);
328 }
329 if(pTexInfo->Flags.Gpu.Overlay)
330 {
331 // Overlay buffer use Async Flip values
332 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Overlay);
333
334 if((pTexInfo->Format == GMM_FORMAT_YUY2) && (pTexInfo->BaseWidth == 640))
335 {
336 // override the pitch alignment
337 pBuff->PitchAlignment = 64;
338 }
339 }
340 if(pTexInfo->Flags.Info.XAdapter)
341 {
342 //Add Cross Adapter resource restriction for hybrid graphics.
343 pBuff = GetBestRestrictions(pBuff, &pPlatformResource->XAdapter);
344 if(pTexInfo->Type == RESOURCE_BUFFER)
345 {
346 pBuff->MaxWidth = pPlatformResource->SurfaceMaxSize;
347 pBuff->MaxPitch = pPlatformResource->BufferType.MaxPitch;
348 pBuff->MaxHeight = 1;
349 }
350 }
351
352 //Non Aligned ExistingSysMem Special cases.
353 if((pTexInfo->Flags.Info.ExistingSysMem &&
354 (!pTexInfo->ExistingSysMem.IsGmmAllocated) &&
355 (!pTexInfo->ExistingSysMem.IsPageAligned)))
356 {
357
358 if(pTexInfo->Flags.Info.Linear ||
359 pTexInfo->Flags.Info.SVM)
360 {
361 if(pTexInfo->Type == RESOURCE_BUFFER)
362 {
363 //Use combination of BufferType, NoRestriction to support large buffer with minimal pitch alignment
364 *pBuff = pPlatformResource->BufferType;
365 pBuff->PitchAlignment = pPlatformResource->NoRestriction.PitchAlignment;
366 pBuff->LockPitchAlignment = pPlatformResource->NoRestriction.LockPitchAlignment;
367 pBuff->RenderPitchAlignment = pPlatformResource->NoRestriction.LockPitchAlignment;
368 pBuff->MinPitch = pPlatformResource->NoRestriction.MinPitch;
369 }
370
371 //[To DO] Handle other types when needed!
372 }
373 /*
374 else if(Surf.Flags.Gpu.Texture)
375 {
376 //Override as and when required
377 }
378 else if(Surf.Flags.Gpu.RenderTarget)
379 {
380 //Overide as and when Required
381 }*/
382 }
383
384 GMM_DPF_EXIT;
385 }
386
387 /////////////////////////////////////////////////////////////////////////////////////
388 /// Internal function resets the restrictions and puts the allocation in invalid state
389 ///
390 /// @param[in] pTexInfo: ptr to ::GMM_TEXTURE_INFO,
391 /// @param[in] pRestrictions: reset the restrictions to invalid state.
392 ///
393 /////////////////////////////////////////////////////////////////////////////////////
ResetRestrictions(__GMM_BUFFER_TYPE * pRestriction)394 void GmmLib::GmmTextureCalc::ResetRestrictions(__GMM_BUFFER_TYPE *pRestriction)
395 {
396 pRestriction->MinDepth = 0xffffffff;
397 }
398
399
400 /////////////////////////////////////////////////////////////////////////////////////
401 /// Internal function returns the best restrictions depending on how the surface may
402 /// possibly be used.
403 ///
404 /// @param[in] pTexInfo: ptr to ::GMM_TEXTURE_INFO,
405 /// @param[in] pRestrictions: Reference to surface alignment and size restrictions
406 ///
407 /////////////////////////////////////////////////////////////////////////////////////
GetTexRestrictions(GMM_TEXTURE_INFO * pTexInfo,__GMM_BUFFER_TYPE * pRestrictions)408 void GmmLib::GmmTextureCalc::GetTexRestrictions(GMM_TEXTURE_INFO * pTexInfo,
409 __GMM_BUFFER_TYPE *pRestrictions)
410 {
411 GMM_DPF_ENTER;
412
413 GetResRestrictions(pTexInfo, *pRestrictions);
414
415 GMM_DPF_EXIT;
416 }
417
418 /////////////////////////////////////////////////////////////////////////////////////
419 /// Returns the restrictions that a particular resource must follow on a particular
420 /// OS or hardware.
421 ///
422 /// @param[out] Restrictions: restrictions that this resource must adhere to
423 /////////////////////////////////////////////////////////////////////////////////////
GetResRestrictions(GMM_TEXTURE_INFO * pTexinfo,__GMM_BUFFER_TYPE & Restrictions)424 void GmmLib::GmmTextureCalc::GetResRestrictions(GMM_TEXTURE_INFO * pTexinfo,
425 __GMM_BUFFER_TYPE &Restrictions)
426 {
427 GMM_DPF_ENTER;
428 const GMM_PLATFORM_INFO *pPlatform = NULL;
429 GMM_RESOURCE_FLAG ZeroGpuFlags;
430
431 __GMM_ASSERTPTR(pGmmLibContext, VOIDRETURN);
432
433 pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexinfo, pGmmLibContext);
434
435 // Check that at least one usage flag is set for allocations other than
436 // Primary/Shadow/Staging.
437 memset(&ZeroGpuFlags.Gpu, 0, sizeof(ZeroGpuFlags.Gpu));
438 if((pTexinfo->Type <= RESOURCE_KMD_CHECK_START ||
439 pTexinfo->Type >= RESOURCE_KMD_CHECK_END) &&
440 !memcmp(&pTexinfo->Flags.Gpu, &ZeroGpuFlags.Gpu, sizeof(ZeroGpuFlags.Gpu)))
441 {
442 GMM_ASSERTDPF(0, "No GPU Usage specified!");
443 return;
444 }
445
446 ResetRestrictions(&Restrictions); //Set to Default
447
448 // Get worst case restrictions that match GPU flags set in resource
449 switch(pTexinfo->Type)
450 {
451 case RESOURCE_1D:
452 case RESOURCE_2D:
453 case RESOURCE_3D:
454 case RESOURCE_CUBE:
455 case RESOURCE_BUFFER:
456 case RESOURCE_SCRATCH:
457 case RESOURCE_GDI:
458 GetGenericRestrictions(pTexinfo, &Restrictions);
459 break;
460
461 case RESOURCE_HW_CONTEXT:
462 case RESOURCE_TAG_PAGE:
463 if(pTexinfo->Flags.Info.TiledW ||
464 pTexinfo->Flags.Info.TiledX ||
465 GMM_IS_4KB_TILE(pTexinfo->Flags))
466 {
467 GMM_ASSERTDPF(0, "Tiled Pref specified for RESOURCE_LINEAR!");
468 return;
469 }
470 GetLinearRestrictions(pTexinfo, &Restrictions);
471 break;
472
473 case RESOURCE_PRIMARY:
474 case RESOURCE_SHADOW:
475 case RESOURCE_STAGING:
476 GetPrimaryRestrictions(pTexinfo, &Restrictions);
477 break;
478
479 case RESOURCE_NNDI:
480 Restrictions = pPlatform->Nndi;
481 break;
482
483 case RESOURCE_HARDWARE_MBM:
484 case RESOURCE_IFFS_MAPTOGTT:
485 //Hardware MBM resource request can come for overlay allocation or normal
486 //displayable allocation. So get the restrictions accordingly
487 if(pTexinfo->Flags.Gpu.Overlay)
488 {
489 Restrictions = pPlatform->Overlay;
490 }
491 else
492 {
493 Restrictions = pPlatform->HardwareMBM;
494 }
495 break;
496
497 case RESOURCE_CURSOR:
498 case RESOURCE_PWR_CONTEXT:
499 case RESOURCE_KMD_BUFFER:
500 case RESOURCE_NULL_CONTEXT_INDIRECT_STATE:
501 case RESOURCE_PERF_DATA_QUEUE:
502 case RESOURCE_GLOBAL_BUFFER:
503 case RESOURCE_FBC:
504 case RESOURCE_GFX_CLIENT_BUFFER:
505 Restrictions = pPlatform->Cursor;
506 break;
507
508 case RESOURCE_OVERLAY_DMA:
509 Restrictions = pPlatform->NoRestriction;
510 break;
511
512 case RESOURCE_GTT_TRANSFER_REGION:
513 GetGenericRestrictions(pTexinfo, &Restrictions);
514 break;
515
516 case RESOURCE_OVERLAY_INTERMEDIATE_SURFACE:
517 Restrictions = pPlatform->Overlay;
518 break;
519
520 default:
521 GetGenericRestrictions(pTexinfo, &Restrictions);
522 GMM_ASSERTDPF(0, "Unkown Resource type");
523 }
524 // Apply any specific WA
525
526 if(((pTexinfo->Flags.Wa.ILKNeedAvcMprRowStore32KAlign)) ||
527 ((pTexinfo->Flags.Wa.ILKNeedAvcDmvBuffer32KAlign)))
528 {
529 Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(32));
530 }
531
532 if(pGmmLibContext->GetWaTable().WaAlignContextImage && (pTexinfo->Type == RESOURCE_HW_CONTEXT))
533 {
534 Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
535 }
536
537 if(pTexinfo->Flags.Gpu.S3d &&
538 pTexinfo->Flags.Info.Linear &&
539 !pGmmLibContext->GetSkuTable().FtrDisplayEngineS3d)
540 {
541 Restrictions.Alignment = PAGE_SIZE;
542 Restrictions.PitchAlignment = PAGE_SIZE;
543 }
544
545 if(pTexinfo->Flags.Gpu.TiledResource)
546 {
547 // Need at least 64KB alignment to track tile mappings (h/w or s/w tracking).
548 Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
549
550 // Buffer tiled resources are trivially divided into 64KB tiles => Pitch must divide into 64KB tiles
551 if(pTexinfo->Type == RESOURCE_BUFFER)
552 {
553 Restrictions.PitchAlignment = GFX_ALIGN(Restrictions.PitchAlignment, GMM_KBYTE(64));
554 }
555
556 if(GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE)
557 {
558 pGmmLibContext->GetPlatformInfo().SurfaceMaxSize = GMM_TBYTE(1);
559 }
560 }
561
562 // SKL TileY Display needs 1MB alignment.
563 if(((pTexinfo->Type == RESOURCE_PRIMARY) ||
564 pTexinfo->Flags.Gpu.FlipChain) &&
565 (GMM_IS_4KB_TILE(pTexinfo->Flags) ||
566 pTexinfo->Flags.Info.TiledYf))
567 {
568 Restrictions.Alignment = GMM_MBYTE(1);
569 }
570
571 if(pTexinfo->Flags.Info.RenderCompressed ||
572 pTexinfo->Flags.Info.MediaCompressed)
573 {
574 if(pGmmLibContext->GetSkuTable().FtrFlatPhysCCS)
575 {
576 Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
577 }
578 else // only for platforms having auxtable
579 {
580 Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, (!WA16K(pGmmLibContext) ? GMM_KBYTE(64) : GMM_KBYTE(16)));
581 }
582 }
583
584 GMM_DPF_EXIT;
585 }
586
587 /////////////////////////////////////////////////////////////////////////////////////
588 /// Calculates surface size based on Non Aligned ExistingSysMem restrictions.
589 ///
590 /// @return ::GMM_STATUS
591 /////////////////////////////////////////////////////////////////////////////////////
ApplyExistingSysMemRestrictions()592 GMM_STATUS GmmLib::GmmResourceInfoCommon::ApplyExistingSysMemRestrictions()
593 {
594 const GMM_PLATFORM_INFO *pPlatform;
595
596 // Handle Minimal Restriction ExistingSysMem Requirements...
597 GMM_GFX_SIZE_T AdditionalPaddingBytes = 0;
598 GMM_GFX_SIZE_T AdditionalPaddingRows = 0;
599 GMM_GFX_SIZE_T BaseAlignment = 1; // 1 = Byte Alignment
600 GMM_GFX_SIZE_T EndAlignment = 1; // 1 = Byte Alignment
601 GMM_GFX_SIZE_T SizePadding = 1; // 1 = Byte Padding
602 uint32_t CompressHeight, CompressWidth, CompressDepth;
603 GMM_GFX_SIZE_T Width, Height;
604 GMM_TEXTURE_INFO *pTexInfo = &Surf;
605 GMM_TEXTURE_CALC *pTextureCalc;
606
607 GMM_DPF_ENTER;
608
609 pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo, GetGmmLibContext());
610 pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(pTexInfo, GetGmmLibContext());
611
612 Height = pTexInfo->BaseHeight;
613 Width = pTexInfo->BaseWidth;
614
615 #define UPDATE_BASE_ALIGNMENT(a) \
616 { \
617 __GMM_ASSERT((GFX_MAX(BaseAlignment, a) % GFX_MIN(BaseAlignment, a)) == 0); /* Revisit if ever have to support complex alignments. */ \
618 BaseAlignment = GFX_MAX(BaseAlignment, a); \
619 }
620
621 #define UPDATE_PADDING(p) \
622 { \
623 SizePadding = GFX_MAX(SizePadding, p); \
624 }
625
626 #define UPDATE_ADDITIONAL_ROWS(r) \
627 { \
628 AdditionalPaddingRows = GFX_MAX(AdditionalPaddingRows, r); \
629 }
630
631 #define UPDATE_ADDITIONAL_BYTES(b) \
632 { \
633 AdditionalPaddingBytes = GFX_MAX(AdditionalPaddingBytes, b); \
634 }
635
636 #define UPDATE_END_ALIGNMENT(a) \
637 { \
638 __GMM_ASSERT((GFX_MAX(EndAlignment, a) % GFX_MIN(EndAlignment, a)) == 0); /* Revisit if ever have to support complex alignments. */ \
639 EndAlignment = GFX_MAX(EndAlignment, a); \
640 }
641
642
643 if(!pTexInfo->Pitch)
644 {
645 __GMM_ASSERT(pTexInfo->Type == RESOURCE_1D); // Clients can leave pitch zero for 1D, and we'll fill-in...
646 pTexInfo->Pitch = Width * (pTexInfo->BitsPerPixel >> 3);
647 }
648
649 __GMM_ASSERT( // Currently limiting our support...
650 pTexInfo->Flags.Gpu.NoRestriction ||
651 pTexInfo->Flags.Gpu.Index ||
652 pTexInfo->Flags.Gpu.RenderTarget ||
653 pTexInfo->Flags.Gpu.Texture ||
654 pTexInfo->Flags.Gpu.Vertex);
655
656 __GMM_ASSERT( // Trivial, Linear Surface...
657 ((pTexInfo->Type == RESOURCE_BUFFER) || (pTexInfo->Type == RESOURCE_1D) || (pTexInfo->Type == RESOURCE_2D)) &&
658 (pTexInfo->MaxLod == 0) &&
659 !GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]) &&
660 !GmmIsPlanar(pTexInfo->Format) &&
661 ((pTexInfo->ArraySize <= 1) || (pTexInfo->Type == RESOURCE_BUFFER)));
662
663 __GMM_ASSERT( // Valid Surface...
664 (Width > 0) &&
665 !((pTexInfo->Type == RESOURCE_BUFFER) && GmmIsYUVPacked(pTexInfo->Format)));
666
667 // Convert to compression blocks, if applicable...
668 if(GmmIsCompressed(GetGmmLibContext(), pTexInfo->Format))
669 {
670 pTextureCalc->GetCompressionBlockDimensions(pTexInfo->Format, &CompressWidth, &CompressHeight, &CompressDepth);
671
672 Width = GFX_CEIL_DIV(Width, CompressWidth);
673 Height = GFX_CEIL_DIV(Height, CompressHeight);
674 }
675
676 __GMM_ASSERT( // Valid Surface Follow-Up...
677 (pTexInfo->Pitch >= (Width * (pTexInfo->BitsPerPixel >> 3))));
678
679 if(!pTexInfo->Flags.Gpu.NoRestriction && !pTexInfo->Flags.Info.SVM && !pTexInfo->Flags.Info.Linear)
680 {
681 if(pTexInfo->Flags.Gpu.Index) /////////////////////////////////////////////////////////
682 {
683 __GMM_ASSERT(!(
684 pTexInfo->Flags.Gpu.RenderTarget ||
685 pTexInfo->Flags.Gpu.Texture ||
686 pTexInfo->Flags.Gpu.Vertex)); // Can explore if needed what combo's make sense--and how req's should combine.
687
688 // 3DSTATE_INDEX_BUFFER...
689 UPDATE_BASE_ALIGNMENT(4); // 32-bit worst-case, since GMM doesn't receive element-size from clients.
690 if(GetGmmLibContext()->GetWaTable().WaAlignIndexBuffer)
691 {
692 UPDATE_END_ALIGNMENT(64);
693 }
694 else
695 {
696 UPDATE_END_ALIGNMENT(1);
697 }
698 }
699
700 if(pTexInfo->Flags.Gpu.Vertex) ////////////////////////////////////////////////////////
701 {
702 __GMM_ASSERT(!(
703 pTexInfo->Flags.Gpu.Index ||
704 pTexInfo->Flags.Gpu.RenderTarget ||
705 pTexInfo->Flags.Gpu.Texture)); // Can explore if needed what combo's make sense--and how req's should combine.
706
707 // VERTEX_BUFFER_STATE...
708 UPDATE_BASE_ALIGNMENT(1); // VB's have member alignment requirements--but it's up to UMD to enforce.
709 UPDATE_PADDING(1);
710 }
711
712 if(pTexInfo->Flags.Gpu.RenderTarget) //////////////////////////////////////////////////
713 {
714 uint32_t ElementSize;
715
716 // SURFACE_STATE...
717 ElementSize = (pTexInfo->BitsPerPixel >> 3) * (GmmIsYUVPacked(pTexInfo->Format) ? 2 : 1);
718 __GMM_ASSERT((pTexInfo->Pitch % ElementSize) == 0);
719 UPDATE_BASE_ALIGNMENT(ElementSize);
720 UPDATE_PADDING(pTexInfo->Pitch * 2); // "Surface Padding Requirements --> Render Target and Media Surfaces"
721 }
722
723 if(pTexInfo->Flags.Gpu.Texture) // (i.e. Sampler Surfaces) ///////////////////////////
724 {
725 UPDATE_BASE_ALIGNMENT(1); // Sampler supports byte alignment (with performance hit if misaligned).
726
727 if(GetGmmLibContext()->GetWaTable().WaNoMinimizedTrivialSurfacePadding)
728 {
729 if(pTexInfo->Type == RESOURCE_BUFFER)
730 {
731 if(GetGmmLibContext()->GetWaTable().WaNoBufferSamplerPadding)
732 {
733 // Client agreeing to take responsibility for flushing L3 after sampling/etc.
734 }
735 else
736 {
737 // GMM currently receives GENERIC_8BIT for
738 // RESOURCE_BUFFER creations, so we have to assume the
739 // worst-case sample size of 128-bit (unless we alter
740 // our interface meaning):
741 uint32_t ElementSize = 16;
742
743 // "Surface Padding Requirements --> Sampling Engine Surfaces"
744 UPDATE_PADDING(ElementSize * ((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) == IGFX_GEN8_CORE) ? 512 : 256));
745 UPDATE_ADDITIONAL_BYTES(16);
746 }
747 }
748 else // RESOURCE_1D/2D...
749 {
750 /* Sampler needs Alignment Unit padding--
751 but sampler arch confirms that's overly conservative
752 padding--and for trivial (linear, single-subresource)
753 2D's, even-row (quad-row on BDW.A0) plus additional
754 64B padding is sufficient. (E.g. pitch overfetch will
755 be caught by subsequent rows or the additional 64B. */
756
757 __GMM_ASSERT((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) <= IGFX_GEN8_CORE));
758
759 if(GmmIsCompressed(GetGmmLibContext(), pTexInfo->Format))
760 {
761 // "For compressed textures...padding at the bottom of the surface is to an even compressed row."
762 UPDATE_PADDING(pTexInfo->Pitch * 2); // (Sampler arch confirmed that even-row is sufficient on BDW despite BDW's 4x4 sampling, since this req is from L2 instead of L1.)
763 }
764 else
765 {
766 UPDATE_PADDING(pTexInfo->Pitch * ((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) == IGFX_GEN8_CORE) ? 4 : 2)); // Sampler Fetch Rows: BDW ? 4 : 2
767 }
768
769 // "For packed YUV, 96 bpt, 48 bpt, and 24 bpt surface formats, additional padding is required."
770 if(GmmIsYUVPacked(pTexInfo->Format) || (pTexInfo->BitsPerPixel == 96) || (pTexInfo->BitsPerPixel == 48) || (pTexInfo->BitsPerPixel == 24))
771 {
772 UPDATE_ADDITIONAL_BYTES(16);
773 UPDATE_ADDITIONAL_ROWS(1);
774 }
775
776 /* "For linear surfaces, additional padding of 64
777 bytes is required at the bottom of the surface."
778 (Sampler arch confirmed the 64 bytes can overlap with
779 the other "additional 16 bytes" mentions in that section.) */
780 UPDATE_ADDITIONAL_BYTES(64);
781 }
782 }
783 else
784 {
785 /* For SURFTYPE_BUFFER, SURFTYPE_1D, and
786 SURFTYPE_2D non-array, non-MSAA, non-mip-mapped surfaces in
787 linear memory, the only padding requirement is to the next
788 aligned 64-byte boundary beyond the end of the surface. */
789 UPDATE_END_ALIGNMENT(64);
790 }
791 }
792 }
793 else // Gpu.NoRestriction...
794 {
795 // Clients specify NoRestriction at their own risk--e.g. it can be
796 // appropriate when using IA-Coherent L3 combined with L3 being in
797 // unified/"Rest" mode (where there won't be write-->read-only
798 // collisions on unintentionally shared cachelines).
799 }
800
801 { //Finally calculate surf size
802 GMM_GFX_SIZE_T OriginalEnd, RequiredSize;
803
804 ExistingSysMem.pVirtAddress =
805 (ExistingSysMem.pExistingSysMem & (PAGE_SIZE - 1)) ?
806 ((uint64_t)GFX_ALIGN(ExistingSysMem.pExistingSysMem,
807 BaseAlignment)) :
808 ExistingSysMem.pExistingSysMem;
809
810 ExistingSysMem.pGfxAlignedVirtAddress =
811 (uint64_t)GFX_ALIGN(
812 (uint64_t)ExistingSysMem.pVirtAddress, PAGE_SIZE);
813
814 __GMM_ASSERT((ExistingSysMem.pVirtAddress % BaseAlignment) == 0);
815
816 RequiredSize = pTexInfo->Pitch * Height;
817
818 RequiredSize =
819 GFX_ALIGN(RequiredSize, SizePadding) +
820 (AdditionalPaddingRows * pTexInfo->Pitch) +
821 AdditionalPaddingBytes;
822
823 OriginalEnd = ExistingSysMem.pVirtAddress + RequiredSize;
824 RequiredSize += GFX_ALIGN(OriginalEnd, EndAlignment) - OriginalEnd;
825
826 //Ensure sufficient ExistingSysMem available.
827 if(ExistingSysMem.Size < RequiredSize)
828 {
829 return GMM_ERROR;
830 }
831
832 Surf.Size = RequiredSize;
833 }
834
835 GMM_DPF_EXIT;
836
837 return GMM_SUCCESS;
838 }
839