/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLGpu.h"
#include "GrGLBuffer.h"
#include "GrGLGLSL.h"
#include "GrGLGpuCommandBuffer.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTextureRenderTarget.h"
#include "GrFixedClip.h"
#include "GrGpuResourcePriv.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrPLSGeometryProcessor.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"
#include "GrTypes.h"
#include "builders/GrGLShaderStringBuilder.h"
#include "glsl/GrGLSL.h"
#include "glsl/GrGLSLCaps.h"
#include "glsl/GrGLSLPLSPathRendering.h"
#include "instanced/GLInstancedRendering.h"
#include "SkMipMap.h"
#include "SkPixmap.h"
#include "SkStrokeRec.h"
#include "SkTemplates.h"
#include "SkTypes.h"

#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

#define SKIP_CACHE_CHECK    true

#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
#endif

///////////////////////////////////////////////////////////////////////////////

using gr_instanced::InstancedRendering;
using gr_instanced::GLInstancedRendering;

static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY
};
GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);

static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};

bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
    static const bool gCoeffReferencesBlendConst[] = {
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        true,
        true,
        true,
        true,

        // extended blend coeffs
        false,
        false,
        false,
        false,
    };
    return gCoeffReferencesBlendConst[coeff];
    GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));

    GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
    GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
    GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
    GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
    GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
    GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
    GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
    GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
    GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
    GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
    GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
    GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);

    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);

    // assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
    GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
}
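// Note: the tables above are indexed directly by the Skia enum values, e.g.
// gXfermodeCoeff2Blend[kSA_GrBlendCoeff] yields GR_GL_SRC_ALPHA; the
// GR_STATIC_ASSERTs pin that ordering at compile time so the tables and the
// enums cannot silently drift apart.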

///////////////////////////////////////////////////////////////////////////////

GrGpu* GrGLGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    SkAutoTUnref<const GrGLInterface> glInterface(
        reinterpret_cast<const GrGLInterface*>(backendContext));
    if (!glInterface) {
        glInterface.reset(GrGLDefaultInterface());
    } else {
        glInterface->ref();
    }
    if (!glInterface) {
        return nullptr;
    }
    GrGLContext* glContext = GrGLContext::Create(glInterface, options);
    if (glContext) {
        return new GrGLGpu(glContext, context);
    }
    return nullptr;
}

static bool gPrintStartupSpew;

GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx)
    , fProgramCache(new ProgramCache(this))
    , fHWProgramID(0)
    , fTempSrcFBOID(0)
    , fTempDstFBOID(0)
    , fStencilClearFBOID(0)
    , fHWMaxUsedBufferTextureUnit(-1)
    , fHWPLSEnabled(false)
    , fPLSHasBeenUsed(false)
    , fHWMinSampleShading(0.0) {
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        fMipmapPrograms[i].fProgram = 0;
    }
    fWireRectProgram.fProgram = 0;
    fPLSSetupProgram.fProgram = 0;

    SkASSERT(ctx);
    fCaps.reset(SkRef(ctx->caps()));

    fHWBoundTextureUniqueIDs.reset(this->glCaps().glslCaps()->maxCombinedSamplers());

    fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
    fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
    fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
    fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
    if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
        fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
            GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
        fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
            GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
    } else {
        fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
        fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
    }
    GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));

    if (this->caps()->shaderCaps()->texelBufferSupport()) {
        fHWBufferTextures.reset(this->glCaps().glslCaps()->maxCombinedSamplers());
    }

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }

    GrGLClearErr(this->glInterface());
    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        SkDebugf("------------------------- create GrGLGpu %p --------------\n",
                 this);
        SkDebugf("------ VENDOR %s\n", vendor);
        SkDebugf("------ RENDERER %s\n", renderer);
        SkDebugf("------ VERSION %s\n", version);
        SkDebugf("------ EXTENSIONS\n");
        this->glContext().extensions().print();
        SkDebugf("\n");
        SkDebugf("%s", this->glCaps().dump().c_str());
    }
}

GrGLGpu::~GrGLGpu() {
    // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
    // to release the resources held by the objects themselves.
    fPathRendering.reset();
    fCopyProgramArrayBuffer.reset();
    fMipmapProgramArrayBuffer.reset();
    fWireRectArrayBuffer.reset();
    fPLSSetupProgram.fArrayBuffer.reset();

    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        GL_CALL(UseProgram(0));
    }

    if (0 != fTempSrcFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
    }
    if (0 != fTempDstFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
    }
    if (0 != fStencilClearFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        if (0 != fCopyPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
        }
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        if (0 != fMipmapPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
        }
    }

    if (0 != fWireRectProgram.fProgram) {
        GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
    }

    if (0 != fPLSSetupProgram.fProgram) {
        GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
    }

    delete fProgramCache;
}

bool GrGLGpu::createPLSSetupProgram() {
    if (!fPLSSetupProgram.fArrayBuffer) {
        static const GrGLfloat vdata[] = {
            0, 0,
            0, 1,
            1, 0,
            1, 1
        };
        fPLSSetupProgram.fArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata),
                                                               kVertex_GrBufferType,
                                                               kStatic_GrAccessPattern, vdata));
        if (!fPLSSetupProgram.fArrayBuffer) {
            return false;
        }
    }

    SkASSERT(!fPLSSetupProgram.fProgram);
    GL_CALL_RET(fPLSSetupProgram.fProgram, CreateProgram());
    if (!fPLSSetupProgram.fProgram) {
        return false;
    }

    const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
    const char* version = glslCaps->versionDeclString();

    GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
    GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
                                   GrShaderVar::kUniform_TypeModifier);
    GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
    GrGLSLShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
                             GrShaderVar::kUniform_TypeModifier);
    GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier);

    SkString vshaderTxt(version);
    if (glslCaps->noperspectiveInterpolationSupport()) {
        if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
            vshaderTxt.appendf("#extension %s : require\n", extension);
        }
        vTexCoord.addModifier("noperspective");
    }
    aVertex.appendDecl(glslCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uPosXform.appendDecl(glslCaps, &vshaderTxt);
    vshaderTxt.append(";");
    vTexCoord.appendDecl(glslCaps, &vshaderTxt);
    vshaderTxt.append(";");

    vshaderTxt.append(
        "// PLS Setup Program VS\n"
        "void main() {"
        "    gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
        "    gl_Position.zw = vec2(0, 1);"
        "}"
    );

    SkString fshaderTxt(version);
    if (glslCaps->noperspectiveInterpolationSupport()) {
        if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
            fshaderTxt.appendf("#extension %s : require\n", extension);
        }
    }
fshaderTxt.append("#extension "); 375 fshaderTxt.append(glslCaps->fbFetchExtensionString()); 376 fshaderTxt.append(" : require\n"); 377 fshaderTxt.append("#extension GL_EXT_shader_pixel_local_storage : require\n"); 378 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps, &fshaderTxt); 379 vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier); 380 vTexCoord.appendDecl(glslCaps, &fshaderTxt); 381 fshaderTxt.append(";"); 382 uTexture.appendDecl(glslCaps, &fshaderTxt); 383 fshaderTxt.append(";"); 384 385 fshaderTxt.appendf( 386 "// PLS Setup Program FS\n" 387 GR_GL_PLS_PATH_DATA_DECL 388 "void main() {\n" 389 " " GR_GL_PLS_DSTCOLOR_NAME " = gl_LastFragColorARM;\n" 390 " pls.windings = ivec4(0, 0, 0, 0);\n" 391 "}" 392 ); 393 394 const char* str; 395 GrGLint length; 396 397 str = vshaderTxt.c_str(); 398 length = SkToInt(vshaderTxt.size()); 399 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram, 400 GR_GL_VERTEX_SHADER, &str, &length, 1, &fStats); 401 402 str = fshaderTxt.c_str(); 403 length = SkToInt(fshaderTxt.size()); 404 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram, 405 GR_GL_FRAGMENT_SHADER, &str, &length, 1, &fStats); 406 407 GL_CALL(LinkProgram(fPLSSetupProgram.fProgram)); 408 409 GL_CALL_RET(fPLSSetupProgram.fPosXformUniform, GetUniformLocation(fPLSSetupProgram.fProgram, 410 "u_posXform")); 411 412 GL_CALL(BindAttribLocation(fPLSSetupProgram.fProgram, 0, "a_vertex")); 413 414 GL_CALL(DeleteShader(vshader)); 415 GL_CALL(DeleteShader(fshader)); 416 417 return true; 418 } 419 420 void GrGLGpu::disconnect(DisconnectType type) { 421 INHERITED::disconnect(type); 422 if (DisconnectType::kCleanup == type) { 423 if (fHWProgramID) { 424 GL_CALL(UseProgram(0)); 425 } 426 if (fTempSrcFBOID) { 427 GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID)); 428 } 429 if (fTempDstFBOID) { 430 GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID)); 431 } 432 if (fStencilClearFBOID) { 433 GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID)); 434 } 435 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { 436 if (fCopyPrograms[i].fProgram) { 437 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); 438 } 439 } 440 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { 441 if (fMipmapPrograms[i].fProgram) { 442 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); 443 } 444 } 445 if (fWireRectProgram.fProgram) { 446 GL_CALL(DeleteProgram(fWireRectProgram.fProgram)); 447 } 448 if (fPLSSetupProgram.fProgram) { 449 GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram)); 450 } 451 } else { 452 if (fProgramCache) { 453 fProgramCache->abandon(); 454 } 455 } 456 457 delete fProgramCache; 458 fProgramCache = nullptr; 459 460 fHWProgramID = 0; 461 fTempSrcFBOID = 0; 462 fTempDstFBOID = 0; 463 fStencilClearFBOID = 0; 464 fCopyProgramArrayBuffer.reset(); 465 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { 466 fCopyPrograms[i].fProgram = 0; 467 } 468 fMipmapProgramArrayBuffer.reset(); 469 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { 470 fMipmapPrograms[i].fProgram = 0; 471 } 472 fWireRectProgram.fProgram = 0; 473 fWireRectArrayBuffer.reset(); 474 fPLSSetupProgram.fProgram = 0; 475 fPLSSetupProgram.fArrayBuffer.reset(); 476 if (this->glCaps().shaderCaps()->pathRenderingSupport()) { 477 this->glPathRendering()->disconnect(type); 478 } 479 } 480 481 /////////////////////////////////////////////////////////////////////////////// 482 483 void GrGLGpu::onResetContext(uint32_t resetBits) { 484 // we 
    // we don't use the zb at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWBufferState[kTexel_GrBufferType].invalidate();
        fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
        fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
        fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();

        fHWDrawFace = GrDrawFace::kInvalid;

        if (kGL_GrGLStandard == this->glStandard()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }

        if (kGLES_GrGLStandard == this->glStandard() &&
            this->hasExtension("GL_ARM_shader_framebuffer_fetch")) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;

        if (this->caps()->usesMixedSamples()) {
            if (0 != this->caps()->maxRasterSamples()) {
                fHWRasterMultisampleEnabled = kUnknown_TriState;
                fHWNumRasterSamples = 0;
            }

            // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
            // modulation. This state has no effect when not rendering to a mixed sampled target.
            GL_CALL(CoverageModulation(GR_GL_RGBA));
        }
    }

    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
            fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID;
        }
        for (int b = 0; b < fHWBufferTextures.count(); ++b) {
            SkASSERT(this->caps()->shaderCaps()->texelBufferSupport());
            fHWBufferTextures[b].fKnownBound = false;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWWindowRectsState.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWVertexArrayState.invalidate();
        fHWBufferState[kVertex_GrBufferType].invalidate();
        fHWBufferState[kIndex_GrBufferType].invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        fHWSRGBFramebuffer = kUnknown_TriState;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->shaderCaps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
    }
}

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
    // By default, GrRenderTargets are GL's normal orientation so that they
    // can be drawn to by the outside world without the client having
    // to render upside down.
    if (kDefault_GrSurfaceOrigin == origin) {
        return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
                                         GrWrapOwnership ownership) {
#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    if (!desc.fTextureHandle) {
        return nullptr;
    }
#else
    const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
    if (!info || !info->fID) {
        return nullptr;
    }
#endif

    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);

    GrGLTexture::IDDesc idDesc;
    GrSurfaceDesc surfDesc;

#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    idDesc.fInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
    // When we create the texture, we only
    // create GL_TEXTURE_2D at the moment.
    // External clients can do something different.
    idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
#else
    idDesc.fInfo = *info;
#endif

    if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
        if (renderTarget) {
            // This combination is not supported.
            return nullptr;
        }
        if (!this->glCaps().glslCaps()->externalTextureSupport()) {
            return nullptr;
        }
    } else if (GR_GL_TEXTURE_RECTANGLE == idDesc.fInfo.fTarget) {
        if (!this->glCaps().rectangleTextureSupport()) {
            return nullptr;
        }
    } else if (GR_GL_TEXTURE_2D != idDesc.fInfo.fTarget) {
        return nullptr;
    }

    // Sample count is interpreted to mean the number of samples that Gr code should allocate
    // for a render buffer that resolves to the texture. We don't support MSAA textures.
    if (desc.fSampleCnt && !renderTarget) {
        return nullptr;
    }

    if (kAdopt_GrWrapOwnership == ownership) {
        idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    } else {
        idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    }

    surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        surfDesc.fOrigin = desc.fOrigin;
    }

    GrGLTexture* texture = nullptr;
    if (renderTarget) {
        GrGLRenderTarget::IDDesc rtIDDesc;
        if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
            return nullptr;
        }
        texture = GrGLTextureRenderTarget::CreateWrapped(this, surfDesc, idDesc, rtIDDesc);
    } else {
        texture = GrGLTexture::CreateWrapped(this, surfDesc, idDesc);
    }
    if (nullptr == texture) {
        return nullptr;
    }

    return texture;
}

GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
                                                   GrWrapOwnership ownership) {
    GrGLRenderTarget::IDDesc idDesc;
    idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
    idDesc.fMSColorRenderbufferID = 0;
    idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    if (kAdopt_GrWrapOwnership == ownership) {
        idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
    } else {
        idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
    }
    idDesc.fIsMixedSampled = false;

    GrSurfaceDesc desc;
    desc.fConfig = wrapDesc.fConfig;
    desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag;
    desc.fWidth = wrapDesc.fWidth;
    desc.fHeight = wrapDesc.fHeight;
    desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
    desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);

    return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencilBits);
}
GrRenderTarget* GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc) {
#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    if (!desc.fTextureHandle) {
        return nullptr;
    }
#else
    const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
    if (!info || !info->fID) {
        return nullptr;
    }
#endif

    GrGLTextureInfo texInfo;
    GrSurfaceDesc surfDesc;

#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    texInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
    // We only support GL_TEXTURE_2D at the moment.
    texInfo.fTarget = GR_GL_TEXTURE_2D;
#else
    texInfo = *info;
#endif

    if (GR_GL_TEXTURE_RECTANGLE != texInfo.fTarget &&
        GR_GL_TEXTURE_2D != texInfo.fTarget) {
        // Only texture rectangle and texture 2d are supported. We do not check whether texture
        // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
        // we assume the necessary support exists.
        return nullptr;
    }

    surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        surfDesc.fOrigin = desc.fOrigin;
    }

    GrGLRenderTarget::IDDesc rtIDDesc;
    if (!this->createRenderTargetObjects(surfDesc, texInfo, &rtIDDesc)) {
        return nullptr;
    }
    return GrGLRenderTarget::CreateWrapped(this, surfDesc, rtIDDesc, 0);
}

////////////////////////////////////////////////////////////////////////////////

bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig,
                                   DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // This subclass only allows writes to textures. If the dst is not a texture we have to draw
    // into it. We could use glDrawPixels on GLs that have it, but we don't today.
    if (!dstSurface->asTexture()) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    } else {
        GrGLTexture* texture = static_cast<GrGLTexture*>(dstSurface->asTexture());
        if (GR_GL_TEXTURE_EXTERNAL == texture->target()) {
            // We don't currently support writing pixels to EXTERNAL textures.
            return false;
        }
    }

    if (GrPixelConfigIsSRGB(dstSurface->config()) != GrPixelConfigIsSRGB(srcConfig)) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    }

    // Start off assuming no swizzling
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
    tempDrawInfo->fWriteConfig = srcConfig;

    // These settings we will always want if a temp draw is performed. Initially set the config
    // to srcConfig, though that may be modified if we decide to do a R/B swap.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.

    bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();

    if (configsAreRBSwaps) {
        if (!this->caps()->isConfigTexturable(srcConfig)) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        } else if (this->glCaps().rgba8888PixelsOpsAreSlow() &&
                   kRGBA_8888_GrPixelConfig == srcConfig) {
            ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        } else if (kGLES_GrGLStandard == this->glStandard() &&
                   this->glCaps().bgraIsInternalFormat()) {
            // The internal format and external formats must match texture uploads so we can't
            // swizzle while uploading when BGRA is a distinct internal format.
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        }
    }

    if (!this->glCaps().unpackFlipYSupport() &&
        kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) {
        ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
    }

    return true;
}

static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surface,
                                           GrPixelConfig config) {
    if (!glTex) {
        return false;
    }

    // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
    if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
        return false;
    }

    return true;
}

bool GrGLGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const SkTArray<GrMipLevel>& texels) {
    GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex, surface, config)) {
        return false;
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(glTex->target(), glTex->textureID()));

    bool success = false;
    if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
        // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
        SkASSERT(config == glTex->desc().fConfig);
        success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels,
                                                kWrite_UploadType, left, top, width, height);
    } else {
        success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
                                      left, top, width, height, config, texels);
    }

    return success;
}

bool GrGLGpu::onTransferPixels(GrSurface* surface,
                               int left, int top, int width, int height,
                               GrPixelConfig config, GrBuffer* transferBuffer,
                               size_t offset, size_t rowBytes) {
    GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex, surface, config)) {
        return false;
    }

    // For the moment, can't transfer compressed data
    if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
        return false;
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(glTex->target(), glTex->textureID()));

    SkASSERT(!transferBuffer->isMapped());
    SkASSERT(!transferBuffer->isCPUBacked());
    const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
    this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);

    bool success = false;
    GrMipLevel mipLevel;
    mipLevel.fPixels = transferBuffer;
    mipLevel.fRowBytes = rowBytes;
    SkSTArray<1, GrMipLevel> texels;
    texels.push_back(mipLevel);
    success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
                                  left, top, width, height, config, texels);
    return success;
}

// For GL_[UN]PACK_ALIGNMENT.
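// The value returned below is what glPixelStorei(GR_GL_UNPACK_ALIGNMENT) expects
// for tightly packed rows: it appears to track the byte size of the smallest
// component group of each config (1 for 8-bit alpha, 2 for packed 16-bit and
// half-float formats, 4 for 32-bit formats), not the full bytes-per-pixel.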
static inline GrGLint config_alignment(GrPixelConfig config) {
    SkASSERT(!GrPixelConfigIsCompressed(config));
    switch (config) {
        case kAlpha_8_GrPixelConfig:
            return 1;
        case kRGB_565_GrPixelConfig:
        case kRGBA_4444_GrPixelConfig:
        case kAlpha_half_GrPixelConfig:
        case kRGBA_half_GrPixelConfig:
            return 2;
        case kRGBA_8888_GrPixelConfig:
        case kBGRA_8888_GrPixelConfig:
        case kSRGBA_8888_GrPixelConfig:
        case kSBGRA_8888_GrPixelConfig:
        case kRGBA_float_GrPixelConfig:
            return 4;
        default:
            return 0;
    }
}

static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
                                         const GrGLInterface* interface) {
    if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
        return GR_GL_GET_ERROR(interface);
    } else {
        return CHECK_ALLOC_ERROR(interface);
    }
}

/**
 * Creates storage space for the texture and fills it with texels.
 *
 * @param desc           The surface descriptor for the texture being created.
 * @param interface      The GL interface in use.
 * @param caps           The capabilities of the GL device.
 * @param internalFormat The data format used for the internal storage of the texture. May be sized.
 * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized.
 * @param externalFormat The data format used for the external storage of the texture.
 * @param externalType   The type of the data used for the external storage of the texture.
 * @param texels         The texel data of the texture being created.
 * @param baseWidth      The width of the texture's base mipmap level.
 * @param baseHeight     The height of the texture's base mipmap level.
 * @return true if allocating and populating the texture completed without error.
 */
static bool allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
                                                       const GrGLInterface& interface,
                                                       const GrGLCaps& caps,
                                                       GrGLenum target,
                                                       GrGLenum internalFormat,
                                                       GrGLenum internalFormatForTexStorage,
                                                       GrGLenum externalFormat,
                                                       GrGLenum externalType,
                                                       const SkTArray<GrMipLevel>& texels,
                                                       int baseWidth, int baseHeight) {
    CLEAR_ERROR_BEFORE_ALLOC(&interface);

    bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
    // We can only use TexStorage if we know we will not later change the storage requirements.
    // This means if we may later want to add mipmaps, we cannot use TexStorage.
    // Right now, we cannot know if we will later add mipmaps or not.
    // The only time we can use TexStorage is when we already have the
    // mipmaps.
    useTexStorage &= texels.count() > 1;

    if (useTexStorage) {
        // We never resize or change formats of textures.
        GL_ALLOC_CALL(&interface,
                      TexStorage2D(target,
                                   texels.count(),
                                   internalFormatForTexStorage,
                                   desc.fWidth, desc.fHeight));
        GrGLenum error = check_alloc_error(desc, &interface);
        if (error != GR_GL_NO_ERROR) {
            return false;
        } else {
            for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
                const void* currentMipData = texels[currentMipLevel].fPixels;
                if (currentMipData == nullptr) {
                    continue;
                }
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, desc.fWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, desc.fHeight / twoToTheMipLevel);

                GR_GL_CALL(&interface,
                           TexSubImage2D(target,
                                         currentMipLevel,
                                         0, // left
                                         0, // top
                                         currentWidth,
                                         currentHeight,
                                         externalFormat, externalType,
                                         currentMipData));
            }
            return true;
        }
    } else {
        if (texels.empty()) {
            GL_ALLOC_CALL(&interface,
                          TexImage2D(target,
                                     0,
                                     internalFormat,
                                     baseWidth,
                                     baseHeight,
                                     0, // border
                                     externalFormat, externalType,
                                     nullptr));
            GrGLenum error = check_alloc_error(desc, &interface);
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        } else {
            for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
                const void* currentMipData = texels[currentMipLevel].fPixels;
                // Even if currentMipData is nullptr, continue to call TexImage2D.
                // This will allocate texture memory which we can later populate.
                GL_ALLOC_CALL(&interface,
                              TexImage2D(target,
                                         currentMipLevel,
                                         internalFormat,
                                         currentWidth,
                                         currentHeight,
                                         0, // border
                                         externalFormat, externalType,
                                         currentMipData));
                GrGLenum error = check_alloc_error(desc, &interface);
                if (error != GR_GL_NO_ERROR) {
                    return false;
                }
            }
        }
    }
    return true;
}

/**
 * Creates storage space for the texture and fills it with texels.
 *
 * @param desc           The surface descriptor for the texture being created.
 * @param interface      The GL interface in use.
 * @param caps           The capabilities of the GL device.
 * @param internalFormat The data format used for the internal storage of the texture.
 * @param texels         The texel data of the texture being created.
 */
static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
                                                     const GrGLInterface& interface,
                                                     const GrGLCaps& caps,
                                                     GrGLenum target, GrGLenum internalFormat,
                                                     const SkTArray<GrMipLevel>& texels,
                                                     int baseWidth, int baseHeight) {
    CLEAR_ERROR_BEFORE_ALLOC(&interface);

    bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
    // We can only use TexStorage if we know we will not later change the storage requirements.
    // This means if we may later want to add mipmaps, we cannot use TexStorage.
    // Right now, we cannot know if we will later add mipmaps or not.
    // The only time we can use TexStorage is when we already have the
    // mipmaps.
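    // As in the uncompressed path above, texels.count() > 1 is used as the signal
    // that a full mip chain was provided up front, which is the only case where
    // immutable TexStorage allocation is safe here.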
    useTexStorage &= texels.count() > 1;

    if (useTexStorage) {
        // We never resize or change formats of textures.
        GL_ALLOC_CALL(&interface,
                      TexStorage2D(target,
                                   texels.count(),
                                   internalFormat,
                                   baseWidth, baseHeight));
        GrGLenum error = check_alloc_error(desc, &interface);
        if (error != GR_GL_NO_ERROR) {
            return false;
        } else {
            for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
                const void* currentMipData = texels[currentMipLevel].fPixels;
                if (currentMipData == nullptr) {
                    continue;
                }

                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);

                // Make sure that the width and height that we pass to OpenGL
                // is a multiple of the block size.
                size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
                                                             currentHeight);
                GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
                                                               currentMipLevel,
                                                               0, // left
                                                               0, // top
                                                               currentWidth,
                                                               currentHeight,
                                                               internalFormat,
                                                               SkToInt(dataSize),
                                                               currentMipData));
            }
        }
    } else {
        for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
            int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);

            // Make sure that the width and height that we pass to OpenGL
            // is a multiple of the block size.
            size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, baseWidth, baseHeight);

            GL_ALLOC_CALL(&interface,
                          CompressedTexImage2D(target,
                                               currentMipLevel,
                                               internalFormat,
                                               currentWidth,
                                               currentHeight,
                                               0, // border
                                               SkToInt(dataSize),
                                               texels[currentMipLevel].fPixels));

            GrGLenum error = check_alloc_error(desc, &interface);
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        }
    }

    return true;
}

/**
 * After a texture is created, any state which was altered during its creation
 * needs to be restored.
 *
 * @param interface          The GL interface to use.
 * @param caps               The capabilities of the GL device.
 * @param restoreGLRowLength Should the row length unpacking be restored?
 * @param glFlipY            Did GL flip the texture vertically?
 */
static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
                                     bool restoreGLRowLength, bool glFlipY) {
    if (restoreGLRowLength) {
        SkASSERT(caps.unpackRowLengthSupport());
        GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
}

bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
                            GrGLenum target,
                            UploadType uploadType,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const SkTArray<GrMipLevel>& texels) {
    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));

    // texels is const.
    // But we may need to flip the texture vertically to prepare it.
    // Rather than flip in place and alter the incoming data,
    // we allocate a new buffer to flip into.
    // This means we need to make a non-const shallow copy of texels.
    SkTArray<GrMipLevel> texelsShallowCopy(texels);

    for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
         currentMipLevel--) {
        SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
    }

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    size_t bpp = GrBytesPerPixel(dataConfig);

    if (width == 0 || height == 0) {
        return false;
    }

    for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);

        if (currentHeight > SK_MaxS32 ||
            currentWidth > SK_MaxS32) {
            return false;
        }
        if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                                   &currentWidth,
                                                   &currentHeight,
                                                   &texelsShallowCopy[currentMipLevel].fPixels,
                                                   &texelsShallowCopy[currentMipLevel].fRowBytes)) {
            return false;
        }
        if (currentWidth < 0 || currentHeight < 0) {
            return false;
        }
    }

    // Internal format comes from the texture desc.
    GrGLenum internalFormat;
    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFormat,
                                           &externalFormat, &externalType)) {
        return false;
    }
    // TexStorage requires a sized format, and internalFormat may or may not be
    GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(desc.fConfig);

    /*
     * Check whether to allocate a temporary buffer for flipping y or
     * because our srcData has extra bytes past each row. If so, we need
     * to trim those off here, since GL ES may not let us specify
     * GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;

    if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty()) {
        if (caps.unpackFlipYSupport()) {
            glFlipY = true;
        } else {
            swFlipY = true;
        }
    }

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoMalloc tempStorage;

    // find the combined size of all the mip levels and the relative offset of
    // each into the collective buffer
    size_t combined_buffer_size = 0;
    SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
    for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
        const size_t trimmedSize = currentWidth * bpp * currentHeight;
        individual_mip_offsets.push_back(combined_buffer_size);
        combined_buffer_size += trimmedSize;
    }
    char* buffer = (char*)tempStorage.reset(combined_buffer_size);

    for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
        const size_t trimRowBytes = currentWidth * bpp;

        /*
         * check whether to allocate a temporary buffer for flipping y or
         * because our srcData has extra bytes past each row. If so, we need
         * to trim those off here, since GL ES may not let us specify
         * GL_UNPACK_ROW_LENGTH.
         */
        restoreGLRowLength = false;

        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

        // TODO: This optimization should be enabled with or without mips.
        // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
        // mip level, before calling glTexImage2D.
        const bool usesMips = texelsShallowCopy.count() > 1;
        if (caps.unpackRowLengthSupport() && !swFlipY && !usesMips) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else if (kTransfer_UploadType != uploadType) {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
                if (swFlipY && currentHeight >= 1) {
                    src += (currentHeight - 1) * rowBytes;
                }
                char* dst = buffer + individual_mip_offsets[currentMipLevel];
                for (int y = 0; y < currentHeight; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                texelsShallowCopy[currentMipLevel].fPixels = buffer +
                                                             individual_mip_offsets[currentMipLevel];
                texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
            }
        } else {
            return false;
        }
    }

    if (!texelsShallowCopy.empty()) {
        if (glFlipY) {
            GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
                                          config_alignment(desc.fConfig)));
    }

    bool succeeded = true;
    if (kNewTexture_UploadType == uploadType &&
        0 == left && 0 == top &&
        desc.fWidth == width && desc.fHeight == height) {
        succeeded = allocate_and_populate_uncompressed_texture(desc, *interface, caps, target,
                                                               internalFormat,
                                                               internalFormatForTexStorage,
                                                               externalFormat, externalType,
                                                               texelsShallowCopy, width, height);
    } else {
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
             currentMipLevel++) {
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);

            GL_CALL(TexSubImage2D(target,
                                  currentMipLevel,
                                  left, top,
                                  currentWidth,
                                  currentHeight,
                                  externalFormat, externalType,
                                  texelsShallowCopy[currentMipLevel].fPixels));
        }
    }

    restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);

    return succeeded;
}

// TODO: This function is using a lot of wonky semantics like, if width == -1
// then set width = desc.fWidth ... blah. A better way to do it might be to
// create a CompressedTexData struct that takes a desc/ptr and figures out
// the proper upload semantics. Then users can construct this function how they
// see fit if they want to go against the "standard" way to do it.
bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
                                      GrGLenum target,
                                      const SkTArray<GrMipLevel>& texels,
                                      UploadType uploadType,
                                      int left, int top, int width, int height) {
    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));

    // No support for software flip y, yet...
    SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    if (-1 == width) {
        width = desc.fWidth;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(width <= desc.fWidth);
    }
#endif

    if (-1 == height) {
        height = desc.fHeight;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(height <= desc.fHeight);
    }
#endif

    // We only need the internal format for compressed 2D textures.
    GrGLenum internalFormat;
    if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
        return false;
    }

    if (kNewTexture_UploadType == uploadType) {
        return allocate_and_populate_compressed_texture(desc, *interface, caps, target,
                                                        internalFormat, texels, width, height);
    } else {
        // Paletted textures can't be updated.
        if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
            return false;
        }
        for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
            SkASSERT(texels[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);

            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);

            // Make sure that the width and height that we pass to OpenGL
            // is a multiple of the block size.
            size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
                                                         currentHeight);
            GL_CALL(CompressedTexSubImage2D(target,
                                            currentMipLevel,
                                            left, top,
                                            currentWidth,
                                            currentHeight,
                                            internalFormat,
                                            SkToInt(dataSize),
                                            texels[currentMipLevel].fPixels));
        }
    }

    return true;
}

static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kDesktop_ARB_MSFBOType:
        case GrGLCaps::kDesktop_EXT_MSFBOType:
        case GrGLCaps::kMixedSamples_MSFBOType:
        case GrGLCaps::kES_3_0_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}

bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
                                        const GrGLTextureInfo& texInfo,
                                        GrGLRenderTarget::IDDesc* idDesc) {
    idDesc->fMSColorRenderbufferID = 0;
    idDesc->fRTFBOID = 0;
    idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
    idDesc->fTexFBOID = 0;
    SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) ==
             this->caps()->usesMixedSamples());
    idDesc->fIsMixedSampled = desc.fSampleCnt > 0 && this->caps()->usesMixedSamples();

    GrGLenum status;

    GrGLenum colorRenderbufferFormat = 0; // suppress warning

    if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
    if (!idDesc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
        if (!idDesc->fRTFBOID ||
            !idDesc->fMSColorRenderbufferID) {
            goto FAILED;
        }
        if (!this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat)) {
            return false;
        }
    } else {
        idDesc->fRTFBOID = idDesc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        SkASSERT(desc.fSampleCnt > 0);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(*fGLContext,
                                       desc.fSampleCnt,
                                       colorRenderbufferFormat,
                                       desc.fWidth, desc.fHeight)) {
            goto FAILED;
        }
        fStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        idDesc->fMSColorRenderbufferID));
        if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
            !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
        }
    }
    fStats.incRenderTargetBinds();
    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID));

    if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                texInfo.fTarget,
                                                texInfo.fID, 0, desc.fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     texInfo.fTarget,
                                     texInfo.fID, 0));
    }
    if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
        !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
    }

    return true;

FAILED:
    if (idDesc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
    }
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID));
    }
    if (idDesc->fTexFBOID) {
GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID)); 1609 } 1610 return false; 1611 } 1612 1613 // good to set a break-point here to know when createTexture fails 1614 static GrTexture* return_null_texture() { 1615 // SkDEBUGFAIL("null texture"); 1616 return nullptr; 1617 } 1618 1619 #if 0 && defined(SK_DEBUG) 1620 static size_t as_size_t(int x) { 1621 return x; 1622 } 1623 #endif 1624 1625 static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface) { 1626 GrGLTexture::IDDesc idDesc; 1627 idDesc.fInfo.fID = 0; 1628 GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID)); 1629 idDesc.fOwnership = GrBackendObjectOwnership::kOwned; 1630 // When we create the texture, we only 1631 // create GL_TEXTURE_2D at the moment. 1632 // External clients can do something different. 1633 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D; 1634 return idDesc; 1635 } 1636 1637 static void set_initial_texture_params(const GrGLInterface* interface, 1638 const GrGLTextureInfo& info, 1639 GrGLTexture::TexParams* initialTexParams) { 1640 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some 1641 // drivers have a bug where an FBO won't be complete if it includes a 1642 // texture that is not mipmap complete (considering the filter in use). 1643 // we only set a subset here so invalidate first 1644 initialTexParams->invalidate(); 1645 initialTexParams->fMinFilter = GR_GL_NEAREST; 1646 initialTexParams->fMagFilter = GR_GL_NEAREST; 1647 initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE; 1648 initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE; 1649 GR_GL_CALL(interface, TexParameteri(info.fTarget, 1650 GR_GL_TEXTURE_MAG_FILTER, 1651 initialTexParams->fMagFilter)); 1652 GR_GL_CALL(interface, TexParameteri(info.fTarget, 1653 GR_GL_TEXTURE_MIN_FILTER, 1654 initialTexParams->fMinFilter)); 1655 GR_GL_CALL(interface, TexParameteri(info.fTarget, 1656 GR_GL_TEXTURE_WRAP_S, 1657 initialTexParams->fWrapS)); 1658 GR_GL_CALL(interface, TexParameteri(info.fTarget, 1659 GR_GL_TEXTURE_WRAP_T, 1660 initialTexParams->fWrapT)); 1661 } 1662 1663 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc, 1664 SkBudgeted budgeted, 1665 const SkTArray<GrMipLevel>& texels) { 1666 // We fail if the MSAA was requested and is not available. 
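    // Illustrative caller-side sketch (hypothetical values) of a descriptor that would
    // trip this check on a GL with no multisampled FBO support at all:
#if 0
    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fWidth = 256;
    desc.fHeight = 256;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    desc.fSampleCnt = 4;  // MSAA requested...
    // ...but glCaps().msFBOType() == GrGLCaps::kNone_MSFBOType, so onCreateTexture()
    // bails out via return_null_texture() just below.
#endif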
1667 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { 1668 //SkDebugf("MSAA RT requested but not supported on this platform."); 1669 return return_null_texture(); 1670 } 1671 1672 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); 1673 1674 GrGLTexture::IDDesc idDesc; 1675 idDesc.fOwnership = GrBackendObjectOwnership::kOwned; 1676 GrGLTexture::TexParams initialTexParams; 1677 if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, &initialTexParams, texels)) { 1678 return return_null_texture(); 1679 } 1680 1681 GrGLTexture* tex; 1682 if (renderTarget) { 1683 // unbind the texture from the texture unit before binding it to the frame buffer 1684 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0)); 1685 GrGLRenderTarget::IDDesc rtIDDesc; 1686 1687 if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) { 1688 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1689 return return_null_texture(); 1690 } 1691 tex = new GrGLTextureRenderTarget(this, budgeted, desc, idDesc, rtIDDesc); 1692 } else { 1693 bool wasMipMapDataProvided = false; 1694 if (texels.count() > 1) { 1695 wasMipMapDataProvided = true; 1696 } 1697 tex = new GrGLTexture(this, budgeted, desc, idDesc, wasMipMapDataProvided); 1698 } 1699 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1700 #ifdef TRACE_TEXTURE_CREATION 1701 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", 1702 idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig); 1703 #endif 1704 return tex; 1705 } 1706 1707 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc, 1708 SkBudgeted budgeted, 1709 const SkTArray<GrMipLevel>& texels) { 1710 // Make sure that we're not flipping Y. 1711 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { 1712 return return_null_texture(); 1713 } 1714 1715 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface()); 1716 if (!idDesc.fInfo.fID) { 1717 return return_null_texture(); 1718 } 1719 1720 this->setScratchTextureUnit(); 1721 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); 1722 1723 GrGLTexture::TexParams initialTexParams; 1724 set_initial_texture_params(this->glInterface(), idDesc.fInfo, &initialTexParams); 1725 1726 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) { 1727 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1728 return return_null_texture(); 1729 } 1730 1731 GrGLTexture* tex; 1732 tex = new GrGLTexture(this, budgeted, desc, idDesc); 1733 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1734 #ifdef TRACE_TEXTURE_CREATION 1735 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n", 1736 idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig); 1737 #endif 1738 return tex; 1739 } 1740 1741 namespace { 1742 1743 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; 1744 1745 void inline get_stencil_rb_sizes(const GrGLInterface* gl, 1746 GrGLStencilAttachment::Format* format) { 1747 1748 // we shouldn't ever know one size and not the other 1749 SkASSERT((kUnknownBitCount == format->fStencilBits) == 1750 (kUnknownBitCount == format->fTotalBits)); 1751 if (kUnknownBitCount == format->fStencilBits) { 1752 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, 1753 GR_GL_RENDERBUFFER_STENCIL_SIZE, 1754 (GrGLint*)&format->fStencilBits); 1755 if (format->fPacked) { 1756 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, 1757 GR_GL_RENDERBUFFER_DEPTH_SIZE, 1758 (GrGLint*)&format->fTotalBits); 1759 
format->fTotalBits += format->fStencilBits; 1760 } else { 1761 format->fTotalBits = format->fStencilBits; 1762 } 1763 } 1764 } 1765 } 1766 1767 int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) { 1768 static const int kSize = 16; 1769 SkASSERT(this->caps()->isConfigRenderable(config, false)); 1770 if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) { 1771 // Default to unsupported, set this if we find a stencil format that works. 1772 int firstWorkingStencilFormatIndex = -1; 1773 // Create color texture 1774 GrGLuint colorID = 0; 1775 GL_CALL(GenTextures(1, &colorID)); 1776 this->setScratchTextureUnit(); 1777 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID)); 1778 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1779 GR_GL_TEXTURE_MAG_FILTER, 1780 GR_GL_NEAREST)); 1781 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1782 GR_GL_TEXTURE_MIN_FILTER, 1783 GR_GL_NEAREST)); 1784 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1785 GR_GL_TEXTURE_WRAP_S, 1786 GR_GL_CLAMP_TO_EDGE)); 1787 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1788 GR_GL_TEXTURE_WRAP_T, 1789 GR_GL_CLAMP_TO_EDGE)); 1790 1791 GrGLenum internalFormat; 1792 GrGLenum externalFormat; 1793 GrGLenum externalType; 1794 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, 1795 &externalType)) { 1796 return false; 1797 } 1798 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1799 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, 1800 0, 1801 internalFormat, 1802 kSize, 1803 kSize, 1804 0, 1805 externalFormat, 1806 externalType, 1807 NULL)); 1808 if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) { 1809 GL_CALL(DeleteTextures(1, &colorID)); 1810 return -1; 1811 } 1812 1813 // unbind the texture from the texture unit before binding it to the frame buffer 1814 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); 1815 1816 // Create Framebuffer 1817 GrGLuint fb = 0; 1818 GL_CALL(GenFramebuffers(1, &fb)); 1819 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fb)); 1820 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 1821 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, 1822 GR_GL_COLOR_ATTACHMENT0, 1823 GR_GL_TEXTURE_2D, 1824 colorID, 1825 0)); 1826 GrGLuint sbRBID = 0; 1827 GL_CALL(GenRenderbuffers(1, &sbRBID)); 1828 1829 // look over formats till I find a compatible one 1830 int stencilFmtCnt = this->glCaps().stencilFormats().count(); 1831 if (sbRBID) { 1832 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); 1833 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { 1834 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; 1835 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1836 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, 1837 sFmt.fInternalFormat, 1838 kSize, kSize)); 1839 if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) { 1840 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1841 GR_GL_STENCIL_ATTACHMENT, 1842 GR_GL_RENDERBUFFER, sbRBID)); 1843 if (sFmt.fPacked) { 1844 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1845 GR_GL_DEPTH_ATTACHMENT, 1846 GR_GL_RENDERBUFFER, sbRBID)); 1847 } else { 1848 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1849 GR_GL_DEPTH_ATTACHMENT, 1850 GR_GL_RENDERBUFFER, 0)); 1851 } 1852 GrGLenum status; 1853 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 1854 if (status == GR_GL_FRAMEBUFFER_COMPLETE) { 1855 firstWorkingStencilFormatIndex = i; 1856 break; 1857 } 1858 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1859 GR_GL_STENCIL_ATTACHMENT, 1860 GR_GL_RENDERBUFFER, 0)); 1861 
if (sFmt.fPacked) { 1862 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1863 GR_GL_DEPTH_ATTACHMENT, 1864 GR_GL_RENDERBUFFER, 0)); 1865 } 1866 } 1867 } 1868 GL_CALL(DeleteRenderbuffers(1, &sbRBID)); 1869 } 1870 GL_CALL(DeleteTextures(1, &colorID)); 1871 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, 0)); 1872 GL_CALL(DeleteFramebuffers(1, &fb)); 1873 fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex); 1874 } 1875 return this->glCaps().getStencilFormatIndexForConfig(config); 1876 } 1877 1878 bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info, 1879 bool renderTarget, GrGLTexture::TexParams* initialTexParams, 1880 const SkTArray<GrMipLevel>& texels) { 1881 info->fID = 0; 1882 info->fTarget = GR_GL_TEXTURE_2D; 1883 GL_CALL(GenTextures(1, &(info->fID))); 1884 1885 if (!info->fID) { 1886 return false; 1887 } 1888 1889 this->setScratchTextureUnit(); 1890 GL_CALL(BindTexture(info->fTarget, info->fID)); 1891 1892 if (renderTarget && this->glCaps().textureUsageSupport()) { 1893 // provides a hint about how this texture will be used 1894 GL_CALL(TexParameteri(info->fTarget, 1895 GR_GL_TEXTURE_USAGE, 1896 GR_GL_FRAMEBUFFER_ATTACHMENT)); 1897 } 1898 1899 if (info) { 1900 set_initial_texture_params(this->glInterface(), *info, initialTexParams); 1901 } 1902 if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0, 1903 desc.fWidth, desc.fHeight, 1904 desc.fConfig, texels)) { 1905 GL_CALL(DeleteTextures(1, &(info->fID))); 1906 return false; 1907 } 1908 return true; 1909 } 1910 1911 GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, 1912 int width, 1913 int height) { 1914 SkASSERT(width >= rt->width()); 1915 SkASSERT(height >= rt->height()); 1916 1917 int samples = rt->numStencilSamples(); 1918 GrGLStencilAttachment::IDDesc sbDesc; 1919 1920 int sIdx = this->getCompatibleStencilIndex(rt->config()); 1921 if (sIdx < 0) { 1922 return nullptr; 1923 } 1924 1925 if (!sbDesc.fRenderbufferID) { 1926 GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID)); 1927 } 1928 if (!sbDesc.fRenderbufferID) { 1929 return nullptr; 1930 } 1931 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID)); 1932 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx]; 1933 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1934 // we do this "if" so that we don't call the multisample 1935 // version on a GL that doesn't have an MSAA extension. 1936 if (samples > 0) { 1937 SkAssertResult(renderbuffer_storage_msaa(*fGLContext, 1938 samples, 1939 sFmt.fInternalFormat, 1940 width, height)); 1941 } else { 1942 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, 1943 sFmt.fInternalFormat, 1944 width, height)); 1945 SkASSERT(GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface())); 1946 } 1947 fStats.incStencilAttachmentCreates(); 1948 // After sized formats we attempt an unsized format and take 1949 // whatever sizes GL gives us. In that case we query for the size. 
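    // get_stencil_rb_sizes() (in the anonymous namespace above) performs that query with
    // glGetRenderbufferParameteriv. A minimal sketch of the same idea, assuming the
    // renderbuffer is still bound to GR_GL_RENDERBUFFER (illustrative only):
#if 0
    GrGLint stencilBits = 0;
    GR_GL_GetRenderbufferParameteriv(this->glInterface(), GR_GL_RENDERBUFFER,
                                     GR_GL_RENDERBUFFER_STENCIL_SIZE, &stencilBits);
    GrGLint depthBits = 0;  // only meaningful for packed depth-stencil formats
    GR_GL_GetRenderbufferParameteriv(this->glInterface(), GR_GL_RENDERBUFFER,
                                     GR_GL_RENDERBUFFER_DEPTH_SIZE, &depthBits);
#endif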
1950 GrGLStencilAttachment::Format format = sFmt; 1951 get_stencil_rb_sizes(this->glInterface(), &format); 1952 GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this, 1953 sbDesc, 1954 width, 1955 height, 1956 samples, 1957 format); 1958 return stencil; 1959 } 1960 1961 //////////////////////////////////////////////////////////////////////////////// 1962 1963 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer 1964 // objects are implemented as client-side-arrays on tile-deferred architectures. 1965 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW 1966 1967 GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType, 1968 GrAccessPattern accessPattern, const void* data) { 1969 return GrGLBuffer::Create(this, size, intendedType, accessPattern, data); 1970 } 1971 1972 InstancedRendering* GrGLGpu::onCreateInstancedRendering() { 1973 return new GLInstancedRendering(this); 1974 } 1975 1976 void GrGLGpu::flushScissor(const GrScissorState& scissorState, 1977 const GrGLIRect& rtViewport, 1978 GrSurfaceOrigin rtOrigin) { 1979 if (scissorState.enabled()) { 1980 GrGLIRect scissor; 1981 scissor.setRelativeTo(rtViewport, 1982 scissorState.rect().fLeft, 1983 scissorState.rect().fTop, 1984 scissorState.rect().width(), 1985 scissorState.rect().height(), 1986 rtOrigin); 1987 // if the scissor fully contains the viewport then we fall through and 1988 // disable the scissor test. 1989 if (!scissor.contains(rtViewport)) { 1990 if (fHWScissorSettings.fRect != scissor) { 1991 scissor.pushToGLScissor(this->glInterface()); 1992 fHWScissorSettings.fRect = scissor; 1993 } 1994 if (kYes_TriState != fHWScissorSettings.fEnabled) { 1995 GL_CALL(Enable(GR_GL_SCISSOR_TEST)); 1996 fHWScissorSettings.fEnabled = kYes_TriState; 1997 } 1998 return; 1999 } 2000 } 2001 2002 // See fall through note above 2003 this->disableScissor(); 2004 } 2005 2006 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState, 2007 const GrGLRenderTarget* rt) { 2008 typedef GrWindowRectsState::Mode Mode; 2009 SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen. 2010 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles()); 2011 2012 if (!this->caps()->maxWindowRectangles() || 2013 fHWWindowRectsState.knownEqualTo(rt->origin(), rt->getViewport(), windowState)) { 2014 return; 2015 } 2016 2017 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above 2018 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912 2019 int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows)); 2020 SkASSERT(windowState.numWindows() == numWindows); 2021 2022 GrGLIRect glwindows[GrWindowRectangles::kMaxWindows]; 2023 const SkIRect* skwindows = windowState.windows().data(); 2024 int dx = -windowState.origin().x(), dy = -windowState.origin().y(); 2025 for (int i = 0; i < numWindows; ++i) { 2026 const SkIRect& skwindow = skwindows[i].makeOffset(dx, dy); 2027 glwindows[i].setRelativeTo(rt->getViewport(), skwindow, rt->origin()); 2028 } 2029 2030 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? 
GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE; 2031 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts())); 2032 2033 fHWWindowRectsState.set(rt->origin(), rt->getViewport(), windowState); 2034 } 2035 2036 void GrGLGpu::disableWindowRectangles() { 2037 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) { 2038 return; 2039 } 2040 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr)); 2041 fHWWindowRectsState.setDisabled(); 2042 } 2043 2044 void GrGLGpu::flushMinSampleShading(float minSampleShading) { 2045 if (fHWMinSampleShading != minSampleShading) { 2046 if (minSampleShading > 0.0) { 2047 GL_CALL(Enable(GR_GL_SAMPLE_SHADING)); 2048 GL_CALL(MinSampleShading(minSampleShading)); 2049 } 2050 else { 2051 GL_CALL(Disable(GR_GL_SAMPLE_SHADING)); 2052 } 2053 fHWMinSampleShading = minSampleShading; 2054 } 2055 } 2056 2057 bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcessor& primProc, 2058 bool willDrawPoints) { 2059 SkAutoTUnref<GrGLProgram> program(fProgramCache->refProgram(this, pipeline, primProc, 2060 willDrawPoints)); 2061 if (!program) { 2062 GrCapsDebugf(this->caps(), "Failed to create program!\n"); 2063 return false; 2064 } 2065 2066 program->generateMipmaps(primProc, pipeline); 2067 2068 GrXferProcessor::BlendInfo blendInfo; 2069 pipeline.getXferProcessor().getBlendInfo(&blendInfo); 2070 2071 this->flushColorWrite(blendInfo.fWriteColor); 2072 this->flushDrawFace(pipeline.getDrawFace()); 2073 this->flushMinSampleShading(primProc.getSampleShading()); 2074 2075 GrGLuint programID = program->programID(); 2076 if (fHWProgramID != programID) { 2077 GL_CALL(UseProgram(programID)); 2078 fHWProgramID = programID; 2079 } 2080 2081 if (blendInfo.fWriteColor) { 2082 // Swizzle the blend to match what the shader will output. 2083 const GrSwizzle& swizzle = this->glCaps().glslCaps()->configOutputSwizzle( 2084 pipeline.getRenderTarget()->config()); 2085 this->flushBlend(blendInfo, swizzle); 2086 } 2087 2088 program->setData(primProc, pipeline); 2089 2090 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.getRenderTarget()); 2091 this->flushStencil(pipeline.getStencil()); 2092 this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), glRT->origin()); 2093 this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT); 2094 this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !pipeline.getStencil().isDisabled()); 2095 2096 // This must come after textures are flushed because a texture may need 2097 // to be msaa-resolved (which will modify bound FBO state). 
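    // (One such path: bindTexture() calls onResolveRenderTarget() for textures that are
    // also render targets, and the resolve rebinds GR_GL_READ/DRAW_FRAMEBUFFER, so the
    // draw FBO we actually want must be re-established afterwards by flushRenderTarget().)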
2098 this->flushRenderTarget(glRT, nullptr, pipeline.getDisableOutputConversionToSRGB()); 2099 2100 return true; 2101 } 2102 2103 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc, 2104 const GrNonInstancedMesh& mesh, 2105 size_t* indexOffsetInBytes) { 2106 const GrBuffer* vbuf = mesh.vertexBuffer(); 2107 SkASSERT(vbuf); 2108 SkASSERT(!vbuf->isMapped()); 2109 2110 GrGLAttribArrayState* attribState; 2111 if (mesh.isIndexed()) { 2112 SkASSERT(indexOffsetInBytes); 2113 2114 *indexOffsetInBytes = 0; 2115 const GrBuffer* ibuf = mesh.indexBuffer(); 2116 SkASSERT(ibuf); 2117 SkASSERT(!ibuf->isMapped()); 2118 *indexOffsetInBytes += ibuf->baseOffset(); 2119 attribState = fHWVertexArrayState.bindInternalVertexArray(this, ibuf); 2120 } else { 2121 attribState = fHWVertexArrayState.bindInternalVertexArray(this); 2122 } 2123 2124 int vaCount = primProc.numAttribs(); 2125 if (vaCount > 0) { 2126 2127 GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride()); 2128 2129 size_t vertexOffsetInBytes = stride * mesh.startVertex(); 2130 2131 vertexOffsetInBytes += vbuf->baseOffset(); 2132 2133 uint32_t usedAttribArraysMask = 0; 2134 size_t offset = 0; 2135 2136 for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) { 2137 const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex); 2138 usedAttribArraysMask |= (1 << attribIndex); 2139 GrVertexAttribType attribType = attrib.fType; 2140 attribState->set(this, 2141 attribIndex, 2142 vbuf, 2143 attribType, 2144 stride, 2145 reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset)); 2146 offset += attrib.fOffset; 2147 } 2148 attribState->disableUnusedArrays(this, usedAttribArraysMask); 2149 } 2150 } 2151 2152 GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) { 2153 this->handleDirtyContext(); 2154 2155 // Index buffer state is tied to the vertex array. 2156 if (kIndex_GrBufferType == type) { 2157 this->bindVertexArray(0); 2158 } 2159 2160 SkASSERT(type >= 0 && type <= kLast_GrBufferType); 2161 auto& bufferState = fHWBufferState[type]; 2162 2163 if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) { 2164 if (buffer->isCPUBacked()) { 2165 if (!bufferState.fBufferZeroKnownBound) { 2166 GL_CALL(BindBuffer(bufferState.fGLTarget, 0)); 2167 } 2168 } else { 2169 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); 2170 GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID())); 2171 } 2172 bufferState.fBufferZeroKnownBound = buffer->isCPUBacked(); 2173 bufferState.fBoundBufferUniqueID = buffer->uniqueID(); 2174 } 2175 2176 return bufferState.fGLTarget; 2177 } 2178 2179 void GrGLGpu::notifyBufferReleased(const GrGLBuffer* buffer) { 2180 if (buffer->hasAttachedToTexture()) { 2181 // Detach this buffer from any textures to ensure the underlying memory is freed.
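        // The detach below works by re-specifying each affected GL_TEXTURE_BUFFER texture
        // with buffer object 0. Minimal sketch of that idea for a single texture
        // (illustrative; 'texID' and the config are placeholders):
#if 0
        GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, texID));
        GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
                          this->glCaps().configSizedInternalFormat(kRGBA_8888_GrPixelConfig),
                          0 /* no buffer attached -> memory can be released */));
#endif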
2182 uint32_t uniqueID = buffer->uniqueID(); 2183 for (int i = fHWMaxUsedBufferTextureUnit; i >= 0; --i) { 2184 auto& buffTex = fHWBufferTextures[i]; 2185 if (uniqueID != buffTex.fAttachedBufferUniqueID) { 2186 continue; 2187 } 2188 if (i == fHWMaxUsedBufferTextureUnit) { 2189 --fHWMaxUsedBufferTextureUnit; 2190 } 2191 2192 this->setTextureUnit(i); 2193 if (!buffTex.fKnownBound) { 2194 SkASSERT(buffTex.fTextureID); 2195 GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID)); 2196 buffTex.fKnownBound = true; 2197 } 2198 GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER, 2199 this->glCaps().configSizedInternalFormat(buffTex.fTexelConfig), 0)); 2200 } 2201 } 2202 } 2203 2204 void GrGLGpu::disableScissor() { 2205 if (kNo_TriState != fHWScissorSettings.fEnabled) { 2206 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); 2207 fHWScissorSettings.fEnabled = kNo_TriState; 2208 return; 2209 } 2210 } 2211 2212 void GrGLGpu::clear(const GrFixedClip& clip, GrColor color, GrRenderTarget* target) { 2213 this->handleDirtyContext(); 2214 2215 // parent class should never let us get here with no RT 2216 SkASSERT(target); 2217 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); 2218 2219 this->flushRenderTarget(glRT, clip.scissorEnabled() ? &clip.scissorRect() : nullptr); 2220 this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin()); 2221 this->flushWindowRectangles(clip.windowRectsState(), glRT); 2222 2223 GrGLfloat r, g, b, a; 2224 static const GrGLfloat scale255 = 1.f / 255.f; 2225 a = GrColorUnpackA(color) * scale255; 2226 GrGLfloat scaleRGB = scale255; 2227 r = GrColorUnpackR(color) * scaleRGB; 2228 g = GrColorUnpackG(color) * scaleRGB; 2229 b = GrColorUnpackB(color) * scaleRGB; 2230 2231 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); 2232 fHWWriteToColor = kYes_TriState; 2233 GL_CALL(ClearColor(r, g, b, a)); 2234 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); 2235 } 2236 2237 void GrGLGpu::clearStencil(GrRenderTarget* target) { 2238 if (nullptr == target) { 2239 return; 2240 } 2241 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); 2242 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect()); 2243 2244 this->disableScissor(); 2245 this->disableWindowRectangles(); 2246 2247 GL_CALL(StencilMask(0xffffffff)); 2248 GL_CALL(ClearStencil(0)); 2249 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); 2250 fHWStencilSettings.invalidate(); 2251 } 2252 2253 void GrGLGpu::clearStencilClip(const GrFixedClip& clip, 2254 bool insideStencilMask, 2255 GrRenderTarget* target) { 2256 SkASSERT(target); 2257 this->handleDirtyContext(); 2258 2259 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); 2260 // this should only be called internally when we know we have a 2261 // stencil buffer. 2262 SkASSERT(sb); 2263 GrGLint stencilBitCount = sb->bits(); 2264 #if 0 2265 SkASSERT(stencilBitCount > 0); 2266 GrGLint clipStencilMask = (1 << (stencilBitCount - 1)); 2267 #else 2268 // we could just clear the clip bit but when we go through 2269 // ANGLE a partial stencil mask will cause clears to be 2270 // turned into draws. Our contract on GrDrawTarget says that 2271 // changing the clip between stencil passes may or may not 2272 // zero the client's clip bits. So we just clear the whole thing. 
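    // Worked example (illustrative): with an 8-bit stencil buffer the clip bit is the
    // most significant bit, so the #if 0 branch above would write with mask 0x80 and
    // leave the low seven client bits alone; the ~0 mask used below clears those client
    // bits too, trading that away to keep ANGLE on its fast clear path.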
2273 static const GrGLint clipStencilMask = ~0; 2274 #endif 2275 GrGLint value; 2276 if (insideStencilMask) { 2277 value = (1 << (stencilBitCount - 1)); 2278 } else { 2279 value = 0; 2280 } 2281 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); 2282 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect()); 2283 2284 this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin()); 2285 this->flushWindowRectangles(clip.windowRectsState(), glRT); 2286 2287 GL_CALL(StencilMask((uint32_t) clipStencilMask)); 2288 GL_CALL(ClearStencil(value)); 2289 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); 2290 fHWStencilSettings.invalidate(); 2291 } 2292 2293 static bool read_pixels_pays_for_y_flip(GrRenderTarget* renderTarget, const GrGLCaps& caps, 2294 int width, int height, GrPixelConfig config, 2295 size_t rowBytes) { 2296 // If this render target is already TopLeft, we don't need to flip. 2297 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) { 2298 return false; 2299 } 2300 2301 // If the read is really small or smaller than the min texture size, don't force a draw. 2302 static const int kMinSize = 32; 2303 if (width < kMinSize || height < kMinSize) { 2304 return false; 2305 } 2306 2307 // if GL can do the flip then we'll never pay for it. 2308 if (caps.packFlipYSupport()) { 2309 return false; 2310 } 2311 2312 // If we have to do memcpy to handle non-trim rowBytes then we 2313 // get the flip for free. Otherwise it costs. 2314 // Note that we're assuming that 0 rowBytes has already been handled and that the width has been 2315 // clipped. 2316 return caps.packRowLengthSupport() || GrBytesPerPixel(config) * width == rowBytes; 2317 } 2318 2319 bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) { 2320 auto bindRenderTarget = [this, target]() -> bool { 2321 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect()); 2322 return true; 2323 }; 2324 auto getIntegerv = [this](GrGLenum query, GrGLint* value) { 2325 GR_GL_GetIntegerv(this->glInterface(), query, value); 2326 }; 2327 GrPixelConfig rtConfig = target->config(); 2328 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget); 2329 } 2330 2331 bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) { 2332 auto bindRenderTarget = [this, rtConfig]() -> bool { 2333 GrTextureDesc desc; 2334 desc.fConfig = rtConfig; 2335 desc.fWidth = desc.fHeight = 16; 2336 desc.fFlags = kRenderTarget_GrSurfaceFlag; 2337 SkAutoTUnref<GrTexture> temp(this->createTexture(desc, 2338 SkBudgeted::kNo)); 2339 if (!temp) { 2340 return false; 2341 } 2342 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget()); 2343 this->flushRenderTarget(glrt, &SkIRect::EmptyIRect()); 2344 return true; 2345 }; 2346 auto getIntegerv = [this](GrGLenum query, GrGLint* value) { 2347 GR_GL_GetIntegerv(this->glInterface(), query, value); 2348 }; 2349 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget); 2350 } 2351 2352 bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) { 2353 if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) { 2354 return this->readPixelsSupported(rt, readConfig); 2355 } else { 2356 GrPixelConfig config = surfaceForConfig->config(); 2357 return this->readPixelsSupported(config, readConfig); 2358 } 2359 } 2360 2361 static bool requires_srgb_conversion(GrPixelConfig a, GrPixelConfig b) { 2362 if (GrPixelConfigIsSRGB(a)) { 
2363 return !GrPixelConfigIsSRGB(b) && !GrPixelConfigIsAlphaOnly(b); 2364 } else if (GrPixelConfigIsSRGB(b)) { 2365 return !GrPixelConfigIsSRGB(a) && !GrPixelConfigIsAlphaOnly(a); 2366 } 2367 return false; 2368 } 2369 2370 bool GrGLGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes, 2371 GrPixelConfig readConfig, DrawPreference* drawPreference, 2372 ReadPixelTempDrawInfo* tempDrawInfo) { 2373 GrPixelConfig srcConfig = srcSurface->config(); 2374 2375 // These settings we will always want if a temp draw is performed. 2376 tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag; 2377 tempDrawInfo->fTempSurfaceDesc.fWidth = width; 2378 tempDrawInfo->fTempSurfaceDesc.fHeight = height; 2379 tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0; 2380 tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL. 2381 tempDrawInfo->fTempSurfaceFit = this->glCaps().partialFBOReadIsSlow() ? SkBackingFit::kExact 2382 : SkBackingFit::kApprox; 2383 // For now assume no swizzling, we may change that below. 2384 tempDrawInfo->fSwizzle = GrSwizzle::RGBA(); 2385 2386 // Depends on why we need/want a temp draw. Start off assuming no change, the surface we read 2387 // from will be srcConfig and we will read readConfig pixels from it. 2388 // Not that if we require a draw and return a non-renderable format for the temp surface the 2389 // base class will fail for us. 2390 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig; 2391 tempDrawInfo->fReadConfig = readConfig; 2392 2393 if (requires_srgb_conversion(srcConfig, readConfig)) { 2394 if (!this->readPixelsSupported(readConfig, readConfig)) { 2395 return false; 2396 } 2397 // Draw to do srgb to linear conversion or vice versa. 2398 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2399 tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig; 2400 tempDrawInfo->fReadConfig = readConfig; 2401 return true; 2402 } 2403 2404 GrRenderTarget* srcAsRT = srcSurface->asRenderTarget(); 2405 if (!srcAsRT) { 2406 // For now keep assuming the draw is not a format transformation, just a draw to get to a 2407 // RT. We may add additional transformations below. 2408 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2409 } 2410 if (this->glCaps().rgba8888PixelsOpsAreSlow() && kRGBA_8888_GrPixelConfig == readConfig && 2411 this->readPixelsSupported(kBGRA_8888_GrPixelConfig, kBGRA_8888_GrPixelConfig)) { 2412 tempDrawInfo->fTempSurfaceDesc.fConfig = kBGRA_8888_GrPixelConfig; 2413 tempDrawInfo->fSwizzle = GrSwizzle::BGRA(); 2414 tempDrawInfo->fReadConfig = kBGRA_8888_GrPixelConfig; 2415 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference); 2416 } else if (this->glCaps().rgbaToBgraReadbackConversionsAreSlow() && 2417 GrBytesPerPixel(readConfig) == 4 && 2418 GrPixelConfigSwapRAndB(readConfig) == srcConfig && 2419 this->readPixelsSupported(srcSurface, srcConfig)) { 2420 // Mesa 3D takes a slow path on when reading back BGRA from an RGBA surface and vice-versa. 2421 // Better to do a draw with a R/B swap and then read as the original config. 
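        // e.g. (illustrative) srcConfig == kRGBA_8888 and readConfig == kBGRA_8888 on such
        // a driver: the settings below request a draw into a temp surface of the source
        // config with an R/B-swapping swizzle, then a readback in that same config, which
        // yields the BGRA byte order the caller asked for without the slow conversion.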
2422 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig; 2423 tempDrawInfo->fSwizzle = GrSwizzle::BGRA(); 2424 tempDrawInfo->fReadConfig = srcConfig; 2425 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference); 2426 } else if (!this->readPixelsSupported(srcSurface, readConfig)) { 2427 if (readConfig == kBGRA_8888_GrPixelConfig && 2428 this->glCaps().isConfigRenderable(kRGBA_8888_GrPixelConfig, false) && 2429 this->readPixelsSupported(kRGBA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig)) { 2430 // We're trying to read BGRA but it's not supported. If RGBA is renderable and 2431 // we can read it back, then do a swizzling draw to a RGBA and read it back (which 2432 // will effectively be BGRA). 2433 tempDrawInfo->fTempSurfaceDesc.fConfig = kRGBA_8888_GrPixelConfig; 2434 tempDrawInfo->fSwizzle = GrSwizzle::BGRA(); 2435 tempDrawInfo->fReadConfig = kRGBA_8888_GrPixelConfig; 2436 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2437 } else if (readConfig == kSBGRA_8888_GrPixelConfig && 2438 this->glCaps().isConfigRenderable(kSRGBA_8888_GrPixelConfig, false) && 2439 this->readPixelsSupported(kSRGBA_8888_GrPixelConfig, kSRGBA_8888_GrPixelConfig)) { 2440 // We're trying to read sBGRA but it's not supported. If sRGBA is renderable and 2441 // we can read it back, then do a swizzling draw to a sRGBA and read it back (which 2442 // will effectively be sBGRA). 2443 tempDrawInfo->fTempSurfaceDesc.fConfig = kSRGBA_8888_GrPixelConfig; 2444 tempDrawInfo->fSwizzle = GrSwizzle::BGRA(); 2445 tempDrawInfo->fReadConfig = kSRGBA_8888_GrPixelConfig; 2446 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2447 } else if (readConfig == kAlpha_8_GrPixelConfig) { 2448 // onReadPixels implements a fallback for cases where we are want to read kAlpha_8, 2449 // it's unsupported, but 32bit RGBA reads are supported. 2450 // Don't attempt to do any srgb conversions since we only care about alpha. 2451 GrPixelConfig cpuTempConfig = kRGBA_8888_GrPixelConfig; 2452 if (GrPixelConfigIsSRGB(srcSurface->config())) { 2453 cpuTempConfig = kSRGBA_8888_GrPixelConfig; 2454 } 2455 if (!this->readPixelsSupported(srcSurface, cpuTempConfig)) { 2456 // If we can't read RGBA from the src try to draw to a kRGBA_8888 (or kSRGBA_8888) 2457 // first and then onReadPixels will read that to a 32bit temporary buffer. 2458 if (this->caps()->isConfigRenderable(cpuTempConfig, false)) { 2459 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2460 tempDrawInfo->fTempSurfaceDesc.fConfig = cpuTempConfig; 2461 tempDrawInfo->fReadConfig = kAlpha_8_GrPixelConfig; 2462 } else { 2463 return false; 2464 } 2465 } else { 2466 SkASSERT(tempDrawInfo->fTempSurfaceDesc.fConfig == srcConfig); 2467 SkASSERT(tempDrawInfo->fReadConfig == kAlpha_8_GrPixelConfig); 2468 } 2469 } else if (this->caps()->isConfigRenderable(readConfig, false) && 2470 this->readPixelsSupported(readConfig, readConfig)) { 2471 // Do a draw to convert from the src config to the read config. 
2472 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 2473 tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig; 2474 tempDrawInfo->fReadConfig = readConfig; 2475 } else { 2476 return false; 2477 } 2478 } 2479 2480 if (srcAsRT && 2481 read_pixels_pays_for_y_flip(srcAsRT, this->glCaps(), width, height, readConfig, rowBytes)) { 2482 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference); 2483 } 2484 2485 return true; 2486 } 2487 2488 bool GrGLGpu::onReadPixels(GrSurface* surface, 2489 int left, int top, 2490 int width, int height, 2491 GrPixelConfig config, 2492 void* buffer, 2493 size_t rowBytes) { 2494 SkASSERT(surface); 2495 2496 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); 2497 if (!renderTarget) { 2498 return false; 2499 } 2500 2501 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels. 2502 if (requires_srgb_conversion(surface->config(), config)) { 2503 return false; 2504 } 2505 2506 // We have a special case fallback for reading eight bit alpha. We will read back all four 8 2507 // bit channels as RGBA and then extract A. 2508 if (!this->readPixelsSupported(renderTarget, config)) { 2509 // Don't attempt to do any srgb conversions since we only care about alpha. 2510 GrPixelConfig tempConfig = kRGBA_8888_GrPixelConfig; 2511 if (GrPixelConfigIsSRGB(renderTarget->config())) { 2512 tempConfig = kSRGBA_8888_GrPixelConfig; 2513 } 2514 if (kAlpha_8_GrPixelConfig == config && 2515 this->readPixelsSupported(renderTarget, tempConfig)) { 2516 SkAutoTDeleteArray<uint32_t> temp(new uint32_t[width * height * 4]); 2517 if (this->onReadPixels(renderTarget, left, top, width, height, tempConfig, temp.get(), 2518 width*4)) { 2519 uint8_t* dst = reinterpret_cast<uint8_t*>(buffer); 2520 for (int j = 0; j < height; ++j) { 2521 for (int i = 0; i < width; ++i) { 2522 dst[j*rowBytes + i] = (0xFF000000U & temp[j*width+i]) >> 24; 2523 } 2524 } 2525 return true; 2526 } 2527 } 2528 return false; 2529 } 2530 2531 GrGLenum externalFormat; 2532 GrGLenum externalType; 2533 if (!this->glCaps().getReadPixelsFormat(renderTarget->config(), config, &externalFormat, 2534 &externalType)) { 2535 return false; 2536 } 2537 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin(); 2538 2539 // resolve the render target if necessary 2540 switch (renderTarget->getResolveType()) { 2541 case GrGLRenderTarget::kCantResolve_ResolveType: 2542 return false; 2543 case GrGLRenderTarget::kAutoResolves_ResolveType: 2544 this->flushRenderTarget(renderTarget, &SkIRect::EmptyIRect()); 2545 break; 2546 case GrGLRenderTarget::kCanResolve_ResolveType: 2547 this->onResolveRenderTarget(renderTarget); 2548 // we don't track the state of the READ FBO ID. 2549 fStats.incRenderTargetBinds(); 2550 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID())); 2551 break; 2552 default: 2553 SkFAIL("Unknown resolve type"); 2554 } 2555 2556 const GrGLIRect& glvp = renderTarget->getViewport(); 2557 2558 // the read rect is viewport-relative 2559 GrGLIRect readRect; 2560 readRect.setRelativeTo(glvp, left, top, width, height, renderTarget->origin()); 2561 2562 size_t bytesPerPixel = GrBytesPerPixel(config); 2563 size_t tightRowBytes = bytesPerPixel * width; 2564 2565 size_t readDstRowBytes = tightRowBytes; 2566 void* readDst = buffer; 2567 2568 // determine if GL can read using the passed rowBytes or if we need 2569 // a scratch buffer. 
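    // Worked example (illustrative numbers): a 100-pixel-wide kRGBA_8888 read has
    // tightRowBytes = 400. If the caller passes rowBytes = 512 and pack-row-length is
    // supported, the rows can be written straight into the destination by setting the
    // pack row length to rowBytes / bytesPerPixel = 128; without that support we fall
    // back to a tightly packed scratch allocation and copy out row by row further down.
#if 0
    GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 128));  // 512 / 4 for this example
    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, readRect.fWidth, readRect.fHeight,
                       externalFormat, externalType, buffer));
    GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));    // restore the default
#endif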
2570 SkAutoMalloc scratch; 2571 if (rowBytes != tightRowBytes) { 2572 if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) { 2573 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 2574 static_cast<GrGLint>(rowBytes / bytesPerPixel))); 2575 readDstRowBytes = rowBytes; 2576 } else { 2577 scratch.reset(tightRowBytes * height); 2578 readDst = scratch.get(); 2579 } 2580 } 2581 if (flipY && this->glCaps().packFlipYSupport()) { 2582 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1)); 2583 } 2584 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(config))); 2585 2586 GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, 2587 readRect.fWidth, readRect.fHeight, 2588 externalFormat, externalType, readDst)); 2589 if (readDstRowBytes != tightRowBytes) { 2590 SkASSERT(this->glCaps().packRowLengthSupport()); 2591 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); 2592 } 2593 if (flipY && this->glCaps().packFlipYSupport()) { 2594 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0)); 2595 flipY = false; 2596 } 2597 2598 // now reverse the order of the rows, since GL's are bottom-to-top, but our 2599 // API presents top-to-bottom. We must preserve the padding contents. Note 2600 // that the above readPixels did not overwrite the padding. 2601 if (readDst == buffer) { 2602 SkASSERT(rowBytes == readDstRowBytes); 2603 if (flipY) { 2604 scratch.reset(tightRowBytes); 2605 void* tmpRow = scratch.get(); 2606 // flip y in-place by rows 2607 const int halfY = height >> 1; 2608 char* top = reinterpret_cast<char*>(buffer); 2609 char* bottom = top + (height - 1) * rowBytes; 2610 for (int y = 0; y < halfY; y++) { 2611 memcpy(tmpRow, top, tightRowBytes); 2612 memcpy(top, bottom, tightRowBytes); 2613 memcpy(bottom, tmpRow, tightRowBytes); 2614 top += rowBytes; 2615 bottom -= rowBytes; 2616 } 2617 } 2618 } else { 2619 SkASSERT(readDst != buffer); 2620 SkASSERT(rowBytes != tightRowBytes); 2621 // copy from readDst to buffer while flipping y 2622 // const int halfY = height >> 1; 2623 const char* src = reinterpret_cast<const char*>(readDst); 2624 char* dst = reinterpret_cast<char*>(buffer); 2625 if (flipY) { 2626 dst += (height-1) * rowBytes; 2627 } 2628 for (int y = 0; y < height; y++) { 2629 memcpy(dst, src, tightRowBytes); 2630 src += readDstRowBytes; 2631 if (!flipY) { 2632 dst += rowBytes; 2633 } else { 2634 dst -= rowBytes; 2635 } 2636 } 2637 } 2638 return true; 2639 } 2640 2641 GrGpuCommandBuffer* GrGLGpu::createCommandBuffer( 2642 GrRenderTarget* target, 2643 const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo, 2644 const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) { 2645 return new GrGLGpuCommandBuffer(this); 2646 } 2647 2648 void GrGLGpu::finishDrawTarget() { 2649 if (fPLSHasBeenUsed) { 2650 /* There is an ARM driver bug where if we use PLS, and then draw a frame which does not 2651 * use PLS, it leaves garbage all over the place. As a workaround, we use PLS in a 2652 * trivial way every frame. And since we use it every frame, there's never a point at which 2653 * it becomes safe to stop using this workaround once we start. 
2654 */ 2655 this->disableScissor(); 2656 this->disableWindowRectangles(); 2657 // using PLS in the presence of MSAA results in GL_INVALID_OPERATION 2658 this->flushHWAAState(nullptr, false, false); 2659 SkASSERT(!fHWPLSEnabled); 2660 SkASSERT(fMSAAEnabled != kYes_TriState); 2661 GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE)); 2662 this->stampPLSSetupRect(SkRect::MakeXYWH(-100.0f, -100.0f, 0.01f, 0.01f)); 2663 GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE)); 2664 } 2665 } 2666 2667 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bounds, bool disableSRGB) { 2668 SkASSERT(target); 2669 2670 uint32_t rtID = target->uniqueID(); 2671 if (fHWBoundRenderTargetUniqueID != rtID) { 2672 fStats.incRenderTargetBinds(); 2673 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID())); 2674 #ifdef SK_DEBUG 2675 // don't do this check in Chromium -- this is causing 2676 // lots of repeated command buffer flushes when the compositor is 2677 // rendering with Ganesh, which is really slow; even too slow for 2678 // Debug mode. 2679 if (kChromium_GrGLDriver != this->glContext().driver()) { 2680 GrGLenum status; 2681 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 2682 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { 2683 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status); 2684 } 2685 } 2686 #endif 2687 fHWBoundRenderTargetUniqueID = rtID; 2688 this->flushViewport(target->getViewport()); 2689 } 2690 2691 if (this->glCaps().srgbWriteControl()) { 2692 this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()) && !disableSRGB); 2693 } 2694 2695 this->didWriteToSurface(target, bounds); 2696 } 2697 2698 void GrGLGpu::flushFramebufferSRGB(bool enable) { 2699 if (enable && kYes_TriState != fHWSRGBFramebuffer) { 2700 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB)); 2701 fHWSRGBFramebuffer = kYes_TriState; 2702 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) { 2703 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB)); 2704 fHWSRGBFramebuffer = kNo_TriState; 2705 } 2706 } 2707 2708 void GrGLGpu::flushViewport(const GrGLIRect& viewport) { 2709 if (fHWViewport != viewport) { 2710 viewport.pushToGLViewport(this->glInterface()); 2711 fHWViewport = viewport; 2712 } 2713 } 2714 2715 GrGLenum gPrimitiveType2GLMode[] = { 2716 GR_GL_TRIANGLES, 2717 GR_GL_TRIANGLE_STRIP, 2718 GR_GL_TRIANGLE_FAN, 2719 GR_GL_POINTS, 2720 GR_GL_LINES, 2721 GR_GL_LINE_STRIP 2722 }; 2723 2724 #define SWAP_PER_DRAW 0 2725 2726 #if SWAP_PER_DRAW 2727 #if defined(SK_BUILD_FOR_MAC) 2728 #include <AGL/agl.h> 2729 #elif defined(SK_BUILD_FOR_WIN32) 2730 #include <gl/GL.h> 2731 void SwapBuf() { 2732 DWORD procID = GetCurrentProcessId(); 2733 HWND hwnd = GetTopWindow(GetDesktopWindow()); 2734 while(hwnd) { 2735 DWORD wndProcID = 0; 2736 GetWindowThreadProcessId(hwnd, &wndProcID); 2737 if(wndProcID == procID) { 2738 SwapBuffers(GetDC(hwnd)); 2739 } 2740 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT); 2741 } 2742 } 2743 #endif 2744 #endif 2745 2746 void GrGLGpu::draw(const GrPipeline& pipeline, 2747 const GrPrimitiveProcessor& primProc, 2748 const GrMesh meshes[], 2749 int meshCount) { 2750 this->handleDirtyContext(); 2751 2752 bool hasPoints = false; 2753 for (int i = 0; i < meshCount; ++i) { 2754 if (meshes[i].primitiveType() == kPoints_GrPrimitiveType) { 2755 hasPoints = true; 2756 break; 2757 } 2758 } 2759 if (!this->flushGLState(pipeline, primProc, hasPoints)) { 2760 return; 2761 } 2762 GrPixelLocalStorageState plsState = primProc.getPixelLocalStorageState(); 2763 if (!fHWPLSEnabled && 
plsState != 2764 GrPixelLocalStorageState::kDisabled_GrPixelLocalStorageState) { 2765 GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE)); 2766 this->setupPixelLocalStorage(pipeline, primProc); 2767 fHWPLSEnabled = true; 2768 } 2769 if (plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) { 2770 GrStencilSettings stencil; 2771 stencil.setDisabled(); 2772 this->flushStencil(stencil); 2773 } 2774 2775 for (int i = 0; i < meshCount; ++i) { 2776 if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) { 2777 this->xferBarrier(pipeline.getRenderTarget(), barrierType); 2778 } 2779 2780 const GrMesh& mesh = meshes[i]; 2781 GrMesh::Iterator iter; 2782 const GrNonInstancedMesh* nonInstMesh = iter.init(mesh); 2783 do { 2784 size_t indexOffsetInBytes = 0; 2785 this->setupGeometry(primProc, *nonInstMesh, &indexOffsetInBytes); 2786 if (nonInstMesh->isIndexed()) { 2787 GrGLvoid* indices = 2788 reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + 2789 sizeof(uint16_t) * nonInstMesh->startIndex()); 2790 // info.startVertex() was accounted for by setupGeometry. 2791 if (this->glCaps().drawRangeElementsSupport()) { 2792 // We assume here that the batch that generated the mesh used the full 2793 // 0..vertexCount()-1 range. 2794 int start = 0; 2795 int end = nonInstMesh->vertexCount() - 1; 2796 GL_CALL(DrawRangeElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()], 2797 start, end, 2798 nonInstMesh->indexCount(), 2799 GR_GL_UNSIGNED_SHORT, 2800 indices)); 2801 } else { 2802 GL_CALL(DrawElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()], 2803 nonInstMesh->indexCount(), 2804 GR_GL_UNSIGNED_SHORT, 2805 indices)); 2806 } 2807 } else { 2808 // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account 2809 // for startVertex in the DrawElements case. So we always rely on setupGeometry to 2810 // have accounted for startVertex. 2811 GL_CALL(DrawArrays(gPrimitiveType2GLMode[nonInstMesh->primitiveType()], 0, 2812 nonInstMesh->vertexCount())); 2813 } 2814 fStats.incNumDraws(); 2815 } while ((nonInstMesh = iter.next())); 2816 } 2817 2818 if (fHWPLSEnabled && plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) { 2819 // PLS draws always involve multiple draws, finishing up with a non-PLS 2820 // draw that writes to the color buffer. That draw ends up here; we wait 2821 // until after it is complete to actually disable PLS. 
GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE)); 2823 fHWPLSEnabled = false; 2824 this->disableScissor(); 2825 this->disableWindowRectangles(); 2826 } 2827 2828 #if SWAP_PER_DRAW 2829 glFlush(); 2830 #if defined(SK_BUILD_FOR_MAC) 2831 aglSwapBuffers(aglGetCurrentContext()); 2832 int set_a_break_pt_here = 9; 2833 aglSwapBuffers(aglGetCurrentContext()); 2834 #elif defined(SK_BUILD_FOR_WIN32) 2835 SwapBuf(); 2836 int set_a_break_pt_here = 9; 2837 SwapBuf(); 2838 #endif 2839 #endif 2840 } 2841 2842 void GrGLGpu::stampPLSSetupRect(const SkRect& bounds) { 2843 SkASSERT(this->glCaps().glslCaps()->plsPathRenderingSupport()); 2844 2845 if (!fPLSSetupProgram.fProgram) { 2846 if (!this->createPLSSetupProgram()) { 2847 SkDebugf("Failed to create PLS setup program.\n"); 2848 return; 2849 } 2850 } 2851 2852 GL_CALL(UseProgram(fPLSSetupProgram.fProgram)); 2853 this->fHWVertexArrayState.setVertexArrayID(this, 0); 2854 2855 GrGLAttribArrayState* attribs = this->fHWVertexArrayState.bindInternalVertexArray(this); 2856 attribs->set(this, 0, fPLSSetupProgram.fArrayBuffer, kVec2f_GrVertexAttribType, 2857 2 * sizeof(GrGLfloat), 0); 2858 attribs->disableUnusedArrays(this, 0x1); 2859 2860 GL_CALL(Uniform4f(fPLSSetupProgram.fPosXformUniform, bounds.width(), bounds.height(), 2861 bounds.left(), bounds.top())); 2862 2863 GrXferProcessor::BlendInfo blendInfo; 2864 blendInfo.reset(); 2865 this->flushBlend(blendInfo, GrSwizzle()); 2866 this->flushColorWrite(true); 2867 this->flushDrawFace(GrDrawFace::kBoth); 2868 if (!fHWStencilSettings.isDisabled()) { 2869 GL_CALL(Disable(GR_GL_STENCIL_TEST)); 2870 } 2871 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); 2872 GL_CALL(UseProgram(fHWProgramID)); 2873 if (!fHWStencilSettings.isDisabled()) { 2874 GL_CALL(Enable(GR_GL_STENCIL_TEST)); 2875 } 2876 } 2877 2878 void GrGLGpu::setupPixelLocalStorage(const GrPipeline& pipeline, 2879 const GrPrimitiveProcessor& primProc) { 2880 fPLSHasBeenUsed = true; 2881 const SkRect& bounds = 2882 static_cast<const GrPLSGeometryProcessor&>(primProc).getBounds(); 2883 // setup pixel local storage -- this means capturing and storing the current framebuffer color 2884 // and initializing the winding counts to zero 2885 GrRenderTarget* rt = pipeline.getRenderTarget(); 2886 SkScalar width = SkIntToScalar(rt->width()); 2887 SkScalar height = SkIntToScalar(rt->height()); 2888 // dst rect edges in NDC (-1 to 1) 2889 // having some issues with rounding, just expand the bounds by 1 and trust the scissor to keep 2890 // it contained properly 2891 GrGLfloat dx0 = 2.0f * (bounds.left() - 1) / width - 1.0f; 2892 GrGLfloat dx1 = 2.0f * (bounds.right() + 1) / width - 1.0f; 2893 GrGLfloat dy0 = -2.0f * (bounds.top() - 1) / height + 1.0f; 2894 GrGLfloat dy1 = -2.0f * (bounds.bottom() + 1) / height + 1.0f; 2895 SkRect deviceBounds = SkRect::MakeXYWH(dx0, dy0, dx1 - dx0, dy1 - dy0); 2896 2897 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM)); 2898 this->stampPLSSetupRect(deviceBounds); 2899 } 2900 2901 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) { 2902 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); 2903 if (rt->needsResolve()) { 2904 // Some extensions automatically resolve the texture when it is read.
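        // For the explicit-resolve path below, the resolve amounts to binding the MSAA FBO
        // for reading, the texture FBO for drawing, and blitting the dirty region across.
        // Minimal raw-GL sketch (illustrative; the rect values are placeholders, and the
        // real code also handles Apple's ResolveMultisampleFramebuffer variant, which uses
        // the scissor as its blit bounds):
#if 0
        GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
        GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
        GL_CALL(BlitFramebuffer(left, bottom, right, top,   // src rect
                                left, bottom, right, top,   // dst rect (1:1 resolve)
                                GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
#endif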
2905 if (this->glCaps().usesMSAARenderBuffers()) { 2906 SkASSERT(rt->textureFBOID() != rt->renderFBOID()); 2907 fStats.incRenderTargetBinds(); 2908 fStats.incRenderTargetBinds(); 2909 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID())); 2910 GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID())); 2911 // make sure we go through flushRenderTarget() since we've modified 2912 // the bound DRAW FBO ID. 2913 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 2914 const GrGLIRect& vp = rt->getViewport(); 2915 const SkIRect dirtyRect = rt->getResolveRect(); 2916 2917 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { 2918 // Apple's extension uses the scissor as the blit bounds. 2919 GrScissorState scissorState; 2920 scissorState.set(dirtyRect); 2921 this->flushScissor(scissorState, vp, rt->origin()); 2922 this->disableWindowRectangles(); 2923 GL_CALL(ResolveMultisampleFramebuffer()); 2924 } else { 2925 GrGLIRect r; 2926 r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop, 2927 dirtyRect.width(), dirtyRect.height(), target->origin()); 2928 2929 int right = r.fLeft + r.fWidth; 2930 int top = r.fBottom + r.fHeight; 2931 2932 // BlitFrameBuffer respects the scissor, so disable it. 2933 this->disableScissor(); 2934 this->disableWindowRectangles(); 2935 GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top, 2936 r.fLeft, r.fBottom, right, top, 2937 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 2938 } 2939 } 2940 rt->flagAsResolved(); 2941 } 2942 } 2943 2944 namespace { 2945 2946 2947 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { 2948 static const GrGLenum gTable[kGrStencilOpCount] = { 2949 GR_GL_KEEP, // kKeep 2950 GR_GL_ZERO, // kZero 2951 GR_GL_REPLACE, // kReplace 2952 GR_GL_INVERT, // kInvert 2953 GR_GL_INCR_WRAP, // kIncWrap 2954 GR_GL_DECR_WRAP, // kDecWrap 2955 GR_GL_INCR, // kIncClamp 2956 GR_GL_DECR, // kDecClamp 2957 }; 2958 GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep); 2959 GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero); 2960 GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace); 2961 GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert); 2962 GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap); 2963 GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap); 2964 GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp); 2965 GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp); 2966 SkASSERT(op < (GrStencilOp)kGrStencilOpCount); 2967 return gTable[(int)op]; 2968 } 2969 2970 void set_gl_stencil(const GrGLInterface* gl, 2971 const GrStencilSettings::Face& face, 2972 GrGLenum glFace) { 2973 GrGLenum glFunc = GrToGLStencilFunc(face.fTest); 2974 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp); 2975 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp); 2976 2977 GrGLint ref = face.fRef; 2978 GrGLint mask = face.fTestMask; 2979 GrGLint writeMask = face.fWriteMask; 2980 2981 if (GR_GL_FRONT_AND_BACK == glFace) { 2982 // we call the combined func just in case separate stencil is not 2983 // supported. 
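        // (The non-"Separate" glStencilFunc/glStencilOp/glStencilMask entry points set
        // front and back state at once, so they cover GR_GL_FRONT_AND_BACK even on a GL
        // that lacks the two-sided stencil entry points.)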
2984 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); 2985 GR_GL_CALL(gl, StencilMask(writeMask)); 2986 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp)); 2987 } else { 2988 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); 2989 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); 2990 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp)); 2991 } 2992 } 2993 } 2994 2995 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) { 2996 if (fHWStencilSettings != stencilSettings) { 2997 if (stencilSettings.isDisabled()) { 2998 if (kNo_TriState != fHWStencilTestEnabled) { 2999 GL_CALL(Disable(GR_GL_STENCIL_TEST)); 3000 fHWStencilTestEnabled = kNo_TriState; 3001 } 3002 } else { 3003 if (kYes_TriState != fHWStencilTestEnabled) { 3004 GL_CALL(Enable(GR_GL_STENCIL_TEST)); 3005 fHWStencilTestEnabled = kYes_TriState; 3006 } 3007 } 3008 if (!stencilSettings.isDisabled()) { 3009 if (stencilSettings.isTwoSided()) { 3010 SkASSERT(this->caps()->twoSidedStencilSupport()); 3011 set_gl_stencil(this->glInterface(), 3012 stencilSettings.front(), 3013 GR_GL_FRONT); 3014 set_gl_stencil(this->glInterface(), 3015 stencilSettings.back(), 3016 GR_GL_BACK); 3017 } else { 3018 set_gl_stencil(this->glInterface(), 3019 stencilSettings.front(), 3020 GR_GL_FRONT_AND_BACK); 3021 } 3022 } 3023 fHWStencilSettings = stencilSettings; 3024 } 3025 } 3026 3027 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) { 3028 // rt is only optional if useHWAA is false. 3029 SkASSERT(rt || !useHWAA); 3030 SkASSERT(!useHWAA || rt->isStencilBufferMultisampled()); 3031 3032 if (this->caps()->multisampleDisableSupport()) { 3033 if (useHWAA) { 3034 if (kYes_TriState != fMSAAEnabled) { 3035 GL_CALL(Enable(GR_GL_MULTISAMPLE)); 3036 fMSAAEnabled = kYes_TriState; 3037 } 3038 } else { 3039 if (kNo_TriState != fMSAAEnabled) { 3040 GL_CALL(Disable(GR_GL_MULTISAMPLE)); 3041 fMSAAEnabled = kNo_TriState; 3042 } 3043 } 3044 } 3045 3046 if (0 != this->caps()->maxRasterSamples()) { 3047 if (useHWAA && rt->isMixedSampled() && !stencilEnabled) { 3048 // Since stencil is disabled and we want more samples than are in the color buffer, we 3049 // need to tell the rasterizer explicitly how many to run. 3050 if (kYes_TriState != fHWRasterMultisampleEnabled) { 3051 GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE)); 3052 fHWRasterMultisampleEnabled = kYes_TriState; 3053 } 3054 if (rt->numStencilSamples() != fHWNumRasterSamples) { 3055 SkASSERT(rt->numStencilSamples() <= this->caps()->maxRasterSamples()); 3056 GL_CALL(RasterSamples(rt->numStencilSamples(), GR_GL_TRUE)); 3057 fHWNumRasterSamples = rt->numStencilSamples(); 3058 } 3059 } else { 3060 if (kNo_TriState != fHWRasterMultisampleEnabled) { 3061 GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE)); 3062 fHWRasterMultisampleEnabled = kNo_TriState; 3063 } 3064 } 3065 } else { 3066 SkASSERT(!useHWAA || !rt->isMixedSampled() || stencilEnabled); 3067 } 3068 } 3069 3070 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) { 3071 // Any optimization to disable blending should have already been applied and 3072 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0). 
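    // Illustrative example of that canonical "blend off" form: a fully opaque source draw
    // arrives here as (kAdd_GrBlendEquation, kOne_GrBlendCoeff, kZero_GrBlendCoeff), which
    // the check below recognizes and turns into a plain Disable(GR_GL_BLEND) rather than
    // programming BlendFunc(GL_ONE, GL_ZERO).
#if 0
    GrXferProcessor::BlendInfo info;
    info.fEquation = kAdd_GrBlendEquation;
    info.fSrcBlend = kOne_GrBlendCoeff;
    info.fDstBlend = kZero_GrBlendCoeff;  // => blendOff evaluates to true below
#endif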
3073 3074 GrBlendEquation equation = blendInfo.fEquation; 3075 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; 3076 GrBlendCoeff dstCoeff = blendInfo.fDstBlend; 3077 bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) && 3078 kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff; 3079 if (blendOff) { 3080 if (kNo_TriState != fHWBlendState.fEnabled) { 3081 GL_CALL(Disable(GR_GL_BLEND)); 3082 3083 // Workaround for the ARM KHR_blend_equation_advanced blacklist issue 3084 // https://code.google.com/p/skia/issues/detail?id=3943 3085 if (kARM_GrGLVendor == this->ctxInfo().vendor() && 3086 GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) { 3087 SkASSERT(this->caps()->advancedBlendEquationSupport()); 3088 // Set to any basic blending equation. 3089 GrBlendEquation blend_equation = kAdd_GrBlendEquation; 3090 GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation])); 3091 fHWBlendState.fEquation = blend_equation; 3092 } 3093 3094 fHWBlendState.fEnabled = kNo_TriState; 3095 } 3096 return; 3097 } 3098 3099 if (kYes_TriState != fHWBlendState.fEnabled) { 3100 GL_CALL(Enable(GR_GL_BLEND)); 3101 fHWBlendState.fEnabled = kYes_TriState; 3102 } 3103 3104 if (fHWBlendState.fEquation != equation) { 3105 GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation])); 3106 fHWBlendState.fEquation = equation; 3107 } 3108 3109 if (GrBlendEquationIsAdvanced(equation)) { 3110 SkASSERT(this->caps()->advancedBlendEquationSupport()); 3111 // Advanced equations have no other blend state. 3112 return; 3113 } 3114 3115 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) { 3116 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], 3117 gXfermodeCoeff2Blend[dstCoeff])); 3118 fHWBlendState.fSrcCoeff = srcCoeff; 3119 fHWBlendState.fDstCoeff = dstCoeff; 3120 } 3121 3122 if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) { 3123 GrColor blendConst = blendInfo.fBlendConstant; 3124 blendConst = swizzle.applyTo(blendConst); 3125 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) { 3126 GrGLfloat c[4]; 3127 GrColorToRGBAFloat(blendConst, c); 3128 GL_CALL(BlendColor(c[0], c[1], c[2], c[3])); 3129 fHWBlendState.fConstColor = blendConst; 3130 fHWBlendState.fConstColorValid = true; 3131 } 3132 } 3133 } 3134 3135 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) { 3136 static const GrGLenum gWrapModes[] = { 3137 GR_GL_CLAMP_TO_EDGE, 3138 GR_GL_REPEAT, 3139 GR_GL_MIRRORED_REPEAT 3140 }; 3141 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes)); 3142 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode); 3143 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode); 3144 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode); 3145 return gWrapModes[tm]; 3146 } 3147 3148 static GrGLenum get_component_enum_from_char(char component) { 3149 switch (component) { 3150 case 'r': 3151 return GR_GL_RED; 3152 case 'g': 3153 return GR_GL_GREEN; 3154 case 'b': 3155 return GR_GL_BLUE; 3156 case 'a': 3157 return GR_GL_ALPHA; 3158 default: 3159 SkFAIL("Unsupported component"); 3160 return 0; 3161 } 3162 } 3163 3164 /** If texture swizzling is available using tex parameters then it is preferred over mangling 3165 the generated shader code. This potentially allows greater reuse of cached shaders. 
 */
static void get_tex_param_swizzle(GrPixelConfig config,
                                  const GrGLCaps& caps,
                                  GrGLenum* glSwizzle) {
    const GrSwizzle& swizzle = caps.configSwizzle(config);
    for (int i = 0; i < 4; ++i) {
        glSwizzle[i] = get_component_enum_from_char(swizzle.c_str()[i]);
    }
}

void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, bool allowSRGBInputs,
                          GrGLTexture* texture) {
    SkASSERT(texture);

#ifdef SK_DEBUG
    if (!this->caps()->npotTextureTileSupport()) {
        const bool tileX = SkShader::kClamp_TileMode != params.getTileModeX();
        const bool tileY = SkShader::kClamp_TileMode != params.getTileModeY();
        if (tileX || tileY) {
            const int w = texture->width();
            const int h = texture->height();
            SkASSERT(SkIsPow2(w) && SkIsPow2(h));
        }
    }
#endif

    // If we created a rt/tex and rendered to it without using a texture and now we're texturing
    // from the rt it will still be the last bound texture, but it needs resolving. So keep this
    // out of the "last != next" check.
    GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
    if (texRT) {
        this->onResolveRenderTarget(texRT);
    }

    uint32_t textureID = texture->uniqueID();
    GrGLenum target = texture->target();
    if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
        this->setTextureUnit(unitIdx);
        GL_CALL(BindTexture(target, texture->textureID()));
        fHWBoundTextureUniqueIDs[unitIdx] = textureID;
    }

    ResetTimestamp timestamp;
    const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
    bool setAll = timestamp < this->getResetTimestamp();
    GrGLTexture::TexParams newTexParams;

    static GrGLenum glMinFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR_MIPMAP_LINEAR
    };
    static GrGLenum glMagFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR
    };
    GrTextureParams::FilterMode filterMode = params.filterMode();

    if (GrTextureParams::kMipMap_FilterMode == filterMode) {
        if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
            filterMode = GrTextureParams::kBilerp_FilterMode;
        }
    }

    newTexParams.fMinFilter = glMinFilterModes[filterMode];
    newTexParams.fMagFilter = glMagFilterModes[filterMode];

    if (GrPixelConfigIsSRGB(texture->config())) {
        newTexParams.fSRGBDecode = allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT;
        if (setAll || newTexParams.fSRGBDecode != oldTexParams.fSRGBDecode) {
            this->setTextureUnit(unitIdx);
            GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT, newTexParams.fSRGBDecode));
        }
    }

#ifdef SK_DEBUG
    // We were supposed to ensure MipMaps were up-to-date and built correctly before getting here.
    if (GrTextureParams::kMipMap_FilterMode == filterMode) {
        SkASSERT(!texture->texturePriv().mipMapsAreDirty());
        if (GrPixelConfigIsSRGB(texture->config())) {
            SkSourceGammaTreatment gammaTreatment = allowSRGBInputs ?
3247 SkSourceGammaTreatment::kRespect : SkSourceGammaTreatment::kIgnore; 3248 SkASSERT(texture->texturePriv().gammaTreatment() == gammaTreatment); 3249 } 3250 } 3251 #endif 3252 3253 newTexParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel(); 3254 3255 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); 3256 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); 3257 get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA); 3258 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { 3259 this->setTextureUnit(unitIdx); 3260 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMagFilter)); 3261 } 3262 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { 3263 this->setTextureUnit(unitIdx); 3264 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter)); 3265 } 3266 if (setAll || newTexParams.fMaxMipMapLevel != oldTexParams.fMaxMipMapLevel) { 3267 // These are not supported in ES2 contexts 3268 if (this->glCaps().mipMapLevelAndLodControlSupport()) { 3269 if (newTexParams.fMaxMipMapLevel != 0) { 3270 this->setTextureUnit(unitIdx); 3271 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0)); 3272 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0)); 3273 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, 3274 newTexParams.fMaxMipMapLevel)); 3275 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, 3276 newTexParams.fMaxMipMapLevel)); 3277 } 3278 } 3279 } 3280 if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) { 3281 this->setTextureUnit(unitIdx); 3282 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS)); 3283 } 3284 if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) { 3285 this->setTextureUnit(unitIdx); 3286 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newTexParams.fWrapT)); 3287 } 3288 if (this->glCaps().textureSwizzleSupport() && 3289 (setAll || memcmp(newTexParams.fSwizzleRGBA, 3290 oldTexParams.fSwizzleRGBA, 3291 sizeof(newTexParams.fSwizzleRGBA)))) { 3292 this->setTextureSwizzle(unitIdx, target, newTexParams.fSwizzleRGBA); 3293 } 3294 texture->setCachedTexParams(newTexParams, this->getResetTimestamp()); 3295 } 3296 3297 void GrGLGpu::bindTexelBuffer(int unitIdx, GrPixelConfig texelConfig, GrGLBuffer* buffer) { 3298 SkASSERT(this->glCaps().canUseConfigWithTexelBuffer(texelConfig)); 3299 SkASSERT(unitIdx >= 0 && unitIdx < fHWBufferTextures.count()); 3300 3301 BufferTexture& buffTex = fHWBufferTextures[unitIdx]; 3302 3303 if (!buffTex.fKnownBound) { 3304 if (!buffTex.fTextureID) { 3305 GL_CALL(GenTextures(1, &buffTex.fTextureID)); 3306 if (!buffTex.fTextureID) { 3307 return; 3308 } 3309 } 3310 3311 this->setTextureUnit(unitIdx); 3312 GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID)); 3313 3314 buffTex.fKnownBound = true; 3315 } 3316 3317 if (buffer->uniqueID() != buffTex.fAttachedBufferUniqueID || 3318 buffTex.fTexelConfig != texelConfig) { 3319 3320 this->setTextureUnit(unitIdx); 3321 GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER, 3322 this->glCaps().configSizedInternalFormat(texelConfig), 3323 buffer->bufferID())); 3324 3325 buffTex.fTexelConfig = texelConfig; 3326 buffTex.fAttachedBufferUniqueID = buffer->uniqueID(); 3327 3328 if (this->glCaps().textureSwizzleSupport() && 3329 this->glCaps().configSwizzle(texelConfig) != buffTex.fSwizzle) { 3330 GrGLenum glSwizzle[4]; 3331 get_tex_param_swizzle(texelConfig, this->glCaps(), glSwizzle); 3332 this->setTextureSwizzle(unitIdx, GR_GL_TEXTURE_BUFFER, glSwizzle); 
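            // Cache the swizzle we just pushed so the inequality check above can skip redundant
            // glTexParameter updates the next time this unit is bound with the same config.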
3333 buffTex.fSwizzle = this->glCaps().configSwizzle(texelConfig); 3334 } 3335 3336 buffer->setHasAttachedToTexture(); 3337 fHWMaxUsedBufferTextureUnit = SkTMax(unitIdx, fHWMaxUsedBufferTextureUnit); 3338 } 3339 } 3340 3341 void GrGLGpu::generateMipmaps(const GrTextureParams& params, bool allowSRGBInputs, 3342 GrGLTexture* texture) { 3343 SkASSERT(texture); 3344 3345 // First, figure out if we need mips for this texture at all: 3346 GrTextureParams::FilterMode filterMode = params.filterMode(); 3347 3348 if (GrTextureParams::kMipMap_FilterMode == filterMode) { 3349 if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) { 3350 filterMode = GrTextureParams::kBilerp_FilterMode; 3351 } 3352 } 3353 3354 if (GrTextureParams::kMipMap_FilterMode != filterMode) { 3355 return; 3356 } 3357 3358 // If this is an sRGB texture and the mips were previously built the "other" way 3359 // (gamma-correct vs. not), then we need to rebuild them. We don't need to check for 3360 // srgbSupport - we'll *never* get an sRGB pixel config if we don't support it. 3361 SkSourceGammaTreatment gammaTreatment = allowSRGBInputs 3362 ? SkSourceGammaTreatment::kRespect : SkSourceGammaTreatment::kIgnore; 3363 if (GrPixelConfigIsSRGB(texture->config()) && 3364 gammaTreatment != texture->texturePriv().gammaTreatment()) { 3365 texture->texturePriv().dirtyMipMaps(true); 3366 } 3367 3368 // If the mips aren't dirty, we're done: 3369 if (!texture->texturePriv().mipMapsAreDirty()) { 3370 return; 3371 } 3372 3373 // If we created a rt/tex and rendered to it without using a texture and now we're texturing 3374 // from the rt it will still be the last bound texture, but it needs resolving. 3375 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget()); 3376 if (texRT) { 3377 this->onResolveRenderTarget(texRT); 3378 } 3379 3380 GrGLenum target = texture->target(); 3381 this->setScratchTextureUnit(); 3382 GL_CALL(BindTexture(target, texture->textureID())); 3383 3384 // Configure sRGB decode, if necessary. This state is the only thing needed for the driver 3385 // call (glGenerateMipmap) to work correctly. Our manual method dirties other state, too. 3386 if (GrPixelConfigIsSRGB(texture->config())) { 3387 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT, 3388 allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT)); 3389 } 3390 3391 // Either do manual mipmap generation or (if that fails), just rely on the driver: 3392 if (!this->generateMipmap(texture, allowSRGBInputs)) { 3393 GL_CALL(GenerateMipmap(target)); 3394 } 3395 3396 texture->texturePriv().dirtyMipMaps(false); 3397 texture->texturePriv().setMaxMipMapLevel(SkMipMap::ComputeLevelCount( 3398 texture->width(), texture->height())); 3399 texture->texturePriv().setGammaTreatment(gammaTreatment); 3400 3401 // We have potentially set lots of state on the texture. Easiest to dirty it all: 3402 texture->textureParamsModified(); 3403 } 3404 3405 void GrGLGpu::setTextureSwizzle(int unitIdx, GrGLenum target, const GrGLenum swizzle[]) { 3406 this->setTextureUnit(unitIdx); 3407 if (this->glStandard() == kGLES_GrGLStandard) { 3408 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. 
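        // Set each channel with its own call. For example, an alpha-only config that is backed
        // by a single-channel RED texture would typically arrive here as
        // { GR_GL_RED, GR_GL_RED, GR_GL_RED, GR_GL_RED } ("rrrr"); the exact mapping comes from
        // GrGLCaps::configSwizzle().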
3409 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0])); 3410 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1])); 3411 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2])); 3412 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3])); 3413 } else { 3414 GR_STATIC_ASSERT(sizeof(swizzle[0]) == sizeof(GrGLint)); 3415 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA, 3416 reinterpret_cast<const GrGLint*>(swizzle))); 3417 } 3418 } 3419 3420 void GrGLGpu::flushColorWrite(bool writeColor) { 3421 if (!writeColor) { 3422 if (kNo_TriState != fHWWriteToColor) { 3423 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, 3424 GR_GL_FALSE, GR_GL_FALSE)); 3425 fHWWriteToColor = kNo_TriState; 3426 } 3427 } else { 3428 if (kYes_TriState != fHWWriteToColor) { 3429 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); 3430 fHWWriteToColor = kYes_TriState; 3431 } 3432 } 3433 } 3434 3435 void GrGLGpu::flushDrawFace(GrDrawFace face) { 3436 if (fHWDrawFace != face) { 3437 switch (face) { 3438 case GrDrawFace::kCCW: 3439 GL_CALL(Enable(GR_GL_CULL_FACE)); 3440 GL_CALL(CullFace(GR_GL_BACK)); 3441 break; 3442 case GrDrawFace::kCW: 3443 GL_CALL(Enable(GR_GL_CULL_FACE)); 3444 GL_CALL(CullFace(GR_GL_FRONT)); 3445 break; 3446 case GrDrawFace::kBoth: 3447 GL_CALL(Disable(GR_GL_CULL_FACE)); 3448 break; 3449 default: 3450 SkFAIL("Unknown draw face."); 3451 } 3452 fHWDrawFace = face; 3453 } 3454 } 3455 3456 void GrGLGpu::setTextureUnit(int unit) { 3457 SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count()); 3458 if (unit != fHWActiveTextureUnitIdx) { 3459 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); 3460 fHWActiveTextureUnitIdx = unit; 3461 } 3462 } 3463 3464 void GrGLGpu::setScratchTextureUnit() { 3465 // Bind the last texture unit since it is the least likely to be used by GrGLProgram. 3466 int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1; 3467 if (lastUnitIdx != fHWActiveTextureUnitIdx) { 3468 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); 3469 fHWActiveTextureUnitIdx = lastUnitIdx; 3470 } 3471 // clear out the this field so that if a program does use this unit it will rebind the correct 3472 // texture. 3473 fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID; 3474 } 3475 3476 // Determines whether glBlitFramebuffer could be used between src and dst. 3477 static inline bool can_blit_framebuffer(const GrSurface* dst, 3478 const GrSurface* src, 3479 const GrGLGpu* gpu) { 3480 if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) && 3481 gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0)) { 3482 switch (gpu->glCaps().blitFramebufferSupport()) { 3483 case GrGLCaps::kNone_BlitFramebufferSupport: 3484 return false; 3485 case GrGLCaps::kNoScalingNoMirroring_BlitFramebufferSupport: 3486 // Our copy surface doesn't support scaling so just check for mirroring. 3487 if (dst->origin() != src->origin()) { 3488 return false; 3489 } 3490 break; 3491 case GrGLCaps::kFull_BlitFramebufferSupport: 3492 break; 3493 } 3494 // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match 3495 // or the rects are not the same (not just the same size but have the same edges). 
        if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
            (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
            return false;
        }
        const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
        if (dstTex && dstTex->target() != GR_GL_TEXTURE_2D) {
            return false;
        }
        const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
        if (srcTex && srcTex->target() != GR_GL_TEXTURE_2D) {
            return false;
        }
        return true;
    } else {
        return false;
    }
}

static inline bool can_copy_texsubimage(const GrSurface* dst,
                                        const GrSurface* src,
                                        const GrGLGpu* gpu) {
    // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
    // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
    // many drivers would allow it to work, but ANGLE does not.
    if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
    // then we don't want to copy to the texture but to the MSAA buffer.
    if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
        return false;
    }
    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // If the src is multisampled (and uses an extension where there is a separate MSAA
    // renderbuffer) then it is an invalid operation to call CopyTexSubImage.
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        return false;
    }

    const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
    // CopyTex(Sub)Image writes to a texture and we have no way of dynamically wrapping a RT in a
    // texture.
    if (!dstTex) {
        return false;
    }

    const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());

    // Check that we could wrap the source in an FBO, that the dst is TEXTURE_2D, and that no
    // mirroring is required.
    if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        !GrPixelConfigIsCompressed(src->config()) &&
        (!srcTex || srcTex->target() == GR_GL_TEXTURE_2D) &&
        dstTex->target() == GR_GL_TEXTURE_2D &&
        dst->origin() == src->origin()) {
        return true;
    } else {
        return false;
    }
}

// Binds the surface to the given FBO target for copying. If the surface is not a render target, a
// temporary FBO is created (or reused) and the surface's texture is attached to it. The viewport
// that the copy rect is relative to is output.
void GrGLGpu::bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
                                    TempFBOTarget tempFBOTarget) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    if (!rt) {
        SkASSERT(surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GrGLenum target = static_cast<GrGLTexture*>(surface->asTexture())->target();
        GrGLuint* tempFBOID;
        tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;

        if (0 == *tempFBOID) {
            GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
        }

        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, *tempFBOID));
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             target,
                                                             texID,
                                                             0));
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
}

void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
    // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to a scratch
    // FBO, so detach the texture from that FBO here.
    if (!surface->asRenderTarget()) {
        SkASSERT(surface->asTexture());
        GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             textureTarget,
                                                             0,
                                                             0));
    }
}

bool GrGLGpu::initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const {
    // If the src is a texture, we can implement the blit as a draw assuming the config is
    // renderable.
    if (src->asTexture() && this->caps()->isConfigRenderable(src->config(), false)) {
        desc->fOrigin = kDefault_GrSurfaceOrigin;
        desc->fFlags = kRenderTarget_GrSurfaceFlag;
        desc->fConfig = src->config();
        return true;
    }

    const GrGLTexture* srcTexture = static_cast<const GrGLTexture*>(src->asTexture());
    if (srcTexture && srcTexture->target() != GR_GL_TEXTURE_2D) {
        // Not supported for FBO blit or CopyTexSubImage
        return false;
    }

    // We look for opportunities to use CopyTexSubImage or fbo blit. If neither is possible we
    // return false to fall back to creating a render target dst for render-to-texture. This code
    // prefers CopyTexSubImage to fbo blit and avoids triggering temporary fbo creation. It isn't
    // clear that avoiding temporary fbo creation is actually optimal.

    GrSurfaceOrigin originForBlitFramebuffer = kDefault_GrSurfaceOrigin;
    if (this->glCaps().blitFramebufferSupport() ==
        GrGLCaps::kNoScalingNoMirroring_BlitFramebufferSupport) {
        originForBlitFramebuffer = src->origin();
    }

    // Check for format issues with glCopyTexSubImage2D
    if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. If BGRA can be used with fbo blit
        // then we set up for that, otherwise fail.
        if (this->caps()->isConfigRenderable(kBGRA_8888_GrPixelConfig, false)) {
            desc->fOrigin = originForBlitFramebuffer;
            desc->fFlags = kRenderTarget_GrSurfaceFlag;
            desc->fConfig = kBGRA_8888_GrPixelConfig;
            return true;
        }
        return false;
    }

    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src);
    if (srcRT->renderFBOID() != srcRT->textureFBOID()) {
        // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO blit or
        // fail.
3651 if (this->caps()->isConfigRenderable(src->config(), false)) { 3652 desc->fOrigin = originForBlitFramebuffer; 3653 desc->fFlags = kRenderTarget_GrSurfaceFlag; 3654 desc->fConfig = src->config(); 3655 return true; 3656 } 3657 return false; 3658 } 3659 3660 // We'll do a CopyTexSubImage. Make the dst a plain old texture. 3661 desc->fConfig = src->config(); 3662 desc->fOrigin = src->origin(); 3663 desc->fFlags = kNone_GrSurfaceFlags; 3664 return true; 3665 } 3666 3667 bool GrGLGpu::onCopySurface(GrSurface* dst, 3668 GrSurface* src, 3669 const SkIRect& srcRect, 3670 const SkIPoint& dstPoint) { 3671 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the 3672 // swizzle. 3673 if (this->glCaps().glslCaps()->configOutputSwizzle(src->config()) != 3674 this->glCaps().glslCaps()->configOutputSwizzle(dst->config())) { 3675 return false; 3676 } 3677 // Don't prefer copying as a draw if the dst doesn't already have a FBO object. 3678 bool preferCopy = SkToBool(dst->asRenderTarget()); 3679 if (preferCopy && src->asTexture()) { 3680 if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { 3681 return true; 3682 } 3683 } 3684 3685 if (can_copy_texsubimage(dst, src, this)) { 3686 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); 3687 return true; 3688 } 3689 3690 if (can_blit_framebuffer(dst, src, this)) { 3691 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); 3692 } 3693 3694 if (!preferCopy && src->asTexture()) { 3695 if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { 3696 return true; 3697 } 3698 } 3699 3700 return false; 3701 } 3702 3703 bool GrGLGpu::createCopyProgram(int progIdx) { 3704 const GrGLSLCaps* glslCaps = this->glCaps().glslCaps(); 3705 static const GrSLType kSamplerTypes[3] = { kTexture2DSampler_GrSLType, 3706 kTextureExternalSampler_GrSLType, 3707 kTexture2DRectSampler_GrSLType }; 3708 if (kTextureExternalSampler_GrSLType == kSamplerTypes[progIdx] && 3709 !this->glCaps().glslCaps()->externalTextureSupport()) { 3710 return false; 3711 } 3712 if (kTexture2DRectSampler_GrSLType == kSamplerTypes[progIdx] && 3713 !this->glCaps().rectangleTextureSupport()) { 3714 return false; 3715 } 3716 3717 if (!fCopyProgramArrayBuffer) { 3718 static const GrGLfloat vdata[] = { 3719 0, 0, 3720 0, 1, 3721 1, 0, 3722 1, 1 3723 }; 3724 fCopyProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType, 3725 kStatic_GrAccessPattern, vdata)); 3726 } 3727 if (!fCopyProgramArrayBuffer) { 3728 return false; 3729 } 3730 3731 SkASSERT(!fCopyPrograms[progIdx].fProgram); 3732 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram()); 3733 if (!fCopyPrograms[progIdx].fProgram) { 3734 return false; 3735 } 3736 3737 const char* version = glslCaps->versionDeclString(); 3738 GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier); 3739 GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType, 3740 GrShaderVar::kUniform_TypeModifier); 3741 GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType, 3742 GrShaderVar::kUniform_TypeModifier); 3743 GrGLSLShaderVar uTexture("u_texture", kSamplerTypes[progIdx], 3744 GrShaderVar::kUniform_TypeModifier); 3745 GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType, 3746 GrShaderVar::kVaryingOut_TypeModifier); 3747 GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, 3748 GrShaderVar::kOut_TypeModifier); 3749 3750 SkString vshaderTxt(version); 3751 if (glslCaps->noperspectiveInterpolationSupport()) { 3752 if (const char* 
extension = glslCaps->noperspectiveInterpolationExtensionString()) { 3753 vshaderTxt.appendf("#extension %s : require\n", extension); 3754 } 3755 vTexCoord.addModifier("noperspective"); 3756 } 3757 3758 aVertex.appendDecl(glslCaps, &vshaderTxt); 3759 vshaderTxt.append(";"); 3760 uTexCoordXform.appendDecl(glslCaps, &vshaderTxt); 3761 vshaderTxt.append(";"); 3762 uPosXform.appendDecl(glslCaps, &vshaderTxt); 3763 vshaderTxt.append(";"); 3764 vTexCoord.appendDecl(glslCaps, &vshaderTxt); 3765 vshaderTxt.append(";"); 3766 3767 vshaderTxt.append( 3768 "// Copy Program VS\n" 3769 "void main() {" 3770 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;" 3771 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" 3772 " gl_Position.zw = vec2(0, 1);" 3773 "}" 3774 ); 3775 3776 SkString fshaderTxt(version); 3777 if (glslCaps->noperspectiveInterpolationSupport()) { 3778 if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) { 3779 fshaderTxt.appendf("#extension %s : require\n", extension); 3780 } 3781 } 3782 if (kSamplerTypes[progIdx] == kTextureExternalSampler_GrSLType) { 3783 fshaderTxt.appendf("#extension %s : require\n", 3784 glslCaps->externalTextureExtensionString()); 3785 } 3786 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps, 3787 &fshaderTxt); 3788 vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier); 3789 vTexCoord.appendDecl(glslCaps, &fshaderTxt); 3790 fshaderTxt.append(";"); 3791 uTexture.appendDecl(glslCaps, &fshaderTxt); 3792 fshaderTxt.append(";"); 3793 const char* fsOutName; 3794 if (glslCaps->mustDeclareFragmentShaderOutput()) { 3795 oFragColor.appendDecl(glslCaps, &fshaderTxt); 3796 fshaderTxt.append(";"); 3797 fsOutName = oFragColor.c_str(); 3798 } else { 3799 fsOutName = "gl_FragColor"; 3800 } 3801 fshaderTxt.appendf( 3802 "// Copy Program FS\n" 3803 "void main() {" 3804 " %s = %s(u_texture, v_texCoord);" 3805 "}", 3806 fsOutName, 3807 GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[progIdx], this->glslGeneration()) 3808 ); 3809 3810 const char* str; 3811 GrGLint length; 3812 3813 str = vshaderTxt.c_str(); 3814 length = SkToInt(vshaderTxt.size()); 3815 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, 3816 GR_GL_VERTEX_SHADER, &str, &length, 1, 3817 &fStats); 3818 3819 str = fshaderTxt.c_str(); 3820 length = SkToInt(fshaderTxt.size()); 3821 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, 3822 GR_GL_FRAGMENT_SHADER, &str, &length, 1, 3823 &fStats); 3824 3825 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram)); 3826 3827 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform, 3828 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture")); 3829 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform, 3830 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform")); 3831 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform, 3832 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform")); 3833 3834 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex")); 3835 3836 GL_CALL(DeleteShader(vshader)); 3837 GL_CALL(DeleteShader(fshader)); 3838 3839 return true; 3840 } 3841 3842 bool GrGLGpu::createMipmapProgram(int progIdx) { 3843 const bool oddWidth = SkToBool(progIdx & 0x2); 3844 const bool oddHeight = SkToBool(progIdx & 0x1); 3845 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 
2 : 1); 3846 3847 const GrGLSLCaps* glslCaps = this->glCaps().glslCaps(); 3848 3849 SkASSERT(!fMipmapPrograms[progIdx].fProgram); 3850 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram()); 3851 if (!fMipmapPrograms[progIdx].fProgram) { 3852 return false; 3853 } 3854 3855 const char* version = glslCaps->versionDeclString(); 3856 GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier); 3857 GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType, 3858 GrShaderVar::kUniform_TypeModifier); 3859 GrGLSLShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType, 3860 GrShaderVar::kUniform_TypeModifier); 3861 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension): 3862 GrGLSLShaderVar vTexCoords[] = { 3863 GrGLSLShaderVar("v_texCoord0", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier), 3864 GrGLSLShaderVar("v_texCoord1", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier), 3865 GrGLSLShaderVar("v_texCoord2", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier), 3866 GrGLSLShaderVar("v_texCoord3", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier), 3867 }; 3868 GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, 3869 GrShaderVar::kOut_TypeModifier); 3870 3871 SkString vshaderTxt(version); 3872 if (glslCaps->noperspectiveInterpolationSupport()) { 3873 if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) { 3874 vshaderTxt.appendf("#extension %s : require\n", extension); 3875 } 3876 vTexCoords[0].addModifier("noperspective"); 3877 vTexCoords[1].addModifier("noperspective"); 3878 vTexCoords[2].addModifier("noperspective"); 3879 vTexCoords[3].addModifier("noperspective"); 3880 } 3881 3882 aVertex.appendDecl(glslCaps, &vshaderTxt); 3883 vshaderTxt.append(";"); 3884 uTexCoordXform.appendDecl(glslCaps, &vshaderTxt); 3885 vshaderTxt.append(";"); 3886 for (int i = 0; i < numTaps; ++i) { 3887 vTexCoords[i].appendDecl(glslCaps, &vshaderTxt); 3888 vshaderTxt.append(";"); 3889 } 3890 3891 vshaderTxt.append( 3892 "// Mipmap Program VS\n" 3893 "void main() {" 3894 " gl_Position.xy = a_vertex * vec2(2, 2) - vec2(1, 1);" 3895 " gl_Position.zw = vec2(0, 1);" 3896 ); 3897 3898 // Insert texture coordinate computation: 3899 if (oddWidth && oddHeight) { 3900 vshaderTxt.append( 3901 " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;" 3902 " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + vec2(u_texCoordXform.x, 0);" 3903 " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + vec2(0, u_texCoordXform.z);" 3904 " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;" 3905 ); 3906 } else if (oddWidth) { 3907 vshaderTxt.append( 3908 " v_texCoord0 = a_vertex.xy * vec2(u_texCoordXform.y, 1);" 3909 " v_texCoord1 = a_vertex.xy * vec2(u_texCoordXform.y, 1) + vec2(u_texCoordXform.x, 0);" 3910 ); 3911 } else if (oddHeight) { 3912 vshaderTxt.append( 3913 " v_texCoord0 = a_vertex.xy * vec2(1, u_texCoordXform.w);" 3914 " v_texCoord1 = a_vertex.xy * vec2(1, u_texCoordXform.w) + vec2(0, u_texCoordXform.z);" 3915 ); 3916 } else { 3917 vshaderTxt.append( 3918 " v_texCoord0 = a_vertex.xy;" 3919 ); 3920 } 3921 3922 vshaderTxt.append("}"); 3923 3924 SkString fshaderTxt(version); 3925 if (glslCaps->noperspectiveInterpolationSupport()) { 3926 if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) { 3927 fshaderTxt.appendf("#extension %s : require\n", extension); 3928 } 3929 } 3930 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps, 3931 
&fshaderTxt); 3932 for (int i = 0; i < numTaps; ++i) { 3933 vTexCoords[i].setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier); 3934 vTexCoords[i].appendDecl(glslCaps, &fshaderTxt); 3935 fshaderTxt.append(";"); 3936 } 3937 uTexture.appendDecl(glslCaps, &fshaderTxt); 3938 fshaderTxt.append(";"); 3939 const char* fsOutName; 3940 if (glslCaps->mustDeclareFragmentShaderOutput()) { 3941 oFragColor.appendDecl(glslCaps, &fshaderTxt); 3942 fshaderTxt.append(";"); 3943 fsOutName = oFragColor.c_str(); 3944 } else { 3945 fsOutName = "gl_FragColor"; 3946 } 3947 const char* sampleFunction = GrGLSLTexture2DFunctionName(kVec2f_GrSLType, 3948 kTexture2DSampler_GrSLType, 3949 this->glslGeneration()); 3950 fshaderTxt.append( 3951 "// Mipmap Program FS\n" 3952 "void main() {" 3953 ); 3954 3955 if (oddWidth && oddHeight) { 3956 fshaderTxt.appendf( 3957 " %s = (%s(u_texture, v_texCoord0) + %s(u_texture, v_texCoord1) + " 3958 " %s(u_texture, v_texCoord2) + %s(u_texture, v_texCoord3)) * 0.25;", 3959 fsOutName, sampleFunction, sampleFunction, sampleFunction, sampleFunction 3960 ); 3961 } else if (oddWidth || oddHeight) { 3962 fshaderTxt.appendf( 3963 " %s = (%s(u_texture, v_texCoord0) + %s(u_texture, v_texCoord1)) * 0.5;", 3964 fsOutName, sampleFunction, sampleFunction 3965 ); 3966 } else { 3967 fshaderTxt.appendf( 3968 " %s = %s(u_texture, v_texCoord0);", 3969 fsOutName, sampleFunction 3970 ); 3971 } 3972 3973 fshaderTxt.append("}"); 3974 3975 const char* str; 3976 GrGLint length; 3977 3978 str = vshaderTxt.c_str(); 3979 length = SkToInt(vshaderTxt.size()); 3980 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, 3981 GR_GL_VERTEX_SHADER, &str, &length, 1, 3982 &fStats); 3983 3984 str = fshaderTxt.c_str(); 3985 length = SkToInt(fshaderTxt.size()); 3986 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, 3987 GR_GL_FRAGMENT_SHADER, &str, &length, 1, 3988 &fStats); 3989 3990 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram)); 3991 3992 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform, 3993 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture")); 3994 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform, 3995 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform")); 3996 3997 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex")); 3998 3999 GL_CALL(DeleteShader(vshader)); 4000 GL_CALL(DeleteShader(fshader)); 4001 4002 return true; 4003 } 4004 4005 bool GrGLGpu::createWireRectProgram() { 4006 if (!fWireRectArrayBuffer) { 4007 static const GrGLfloat vdata[] = { 4008 0, 0, 4009 0, 1, 4010 1, 1, 4011 1, 0 4012 }; 4013 fWireRectArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType, 4014 kStatic_GrAccessPattern, vdata)); 4015 if (!fWireRectArrayBuffer) { 4016 return false; 4017 } 4018 } 4019 4020 SkASSERT(!fWireRectProgram.fProgram); 4021 GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram()); 4022 if (!fWireRectProgram.fProgram) { 4023 return false; 4024 } 4025 4026 GrGLSLShaderVar uColor("u_color", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier); 4027 GrGLSLShaderVar uRect("u_rect", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier); 4028 GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier); 4029 const char* version = this->glCaps().glslCaps()->versionDeclString(); 4030 4031 // The rect uniform specifies the rectangle in NDC space as a vec4 (left,top,right,bottom). 
The 4032 // program is used with a vbo containing the unit square. Vertices are computed from the rect 4033 // uniform using the 4 vbo vertices. 4034 SkString vshaderTxt(version); 4035 aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 4036 vshaderTxt.append(";"); 4037 uRect.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 4038 vshaderTxt.append(";"); 4039 vshaderTxt.append( 4040 "// Wire Rect Program VS\n" 4041 "void main() {" 4042 " gl_Position.x = u_rect.x + a_vertex.x * (u_rect.z - u_rect.x);" 4043 " gl_Position.y = u_rect.y + a_vertex.y * (u_rect.w - u_rect.y);" 4044 " gl_Position.zw = vec2(0, 1);" 4045 "}" 4046 ); 4047 4048 GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier); 4049 4050 SkString fshaderTxt(version); 4051 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, 4052 *this->glCaps().glslCaps(), 4053 &fshaderTxt); 4054 uColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt); 4055 fshaderTxt.append(";"); 4056 const char* fsOutName; 4057 if (this->glCaps().glslCaps()->mustDeclareFragmentShaderOutput()) { 4058 oFragColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt); 4059 fshaderTxt.append(";"); 4060 fsOutName = oFragColor.c_str(); 4061 } else { 4062 fsOutName = "gl_FragColor"; 4063 } 4064 fshaderTxt.appendf( 4065 "// Write Rect Program FS\n" 4066 "void main() {" 4067 " %s = %s;" 4068 "}", 4069 fsOutName, 4070 uColor.c_str() 4071 ); 4072 4073 const char* str; 4074 GrGLint length; 4075 4076 str = vshaderTxt.c_str(); 4077 length = SkToInt(vshaderTxt.size()); 4078 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram, 4079 GR_GL_VERTEX_SHADER, &str, &length, 1, 4080 &fStats); 4081 4082 str = fshaderTxt.c_str(); 4083 length = SkToInt(fshaderTxt.size()); 4084 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram, 4085 GR_GL_FRAGMENT_SHADER, &str, &length, 1, 4086 &fStats); 4087 4088 GL_CALL(LinkProgram(fWireRectProgram.fProgram)); 4089 4090 GL_CALL_RET(fWireRectProgram.fColorUniform, 4091 GetUniformLocation(fWireRectProgram.fProgram, "u_color")); 4092 GL_CALL_RET(fWireRectProgram.fRectUniform, 4093 GetUniformLocation(fWireRectProgram.fProgram, "u_rect")); 4094 GL_CALL(BindAttribLocation(fWireRectProgram.fProgram, 0, "a_vertex")); 4095 4096 GL_CALL(DeleteShader(vshader)); 4097 GL_CALL(DeleteShader(fshader)); 4098 4099 return true; 4100 } 4101 4102 void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor color) { 4103 // TODO: This should swizzle the output to match dst's config, though it is a debugging 4104 // visualization. 4105 4106 this->handleDirtyContext(); 4107 if (!fWireRectProgram.fProgram) { 4108 if (!this->createWireRectProgram()) { 4109 SkDebugf("Failed to create wire rect program.\n"); 4110 return; 4111 } 4112 } 4113 4114 int w = rt->width(); 4115 int h = rt->height(); 4116 4117 // Compute the edges of the rectangle (top,left,right,bottom) in NDC space. Must consider 4118 // whether the render target is flipped or not. 
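    // A device-space coordinate x in [0, w] maps to NDC as 2 * x / w - 1 (and likewise for y
    // with h). The +/-0.5 insets below keep the GL_LINE_LOOP on the centers of the rect's
    // border pixels rather than on its outer integer edges.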
4119 GrGLfloat edges[4]; 4120 edges[0] = SkIntToScalar(rect.fLeft) + 0.5f; 4121 edges[2] = SkIntToScalar(rect.fRight) - 0.5f; 4122 if (kBottomLeft_GrSurfaceOrigin == rt->origin()) { 4123 edges[1] = h - (SkIntToScalar(rect.fTop) + 0.5f); 4124 edges[3] = h - (SkIntToScalar(rect.fBottom) - 0.5f); 4125 } else { 4126 edges[1] = SkIntToScalar(rect.fTop) + 0.5f; 4127 edges[3] = SkIntToScalar(rect.fBottom) - 0.5f; 4128 } 4129 edges[0] = 2 * edges[0] / w - 1.0f; 4130 edges[1] = 2 * edges[1] / h - 1.0f; 4131 edges[2] = 2 * edges[2] / w - 1.0f; 4132 edges[3] = 2 * edges[3] / h - 1.0f; 4133 4134 GrGLfloat channels[4]; 4135 static const GrGLfloat scale255 = 1.f / 255.f; 4136 channels[0] = GrColorUnpackR(color) * scale255; 4137 channels[1] = GrColorUnpackG(color) * scale255; 4138 channels[2] = GrColorUnpackB(color) * scale255; 4139 channels[3] = GrColorUnpackA(color) * scale255; 4140 4141 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(rt->asRenderTarget()); 4142 this->flushRenderTarget(glRT, &rect); 4143 4144 GL_CALL(UseProgram(fWireRectProgram.fProgram)); 4145 fHWProgramID = fWireRectProgram.fProgram; 4146 4147 fHWVertexArrayState.setVertexArrayID(this, 0); 4148 4149 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); 4150 attribs->set(this, 0, fWireRectArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat), 4151 0); 4152 attribs->disableUnusedArrays(this, 0x1); 4153 4154 GL_CALL(Uniform4fv(fWireRectProgram.fRectUniform, 1, edges)); 4155 GL_CALL(Uniform4fv(fWireRectProgram.fColorUniform, 1, channels)); 4156 4157 GrXferProcessor::BlendInfo blendInfo; 4158 blendInfo.reset(); 4159 this->flushBlend(blendInfo, GrSwizzle::RGBA()); 4160 this->flushColorWrite(true); 4161 this->flushDrawFace(GrDrawFace::kBoth); 4162 this->flushHWAAState(glRT, false, false); 4163 this->disableScissor(); 4164 this->disableWindowRectangles(); 4165 GrStencilSettings stencil; 4166 stencil.setDisabled(); 4167 this->flushStencil(stencil); 4168 4169 GL_CALL(DrawArrays(GR_GL_LINE_LOOP, 0, 4)); 4170 } 4171 4172 4173 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, 4174 GrSurface* src, 4175 const SkIRect& srcRect, 4176 const SkIPoint& dstPoint) { 4177 GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture()); 4178 int progIdx = TextureTargetToCopyProgramIdx(srcTex->target()); 4179 4180 if (!fCopyPrograms[progIdx].fProgram) { 4181 if (!this->createCopyProgram(progIdx)) { 4182 SkDebugf("Failed to create copy program.\n"); 4183 return false; 4184 } 4185 } 4186 4187 int w = srcRect.width(); 4188 int h = srcRect.height(); 4189 4190 GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode); 4191 this->bindTexture(0, params, true, srcTex); 4192 4193 GrGLIRect dstVP; 4194 this->bindSurfaceFBOForCopy(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget); 4195 this->flushViewport(dstVP); 4196 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 4197 4198 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h); 4199 4200 GL_CALL(UseProgram(fCopyPrograms[progIdx].fProgram)); 4201 fHWProgramID = fCopyPrograms[progIdx].fProgram; 4202 4203 fHWVertexArrayState.setVertexArrayID(this, 0); 4204 4205 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); 4206 attribs->set(this, 0, fCopyProgramArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat), 4207 0); 4208 attribs->disableUnusedArrays(this, 0x1); 4209 4210 // dst rect edges in NDC (-1 to 1) 4211 int dw = dst->width(); 4212 int dh = dst->height(); 4213 GrGLfloat dx0 = 2.f * 
dstPoint.fX / dw - 1.f; 4214 GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f; 4215 GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f; 4216 GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f; 4217 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { 4218 dy0 = -dy0; 4219 dy1 = -dy1; 4220 } 4221 4222 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft; 4223 GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w); 4224 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop; 4225 GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h); 4226 int sh = src->height(); 4227 if (kBottomLeft_GrSurfaceOrigin == src->origin()) { 4228 sy0 = sh - sy0; 4229 sy1 = sh - sy1; 4230 } 4231 // src rect edges in normalized texture space (0 to 1) unless we're using a RECTANGLE texture. 4232 GrGLenum srcTarget = srcTex->target(); 4233 if (GR_GL_TEXTURE_RECTANGLE != srcTarget) { 4234 int sw = src->width(); 4235 sx0 /= sw; 4236 sx1 /= sw; 4237 sy0 /= sh; 4238 sy1 /= sh; 4239 } 4240 4241 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0)); 4242 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform, 4243 sx1 - sx0, sy1 - sy0, sx0, sy0)); 4244 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0)); 4245 4246 GrXferProcessor::BlendInfo blendInfo; 4247 blendInfo.reset(); 4248 this->flushBlend(blendInfo, GrSwizzle::RGBA()); 4249 this->flushColorWrite(true); 4250 this->flushDrawFace(GrDrawFace::kBoth); 4251 this->flushHWAAState(nullptr, false, false); 4252 this->disableScissor(); 4253 this->disableWindowRectangles(); 4254 GrStencilSettings stencil; 4255 stencil.setDisabled(); 4256 this->flushStencil(stencil); 4257 4258 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); 4259 this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, dst); 4260 this->didWriteToSurface(dst, &dstRect); 4261 4262 return true; 4263 } 4264 4265 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, 4266 GrSurface* src, 4267 const SkIRect& srcRect, 4268 const SkIPoint& dstPoint) { 4269 SkASSERT(can_copy_texsubimage(dst, src, this)); 4270 GrGLIRect srcVP; 4271 this->bindSurfaceFBOForCopy(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget); 4272 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture()); 4273 SkASSERT(dstTex); 4274 // We modified the bound FBO 4275 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 4276 GrGLIRect srcGLRect; 4277 srcGLRect.setRelativeTo(srcVP, 4278 srcRect.fLeft, 4279 srcRect.fTop, 4280 srcRect.width(), 4281 srcRect.height(), 4282 src->origin()); 4283 4284 this->setScratchTextureUnit(); 4285 GL_CALL(BindTexture(dstTex->target(), dstTex->textureID())); 4286 GrGLint dstY; 4287 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { 4288 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight); 4289 } else { 4290 dstY = dstPoint.fY; 4291 } 4292 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0, 4293 dstPoint.fX, dstY, 4294 srcGLRect.fLeft, srcGLRect.fBottom, 4295 srcGLRect.fWidth, srcGLRect.fHeight)); 4296 this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, src); 4297 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 4298 srcRect.width(), srcRect.height()); 4299 this->didWriteToSurface(dst, &dstRect); 4300 } 4301 4302 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, 4303 GrSurface* src, 4304 const SkIRect& srcRect, 4305 const SkIPoint& dstPoint) { 4306 SkASSERT(can_blit_framebuffer(dst, src, this)); 4307 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 4308 srcRect.width(), srcRect.height()); 4309 if (dst == src) { 4310 if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { 4311 return 
false; 4312 } 4313 } 4314 4315 GrGLIRect dstVP; 4316 GrGLIRect srcVP; 4317 this->bindSurfaceFBOForCopy(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget); 4318 this->bindSurfaceFBOForCopy(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget); 4319 // We modified the bound FBO 4320 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 4321 GrGLIRect srcGLRect; 4322 GrGLIRect dstGLRect; 4323 srcGLRect.setRelativeTo(srcVP, 4324 srcRect.fLeft, 4325 srcRect.fTop, 4326 srcRect.width(), 4327 srcRect.height(), 4328 src->origin()); 4329 dstGLRect.setRelativeTo(dstVP, 4330 dstRect.fLeft, 4331 dstRect.fTop, 4332 dstRect.width(), 4333 dstRect.height(), 4334 dst->origin()); 4335 4336 // BlitFrameBuffer respects the scissor, so disable it. 4337 this->disableScissor(); 4338 this->disableWindowRectangles(); 4339 4340 GrGLint srcY0; 4341 GrGLint srcY1; 4342 // Does the blit need to y-mirror or not? 4343 if (src->origin() == dst->origin()) { 4344 srcY0 = srcGLRect.fBottom; 4345 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; 4346 } else { 4347 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; 4348 srcY1 = srcGLRect.fBottom; 4349 } 4350 GL_CALL(BlitFramebuffer(srcGLRect.fLeft, 4351 srcY0, 4352 srcGLRect.fLeft + srcGLRect.fWidth, 4353 srcY1, 4354 dstGLRect.fLeft, 4355 dstGLRect.fBottom, 4356 dstGLRect.fLeft + dstGLRect.fWidth, 4357 dstGLRect.fBottom + dstGLRect.fHeight, 4358 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 4359 this->unbindTextureFBOForCopy(GR_GL_DRAW_FRAMEBUFFER, dst); 4360 this->unbindTextureFBOForCopy(GR_GL_READ_FRAMEBUFFER, src); 4361 this->didWriteToSurface(dst, &dstRect); 4362 return true; 4363 } 4364 4365 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB. 4366 // Uses draw calls to do a series of downsample operations to successive mips. 4367 // If this returns false, then the calling code falls back to using glGenerateMipmap. 
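// In outline (matching the body below): for each level i >= 1, clamp sampling to level i - 1 via
// GR_GL_TEXTURE_BASE_LEVEL, attach level i to a temporary FBO, and draw a full-texture quad with
// one of the fMipmapPrograms downsample shaders chosen for the parity of the previous level's
// dimensions.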
4368 bool GrGLGpu::generateMipmap(GrGLTexture* texture, bool gammaCorrect) { 4369 // Our iterative downsample requires the ability to limit which level we're sampling: 4370 if (!this->glCaps().doManualMipmapping()) { 4371 return false; 4372 } 4373 4374 // Mipmaps are only supported on 2D textures: 4375 if (GR_GL_TEXTURE_2D != texture->target()) { 4376 return false; 4377 } 4378 4379 // We need to be able to render to the texture for this to work: 4380 if (!this->caps()->isConfigRenderable(texture->config(), false)) { 4381 return false; 4382 } 4383 4384 // If we're mipping an sRGB texture, we need to ensure FB sRGB is correct: 4385 if (GrPixelConfigIsSRGB(texture->config())) { 4386 // If we have write-control, just set the state that we want: 4387 if (this->glCaps().srgbWriteControl()) { 4388 this->flushFramebufferSRGB(gammaCorrect); 4389 } else if (!gammaCorrect) { 4390 // If we don't have write-control we can't do non-gamma-correct mipmapping: 4391 return false; 4392 } 4393 } 4394 4395 int width = texture->width(); 4396 int height = texture->height(); 4397 int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1; 4398 4399 // Define all mips, if we haven't previously done so: 4400 if (0 == texture->texturePriv().maxMipMapLevel()) { 4401 GrGLenum internalFormat; 4402 GrGLenum externalFormat; 4403 GrGLenum externalType; 4404 if (!this->glCaps().getTexImageFormats(texture->config(), texture->config(), 4405 &internalFormat, &externalFormat, &externalType)) { 4406 return false; 4407 } 4408 4409 for (GrGLint level = 1; level < levelCount; ++level) { 4410 // Define the next mip: 4411 width = SkTMax(1, width / 2); 4412 height = SkTMax(1, height / 2); 4413 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, level, internalFormat, 4414 width, height, 0, 4415 externalFormat, externalType, nullptr)); 4416 } 4417 } 4418 4419 // Create (if necessary), then bind temporary FBO: 4420 if (0 == fTempDstFBOID) { 4421 GL_CALL(GenFramebuffers(1, &fTempDstFBOID)); 4422 } 4423 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID)); 4424 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 4425 4426 // Bind the texture, to get things configured for filtering. 
4427 // We'll be changing our base level further below: 4428 this->setTextureUnit(0); 4429 GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode); 4430 this->bindTexture(0, params, gammaCorrect, texture); 4431 4432 // Vertex data: 4433 if (!fMipmapProgramArrayBuffer) { 4434 static const GrGLfloat vdata[] = { 4435 0, 0, 4436 0, 1, 4437 1, 0, 4438 1, 1 4439 }; 4440 fMipmapProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), 4441 kVertex_GrBufferType, 4442 kStatic_GrAccessPattern, vdata)); 4443 } 4444 if (!fMipmapProgramArrayBuffer) { 4445 return false; 4446 } 4447 4448 fHWVertexArrayState.setVertexArrayID(this, 0); 4449 4450 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); 4451 attribs->set(this, 0, fMipmapProgramArrayBuffer, kVec2f_GrVertexAttribType, 4452 2 * sizeof(GrGLfloat), 0); 4453 attribs->disableUnusedArrays(this, 0x1); 4454 4455 // Set "simple" state once: 4456 GrXferProcessor::BlendInfo blendInfo; 4457 blendInfo.reset(); 4458 this->flushBlend(blendInfo, GrSwizzle::RGBA()); 4459 this->flushColorWrite(true); 4460 this->flushDrawFace(GrDrawFace::kBoth); 4461 this->flushHWAAState(nullptr, false, false); 4462 this->disableScissor(); 4463 this->disableWindowRectangles(); 4464 GrStencilSettings stencil; 4465 stencil.setDisabled(); 4466 this->flushStencil(stencil); 4467 4468 // Do all the blits: 4469 width = texture->width(); 4470 height = texture->height(); 4471 GrGLIRect viewport; 4472 viewport.fLeft = 0; 4473 viewport.fBottom = 0; 4474 for (GrGLint level = 1; level < levelCount; ++level) { 4475 // Get and bind the program for this particular downsample (filter shape can vary): 4476 int progIdx = TextureSizeToMipmapProgramIdx(width, height); 4477 if (!fMipmapPrograms[progIdx].fProgram) { 4478 if (!this->createMipmapProgram(progIdx)) { 4479 SkDebugf("Failed to create mipmap program.\n"); 4480 return false; 4481 } 4482 } 4483 GL_CALL(UseProgram(fMipmapPrograms[progIdx].fProgram)); 4484 fHWProgramID = fMipmapPrograms[progIdx].fProgram; 4485 4486 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h) 4487 const float invWidth = 1.0f / width; 4488 const float invHeight = 1.0f / height; 4489 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform, 4490 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight)); 4491 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0)); 4492 4493 // Only sample from previous mip 4494 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1)); 4495 4496 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, 4497 GR_GL_TEXTURE_2D, texture->textureID(), level)); 4498 4499 width = SkTMax(1, width / 2); 4500 height = SkTMax(1, height / 2); 4501 viewport.fWidth = width; 4502 viewport.fHeight = height; 4503 this->flushViewport(viewport); 4504 4505 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); 4506 } 4507 4508 // Unbind: 4509 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, 4510 GR_GL_TEXTURE_2D, 0, 0)); 4511 4512 return true; 4513 } 4514 4515 void GrGLGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings& stencil, 4516 int* effectiveSampleCnt, SamplePattern* samplePattern) { 4517 SkASSERT(!rt->isMixedSampled() || rt->renderTargetPriv().getStencilAttachment() || 4518 stencil.isDisabled()); 4519 4520 this->flushStencil(stencil); 4521 this->flushHWAAState(rt, true, !stencil.isDisabled()); 4522 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(rt), 
&SkIRect::EmptyIRect()); 4523 4524 if (0 != this->caps()->maxRasterSamples()) { 4525 GR_GL_GetIntegerv(this->glInterface(), GR_GL_EFFECTIVE_RASTER_SAMPLES, effectiveSampleCnt); 4526 } else { 4527 GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, effectiveSampleCnt); 4528 } 4529 4530 SkASSERT(*effectiveSampleCnt >= rt->desc().fSampleCnt); 4531 4532 if (this->caps()->sampleLocationsSupport()) { 4533 samplePattern->reset(*effectiveSampleCnt); 4534 for (int i = 0; i < *effectiveSampleCnt; ++i) { 4535 GrGLfloat pos[2]; 4536 GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, pos)); 4537 if (kTopLeft_GrSurfaceOrigin == rt->origin()) { 4538 (*samplePattern)[i].set(pos[0], pos[1]); 4539 } else { 4540 (*samplePattern)[i].set(pos[0], 1 - pos[1]); 4541 } 4542 } 4543 } 4544 } 4545 4546 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) { 4547 SkASSERT(type); 4548 switch (type) { 4549 case kTexture_GrXferBarrierType: { 4550 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); 4551 if (glrt->textureFBOID() != glrt->renderFBOID()) { 4552 // The render target uses separate storage so no need for glTextureBarrier. 4553 // FIXME: The render target will resolve automatically when its texture is bound, 4554 // but we could resolve only the bounds that will be read if we do it here instead. 4555 return; 4556 } 4557 SkASSERT(this->caps()->textureBarrierSupport()); 4558 GL_CALL(TextureBarrier()); 4559 return; 4560 } 4561 case kBlend_GrXferBarrierType: 4562 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport == 4563 this->caps()->blendEquationSupport()); 4564 GL_CALL(BlendBarrier()); 4565 return; 4566 default: break; // placate compiler warnings that kNone not handled 4567 } 4568 } 4569 4570 GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h, 4571 GrPixelConfig config, bool /*isRT*/) { 4572 if (!this->caps()->isConfigTexturable(config)) { 4573 return false; 4574 } 4575 GrGLTextureInfo* info = new GrGLTextureInfo; 4576 info->fTarget = GR_GL_TEXTURE_2D; 4577 info->fID = 0; 4578 GL_CALL(GenTextures(1, &info->fID)); 4579 GL_CALL(ActiveTexture(GR_GL_TEXTURE0)); 4580 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); 4581 GL_CALL(BindTexture(info->fTarget, info->fID)); 4582 fHWBoundTextureUniqueIDs[0] = 0; 4583 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST)); 4584 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST)); 4585 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE)); 4586 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE)); 4587 4588 GrGLenum internalFormat; 4589 GrGLenum externalFormat; 4590 GrGLenum externalType; 4591 4592 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, 4593 &externalType)) { 4594 delete info; 4595 #ifdef SK_IGNORE_GL_TEXTURE_TARGET 4596 return 0; 4597 #else 4598 return reinterpret_cast<GrBackendObject>(nullptr); 4599 #endif 4600 } 4601 4602 GL_CALL(TexImage2D(info->fTarget, 0, internalFormat, w, h, 0, externalFormat, 4603 externalType, pixels)); 4604 4605 #ifdef SK_IGNORE_GL_TEXTURE_TARGET 4606 GrGLuint id = info->fID; 4607 delete info; 4608 return id; 4609 #else 4610 return reinterpret_cast<GrBackendObject>(info); 4611 #endif 4612 } 4613 4614 bool GrGLGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { 4615 #ifdef SK_IGNORE_GL_TEXTURE_TARGET 4616 GrGLuint texID = (GrGLuint)id; 4617 #else 4618 GrGLuint texID = reinterpret_cast<const GrGLTextureInfo*>(id)->fID; 4619 #endif 
4620 4621 GrGLboolean result; 4622 GL_CALL_RET(result, IsTexture(texID)); 4623 4624 return (GR_GL_TRUE == result); 4625 } 4626 4627 void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) { 4628 #ifdef SK_IGNORE_GL_TEXTURE_TARGET 4629 GrGLuint texID = (GrGLuint)id; 4630 #else 4631 const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(id); 4632 GrGLuint texID = info->fID; 4633 #endif 4634 4635 if (!abandonTexture) { 4636 GL_CALL(DeleteTextures(1, &texID)); 4637 } 4638 4639 #ifndef SK_IGNORE_GL_TEXTURE_TARGET 4640 delete info; 4641 #endif 4642 } 4643 4644 void GrGLGpu::resetShaderCacheForTesting() const { 4645 fProgramCache->abandon(); 4646 } 4647 4648 /////////////////////////////////////////////////////////////////////////////// 4649 4650 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu, 4651 const GrBuffer* ibuf) { 4652 GrGLAttribArrayState* attribState; 4653 4654 if (gpu->glCaps().isCoreProfile()) { 4655 if (!fCoreProfileVertexArray) { 4656 GrGLuint arrayID; 4657 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); 4658 int attrCount = gpu->glCaps().maxVertexAttributes(); 4659 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount); 4660 } 4661 if (ibuf) { 4662 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf); 4663 } else { 4664 attribState = fCoreProfileVertexArray->bind(gpu); 4665 } 4666 } else { 4667 if (ibuf) { 4668 // bindBuffer implicitly binds VAO 0 when binding an index buffer. 4669 gpu->bindBuffer(kIndex_GrBufferType, ibuf); 4670 } else { 4671 this->setVertexArrayID(gpu, 0); 4672 } 4673 int attrCount = gpu->glCaps().maxVertexAttributes(); 4674 if (fDefaultVertexArrayAttribState.count() != attrCount) { 4675 fDefaultVertexArrayAttribState.resize(attrCount); 4676 } 4677 attribState = &fDefaultVertexArrayAttribState; 4678 } 4679 return attribState; 4680 } 4681 4682 bool GrGLGpu::onMakeCopyForTextureParams(GrTexture* texture, const GrTextureParams& textureParams, 4683 GrTextureProducer::CopyParams* copyParams) const { 4684 if (textureParams.isTiled() || 4685 GrTextureParams::kMipMap_FilterMode == textureParams.filterMode()) { 4686 GrGLTexture* glTexture = static_cast<GrGLTexture*>(texture); 4687 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() || 4688 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) { 4689 copyParams->fFilter = GrTextureParams::kNone_FilterMode; 4690 copyParams->fWidth = texture->width(); 4691 copyParams->fHeight = texture->height(); 4692 return true; 4693 } 4694 } 4695 return false; 4696 } 4697 4698 GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() const { 4699 GrGLsync fence; 4700 GL_CALL_RET(fence, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); 4701 return (GrFence)fence; 4702 } 4703 4704 bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) const { 4705 GrGLenum result; 4706 GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout)); 4707 return (GR_GL_CONDITION_SATISFIED == result); 4708 } 4709 4710 void GrGLGpu::deleteFence(GrFence fence) const { 4711 GL_CALL(DeleteSync((GrGLsync)fence)); 4712 } 4713