1 /*
2 ===========================================================================
3
4 Return to Castle Wolfenstein multiplayer GPL Source Code
5 Copyright (C) 1999-2010 id Software LLC, a ZeniMax Media company.
6
7 This file is part of the Return to Castle Wolfenstein multiplayer GPL Source Code (RTCW MP Source Code).
8
9 RTCW MP Source Code is free software: you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation, either version 3 of the License, or
12 (at your option) any later version.
13
14 RTCW MP Source Code is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with RTCW MP Source Code. If not, see <http://www.gnu.org/licenses/>.
21
22 In addition, the RTCW MP Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the RTCW MP Source Code. If not, please request a copy in writing from id Software at the address below.
23
24 If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
25
26 ===========================================================================
27 */
28
29 // tr_map.c
30
31 #include "tr_local.h"
32
33 /*
34
35 Loads and prepares a map file for scene rendering.
36
37 A single entry point:
38
39 void RE_LoadWorldMap( const char *name );
40
41 */
42
43 static world_t s_worldData;
44 static byte *fileBase;
45
46 int c_subdivisions;
47 int c_gridVerts;
48
49 //===============================================================================
50
HSVtoRGB(float h,float s,float v,float rgb[3])51 static void HSVtoRGB( float h, float s, float v, float rgb[3] ) {
52 int i;
53 float f;
54 float p, q, t;
55
56 h *= 5;
57
58 i = floor( h );
59 f = h - i;
60
61 p = v * ( 1 - s );
62 q = v * ( 1 - s * f );
63 t = v * ( 1 - s * ( 1 - f ) );
64
65 switch ( i )
66 {
67 case 0:
68 rgb[0] = v;
69 rgb[1] = t;
70 rgb[2] = p;
71 break;
72 case 1:
73 rgb[0] = q;
74 rgb[1] = v;
75 rgb[2] = p;
76 break;
77 case 2:
78 rgb[0] = p;
79 rgb[1] = v;
80 rgb[2] = t;
81 break;
82 case 3:
83 rgb[0] = p;
84 rgb[1] = q;
85 rgb[2] = v;
86 break;
87 case 4:
88 rgb[0] = t;
89 rgb[1] = p;
90 rgb[2] = v;
91 break;
92 case 5:
93 rgb[0] = v;
94 rgb[1] = p;
95 rgb[2] = q;
96 break;
97 }
98 }
99
100 /*
101 ===============
102 R_ColorShiftLightingBytes
103
104 ===============
105 */
static void R_ColorShiftLightingBytes( byte in[4], byte out[4] ) {
	int r, g, b;
	int shift;

	// overbright bits the map was lit for, minus what the display provides
	shift = r_mapOverBrightBits->integer - tr.overbrightBits;

	r = in[0] << shift;
	g = in[1] << shift;
	b = in[2] << shift;

	// if any channel overflowed 255, rescale all three by the brightest
	// component: this keeps the hue instead of saturating toward white
	if ( ( r | g | b ) > 255 ) {
		int brightest;

		brightest = r;
		if ( g > brightest ) {
			brightest = g;
		}
		if ( b > brightest ) {
			brightest = b;
		}
		r = r * 255 / brightest;
		g = g * 255 / brightest;
		b = b * 255 / brightest;
	}

	out[0] = r;
	out[1] = g;
	out[2] = b;
	out[3] = in[3];   // alpha passes through untouched
}
133
134 /*
135 ===============
136 R_LoadLightmaps
137
138 ===============
139 */
140 #define LIGHTMAP_SIZE 128
R_LoadLightmaps(lump_t * l)141 static void R_LoadLightmaps( lump_t *l ) {
142 byte *buf, *buf_p;
143 int len;
144 byte image[LIGHTMAP_SIZE * LIGHTMAP_SIZE * 4];
145 int i, j;
146 float maxIntensity = 0;
147 double sumIntensity = 0;
148
149 len = l->filelen;
150 if ( !len ) {
151 return;
152 }
153 buf = fileBase + l->fileofs;
154
155 // we are about to upload textures
156 R_IssuePendingRenderCommands();
157
158 // create all the lightmaps
159 tr.numLightmaps = len / ( LIGHTMAP_SIZE * LIGHTMAP_SIZE * 3 );
160 if ( tr.numLightmaps == 1 ) {
161 //FIXME: HACK: maps with only one lightmap turn up fullbright for some reason.
162 //this avoids this, but isn't the correct solution.
163 tr.numLightmaps++;
164 }
165
166 // if we are in r_vertexLight mode, we don't need the lightmaps at all
167 if ( r_vertexLight->integer || glConfig.hardwareType == GLHW_PERMEDIA2 ) {
168 return;
169 }
170
171 tr.lightmaps = ri.Hunk_Alloc( tr.numLightmaps * sizeof(image_t *), h_low );
172 for ( i = 0 ; i < tr.numLightmaps ; i++ ) {
173 // expand the 24 bit on-disk to 32 bit
174 buf_p = buf + i * LIGHTMAP_SIZE * LIGHTMAP_SIZE * 3;
175
176 if ( r_lightmap->integer == 2 ) { // color code by intensity as development tool (FIXME: check range)
177 for ( j = 0; j < LIGHTMAP_SIZE * LIGHTMAP_SIZE; j++ )
178 {
179 float r = buf_p[j * 3 + 0];
180 float g = buf_p[j * 3 + 1];
181 float b = buf_p[j * 3 + 2];
182 float intensity;
183 float out[3] = {0.0, 0.0, 0.0};
184
185 intensity = 0.33f * r + 0.685f * g + 0.063f * b;
186
187 if ( intensity > 255 ) {
188 intensity = 1.0f;
189 } else {
190 intensity /= 255.0f;
191 }
192
193 if ( intensity > maxIntensity ) {
194 maxIntensity = intensity;
195 }
196
197 HSVtoRGB( intensity, 1.00, 0.50, out );
198
199 image[j * 4 + 0] = out[0] * 255;
200 image[j * 4 + 1] = out[1] * 255;
201 image[j * 4 + 2] = out[2] * 255;
202 image[j * 4 + 3] = 255;
203
204 sumIntensity += intensity;
205 }
206 } else {
207 for ( j = 0 ; j < LIGHTMAP_SIZE * LIGHTMAP_SIZE; j++ ) {
208 R_ColorShiftLightingBytes( &buf_p[j * 3], &image[j * 4] );
209 image[j * 4 + 3] = 255;
210 }
211 }
212 tr.lightmaps[i] = R_CreateImage( va( "*lightmap%d",i ), image,
213 LIGHTMAP_SIZE, LIGHTMAP_SIZE, IMGTYPE_COLORALPHA,
214 IMGFLAG_NOLIGHTSCALE | IMGFLAG_NO_COMPRESSION | IMGFLAG_CLAMPTOEDGE, 0 );
215 }
216
217 if ( r_lightmap->integer == 2 ) {
218 ri.Printf( PRINT_ALL, "Brightest lightmap value: %d\n", ( int ) ( maxIntensity * 255 ) );
219 }
220 }
221
222
223 /*
224 =================
225 RE_SetWorldVisData
226
227 This is called by the clipmodel subsystem so we can share the 1.8 megs of
228 space in big maps...
229 =================
230 */
void RE_SetWorldVisData( const byte *vis ) {
	// stash the clipmodel subsystem's PVS buffer; R_LoadVisibility will
	// use it directly instead of allocating a private copy
	tr.externalVisData = vis;
}
234
235
236 /*
237 =================
238 R_LoadVisibility
239 =================
240 */
static void R_LoadVisibility( lump_t *l ) {
	int len;
	byte *buf;

	// always build an all-visible cluster vector to fall back on
	// (rounded up to a multiple of 64 bytes)
	len = ( s_worldData.numClusters + 63 ) & ~63;
	s_worldData.novis = ri.Hunk_Alloc( len, h_low );
	memset( s_worldData.novis, 0xff, len );

	// maps compiled without vis have an empty lump
	len = l->filelen;
	if ( !len ) {
		return;
	}
	buf = fileBase + l->fileofs;

	// the lump starts with two ints: cluster count and bytes per cluster
	s_worldData.numClusters = LittleLong( ( (int *)buf )[0] );
	s_worldData.clusterBytes = LittleLong( ( (int *)buf )[1] );

	// CM_Load should have given us the vis data to share, so
	// we don't need to allocate another copy
	if ( tr.externalVisData ) {
		s_worldData.vis = tr.externalVisData;
	} else {
		byte *dest;

		// copy everything after the 8 byte header
		dest = ri.Hunk_Alloc( len - 8, h_low );
		memcpy( dest, buf + 8, len - 8 );
		s_worldData.vis = dest;
	}
}
270
271 //===============================================================================
272
273
274 /*
275 ===============
276 ShaderForShaderNum
277 ===============
278 */
ShaderForShaderNum(int shaderNum,int lightmapNum)279 static shader_t *ShaderForShaderNum( int shaderNum, int lightmapNum ) {
280 shader_t *shader;
281 dshader_t *dsh;
282
283 int _shaderNum = LittleLong( shaderNum );
284 if ( _shaderNum < 0 || _shaderNum >= s_worldData.numShaders ) {
285 ri.Error( ERR_DROP, "ShaderForShaderNum: bad num %i", _shaderNum );
286 }
287 dsh = &s_worldData.shaders[ _shaderNum ];
288
289 if ( r_vertexLight->integer || glConfig.hardwareType == GLHW_PERMEDIA2 ) {
290 lightmapNum = LIGHTMAP_BY_VERTEX;
291 }
292
293 // JPW NERVE removed per atvi request
294
295 if ( r_fullbright->integer ) {
296 lightmapNum = LIGHTMAP_WHITEIMAGE;
297 }
298
299 shader = R_FindShader( dsh->shader, lightmapNum, qtrue );
300
301 // if the shader had errors, just use default shader
302 if ( shader->defaultShader ) {
303 return tr.defaultShader;
304 }
305
306 return shader;
307 }
308
// Ridah, optimizations here
// memory block for use by surfaces: surfaces are carved out of
// fixed-size hunk chunks instead of one Hunk_Alloc per surface
static byte *surfHunkPtr;       // next free byte in the active chunk
static int surfHunkSize;        // bytes consumed from the active chunk
#define SURF_HUNK_MAXSIZE 0x40000   // 256K per chunk
#define LL( x ) LittleLong( x )
315
316 /*
317 ==============
318 R_InitSurfMemory
319 ==============
320 */
void R_InitSurfMemory( void ) {
	// allocate a new chunk; the previous chunk (if any) stays on the hunk
	// and is simply retired
	surfHunkPtr = ri.Hunk_Alloc( SURF_HUNK_MAXSIZE, h_low );
	surfHunkSize = 0;
}
326
327 /*
328 ==============
329 R_GetSurfMemory
330 ==============
331 */
R_GetSurfMemory(int size)332 void *R_GetSurfMemory( int size ) {
333 byte *retval;
334
335 // round to cacheline
336 size = ( size + 31 ) & ~31;
337
338 surfHunkSize += size;
339 if ( surfHunkSize >= SURF_HUNK_MAXSIZE ) {
340 // allocate a new chunk
341 R_InitSurfMemory();
342 surfHunkSize += size; // since it just got reset
343 }
344 retval = surfHunkPtr;
345 surfHunkPtr += size;
346
347 return (void *)retval;
348 }
349
350 /*
351 ===============
352 ParseFace
353 ===============
354 */
static void ParseFace( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) {
	int i, j;
	srfSurfaceFace_t *cv;
	int numPoints, numIndexes;
	int lightmapNum;
	int sfaceSize, ofsIndexes;

	// all on-disk fields are little endian and byte-swapped on read
	lightmapNum = LittleLong( ds->lightmapNum );

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader value
	surf->shader = ShaderForShaderNum( ds->shaderNum, lightmapNum );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	numPoints = LittleLong( ds->numVerts );
	if ( numPoints > MAX_FACE_POINTS ) {
		// oversized faces are truncated and flagged with the default shader
		ri.Printf( PRINT_WARNING, "WARNING: MAX_FACE_POINTS exceeded: %i\n", numPoints );
		numPoints = MAX_FACE_POINTS;
		surf->shader = tr.defaultShader;
	}

	numIndexes = LittleLong( ds->numIndexes );

	// create the srfSurfaceFace_t
	// the vertex array is a variable-size tail, and the index list is
	// packed immediately behind it at ofsIndexes
	sfaceSize = offsetof( srfSurfaceFace_t, points ) + sizeof( *cv->points ) * numPoints;
	ofsIndexes = sfaceSize;
	sfaceSize += sizeof( int ) * numIndexes;

	//cv = ri.Hunk_Alloc( sfaceSize );
	cv = R_GetSurfMemory( sfaceSize );

	cv->surfaceType = SF_FACE;
	cv->numPoints = numPoints;
	cv->numIndices = numIndexes;
	cv->ofsIndices = ofsIndexes;

	verts += LittleLong( ds->firstVert );
	for ( i = 0 ; i < numPoints ; i++ ) {
		// each point packs xyz (0-2), st (3-4), lightmap st (5-6)
		for ( j = 0 ; j < 3 ; j++ ) {
			cv->points[i][j] = LittleFloat( verts[i].xyz[j] );
		}
		for ( j = 0 ; j < 2 ; j++ ) {
			cv->points[i][3 + j] = LittleFloat( verts[i].st[j] );
			cv->points[i][5 + j] = LittleFloat( verts[i].lightmap[j] );
		}
		// vertex color is stored as 4 bytes aliased into float slot 7
		R_ColorShiftLightingBytes( verts[i].color, (byte *)&cv->points[i][7] );
	}

	indexes += LittleLong( ds->firstIndex );
	for ( i = 0 ; i < numIndexes ; i++ ) {
		( ( int * )( (byte *)cv + cv->ofsIndices ) )[i] = LittleLong( indexes[ i ] );
	}

	// take the plane information from the lightmap vector
	for ( i = 0 ; i < 3 ; i++ ) {
		cv->plane.normal[i] = LittleFloat( ds->lightmapVecs[2][i] );
	}
	cv->plane.dist = DotProduct( cv->points[0], cv->plane.normal );
	SetPlaneSignbits( &cv->plane );
	cv->plane.type = PlaneTypeForNormal( cv->plane.normal );

	surf->data = (surfaceType_t *)cv;
}
422
423
424 /*
425 ===============
426 ParseMesh
427 ===============
428 */
static void ParseMesh( dsurface_t *ds, drawVert_t *verts, msurface_t *surf ) {
	srfGridMesh_t *grid;
	int i, j;
	int width, height, numPoints;
	drawVert_t points[MAX_PATCH_SIZE * MAX_PATCH_SIZE];
	int lightmapNum;
	vec3_t bounds[2];
	vec3_t tmpVec;
	static surfaceType_t skipData = SF_SKIP;

	lightmapNum = LittleLong( ds->lightmapNum );

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader value
	surf->shader = ShaderForShaderNum( ds->shaderNum, lightmapNum );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	// we may have a nodraw surface, because they might still need to
	// be around for movement clipping
	if ( s_worldData.shaders[ LittleLong( ds->shaderNum ) ].surfaceFlags & SURF_NODRAW ) {
		surf->data = &skipData;
		return;
	}

	width = LittleLong( ds->patchWidth );
	height = LittleLong( ds->patchHeight );

	// byte-swap the control points into a local buffer
	verts += LittleLong( ds->firstVert );
	numPoints = width * height;
	for ( i = 0 ; i < numPoints ; i++ ) {
		for ( j = 0 ; j < 3 ; j++ ) {
			points[i].xyz[j] = LittleFloat( verts[i].xyz[j] );
			points[i].normal[j] = LittleFloat( verts[i].normal[j] );
		}
		for ( j = 0 ; j < 2 ; j++ ) {
			points[i].st[j] = LittleFloat( verts[i].st[j] );
			points[i].lightmap[j] = LittleFloat( verts[i].lightmap[j] );
		}
		R_ColorShiftLightingBytes( verts[i].color, points[i].color );
	}

	// pre-tessellate
	grid = R_SubdividePatchToGrid( width, height, points );
	surf->data = (surfaceType_t *)grid;

	// copy the level of detail origin, which is the center
	// of the group of all curves that must subdivide the same
	// to avoid cracking
	for ( i = 0 ; i < 3 ; i++ ) {
		bounds[0][i] = LittleFloat( ds->lightmapVecs[0][i] );
		bounds[1][i] = LittleFloat( ds->lightmapVecs[1][i] );
	}
	VectorAdd( bounds[0], bounds[1], bounds[1] );
	VectorScale( bounds[1], 0.5f, grid->lodOrigin );
	VectorSubtract( bounds[0], grid->lodOrigin, tmpVec );
	grid->lodRadius = VectorLength( tmpVec );
}
490
491 /*
492 ===============
493 ParseTriSurf
494 ===============
495 */
static void ParseTriSurf( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) {
	srfTriangles_t *tri;
	int i, j;
	int numVerts, numIndexes;

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader - triangle soups are always vertex lit
	surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	numVerts = LittleLong( ds->numVerts );
	numIndexes = LittleLong( ds->numIndexes );

	// one allocation holds the header, vertex array, and index array
	//tri = ri.Hunk_Alloc( sizeof( *tri ) + numVerts * sizeof( tri->verts[0] )
	//	+ numIndexes * sizeof( tri->indexes[0] ) );
	tri = R_GetSurfMemory( sizeof( *tri ) + numVerts * sizeof( tri->verts[0] )
						   + numIndexes * sizeof( tri->indexes[0] ) );

	tri->surfaceType = SF_TRIANGLES;
	tri->numVerts = numVerts;
	tri->numIndexes = numIndexes;
	tri->verts = ( drawVert_t * )( tri + 1 );
	tri->indexes = ( int * )( tri->verts + tri->numVerts );

	surf->data = (surfaceType_t *)tri;

	// copy vertexes, byte-swapping and accumulating the bounding box
	ClearBounds( tri->bounds[0], tri->bounds[1] );
	verts += LittleLong( ds->firstVert );
	for ( i = 0 ; i < numVerts ; i++ ) {
		for ( j = 0 ; j < 3 ; j++ ) {
			tri->verts[i].xyz[j] = LittleFloat( verts[i].xyz[j] );
			tri->verts[i].normal[j] = LittleFloat( verts[i].normal[j] );
		}
		AddPointToBounds( tri->verts[i].xyz, tri->bounds[0], tri->bounds[1] );
		for ( j = 0 ; j < 2 ; j++ ) {
			tri->verts[i].st[j] = LittleFloat( verts[i].st[j] );
			tri->verts[i].lightmap[j] = LittleFloat( verts[i].lightmap[j] );
		}

		R_ColorShiftLightingBytes( verts[i].color, tri->verts[i].color );
	}

	// copy indexes, validating each against the vertex count
	indexes += LittleLong( ds->firstIndex );
	for ( i = 0 ; i < numIndexes ; i++ ) {
		tri->indexes[i] = LittleLong( indexes[i] );
		if ( tri->indexes[i] < 0 || tri->indexes[i] >= numVerts ) {
			ri.Error( ERR_DROP, "Bad index in triangle surface" );
		}
	}
}
552
553 /*
554 ===============
555 ParseFlare
556 ===============
557 */
static void ParseFlare( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) {
	srfFlare_t *flare;
	int axis;

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader
	surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	flare = ri.Hunk_Alloc( sizeof( *flare ), h_low );
	flare->surfaceType = SF_FLARE;
	surf->data = (surfaceType_t *)flare;

	// origin, color and normal are packed into the lightmap fields on disk
	for ( axis = 0 ; axis < 3 ; axis++ ) {
		flare->origin[axis] = LittleFloat( ds->lightmapOrigin[axis] );
		flare->color[axis] = LittleFloat( ds->lightmapVecs[0][axis] );
		flare->normal[axis] = LittleFloat( ds->lightmapVecs[2][axis] );
	}
}
582
583
584 /*
585 =================
586 R_MergedWidthPoints
587
588 returns true if there are grid points merged on a width edge
589 =================
590 */
int R_MergedWidthPoints( srfGridMesh_t *grid, int offset ) {
	int a, b;

	// scan every pair of interior points along the width edge at 'offset'
	// and report whether any two coincide (within a 0.1 tolerance per axis)
	for ( a = 1; a < grid->width - 1; a++ ) {
		for ( b = a + 1; b < grid->width - 1; b++ ) {
			if ( fabs( grid->verts[a + offset].xyz[0] - grid->verts[b + offset].xyz[0] ) <= .1 &&
				 fabs( grid->verts[a + offset].xyz[1] - grid->verts[b + offset].xyz[1] ) <= .1 &&
				 fabs( grid->verts[a + offset].xyz[2] - grid->verts[b + offset].xyz[2] ) <= .1 ) {
				return qtrue;
			}
		}
	}
	return qfalse;
}
610
611 /*
612 =================
613 R_MergedHeightPoints
614
615 returns true if there are grid points merged on a height edge
616 =================
617 */
int R_MergedHeightPoints( srfGridMesh_t *grid, int offset ) {
	int a, b;

	// scan every pair of interior points along the height edge at 'offset'
	// (stepping a full row per point) and report whether any two coincide
	// within a 0.1 tolerance per axis
	for ( a = 1; a < grid->height - 1; a++ ) {
		for ( b = a + 1; b < grid->height - 1; b++ ) {
			if ( fabs( grid->verts[grid->width * a + offset].xyz[0] - grid->verts[grid->width * b + offset].xyz[0] ) <= .1 &&
				 fabs( grid->verts[grid->width * a + offset].xyz[1] - grid->verts[grid->width * b + offset].xyz[1] ) <= .1 &&
				 fabs( grid->verts[grid->width * a + offset].xyz[2] - grid->verts[grid->width * b + offset].xyz[2] ) <= .1 ) {
				return qtrue;
			}
		}
	}
	return qfalse;
}
637
638 /*
639 =================
640 R_FixSharedVertexLodError_r
641
642 NOTE: never sync LoD through grid edges with merged points!
643
644 FIXME: write generalized version that also avoids cracks between a patch and one that meets half way?
645 =================
646 */
void R_FixSharedVertexLodError_r( int start, srfGridMesh_t *grid1 ) {
	int j, k, l, m, n, offset1, offset2, touch;
	srfGridMesh_t *grid2;

	// propagate grid1's edge LOD errors to every other grid in the same
	// LOD group (same lodOrigin/lodRadius) that shares edge points with it
	for ( j = start; j < s_worldData.numsurfaces; j++ ) {
		//
		grid2 = (srfGridMesh_t *) s_worldData.surfaces[j].data;
		// if this surface is not a grid
		if ( grid2->surfaceType != SF_GRID ) {
			continue;
		}
		// if the LOD errors are already fixed for this patch
		if ( grid2->lodFixed == 2 ) {
			continue;
		}
		// grids in the same LOD group should have the exact same lod radius
		if ( grid1->lodRadius != grid2->lodRadius ) {
			continue;
		}
		// grids in the same LOD group should have the exact same lod origin
		if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) {
			continue;
		}
		if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) {
			continue;
		}
		if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) {
			continue;
		}
		//
		touch = qfalse;
		// pass 1: compare grid1's two width edges (n selects top/bottom row)
		// against both width and height edges of grid2
		for ( n = 0; n < 2; n++ ) {
			//
			if ( n ) {
				offset1 = ( grid1->height - 1 ) * grid1->width;
			} else { offset1 = 0;}
			// never sync LoD through edges with merged points
			if ( R_MergedWidthPoints( grid1, offset1 ) ) {
				continue;
			}
			for ( k = 1; k < grid1->width - 1; k++ ) {
				// grid2's width edges (m selects top/bottom row)
				for ( m = 0; m < 2; m++ ) {

					if ( m ) {
						offset2 = ( grid2->height - 1 ) * grid2->width;
					} else { offset2 = 0;}
					if ( R_MergedWidthPoints( grid2, offset2 ) ) {
						continue;
					}
					for ( l = 1; l < grid2->width - 1; l++ ) {
						//
						if ( fabs( grid1->verts[k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2] ) > .1 ) {
							continue;
						}
						// ok the points are equal and should have the same lod error
						grid2->widthLodError[l] = grid1->widthLodError[k];
						touch = qtrue;
					}
				}
				// grid2's height edges (m selects left/right column)
				for ( m = 0; m < 2; m++ ) {

					if ( m ) {
						offset2 = grid2->width - 1;
					} else { offset2 = 0;}
					if ( R_MergedHeightPoints( grid2, offset2 ) ) {
						continue;
					}
					for ( l = 1; l < grid2->height - 1; l++ ) {
						//
						if ( fabs( grid1->verts[k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[k + offset1].xyz[2] - grid2->verts[grid2->width * l + offset2].xyz[2] ) > .1 ) {
							continue;
						}
						// ok the points are equal and should have the same lod error
						grid2->heightLodError[l] = grid1->widthLodError[k];
						touch = qtrue;
					}
				}
			}
		}
		// pass 2: compare grid1's two height edges (n selects left/right
		// column) against both width and height edges of grid2
		for ( n = 0; n < 2; n++ ) {
			//
			if ( n ) {
				offset1 = grid1->width - 1;
			} else { offset1 = 0;}
			if ( R_MergedHeightPoints( grid1, offset1 ) ) {
				continue;
			}
			for ( k = 1; k < grid1->height - 1; k++ ) {
				for ( m = 0; m < 2; m++ ) {

					if ( m ) {
						offset2 = ( grid2->height - 1 ) * grid2->width;
					} else { offset2 = 0;}
					if ( R_MergedWidthPoints( grid2, offset2 ) ) {
						continue;
					}
					for ( l = 1; l < grid2->width - 1; l++ ) {
						//
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2] ) > .1 ) {
							continue;
						}
						// ok the points are equal and should have the same lod error
						grid2->widthLodError[l] = grid1->heightLodError[k];
						touch = qtrue;
					}
				}
				for ( m = 0; m < 2; m++ ) {

					if ( m ) {
						offset2 = grid2->width - 1;
					} else { offset2 = 0;}
					if ( R_MergedHeightPoints( grid2, offset2 ) ) {
						continue;
					}
					for ( l = 1; l < grid2->height - 1; l++ ) {
						//
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1] ) > .1 ) {
							continue;
						}
						if ( fabs( grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[grid2->width * l + offset2].xyz[2] ) > .1 ) {
							continue;
						}
						// ok the points are equal and should have the same lod error
						grid2->heightLodError[l] = grid1->heightLodError[k];
						touch = qtrue;
					}
				}
			}
		}
		if ( touch ) {
			// mark grid2 as done and continue the flood through its edges
			grid2->lodFixed = 2;
			R_FixSharedVertexLodError_r( start, grid2 );
			//NOTE: this would be correct but makes things really slow
			//grid2->lodFixed = 1;
		}
	}
}
804
805 /*
806 =================
807 R_FixSharedVertexLodError
808
809 This function assumes that all patches in one group are nicely stitched together for the highest LoD.
810 If this is not the case this function will still do its job but won't fix the highest LoD cracks.
811 =================
812 */
R_FixSharedVertexLodError(void)813 void R_FixSharedVertexLodError( void ) {
814 int i;
815 srfGridMesh_t *grid1;
816
817 for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
818 //
819 grid1 = (srfGridMesh_t *) s_worldData.surfaces[i].data;
820 // if this surface is not a grid
821 if ( grid1->surfaceType != SF_GRID ) {
822 continue;
823 }
824 //
825 if ( grid1->lodFixed ) {
826 continue;
827 }
828 //
829 grid1->lodFixed = 2;
830 // recursively fix other patches in the same LOD group
831 R_FixSharedVertexLodError_r( i + 1, grid1 );
832 }
833 }
834
835
836 /*
837 ===============
838 R_StitchPatches
839 ===============
840 */
R_StitchPatches(int grid1num,int grid2num)841 int R_StitchPatches( int grid1num, int grid2num ) {
842 int k, l, m, n, offset1, offset2, row, column;
843 srfGridMesh_t *grid1, *grid2;
844 float *v1, *v2;
845
846 grid1 = (srfGridMesh_t *) s_worldData.surfaces[grid1num].data;
847 grid2 = (srfGridMesh_t *) s_worldData.surfaces[grid2num].data;
848 for ( n = 0; n < 2; n++ ) {
849 //
850 if ( n ) {
851 offset1 = ( grid1->height - 1 ) * grid1->width;
852 } else { offset1 = 0;}
853 if ( R_MergedWidthPoints( grid1, offset1 ) ) {
854 continue;
855 }
856 for ( k = 0; k < grid1->width - 2; k += 2 ) {
857
858 for ( m = 0; m < 2; m++ ) {
859
860 if ( grid2->width >= MAX_GRID_SIZE ) {
861 break;
862 }
863 if ( m ) {
864 offset2 = ( grid2->height - 1 ) * grid2->width;
865 } else { offset2 = 0;}
866 //if (R_MergedWidthPoints(grid2, offset2))
867 // continue;
868 for ( l = 0; l < grid2->width - 1; l++ ) {
869 //
870 v1 = grid1->verts[k + offset1].xyz;
871 v2 = grid2->verts[l + offset2].xyz;
872 if ( fabs( v1[0] - v2[0] ) > .1 ) {
873 continue;
874 }
875 if ( fabs( v1[1] - v2[1] ) > .1 ) {
876 continue;
877 }
878 if ( fabs( v1[2] - v2[2] ) > .1 ) {
879 continue;
880 }
881
882 v1 = grid1->verts[k + 2 + offset1].xyz;
883 v2 = grid2->verts[l + 1 + offset2].xyz;
884 if ( fabs( v1[0] - v2[0] ) > .1 ) {
885 continue;
886 }
887 if ( fabs( v1[1] - v2[1] ) > .1 ) {
888 continue;
889 }
890 if ( fabs( v1[2] - v2[2] ) > .1 ) {
891 continue;
892 }
893 //
894 v1 = grid2->verts[l + offset2].xyz;
895 v2 = grid2->verts[l + 1 + offset2].xyz;
896 if ( fabs( v1[0] - v2[0] ) < .01 &&
897 fabs( v1[1] - v2[1] ) < .01 &&
898 fabs( v1[2] - v2[2] ) < .01 ) {
899 continue;
900 }
901 //
902 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
903 // insert column into grid2 right after after column l
904 if ( m ) {
905 row = grid2->height - 1;
906 } else { row = 0;}
907 grid2 = R_GridInsertColumn( grid2, l + 1, row,
908 grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k + 1] );
909 grid2->lodStitched = qfalse;
910 s_worldData.surfaces[grid2num].data = (void *) grid2;
911 return qtrue;
912 }
913 }
914 for ( m = 0; m < 2; m++ ) {
915
916 if ( grid2->height >= MAX_GRID_SIZE ) {
917 break;
918 }
919 if ( m ) {
920 offset2 = grid2->width - 1;
921 } else { offset2 = 0;}
922 //if (R_MergedHeightPoints(grid2, offset2))
923 // continue;
924 for ( l = 0; l < grid2->height - 1; l++ ) {
925 //
926 v1 = grid1->verts[k + offset1].xyz;
927 v2 = grid2->verts[grid2->width * l + offset2].xyz;
928 if ( fabs( v1[0] - v2[0] ) > .1 ) {
929 continue;
930 }
931 if ( fabs( v1[1] - v2[1] ) > .1 ) {
932 continue;
933 }
934 if ( fabs( v1[2] - v2[2] ) > .1 ) {
935 continue;
936 }
937
938 v1 = grid1->verts[k + 2 + offset1].xyz;
939 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
940 if ( fabs( v1[0] - v2[0] ) > .1 ) {
941 continue;
942 }
943 if ( fabs( v1[1] - v2[1] ) > .1 ) {
944 continue;
945 }
946 if ( fabs( v1[2] - v2[2] ) > .1 ) {
947 continue;
948 }
949 //
950 v1 = grid2->verts[grid2->width * l + offset2].xyz;
951 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
952 if ( fabs( v1[0] - v2[0] ) < .01 &&
953 fabs( v1[1] - v2[1] ) < .01 &&
954 fabs( v1[2] - v2[2] ) < .01 ) {
955 continue;
956 }
957 //
958 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
959 // insert row into grid2 right after after row l
960 if ( m ) {
961 column = grid2->width - 1;
962 } else { column = 0;}
963 grid2 = R_GridInsertRow( grid2, l + 1, column,
964 grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k + 1] );
965 grid2->lodStitched = qfalse;
966 s_worldData.surfaces[grid2num].data = (void *) grid2;
967 return qtrue;
968 }
969 }
970 }
971 }
972 for ( n = 0; n < 2; n++ ) {
973 //
974 if ( n ) {
975 offset1 = grid1->width - 1;
976 } else { offset1 = 0;}
977 if ( R_MergedHeightPoints( grid1, offset1 ) ) {
978 continue;
979 }
980 for ( k = 0; k < grid1->height - 2; k += 2 ) {
981 for ( m = 0; m < 2; m++ ) {
982
983 if ( grid2->width >= MAX_GRID_SIZE ) {
984 break;
985 }
986 if ( m ) {
987 offset2 = ( grid2->height - 1 ) * grid2->width;
988 } else { offset2 = 0;}
989 //if (R_MergedWidthPoints(grid2, offset2))
990 // continue;
991 for ( l = 0; l < grid2->width - 1; l++ ) {
992 //
993 v1 = grid1->verts[grid1->width * k + offset1].xyz;
994 v2 = grid2->verts[l + offset2].xyz;
995 if ( fabs( v1[0] - v2[0] ) > .1 ) {
996 continue;
997 }
998 if ( fabs( v1[1] - v2[1] ) > .1 ) {
999 continue;
1000 }
1001 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1002 continue;
1003 }
1004
1005 v1 = grid1->verts[grid1->width * ( k + 2 ) + offset1].xyz;
1006 v2 = grid2->verts[l + 1 + offset2].xyz;
1007 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1008 continue;
1009 }
1010 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1011 continue;
1012 }
1013 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1014 continue;
1015 }
1016 //
1017 v1 = grid2->verts[l + offset2].xyz;
1018 v2 = grid2->verts[( l + 1 ) + offset2].xyz;
1019 if ( fabs( v1[0] - v2[0] ) < .01 &&
1020 fabs( v1[1] - v2[1] ) < .01 &&
1021 fabs( v1[2] - v2[2] ) < .01 ) {
1022 continue;
1023 }
1024 //
1025 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1026 // insert column into grid2 right after after column l
1027 if ( m ) {
1028 row = grid2->height - 1;
1029 } else { row = 0;}
1030 grid2 = R_GridInsertColumn( grid2, l + 1, row,
1031 grid1->verts[grid1->width * ( k + 1 ) + offset1].xyz, grid1->heightLodError[k + 1] );
1032 grid2->lodStitched = qfalse;
1033 s_worldData.surfaces[grid2num].data = (void *) grid2;
1034 return qtrue;
1035 }
1036 }
1037 for ( m = 0; m < 2; m++ ) {
1038
1039 if ( grid2->height >= MAX_GRID_SIZE ) {
1040 break;
1041 }
1042 if ( m ) {
1043 offset2 = grid2->width - 1;
1044 } else { offset2 = 0;}
1045 //if (R_MergedHeightPoints(grid2, offset2))
1046 // continue;
1047 for ( l = 0; l < grid2->height - 1; l++ ) {
1048 //
1049 v1 = grid1->verts[grid1->width * k + offset1].xyz;
1050 v2 = grid2->verts[grid2->width * l + offset2].xyz;
1051 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1052 continue;
1053 }
1054 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1055 continue;
1056 }
1057 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1058 continue;
1059 }
1060
1061 v1 = grid1->verts[grid1->width * ( k + 2 ) + offset1].xyz;
1062 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1063 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1064 continue;
1065 }
1066 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1067 continue;
1068 }
1069 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1070 continue;
1071 }
1072 //
1073 v1 = grid2->verts[grid2->width * l + offset2].xyz;
1074 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1075 if ( fabs( v1[0] - v2[0] ) < .01 &&
1076 fabs( v1[1] - v2[1] ) < .01 &&
1077 fabs( v1[2] - v2[2] ) < .01 ) {
1078 continue;
1079 }
1080 //
1081 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1082 // insert row into grid2 right after after row l
1083 if ( m ) {
1084 column = grid2->width - 1;
1085 } else { column = 0;}
1086 grid2 = R_GridInsertRow( grid2, l + 1, column,
1087 grid1->verts[grid1->width * ( k + 1 ) + offset1].xyz, grid1->heightLodError[k + 1] );
1088 grid2->lodStitched = qfalse;
1089 s_worldData.surfaces[grid2num].data = (void *) grid2;
1090 return qtrue;
1091 }
1092 }
1093 }
1094 }
1095 for ( n = 0; n < 2; n++ ) {
1096 //
1097 if ( n ) {
1098 offset1 = ( grid1->height - 1 ) * grid1->width;
1099 } else { offset1 = 0;}
1100 if ( R_MergedWidthPoints( grid1, offset1 ) ) {
1101 continue;
1102 }
1103 for ( k = grid1->width - 1; k > 1; k -= 2 ) {
1104
1105 for ( m = 0; m < 2; m++ ) {
1106
1107 if ( !grid2 || grid2->width >= MAX_GRID_SIZE ) {
1108 break;
1109 }
1110 if ( m ) {
1111 offset2 = ( grid2->height - 1 ) * grid2->width;
1112 } else { offset2 = 0;}
1113 //if (R_MergedWidthPoints(grid2, offset2))
1114 // continue;
1115 for ( l = 0; l < grid2->width - 1; l++ ) {
1116 //
1117 v1 = grid1->verts[k + offset1].xyz;
1118 v2 = grid2->verts[l + offset2].xyz;
1119 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1120 continue;
1121 }
1122 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1123 continue;
1124 }
1125 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1126 continue;
1127 }
1128
1129 v1 = grid1->verts[k - 2 + offset1].xyz;
1130 v2 = grid2->verts[l + 1 + offset2].xyz;
1131 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1132 continue;
1133 }
1134 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1135 continue;
1136 }
1137 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1138 continue;
1139 }
1140 //
1141 v1 = grid2->verts[l + offset2].xyz;
1142 v2 = grid2->verts[( l + 1 ) + offset2].xyz;
1143 if ( fabs( v1[0] - v2[0] ) < .01 &&
1144 fabs( v1[1] - v2[1] ) < .01 &&
1145 fabs( v1[2] - v2[2] ) < .01 ) {
1146 continue;
1147 }
1148 //
1149 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1150 // insert column into grid2 right after after column l
1151 if ( m ) {
1152 row = grid2->height - 1;
1153 } else { row = 0;}
1154 grid2 = R_GridInsertColumn( grid2, l + 1, row,
1155 grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k + 1] );
1156 grid2->lodStitched = qfalse;
1157 s_worldData.surfaces[grid2num].data = (void *) grid2;
1158 return qtrue;
1159 }
1160 }
1161 for ( m = 0; m < 2; m++ ) {
1162
1163 if ( !grid2 || grid2->height >= MAX_GRID_SIZE ) {
1164 break;
1165 }
1166 if ( m ) {
1167 offset2 = grid2->width - 1;
1168 } else { offset2 = 0;}
1169 //if (R_MergedHeightPoints(grid2, offset2))
1170 // continue;
1171 for ( l = 0; l < grid2->height - 1; l++ ) {
1172 //
1173 v1 = grid1->verts[k + offset1].xyz;
1174 v2 = grid2->verts[grid2->width * l + offset2].xyz;
1175 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1176 continue;
1177 }
1178 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1179 continue;
1180 }
1181 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1182 continue;
1183 }
1184
1185 v1 = grid1->verts[k - 2 + offset1].xyz;
1186 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1187 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1188 continue;
1189 }
1190 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1191 continue;
1192 }
1193 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1194 continue;
1195 }
1196 //
1197 v1 = grid2->verts[grid2->width * l + offset2].xyz;
1198 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1199 if ( fabs( v1[0] - v2[0] ) < .01 &&
1200 fabs( v1[1] - v2[1] ) < .01 &&
1201 fabs( v1[2] - v2[2] ) < .01 ) {
1202 continue;
1203 }
1204 //
1205 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1206 // insert row into grid2 right after after row l
1207 if ( m ) {
1208 column = grid2->width - 1;
1209 } else { column = 0;}
1210 grid2 = R_GridInsertRow( grid2, l + 1, column,
1211 grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k + 1] );
1212 if ( !grid2 ) {
1213 break;
1214 }
1215 grid2->lodStitched = qfalse;
1216 s_worldData.surfaces[grid2num].data = (void *) grid2;
1217 return qtrue;
1218 }
1219 }
1220 }
1221 }
1222 for ( n = 0; n < 2; n++ ) {
1223 //
1224 if ( n ) {
1225 offset1 = grid1->width - 1;
1226 } else { offset1 = 0;}
1227 if ( R_MergedHeightPoints( grid1, offset1 ) ) {
1228 continue;
1229 }
1230 for ( k = grid1->height - 1; k > 1; k -= 2 ) {
1231 for ( m = 0; m < 2; m++ ) {
1232
1233 if ( !grid2 || grid2->width >= MAX_GRID_SIZE ) {
1234 break;
1235 }
1236 if ( m ) {
1237 offset2 = ( grid2->height - 1 ) * grid2->width;
1238 } else { offset2 = 0;}
1239 //if (R_MergedWidthPoints(grid2, offset2))
1240 // continue;
1241 for ( l = 0; l < grid2->width - 1; l++ ) {
1242 //
1243 v1 = grid1->verts[grid1->width * k + offset1].xyz;
1244 v2 = grid2->verts[l + offset2].xyz;
1245 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1246 continue;
1247 }
1248 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1249 continue;
1250 }
1251 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1252 continue;
1253 }
1254
1255 v1 = grid1->verts[grid1->width * ( k - 2 ) + offset1].xyz;
1256 v2 = grid2->verts[l + 1 + offset2].xyz;
1257 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1258 continue;
1259 }
1260 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1261 continue;
1262 }
1263 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1264 continue;
1265 }
1266 //
1267 v1 = grid2->verts[l + offset2].xyz;
1268 v2 = grid2->verts[( l + 1 ) + offset2].xyz;
1269 if ( fabs( v1[0] - v2[0] ) < .01 &&
1270 fabs( v1[1] - v2[1] ) < .01 &&
1271 fabs( v1[2] - v2[2] ) < .01 ) {
1272 continue;
1273 }
1274 //
1275 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1276 // insert column into grid2 right after after column l
1277 if ( m ) {
1278 row = grid2->height - 1;
1279 } else { row = 0;}
1280 grid2 = R_GridInsertColumn( grid2, l + 1, row,
1281 grid1->verts[grid1->width * ( k - 1 ) + offset1].xyz, grid1->heightLodError[k + 1] );
1282 grid2->lodStitched = qfalse;
1283 s_worldData.surfaces[grid2num].data = (void *) grid2;
1284 return qtrue;
1285 }
1286 }
1287 for ( m = 0; m < 2; m++ ) {
1288
1289 if ( !grid2 || grid2->height >= MAX_GRID_SIZE ) {
1290 break;
1291 }
1292 if ( m ) {
1293 offset2 = grid2->width - 1;
1294 } else { offset2 = 0;}
1295 //if (R_MergedHeightPoints(grid2, offset2))
1296 // continue;
1297 for ( l = 0; l < grid2->height - 1; l++ ) {
1298 //
1299 v1 = grid1->verts[grid1->width * k + offset1].xyz;
1300 v2 = grid2->verts[grid2->width * l + offset2].xyz;
1301 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1302 continue;
1303 }
1304 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1305 continue;
1306 }
1307 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1308 continue;
1309 }
1310
1311 v1 = grid1->verts[grid1->width * ( k - 2 ) + offset1].xyz;
1312 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1313 if ( fabs( v1[0] - v2[0] ) > .1 ) {
1314 continue;
1315 }
1316 if ( fabs( v1[1] - v2[1] ) > .1 ) {
1317 continue;
1318 }
1319 if ( fabs( v1[2] - v2[2] ) > .1 ) {
1320 continue;
1321 }
1322 //
1323 v1 = grid2->verts[grid2->width * l + offset2].xyz;
1324 v2 = grid2->verts[grid2->width * ( l + 1 ) + offset2].xyz;
1325 if ( fabs( v1[0] - v2[0] ) < .01 &&
1326 fabs( v1[1] - v2[1] ) < .01 &&
1327 fabs( v1[2] - v2[2] ) < .01 ) {
1328 continue;
1329 }
1330 //
1331 //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" );
1332 // insert row into grid2 right after after row l
1333 if ( m ) {
1334 column = grid2->width - 1;
1335 } else { column = 0;}
1336 grid2 = R_GridInsertRow( grid2, l + 1, column,
1337 grid1->verts[grid1->width * ( k - 1 ) + offset1].xyz, grid1->heightLodError[k + 1] );
1338 grid2->lodStitched = qfalse;
1339 s_worldData.surfaces[grid2num].data = (void *) grid2;
1340 return qtrue;
1341 }
1342 }
1343 }
1344 }
1345 return qfalse;
1346 }
1347
1348 /*
1349 ===============
1350 R_TryStitchPatch
1351
1352 This function will try to stitch patches in the same LoD group together for the highest LoD.
1353
1354 Only single missing vertice cracks will be fixed.
1355
1356 Vertices will be joined at the patch side a crack is first found, at the other side
1357 of the patch (on the same row or column) the vertices will not be joined and cracks
1358 might still appear at that side.
1359 ===============
1360 */
R_TryStitchingPatch(int grid1num)1361 int R_TryStitchingPatch( int grid1num ) {
1362 int j, numstitches;
1363 srfGridMesh_t *grid1, *grid2;
1364
1365 numstitches = 0;
1366 grid1 = (srfGridMesh_t *) s_worldData.surfaces[grid1num].data;
1367 for ( j = 0; j < s_worldData.numsurfaces; j++ ) {
1368 //
1369 grid2 = (srfGridMesh_t *) s_worldData.surfaces[j].data;
1370 // if this surface is not a grid
1371 if ( grid2->surfaceType != SF_GRID ) {
1372 continue;
1373 }
1374 // grids in the same LOD group should have the exact same lod radius
1375 if ( grid1->lodRadius != grid2->lodRadius ) {
1376 continue;
1377 }
1378 // grids in the same LOD group should have the exact same lod origin
1379 if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) {
1380 continue;
1381 }
1382 if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) {
1383 continue;
1384 }
1385 if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) {
1386 continue;
1387 }
1388 //
1389 while ( R_StitchPatches( grid1num, j ) )
1390 {
1391 numstitches++;
1392 }
1393 }
1394 return numstitches;
1395 }
1396
1397 /*
1398 ===============
1399 R_StitchAllPatches
1400 ===============
1401 */
R_StitchAllPatches(void)1402 void R_StitchAllPatches( void ) {
1403 int i, stitched, numstitches;
1404 srfGridMesh_t *grid1;
1405
1406 numstitches = 0;
1407 do
1408 {
1409 stitched = qfalse;
1410 for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
1411 //
1412 grid1 = (srfGridMesh_t *) s_worldData.surfaces[i].data;
1413 // if this surface is not a grid
1414 if ( grid1->surfaceType != SF_GRID ) {
1415 continue;
1416 }
1417 //
1418 if ( grid1->lodStitched ) {
1419 continue;
1420 }
1421 //
1422 grid1->lodStitched = qtrue;
1423 stitched = qtrue;
1424 //
1425 numstitches += R_TryStitchingPatch( i );
1426 }
1427 }
1428 while ( stitched );
1429 ri.Printf( PRINT_ALL, "stitched %d LoD cracks\n", numstitches );
1430 }
1431
1432 /*
1433 ===============
1434 R_MovePatchSurfacesToHunk
1435 ===============
1436 */
R_MovePatchSurfacesToHunk(void)1437 void R_MovePatchSurfacesToHunk( void ) {
1438 int i, size;
1439 srfGridMesh_t *grid, *hunkgrid;
1440
1441 for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
1442 //
1443 grid = (srfGridMesh_t *) s_worldData.surfaces[i].data;
1444 // if this surface is not a grid
1445 if ( grid->surfaceType != SF_GRID ) {
1446 continue;
1447 }
1448 //
1449 size = ( grid->width * grid->height - 1 ) * sizeof( drawVert_t ) + sizeof( *grid );
1450 hunkgrid = ri.Hunk_Alloc( size, h_low );
1451 Com_Memcpy( hunkgrid, grid, size );
1452
1453 hunkgrid->widthLodError = ri.Hunk_Alloc( grid->width * 4, h_low );
1454 Com_Memcpy( hunkgrid->widthLodError, grid->widthLodError, grid->width * 4 );
1455
1456 hunkgrid->heightLodError = ri.Hunk_Alloc( grid->height * 4, h_low );
1457 Com_Memcpy( hunkgrid->heightLodError, grid->heightLodError, grid->height * 4 );
1458
1459 R_FreeSurfaceGridMesh( grid );
1460
1461 s_worldData.surfaces[i].data = (void *) hunkgrid;
1462 }
1463 }
1464
1465 /*
1466 ===============
1467 R_LoadSurfaces
1468 ===============
1469 */
R_LoadSurfaces(lump_t * surfs,lump_t * verts,lump_t * indexLump)1470 static void R_LoadSurfaces( lump_t *surfs, lump_t *verts, lump_t *indexLump ) {
1471 dsurface_t *in;
1472 msurface_t *out;
1473 drawVert_t *dv;
1474 int *indexes;
1475 int count;
1476 int numFaces, numMeshes, numTriSurfs, numFlares;
1477 int i;
1478
1479 numFaces = 0;
1480 numMeshes = 0;
1481 numTriSurfs = 0;
1482 numFlares = 0;
1483
1484 in = ( void * )( fileBase + surfs->fileofs );
1485 if ( surfs->filelen % sizeof( *in ) ) {
1486 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1487 }
1488 count = surfs->filelen / sizeof( *in );
1489
1490 dv = ( void * )( fileBase + verts->fileofs );
1491 if ( verts->filelen % sizeof( *dv ) ) {
1492 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1493 }
1494
1495 indexes = ( void * )( fileBase + indexLump->fileofs );
1496 if ( indexLump->filelen % sizeof( *indexes ) ) {
1497 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1498 }
1499
1500 out = ri.Hunk_Alloc( count * sizeof( *out ), h_low );
1501
1502 s_worldData.surfaces = out;
1503 s_worldData.numsurfaces = count;
1504
1505 // Ridah, init the surface memory. This is optimization, so we don't have to
1506 // look for memory for each surface, we allocate a big block and just chew it up
1507 // as we go
1508 R_InitSurfMemory();
1509
1510 for ( i = 0 ; i < count ; i++, in++, out++ ) {
1511 switch ( LittleLong( in->surfaceType ) ) {
1512 case MST_PATCH:
1513 ParseMesh( in, dv, out );
1514 numMeshes++;
1515 break;
1516 case MST_TRIANGLE_SOUP:
1517 ParseTriSurf( in, dv, out, indexes );
1518 numTriSurfs++;
1519 break;
1520 case MST_PLANAR:
1521 ParseFace( in, dv, out, indexes );
1522 numFaces++;
1523 break;
1524 case MST_FLARE:
1525 ParseFlare( in, dv, out, indexes );
1526 numFlares++;
1527 break;
1528 default:
1529 ri.Error( ERR_DROP, "Bad surfaceType" );
1530 }
1531 }
1532
1533 #ifdef PATCH_STITCHING
1534 R_StitchAllPatches();
1535 #endif
1536
1537 R_FixSharedVertexLodError();
1538
1539 #ifdef PATCH_STITCHING
1540 R_MovePatchSurfacesToHunk();
1541 #endif
1542
1543 ri.Printf( PRINT_ALL, "...loaded %d faces, %i meshes, %i trisurfs, %i flares\n",
1544 numFaces, numMeshes, numTriSurfs, numFlares );
1545 }
1546
1547
1548
1549 /*
1550 =================
1551 R_LoadSubmodels
1552 =================
1553 */
R_LoadSubmodels(lump_t * l)1554 static void R_LoadSubmodels( lump_t *l ) {
1555 dmodel_t *in;
1556 bmodel_t *out;
1557 int i, j, count;
1558
1559 in = ( void * )( fileBase + l->fileofs );
1560 if ( l->filelen % sizeof( *in ) ) {
1561 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1562 }
1563 count = l->filelen / sizeof( *in );
1564
1565 s_worldData.bmodels = out = ri.Hunk_Alloc( count * sizeof( *out ), h_low );
1566
1567 for ( i = 0 ; i < count ; i++, in++, out++ ) {
1568 model_t *model;
1569
1570 model = R_AllocModel();
1571
1572 assert( model != NULL ); // this should never happen
1573 if ( model == NULL ) {
1574 ri.Error(ERR_DROP, "R_LoadSubmodels: R_AllocModel() failed");
1575 }
1576
1577 model->type = MOD_BRUSH;
1578 model->bmodel = out;
1579 Com_sprintf( model->name, sizeof( model->name ), "*%d", i );
1580
1581 for ( j = 0 ; j < 3 ; j++ ) {
1582 out->bounds[0][j] = LittleFloat( in->mins[j] );
1583 out->bounds[1][j] = LittleFloat( in->maxs[j] );
1584 }
1585
1586 out->firstSurface = s_worldData.surfaces + LittleLong( in->firstSurface );
1587 out->numSurfaces = LittleLong( in->numSurfaces );
1588 }
1589 }
1590
1591
1592
1593 //==================================================================
1594
1595 /*
1596 =================
1597 R_SetParent
1598 =================
1599 */
R_SetParent(mnode_t * node,mnode_t * parent)1600 static void R_SetParent( mnode_t *node, mnode_t *parent ) {
1601 node->parent = parent;
1602 if ( node->contents != -1 ) {
1603 return;
1604 }
1605 R_SetParent( node->children[0], node );
1606 R_SetParent( node->children[1], node );
1607 }
1608
1609 /*
1610 =================
1611 R_LoadNodesAndLeafs
1612 =================
1613 */
R_LoadNodesAndLeafs(lump_t * nodeLump,lump_t * leafLump)1614 static void R_LoadNodesAndLeafs( lump_t *nodeLump, lump_t *leafLump ) {
1615 int i, j, p;
1616 dnode_t *in;
1617 dleaf_t *inLeaf;
1618 mnode_t *out;
1619 int numNodes, numLeafs;
1620
1621 in = ( void * )( fileBase + nodeLump->fileofs );
1622 if ( nodeLump->filelen % sizeof( dnode_t ) ||
1623 leafLump->filelen % sizeof( dleaf_t ) ) {
1624 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1625 }
1626 numNodes = nodeLump->filelen / sizeof( dnode_t );
1627 numLeafs = leafLump->filelen / sizeof( dleaf_t );
1628
1629 out = ri.Hunk_Alloc( ( numNodes + numLeafs ) * sizeof( *out ), h_low );
1630
1631 s_worldData.nodes = out;
1632 s_worldData.numnodes = numNodes + numLeafs;
1633 s_worldData.numDecisionNodes = numNodes;
1634
1635 // load nodes
1636 for ( i = 0 ; i < numNodes; i++, in++, out++ )
1637 {
1638 for ( j = 0 ; j < 3 ; j++ )
1639 {
1640 out->mins[j] = LittleLong( in->mins[j] );
1641 out->maxs[j] = LittleLong( in->maxs[j] );
1642 }
1643
1644 p = LittleLong( in->planeNum );
1645 out->plane = s_worldData.planes + p;
1646
1647 out->contents = CONTENTS_NODE; // differentiate from leafs
1648
1649 for ( j = 0 ; j < 2 ; j++ )
1650 {
1651 p = LittleLong( in->children[j] );
1652 if ( p >= 0 ) {
1653 out->children[j] = s_worldData.nodes + p;
1654 } else {
1655 out->children[j] = s_worldData.nodes + numNodes + ( -1 - p );
1656 }
1657 }
1658 }
1659
1660 // load leafs
1661 inLeaf = ( void * )( fileBase + leafLump->fileofs );
1662 for ( i = 0 ; i < numLeafs ; i++, inLeaf++, out++ )
1663 {
1664 for ( j = 0 ; j < 3 ; j++ )
1665 {
1666 out->mins[j] = LittleLong( inLeaf->mins[j] );
1667 out->maxs[j] = LittleLong( inLeaf->maxs[j] );
1668 }
1669
1670 out->cluster = LittleLong( inLeaf->cluster );
1671 out->area = LittleLong( inLeaf->area );
1672
1673 if ( out->cluster >= s_worldData.numClusters ) {
1674 s_worldData.numClusters = out->cluster + 1;
1675 }
1676
1677 out->firstmarksurface = s_worldData.marksurfaces +
1678 LittleLong( inLeaf->firstLeafSurface );
1679 out->nummarksurfaces = LittleLong( inLeaf->numLeafSurfaces );
1680 }
1681
1682 // chain decendants
1683 R_SetParent( s_worldData.nodes, NULL );
1684 }
1685
1686 //=============================================================================
1687
1688 /*
1689 =================
1690 R_LoadShaders
1691 =================
1692 */
R_LoadShaders(lump_t * l)1693 static void R_LoadShaders( lump_t *l ) {
1694 int i, count;
1695 dshader_t *in, *out;
1696
1697 in = ( void * )( fileBase + l->fileofs );
1698 if ( l->filelen % sizeof( *in ) ) {
1699 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1700 }
1701 count = l->filelen / sizeof( *in );
1702 out = ri.Hunk_Alloc( count * sizeof( *out ), h_low );
1703
1704 s_worldData.shaders = out;
1705 s_worldData.numShaders = count;
1706
1707 memcpy( out, in, count * sizeof( *out ) );
1708
1709 for ( i = 0 ; i < count ; i++ ) {
1710 out[i].surfaceFlags = LittleLong( out[i].surfaceFlags );
1711 out[i].contentFlags = LittleLong( out[i].contentFlags );
1712 }
1713 }
1714
1715
1716 /*
1717 =================
1718 R_LoadMarksurfaces
1719 =================
1720 */
R_LoadMarksurfaces(lump_t * l)1721 static void R_LoadMarksurfaces( lump_t *l ) {
1722 int i, j, count;
1723 int *in;
1724 msurface_t **out;
1725
1726 in = ( void * )( fileBase + l->fileofs );
1727 if ( l->filelen % sizeof( *in ) ) {
1728 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1729 }
1730 count = l->filelen / sizeof( *in );
1731 out = ri.Hunk_Alloc( count * sizeof( *out ), h_low );
1732
1733 s_worldData.marksurfaces = out;
1734 s_worldData.nummarksurfaces = count;
1735
1736 for ( i = 0 ; i < count ; i++ )
1737 {
1738 j = LittleLong( in[i] );
1739 out[i] = s_worldData.surfaces + j;
1740 }
1741 }
1742
1743
1744 /*
1745 =================
1746 R_LoadPlanes
1747 =================
1748 */
R_LoadPlanes(lump_t * l)1749 static void R_LoadPlanes( lump_t *l ) {
1750 int i, j;
1751 cplane_t *out;
1752 dplane_t *in;
1753 int count;
1754 int bits;
1755
1756 in = ( void * )( fileBase + l->fileofs );
1757 if ( l->filelen % sizeof( *in ) ) {
1758 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1759 }
1760 count = l->filelen / sizeof( *in );
1761 out = ri.Hunk_Alloc( count * 2 * sizeof( *out ), h_low );
1762
1763 s_worldData.planes = out;
1764 s_worldData.numplanes = count;
1765
1766 for ( i = 0 ; i < count ; i++, in++, out++ ) {
1767 bits = 0;
1768 for ( j = 0 ; j < 3 ; j++ ) {
1769 out->normal[j] = LittleFloat( in->normal[j] );
1770 if ( out->normal[j] < 0 ) {
1771 bits |= 1 << j;
1772 }
1773 }
1774
1775 out->dist = LittleFloat( in->dist );
1776 out->type = PlaneTypeForNormal( out->normal );
1777 out->signbits = bits;
1778 }
1779 }
1780
1781 /*
1782 =================
1783 R_LoadFogs
1784
1785 =================
1786 */
R_LoadFogs(lump_t * l,lump_t * brushesLump,lump_t * sidesLump)1787 static void R_LoadFogs( lump_t *l, lump_t *brushesLump, lump_t *sidesLump ) {
1788 int i;
1789 fog_t *out;
1790 dfog_t *fogs;
1791 dbrush_t *brushes, *brush;
1792 dbrushside_t *sides;
1793 int count, brushesCount, sidesCount;
1794 int sideNum;
1795 int planeNum;
1796 shader_t *shader;
1797 float d;
1798 int firstSide;
1799
1800 fogs = ( void * )( fileBase + l->fileofs );
1801 if ( l->filelen % sizeof( *fogs ) ) {
1802 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1803 }
1804 count = l->filelen / sizeof( *fogs );
1805
1806 // create fog structures for them
1807 s_worldData.numfogs = count + 1;
1808 s_worldData.fogs = ri.Hunk_Alloc( s_worldData.numfogs * sizeof( *out ), h_low );
1809 out = s_worldData.fogs + 1;
1810
1811 if ( !count ) {
1812 return;
1813 }
1814
1815 brushes = ( void * )( fileBase + brushesLump->fileofs );
1816 if ( brushesLump->filelen % sizeof( *brushes ) ) {
1817 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1818 }
1819 brushesCount = brushesLump->filelen / sizeof( *brushes );
1820
1821 sides = ( void * )( fileBase + sidesLump->fileofs );
1822 if ( sidesLump->filelen % sizeof( *sides ) ) {
1823 ri.Error( ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name );
1824 }
1825 sidesCount = sidesLump->filelen / sizeof( *sides );
1826
1827 for ( i = 0 ; i < count ; i++, fogs++ ) {
1828 out->originalBrushNumber = LittleLong( fogs->brushNum );
1829
1830 if ( (unsigned)out->originalBrushNumber >= brushesCount ) {
1831 ri.Error( ERR_DROP, "fog brushNumber out of range" );
1832 }
1833 brush = brushes + out->originalBrushNumber;
1834
1835 firstSide = LittleLong( brush->firstSide );
1836
1837 if ( (unsigned)firstSide > sidesCount - 6 ) {
1838 ri.Error( ERR_DROP, "fog brush sideNumber out of range" );
1839 }
1840
1841 // brushes are always sorted with the axial sides first
1842 sideNum = firstSide + 0;
1843 planeNum = LittleLong( sides[ sideNum ].planeNum );
1844 out->bounds[0][0] = -s_worldData.planes[ planeNum ].dist;
1845
1846 sideNum = firstSide + 1;
1847 planeNum = LittleLong( sides[ sideNum ].planeNum );
1848 out->bounds[1][0] = s_worldData.planes[ planeNum ].dist;
1849
1850 sideNum = firstSide + 2;
1851 planeNum = LittleLong( sides[ sideNum ].planeNum );
1852 out->bounds[0][1] = -s_worldData.planes[ planeNum ].dist;
1853
1854 sideNum = firstSide + 3;
1855 planeNum = LittleLong( sides[ sideNum ].planeNum );
1856 out->bounds[1][1] = s_worldData.planes[ planeNum ].dist;
1857
1858 sideNum = firstSide + 4;
1859 planeNum = LittleLong( sides[ sideNum ].planeNum );
1860 out->bounds[0][2] = -s_worldData.planes[ planeNum ].dist;
1861
1862 sideNum = firstSide + 5;
1863 planeNum = LittleLong( sides[ sideNum ].planeNum );
1864 out->bounds[1][2] = s_worldData.planes[ planeNum ].dist;
1865
1866 // get information from the shader for fog parameters
1867 shader = R_FindShader( fogs->shader, LIGHTMAP_NONE, qtrue );
1868
1869 out->parms = shader->fogParms;
1870
1871 out->colorInt = ColorBytes4( shader->fogParms.color[0] * tr.identityLight,
1872 shader->fogParms.color[1] * tr.identityLight,
1873 shader->fogParms.color[2] * tr.identityLight, 1.0 );
1874
1875 d = shader->fogParms.depthForOpaque < 1 ? 1 : shader->fogParms.depthForOpaque;
1876 out->tcScale = 1.0f / ( d * 8 );
1877
1878 // set the gradient vector
1879 sideNum = LittleLong( fogs->visibleSide );
1880
1881 if ( sideNum == -1 ) {
1882 out->hasSurface = qfalse;
1883 } else {
1884 out->hasSurface = qtrue;
1885 planeNum = LittleLong( sides[ firstSide + sideNum ].planeNum );
1886 VectorSubtract( vec3_origin, s_worldData.planes[ planeNum ].normal, out->surface );
1887 out->surface[3] = -s_worldData.planes[ planeNum ].dist;
1888 }
1889
1890 out++;
1891 }
1892
1893 }
1894
1895
1896 /*
1897 ==============
1898 R_FindLightGridBounds
1899 ==============
1900 */
R_FindLightGridBounds(vec3_t mins,vec3_t maxs)1901 void R_FindLightGridBounds( vec3_t mins, vec3_t maxs ) {
1902 world_t *w;
1903 msurface_t *surf;
1904 srfSurfaceFace_t *surfFace;
1905 // cplane_t *plane;
1906 struct shader_s *shd;
1907
1908 qboolean foundGridBrushes = qfalse;
1909 int i,j;
1910
1911 w = &s_worldData;
1912
1913 //----(SA) temp - disable this whole thing for now
1914 VectorCopy( w->bmodels[0].bounds[0], mins );
1915 VectorCopy( w->bmodels[0].bounds[1], maxs );
1916 return;
1917 //----(SA) temp
1918
1919
1920
1921
1922 ClearBounds( mins, maxs );
1923
1924 // wrong!
1925 for ( i = 0; i < w->bmodels[0].numSurfaces; i++ ) {
1926 surf = w->bmodels[0].firstSurface + i;
1927 shd = surf->shader;
1928
1929 if ( !( *surf->data == SF_FACE ) ) {
1930 continue;
1931 }
1932
1933 if ( !( shd->contentFlags & CONTENTS_LIGHTGRID ) ) {
1934 continue;
1935 }
1936
1937 foundGridBrushes = qtrue;
1938 }
1939
1940
1941 // wrong!
1942 for ( i = 0; i < w->numsurfaces; i++ ) {
1943 surf = &w->surfaces[i];
1944 shd = surf->shader;
1945 if ( !( *surf->data == SF_FACE ) ) {
1946 continue;
1947 }
1948
1949 if ( !( shd->contentFlags & CONTENTS_LIGHTGRID ) ) {
1950 continue;
1951 }
1952
1953 foundGridBrushes = qtrue;
1954
1955 surfFace = ( srfSurfaceFace_t * )surf->data;
1956
1957 for ( j = 0; j < surfFace->numPoints; j++ ) {
1958 AddPointToBounds( surfFace->points[j], mins, maxs );
1959 }
1960
1961 }
1962
1963 // go through brushes looking for lightgrid
1964 // for ( i = 0 ; i < numbrushes ; i++ ) {
1965 // db = &dbrushes[i];
1966 //
1967 // if (!(dshaders[db->shaderNum].contentFlags & CONTENTS_LIGHTGRID)) {
1968 // continue;
1969 // }
1970 //
1971 // foundGridBrushes = qtrue;
1972 //
1973 // // go through light grid surfaces for bounds
1974 // for ( j = 0 ; j < db->numSides ; j++ ) {
1975 // s = &dbrushsides[ db->firstSide + j ];
1976 //
1977 // surfmin[0] = -dplanes[ dbrushsides[ db->firstSide + 0 ].planeNum ].dist - 1;
1978 // surfmin[1] = -dplanes[ dbrushsides[ db->firstSide + 2 ].planeNum ].dist - 1;
1979 // surfmin[2] = -dplanes[ dbrushsides[ db->firstSide + 4 ].planeNum ].dist - 1;
1980 // surfmax[0] = dplanes[ dbrushsides[ db->firstSide + 1 ].planeNum ].dist + 1;
1981 // surfmax[1] = dplanes[ dbrushsides[ db->firstSide + 3 ].planeNum ].dist + 1;
1982 // surfmax[2] = dplanes[ dbrushsides[ db->firstSide + 5 ].planeNum ].dist + 1;
1983 // AddPointToBounds (surfmin, mins, maxs);
1984 // AddPointToBounds (surfmax, mins, maxs);
1985 // }
1986 // }
1987
1988
1989 //----(SA) temp
1990 foundGridBrushes = qfalse; // disable this whole thing for now
1991 //----(SA) temp
1992
1993 if ( !foundGridBrushes ) {
1994 VectorCopy( w->bmodels[0].bounds[0], mins );
1995 VectorCopy( w->bmodels[0].bounds[1], maxs );
1996 }
1997 }
1998
1999 /*
2000 ================
2001 R_LoadLightGrid
2002
2003 ================
2004 */
R_LoadLightGrid(lump_t * l)2005 void R_LoadLightGrid( lump_t *l ) {
2006 int i;
2007 vec3_t maxs;
2008 int numGridPoints;
2009 world_t *w;
2010 // float *wMins, *wMaxs;
2011 vec3_t wMins, wMaxs;
2012
2013 w = &s_worldData;
2014
2015 w->lightGridInverseSize[0] = 1.0 / w->lightGridSize[0];
2016 w->lightGridInverseSize[1] = 1.0 / w->lightGridSize[1];
2017 w->lightGridInverseSize[2] = 1.0 / w->lightGridSize[2];
2018
2019 //----(SA) modified
2020 R_FindLightGridBounds( wMins, wMaxs );
2021 // wMins = w->bmodels[0].bounds[0];
2022 // wMaxs = w->bmodels[0].bounds[1];
2023 //----(SA) end
2024
2025 for ( i = 0 ; i < 3 ; i++ ) {
2026 w->lightGridOrigin[i] = w->lightGridSize[i] * ceil( wMins[i] / w->lightGridSize[i] );
2027 maxs[i] = w->lightGridSize[i] * floor( wMaxs[i] / w->lightGridSize[i] );
2028 w->lightGridBounds[i] = ( maxs[i] - w->lightGridOrigin[i] ) / w->lightGridSize[i] + 1;
2029 }
2030
2031 numGridPoints = w->lightGridBounds[0] * w->lightGridBounds[1] * w->lightGridBounds[2];
2032
2033 if ( l->filelen != numGridPoints * 8 ) {
2034 ri.Printf( PRINT_WARNING, "WARNING: light grid mismatch\n" );
2035 w->lightGridData = NULL;
2036 return;
2037 }
2038
2039 w->lightGridData = ri.Hunk_Alloc( l->filelen, h_low );
2040 memcpy( w->lightGridData, ( void * )( fileBase + l->fileofs ), l->filelen );
2041
2042 // deal with overbright bits
2043 for ( i = 0 ; i < numGridPoints ; i++ ) {
2044 R_ColorShiftLightingBytes( &w->lightGridData[i * 8], &w->lightGridData[i * 8] );
2045 R_ColorShiftLightingBytes( &w->lightGridData[i * 8 + 3], &w->lightGridData[i * 8 + 3] );
2046 }
2047 }
2048
2049 /*
2050 ================
2051 R_LoadEntities
2052 ================
2053 */
R_LoadEntities(lump_t * l)2054 void R_LoadEntities( lump_t *l ) {
2055 char *p, *token, *s;
2056 char keyname[MAX_TOKEN_CHARS];
2057 char value[MAX_TOKEN_CHARS];
2058 world_t *w;
2059
2060 w = &s_worldData;
2061 w->lightGridSize[0] = 64;
2062 w->lightGridSize[1] = 64;
2063 w->lightGridSize[2] = 128;
2064
2065 p = ( char * )( fileBase + l->fileofs );
2066
2067 // store for reference by the cgame
2068 w->entityString = ri.Hunk_Alloc( l->filelen + 1, h_low );
2069 strcpy( w->entityString, p );
2070 w->entityParsePoint = w->entityString;
2071
2072 token = COM_ParseExt( &p, qtrue );
2073 if ( !*token || *token != '{' ) {
2074 return;
2075 }
2076
2077 // only parse the world spawn
2078 while ( 1 ) {
2079 // parse key
2080 token = COM_ParseExt( &p, qtrue );
2081
2082 if ( !*token || *token == '}' ) {
2083 break;
2084 }
2085 Q_strncpyz( keyname, token, sizeof( keyname ) );
2086
2087 // parse value
2088 token = COM_ParseExt( &p, qtrue );
2089
2090 if ( !*token || *token == '}' ) {
2091 break;
2092 }
2093 Q_strncpyz( value, token, sizeof( value ) );
2094
2095 // check for remapping of shaders for vertex lighting
2096 s = "vertexremapshader";
2097 if ( !Q_strncmp( keyname, s, strlen( s ) ) ) {
2098 s = strchr( value, ';' );
2099 if ( !s ) {
2100 ri.Printf( PRINT_WARNING, "WARNING: no semi colon in vertexshaderremap '%s'\n", value );
2101 break;
2102 }
2103 *s++ = 0;
2104 // NERVE - SMF - temp fix, don't allow remapping of shader
2105 // - fixes not drawing terrain surfaces when r_vertexLight is true even when remapped shader is present
2106 // if (r_vertexLight->integer) {
2107 // R_RemapShader(value, s, "0");
2108 // }
2109 continue;
2110 }
2111 // check for remapping of shaders
2112 s = "remapshader";
2113 if ( !Q_strncmp( keyname, s, strlen( s ) ) ) {
2114 s = strchr( value, ';' );
2115 if ( !s ) {
2116 ri.Printf( PRINT_WARNING, "WARNING: no semi colon in shaderremap '%s'\n", value );
2117 break;
2118 }
2119 *s++ = 0;
2120 R_RemapShader( value, s, "0" );
2121 continue;
2122 }
2123 // check for a different grid size
2124 if ( !Q_stricmp( keyname, "gridsize" ) ) {
2125 sscanf( value, "%f %f %f", &w->lightGridSize[0], &w->lightGridSize[1], &w->lightGridSize[2] );
2126 continue;
2127 }
2128 }
2129 }
2130
2131 /*
2132 =================
2133 R_GetEntityToken
2134 =================
2135 */
R_GetEntityToken(char * buffer,int size)2136 qboolean R_GetEntityToken( char *buffer, int size ) {
2137 const char *s;
2138
2139 s = COM_Parse( &s_worldData.entityParsePoint );
2140 Q_strncpyz( buffer, s, size );
2141 if ( !s_worldData.entityParsePoint && !s[0] ) {
2142 s_worldData.entityParsePoint = s_worldData.entityString;
2143 return qfalse;
2144 } else {
2145 return qtrue;
2146 }
2147 }
2148
2149 /*
2150 =================
2151 RE_LoadWorldMap
2152
2153 Called directly from cgame
2154 =================
2155 */
RE_LoadWorldMap(const char * name)2156 void RE_LoadWorldMap( const char *name ) {
2157 int i;
2158 dheader_t *header;
2159 union {
2160 byte *b;
2161 void *v;
2162 } buffer;
2163 byte *startMarker;
2164
2165 skyboxportal = 0;
2166
2167 if ( tr.worldMapLoaded ) {
2168 ri.Error( ERR_DROP, "ERROR: attempted to redundantly load world map" );
2169 }
2170
2171 // set default sun direction to be used if it isn't
2172 // overridden by a shader
2173 tr.sunDirection[0] = 0.45;
2174 tr.sunDirection[1] = 0.3;
2175 tr.sunDirection[2] = 0.9;
2176
2177 tr.sunShader = 0; // clear sunshader so it's not there if the level doesn't specify it
2178
2179 // inalidate fogs (likely to be re-initialized to new values by the current map)
2180 // TODO:(SA)this is sort of silly. I'm going to do a general cleanup on fog stuff
2181 // now that I can see how it's been used. (functionality can narrow since
2182 // it's not used as much as it's designed for.)
2183 R_SetFog( FOG_SKY, 0, 0, 0, 0, 0, 0 );
2184 R_SetFog( FOG_PORTALVIEW,0, 0, 0, 0, 0, 0 );
2185 R_SetFog( FOG_HUD, 0, 0, 0, 0, 0, 0 );
2186 R_SetFog( FOG_MAP, 0, 0, 0, 0, 0, 0 );
2187 R_SetFog( FOG_CURRENT, 0, 0, 0, 0, 0, 0 );
2188 R_SetFog( FOG_TARGET, 0, 0, 0, 0, 0, 0 );
2189 R_SetFog( FOG_WATER, 0, 0, 0, 0, 0, 0 );
2190 R_SetFog( FOG_SERVER, 0, 0, 0, 0, 0, 0 );
2191
2192 VectorNormalize( tr.sunDirection );
2193
2194 tr.worldMapLoaded = qtrue;
2195
2196 // load it
2197 ri.FS_ReadFile( name, &buffer.v );
2198 if ( !buffer.b ) {
2199 ri.Error( ERR_DROP, "RE_LoadWorldMap: %s not found", name );
2200 }
2201
2202 // clear tr.world so if the level fails to load, the next
2203 // try will not look at the partially loaded version
2204 tr.world = NULL;
2205
2206 memset( &s_worldData, 0, sizeof( s_worldData ) );
2207 Q_strncpyz( s_worldData.name, name, sizeof( s_worldData.name ) );
2208
2209 Q_strncpyz( s_worldData.baseName, COM_SkipPath( s_worldData.name ), sizeof( s_worldData.name ) );
2210 COM_StripExtension(s_worldData.baseName, s_worldData.baseName, sizeof(s_worldData.baseName));
2211
2212 startMarker = ri.Hunk_Alloc( 0, h_low );
2213 c_gridVerts = 0;
2214
2215 header = (dheader_t *)buffer.b;
2216 fileBase = (byte *)header;
2217
2218 i = LittleLong( header->version );
2219 if ( i != BSP_VERSION ) {
2220 ri.Error( ERR_DROP, "RE_LoadWorldMap: %s has wrong version number (%i should be %i)",
2221 name, i, BSP_VERSION );
2222 }
2223
2224 // swap all the lumps
2225 for ( i = 0 ; i < sizeof( dheader_t ) / 4 ; i++ ) {
2226 ( (int *)header )[i] = LittleLong( ( (int *)header )[i] );
2227 }
2228
2229 // load into heap
2230 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2231 R_LoadShaders( &header->lumps[LUMP_SHADERS] );
2232 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2233 R_LoadLightmaps( &header->lumps[LUMP_LIGHTMAPS] );
2234 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2235 R_LoadPlanes( &header->lumps[LUMP_PLANES] );
2236 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2237 R_LoadFogs( &header->lumps[LUMP_FOGS], &header->lumps[LUMP_BRUSHES], &header->lumps[LUMP_BRUSHSIDES] );
2238 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2239 R_LoadSurfaces( &header->lumps[LUMP_SURFACES], &header->lumps[LUMP_DRAWVERTS], &header->lumps[LUMP_DRAWINDEXES] );
2240 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2241 R_LoadMarksurfaces( &header->lumps[LUMP_LEAFSURFACES] );
2242 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2243 R_LoadNodesAndLeafs( &header->lumps[LUMP_NODES], &header->lumps[LUMP_LEAFS] );
2244 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2245 R_LoadSubmodels( &header->lumps[LUMP_MODELS] );
2246 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2247 R_LoadVisibility( &header->lumps[LUMP_VISIBILITY] );
2248 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2249 R_LoadEntities( &header->lumps[LUMP_ENTITIES] );
2250 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2251 R_LoadLightGrid( &header->lumps[LUMP_LIGHTGRID] );
2252 ri.Cmd_ExecuteText( EXEC_NOW, "updatescreen\n" );
2253
2254 s_worldData.dataSize = (byte *)ri.Hunk_Alloc( 0, h_low ) - startMarker;
2255
2256 // only set tr.world now that we know the entire level has loaded properly
2257 tr.world = &s_worldData;
2258
2259 // reset fog to world fog (if present)
2260 R_SetFog( FOG_CMD_SWITCHFOG, FOG_MAP,20,0,0,0,0 );
2261
2262 //----(SA) set the sun shader if there is one
2263 if ( tr.sunShaderName ) {
2264 tr.sunShader = R_FindShader( tr.sunShaderName, LIGHTMAP_NONE, qtrue );
2265 }
2266
2267 //----(SA) end
2268 ri.FS_FreeFile( buffer.v );
2269 }
2270
2271