/* Copyright (C) 2018 Wildfire Games.
 * This file is part of 0 A.D.
 *
 * 0 A.D. is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * 0 A.D. is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef INCLUDED_GRID
#define INCLUDED_GRID

#include <cstring>

#ifdef NDEBUG
#define GRID_BOUNDS_DEBUG 0
#else
#define GRID_BOUNDS_DEBUG 1
#endif

/**
 * Basic 2D array, intended for storing tile data, plus support for lazy updates
 * by ICmpObstructionManager.
 * @c T must be a POD type that can be initialised with 0s.
 */
template<typename T>
class Grid
{
public:
	Grid() : m_W(0), m_H(0), m_Data(NULL), m_DirtyID(0)
	{
	}

	Grid(u16 w, u16 h) : m_W(w), m_H(h), m_Data(NULL), m_DirtyID(0)
	{
		if (m_W || m_H)
			m_Data = new T[m_W * m_H];
		reset();
	}

	Grid(const Grid& g) : m_W(0), m_H(0), m_Data(NULL), m_DirtyID(0)
	{
		*this = g;
	}

	Grid& operator=(const Grid& g)
	{
		if (this == &g)
			return *this;

		m_DirtyID = g.m_DirtyID;
		if (m_W == g.m_W && m_H == g.m_H)
		{
			memcpy(m_Data, g.m_Data, m_W*m_H*sizeof(T));
			return *this;
		}

		m_W = g.m_W;
		m_H = g.m_H;
		delete[] m_Data;
		if (g.m_Data)
		{
			m_Data = new T[m_W * m_H];
			memcpy(m_Data, g.m_Data, m_W*m_H*sizeof(T));
		}
		else
			m_Data = NULL;
		return *this;
	}

	void swap(Grid& g)
	{
		std::swap(m_DirtyID, g.m_DirtyID);
		std::swap(m_Data, g.m_Data);
		std::swap(m_H, g.m_H);
		std::swap(m_W, g.m_W);
	}

	~Grid()
	{
		delete[] m_Data;
	}

	bool operator==(const Grid& g) const
	{
		if (!compare_sizes(&g) || m_DirtyID != g.m_DirtyID)
			return false;

		return memcmp(m_Data, g.m_Data, m_W*m_H*sizeof(T)) == 0;
	}

	bool blank() const
	{
		return m_W == 0 && m_H == 0;
	}

	bool any_set_in_square(int i0, int j0, int i1, int j1) const
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(i0 >= 0 && j0 >= 0 && i1 <= m_W && j1 <= m_H);
#endif
		for (int j = j0; j < j1; ++j)
		{
			int sum = 0;
			for (int i = i0; i < i1; ++i)
				sum += m_Data[j*m_W + i];
			if (sum > 0)
				return true;
		}
		return false;
	}

	void reset()
	{
		if (m_Data)
			memset(m_Data, 0, m_W*m_H*sizeof(T));
	}

	// Add two grids of the same size
	void add(const Grid& g)
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(g.m_W == m_W && g.m_H == m_H);
#endif
		for (int i = 0; i < m_H*m_W; ++i)
			m_Data[i] += g.m_Data[i];
	}

	void bitwise_or(const Grid& g)
	{
		if (this == &g)
			return;

#if GRID_BOUNDS_DEBUG
		ENSURE(g.m_W == m_W && g.m_H == m_H);
#endif
		for (int i = 0; i < m_H*m_W; ++i)
			m_Data[i] |= g.m_Data[i];
	}

	void set(int i, int j, const T& value)
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(0 <= i && i < m_W && 0 <= j && j < m_H);
#endif
		m_Data[j*m_W + i] = value;
	}

	T& get(int i, int j) const
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(0 <= i && i < m_W && 0 <= j && j < m_H);
#endif
		return m_Data[j*m_W + i];
	}

	template<typename U>
	bool compare_sizes(const Grid<U>* g) const
	{
		return g && m_W == g->m_W && m_H == g->m_H;
	}

	u16 m_W, m_H;
	T* m_Data;

	size_t m_DirtyID; // if this is < the id maintained by ICmpObstructionManager then it needs to be updated
};
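// Illustrative usage sketch (not part of the original header): a Grid<u8> used
// as a per-tile flag map. The function is hypothetical and exists only to
// document the API above; u8/u16 are assumed to come from lib/types.h as in
// the rest of the engine.
inline bool Grid_ExampleUsage()
{
	Grid<u8> flags(64, 64); // allocated and zero-initialised by the constructor
	flags.set(3, 5, 1);     // mark the tile at column i=3, row j=5

	Grid<u8> copy(flags);   // deep copy; copy.get(3, 5) == 1

	// True iff some tile in the half-open square [0,8)x[0,8) is non-zero
	return copy.any_set_in_square(0, 0, 8, 8);
}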
/**
 * Similar to Grid, except optimised for sparse usage (the grid is subdivided into
 * buckets whose contents are only initialised on demand, to save on memset cost).
 */
template<typename T>
class SparseGrid
{
	NONCOPYABLE(SparseGrid);

	enum { BucketBits = 4, BucketSize = 1 << BucketBits };

	T* GetBucket(int i, int j)
	{
		// Find the bucket containing tile (i, j), allocating and zero-filling it on first use
		size_t b = (j >> BucketBits) * m_BW + (i >> BucketBits);
		if (!m_Data[b])
		{
			m_Data[b] = new T[BucketSize*BucketSize];
			memset(m_Data[b], 0, BucketSize*BucketSize*sizeof(T));
		}
		return m_Data[b];
	}

public:
	SparseGrid(u16 w, u16 h) : m_W(w), m_H(h), m_DirtyID(0)
	{
		ENSURE(m_W && m_H);

		m_BW = (u16)((m_W + BucketSize-1) >> BucketBits);
		m_BH = (u16)((m_H + BucketSize-1) >> BucketBits);

		m_Data = new T*[m_BW*m_BH];
		memset(m_Data, 0, m_BW*m_BH*sizeof(T*));
	}

	~SparseGrid()
	{
		reset();
		delete[] m_Data;
	}

	void reset()
	{
		for (size_t i = 0; i < (size_t)(m_BW*m_BH); ++i)
			delete[] m_Data[i];

		memset(m_Data, 0, m_BW*m_BH*sizeof(T*));
	}

	void set(int i, int j, const T& value)
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(0 <= i && i < m_W && 0 <= j && j < m_H);
#endif
		GetBucket(i, j)[(j % BucketSize)*BucketSize + (i % BucketSize)] = value;
	}

	T& get(int i, int j)
	{
#if GRID_BOUNDS_DEBUG
		ENSURE(0 <= i && i < m_W && 0 <= j && j < m_H);
#endif
		return GetBucket(i, j)[(j % BucketSize)*BucketSize + (i % BucketSize)];
	}

	u16 m_W, m_H;
	u16 m_BW, m_BH;
	T** m_Data;

	size_t m_DirtyID; // if this is < the id maintained by ICmpObstructionManager then it needs to be updated
};

/**
 * Structure holding grid dirtiness information, for efficient updates.
 */
struct GridUpdateInformation
{
	bool dirty;
	bool globallyDirty;
	Grid<u8> dirtinessGrid;

	/**
	 * Update the information with the additional needed updates, then erase the source of additions.
	 * This can usually be optimized through careful memory management.
	 */
	void MergeAndClear(GridUpdateInformation& b)
	{
		ENSURE(dirtinessGrid.compare_sizes(&b.dirtinessGrid));

		bool wasDirty = dirty;

		dirty |= b.dirty;
		globallyDirty |= b.globallyDirty;

		// If the current grid is useless, swap it
		if (!wasDirty)
			dirtinessGrid.swap(b.dirtinessGrid);
		// If the new grid isn't used, don't bother updating it
		else if (dirty && !globallyDirty)
			dirtinessGrid.bitwise_or(b.dirtinessGrid);

		b.Clean();
	}

	/**
	 * Mark everything as clean
	 */
	void Clean()
	{
		dirty = false;
		globallyDirty = false;
		dirtinessGrid.reset();
	}
};
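// Illustrative sketch (not part of the original header): how a consumer might
// fold a fresh GridUpdateInformation into an accumulated one. The function is
// hypothetical; both dirtiness grids must have equal sizes, which MergeAndClear
// ENSUREs.
inline void GridUpdateInformation_ExampleMerge()
{
	// Two update records covering the same 32x32 tile area. The struct has no
	// constructor, so Clean() gives the flags defined values.
	GridUpdateInformation accumulated;
	GridUpdateInformation incoming;
	accumulated.dirtinessGrid = Grid<u8>(32, 32);
	incoming.dirtinessGrid = Grid<u8>(32, 32);
	accumulated.Clean();
	incoming.Clean();

	// A fresh update marks one tile as dirty.
	incoming.dirty = true;
	incoming.dirtinessGrid.set(4, 7, 1);

	// Fold it into the accumulated record: since 'accumulated' was clean, the
	// grids are simply swapped, and 'incoming' is left clean afterwards.
	accumulated.MergeAndClear(incoming);
	// accumulated.dirty is now true and tile (4, 7) is marked in its grid.
}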
#endif // INCLUDED_GRID