// MemCheck.cpp (Oclgrind)
// Copyright (c) 2013-2019, James Price and Simon McIntosh-Smith,
// University of Bristol. All rights reserved.
//
// This program is provided under a three-clause BSD license. For full
// license terms please see the LICENSE file distributed with this
// source code.

#include "core/common.h"

#include "core/Context.h"
#include "core/Memory.h"
#include "core/WorkItem.h"

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

#include "MemCheck.h"

using namespace oclgrind;
using namespace std;

MemCheck::MemCheck(const Context* context) : Plugin(context) {}

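// Called after each instruction a work-item executes. For loads and stores,
// walks the chain of getelementptr instructions feeding the pointer operand
// and checks every index against the static array bounds it indexes.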
void MemCheck::instructionExecuted(const WorkItem* workItem,
                                   const llvm::Instruction* instruction,
                                   const TypedValue& result)
{
  // Check static array bounds if load or store is executed
  const llvm::Value* PtrOp = nullptr;

  if (auto LI = llvm::dyn_cast<llvm::LoadInst>(instruction))
  {
    PtrOp = LI->getPointerOperand();
  }
  else if (auto SI = llvm::dyn_cast<llvm::StoreInst>(instruction))
  {
    PtrOp = SI->getPointerOperand();
  }
  else
  {
    return;
  }

  // Walk up chain of GEP instructions leading to this access
  while (auto GEPI =
           llvm::dyn_cast<llvm::GetElementPtrInst>(PtrOp->stripPointerCasts()))
  {
    checkArrayAccess(workItem, GEPI);

    PtrOp = GEPI->getPointerOperand();
  }
}

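// Atomic loads are validated with the same checks as plain loads.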
void MemCheck::memoryAtomicLoad(const Memory* memory, const WorkItem* workItem,
                                AtomicOp op, size_t address, size_t size)
{
  checkLoad(memory, address, size);
}

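// Atomic stores are validated with the same checks as plain stores.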
void MemCheck::memoryAtomicStore(const Memory* memory, const WorkItem* workItem,
                                 AtomicOp op, size_t address, size_t size)
{
  checkStore(memory, address, size);
}

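// Validates a load performed by a work-item.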
void MemCheck::memoryLoad(const Memory* memory, const WorkItem* workItem,
                          size_t address, size_t size)
{
  checkLoad(memory, address, size);
}

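// Validates a load performed at work-group scope.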
void MemCheck::memoryLoad(const Memory* memory, const WorkGroup* workGroup,
                          size_t address, size_t size)
{
  checkLoad(memory, address, size);
}

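// Records a region of a buffer mapped onto the host, tracking it as a read
// or write mapping according to cl_map_flags, so that later device accesses
// to the region can be flagged.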
void MemCheck::memoryMap(const Memory* memory, size_t address, size_t offset,
                         size_t size, cl_map_flags flags)
{
  MapRegion map = {address, offset, size, memory->getPointer(address + offset),
                   (flags == CL_MAP_READ ? MapRegion::READ : MapRegion::WRITE)};
  m_mapRegions.push_back(map);
}

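// Validates a store performed by a work-item.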
void MemCheck::memoryStore(const Memory* memory, const WorkItem* workItem,
                           size_t address, size_t size,
                           const uint8_t* storeData)
{
  checkStore(memory, address, size);
}

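// Validates a store performed at work-group scope.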
void MemCheck::memoryStore(const Memory* memory, const WorkGroup* workGroup,
                           size_t address, size_t size,
                           const uint8_t* storeData)
{
  checkStore(memory, address, size);
}

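// Drops the tracked map region whose host pointer matches the one being
// unmapped.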
void MemCheck::memoryUnmap(const Memory* memory, size_t address,
                           const void* ptr)
{
  for (auto region = m_mapRegions.begin(); region != m_mapRegions.end();
       region++)
  {
    if (region->ptr == ptr)
    {
      m_mapRegions.erase(region);
      return;
    }
  }
}

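// Checks each index of a getelementptr instruction against the static array
// type it indexes into, logging an error when an index is out of bounds.
// Pointer, vector, and struct operands are stepped through so that ptrType
// always matches the type the current index applies to.
//
// A minimal, hypothetical kernel of the kind this check is meant to flag
// (assuming the out-of-bounds GEP survives into the IR Oclgrind executes):
//
//   __kernel void example(__global int* out)
//   {
//     int a[4];
//     a[0] = 0;
//     out[0] = a[5]; // "Index (5) exceeds static array size (4)"
//   }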
void MemCheck::checkArrayAccess(const WorkItem* workItem,
                                const llvm::GetElementPtrInst* GEPI) const
{
  // Iterate through GEPI indices
  const llvm::Type* ptrType = GEPI->getPointerOperandType();

  for (auto opIndex = GEPI->idx_begin(); opIndex != GEPI->idx_end(); opIndex++)
  {
    int64_t index = workItem->getOperand(opIndex->get()).getSInt();

    if (ptrType->isArrayTy())
    {
      // Check index doesn't exceed size of array
      uint64_t size = ptrType->getArrayNumElements();

      if ((uint64_t)index >= size)
      {
        ostringstream info;
        info << "Index (" << index << ") exceeds static array size (" << size
             << ")";
        m_context->logError(info.str().c_str());
      }

      ptrType = ptrType->getArrayElementType();
    }
    else if (ptrType->isPointerTy())
    {
      ptrType = ptrType->getPointerElementType();
    }
    else if (ptrType->isVectorTy())
    {
      ptrType = llvm::cast<llvm::FixedVectorType>(ptrType)->getElementType();
    }
    else if (ptrType->isStructTy())
    {
      ptrType = ptrType->getStructElementType(index);
    }
  }
}

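// Reports invalid reads: out-of-bounds addresses, reads from write-only
// buffers, and reads from buffer regions currently mapped for writing on the
// host. Local and private memory is exempt from the map-region check.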
void MemCheck::checkLoad(const Memory* memory, size_t address,
                         size_t size) const
{
  if (!memory->isAddressValid(address, size))
  {
    logInvalidAccess(true, memory->getAddressSpace(), address, size);
    return;
  }

  if (memory->getBuffer(address)->flags & CL_MEM_WRITE_ONLY)
  {
    m_context->logError("Invalid read from write-only buffer");
  }

  if (memory->getAddressSpace() == AddrSpaceLocal ||
      memory->getAddressSpace() == AddrSpacePrivate)
    return;

  // Check if memory location is currently mapped for writing
  for (auto region = m_mapRegions.begin(); region != m_mapRegions.end();
       region++)
  {
    if (region->type == MapRegion::WRITE &&
        address < region->address + region->size &&
        address + size >= region->address)
    {
      m_context->logError("Invalid read from buffer mapped for writing");
    }
  }
}

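// Reports invalid writes: out-of-bounds addresses, writes to read-only
// buffers, and writes overlapping any buffer region currently mapped on the
// host. Local and private memory is exempt from the map-region check.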
void MemCheck::checkStore(const Memory* memory, size_t address,
                          size_t size) const
{
  if (!memory->isAddressValid(address, size))
  {
    logInvalidAccess(false, memory->getAddressSpace(), address, size);
    return;
  }

  if (memory->getBuffer(address)->flags & CL_MEM_READ_ONLY)
  {
    m_context->logError("Invalid write to read-only buffer");
  }

  if (memory->getAddressSpace() == AddrSpaceLocal ||
      memory->getAddressSpace() == AddrSpacePrivate)
    return;

  // Check if memory location is currently mapped
  for (auto region = m_mapRegions.begin(); region != m_mapRegions.end();
       region++)
  {
    if (address < region->address + region->size &&
        address + size >= region->address)
    {
      m_context->logError("Invalid write to mapped buffer");
    }
  }
}

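// Builds and sends the error message for an out-of-bounds access, including
// the access size, address-space name, address, and the current kernel,
// entity, and source location.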
void MemCheck::logInvalidAccess(bool read, unsigned addrSpace, size_t address,
                                size_t size) const
{
  Context::Message msg(ERROR, m_context);
  msg << "Invalid " << (read ? "read" : "write") << " of size " << size
      << " at " << getAddressSpaceName(addrSpace) << " memory address 0x" << hex
      << address << endl
      << msg.INDENT << "Kernel: " << msg.CURRENT_KERNEL << endl
      << "Entity: " << msg.CURRENT_ENTITY << endl
      << msg.CURRENT_LOCATION << endl;
  msg.send();
}