/*
 * Copyright (c) 2013-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef PT_SECTION_H
#define PT_SECTION_H

#include <stdint.h>
#include <stddef.h>

#if defined(FEATURE_THREADS)
#  include <threads.h>
#endif /* defined(FEATURE_THREADS) */

#include "intel-pt.h"

struct pt_block_cache;


/* A section of contiguous memory loaded from a file. */
struct pt_section {
	/* The name of the file. */
	char *filename;

	/* The offset into the file. */
	uint64_t offset;

	/* The (adjusted) size in bytes.  The size is truncated to match the
	 * actual file size.
	 */
	uint64_t size;

	/* A pointer to OS-specific file status for detecting changes.
	 *
	 * The status is initialized on first pt_section_map() and will be
	 * left in the section until the section is destroyed.  This field
	 * is owned by the OS-specific mmap-based section implementation.
	 */
	void *status;

	/* A pointer to implementation-specific mapping information - NULL if
	 * the section is currently not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	void *mapping;

	/* A pointer to an optional block cache.
	 *
	 * The cache is created on request and destroyed implicitly when the
	 * section is unmapped.
	 *
	 * We read this field without locking and only lock the section in order
	 * to install the block cache.
	 *
	 * We rely on guaranteed atomic operations as specified in section 8.1.1
	 * in Volume 3A of the Intel(R) Software Developer's Manual at
	 * http://www.intel.com/sdm.
	 */
	struct pt_block_cache *bcache;

	/* A pointer to the iscache attached to this section.
	 *
	 * The pointer is initialized when the iscache attaches and cleared when
	 * it detaches again.  There can be at most one iscache attached to this
	 * section at any time.
	 *
	 * In addition to attaching, the iscache will need to obtain a reference
	 * to the section, which it needs to drop again after detaching.
	 */
	struct pt_image_section_cache *iscache;

	/* A pointer to the unmap function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*unmap)(struct pt_section *sec);

	/* A pointer to the read function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*read)(const struct pt_section *sec, uint8_t *buffer,
		    uint16_t size, uint64_t offset);

	/* A pointer to the memsize function - NULL if the section is currently
	 * not mapped.
	 *
	 * This field is set in pt_section_map() and owned by the mapping
	 * implementation.
	 */
	int (*memsize)(const struct pt_section *section, uint64_t *size);

#if defined(FEATURE_THREADS)
	/* A lock protecting this section.
	 *
	 * Most operations do not require the section to be locked.  All
	 * actual locking should be handled by pt_section_* functions.
	 */
	mtx_t lock;

	/* A lock protecting the @iscache and @acount fields.
	 *
	 * We need separate locks to protect against a deadlock scenario when
	 * the iscache is mapping or unmapping this section.
	 *
	 * The attach lock must not be taken while holding the section lock; the
	 * other way round is OK.
	 */
	mtx_t alock;
#endif /* defined(FEATURE_THREADS) */

	/* The number of current users.  The last user destroys the section. */
	uint16_t ucount;

	/* The number of attaches.  This must be <= @ucount. */
	uint16_t acount;

	/* The number of current mappers.  The last mapper unmaps the section. */
	uint16_t mcount;
};
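
/* Illustrative sketch (an assumption for illustration, not part of the API):
 * how the counts above relate over a typical lifecycle, derived from the
 * field documentation.
 *
 *	section = pt_mk_section(...);	ucount=1, acount=0, mcount=0
 *	pt_section_get(section);	ucount=2
 *	pt_section_map(section);	mcount=1
 *	pt_section_unmap(section);	mcount=0
 *	pt_section_put(section);	ucount=1
 *	pt_section_put(section);	ucount=0, the section is destroyed
 */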

/* Create a section.
 *
 * The returned section describes the contents of @file starting at @offset
 * for @size bytes.
 *
 * If @file is shorter than the requested @size, the section is silently
 * truncated to the size of @file.
 *
 * If @offset lies beyond the end of @file, no section is created.
 *
 * The returned section is not mapped and starts with a user count of one and
 * instruction caching enabled.
 *
 * Returns a new section on success, NULL otherwise.
 */
extern struct pt_section *pt_mk_section(const char *file, uint64_t offset,
					uint64_t size);
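
/* Example (illustrative sketch only; the file name, offset, and size below
 * are made-up values):
 *
 *	struct pt_section *section;
 *
 *	section = pt_mk_section("trace.bin", 0x1000ull, 0x400ull);
 *	if (!section)
 *		return -pte_bad_image;
 *
 *	... use the section ...
 *
 *	(void) pt_section_put(section);
 */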

/* Lock a section.
 *
 * Locks @section.  The section must not be locked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_lock(struct pt_section *section);

/* Unlock a section.
 *
 * Unlocks @section.  The section must be locked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_unlock(struct pt_section *section);
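
/* Example (illustrative sketch of the expected lock/unlock pairing):
 *
 *	int errcode;
 *
 *	errcode = pt_section_lock(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	... modify the section while holding the lock ...
 *
 *	return pt_section_unlock(section);
 */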

/* Add another user.
 *
 * Increments the user count of @section.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_overflow if the user count would overflow.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_get(struct pt_section *section);

/* Remove a user.
 *
 * Decrements the user count of @section.  Destroys the section if the
 * count reaches zero.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_internal if the user count is already zero.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_put(struct pt_section *section);
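
/* Example (illustrative sketch): a caller that stores @section in its own
 * data structure takes a reference and drops it again when done.
 *
 *	int errcode;
 *
 *	errcode = pt_section_get(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	... keep and use the section ...
 *
 *	(void) pt_section_put(section);
 */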

/* Attach the image section cache user.
 *
 * Similar to pt_section_get() but sets @section->iscache to @iscache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @iscache is NULL.
 * Returns -pte_internal if a different cache is already attached.
 * Returns -pte_overflow if the attach count would overflow.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_attach(struct pt_section *section,
			     struct pt_image_section_cache *iscache);

/* Detach the image section cache user.
 *
 * Similar to pt_section_put() but clears @section->iscache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @iscache is NULL.
 * Returns -pte_internal if the attach count is already zero.
 * Returns -pte_internal if @section->iscache is not equal to @iscache.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_detach(struct pt_section *section,
			     struct pt_image_section_cache *iscache);
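
/* Example (illustrative sketch of the attach/detach pairing; the iscache
 * also obtains its own reference as required above, error handling is
 * shortened):
 *
 *	errcode = pt_section_get(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	errcode = pt_section_attach(section, iscache);
 *	if (errcode < 0) {
 *		(void) pt_section_put(section);
 *		return errcode;
 *	}
 *
 *	... the iscache uses the section ...
 *
 *	(void) pt_section_detach(section, iscache);
 *	(void) pt_section_put(section);
 */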

/* Return the filename of @section. */
extern const char *pt_section_filename(const struct pt_section *section);

/* Return the offset of the section in bytes. */
extern uint64_t pt_section_offset(const struct pt_section *section);

/* Return the size of the section in bytes. */
extern uint64_t pt_section_size(const struct pt_section *section);

/* Return the amount of memory currently used by the section in bytes.
 *
 * We only consider the amount of memory required for mapping @section; we
 * ignore the size of the section object itself and the size of the status
 * object.
 *
 * If @section is currently not mapped, the size is zero.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @size or @section is NULL.
 */
extern int pt_section_memsize(struct pt_section *section, uint64_t *size);
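
/* Example (illustrative sketch): query the current memory usage; the result
 * is zero if @section is not mapped.
 *
 *	uint64_t memsize;
 *	int errcode;
 *
 *	errcode = pt_section_memsize(section, &memsize);
 *	if (errcode < 0)
 *		return errcode;
 */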

/* Allocate a block cache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_nomem if the block cache can't be allocated.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_alloc_bcache(struct pt_section *section);

/* Request block caching.
 *
 * The caller must ensure that @section is mapped.
 */
static inline int pt_section_request_bcache(struct pt_section *section)
{
	if (!section)
		return -pte_internal;

	if (section->bcache)
		return 0;

	return pt_section_alloc_bcache(section);
}

/* Return @section's block cache, if available.
 *
 * The caller must ensure that @section is mapped.
 *
 * The cache is not use-counted.  It is only valid as long as the caller keeps
 * @section mapped.
 */
static inline struct pt_block_cache *
pt_section_bcache(const struct pt_section *section)
{
	if (!section)
		return NULL;

	return section->bcache;
}
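
/* Example (illustrative sketch): with @section mapped, request a block cache
 * and look it up; a NULL result means no cache has been installed.
 *
 *	struct pt_block_cache *bcache;
 *	int errcode;
 *
 *	errcode = pt_section_request_bcache(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	bcache = pt_section_bcache(section);
 *	if (!bcache)
 *		... fall back to an uncached lookup ...
 */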

/* Create the OS-specific file status.
 *
 * On success, allocates a status object, provides a pointer to it in @pstatus
 * and provides the file size in @psize.
 *
 * The status object will be free()'ed when its section is destroyed.
 *
 * This function is implemented in the OS-specific section implementation.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @pstatus, @psize, or @filename is NULL.
 * Returns -pte_bad_image if @filename can't be opened.
 * Returns -pte_nomem if the status object can't be allocated.
 */
extern int pt_section_mk_status(void **pstatus, uint64_t *psize,
				const char *filename);

/* Perform on-map maintenance work.
 *
 * Notifies an attached image section cache about the mapping of @section.
 *
 * This function is called by the OS-specific pt_section_map() implementation
 * after @section has been successfully mapped and @section has been unlocked.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_on_map_lock(struct pt_section *section);

static inline int pt_section_on_map(struct pt_section *section)
{
	if (section && !section->iscache)
		return 0;

	return pt_section_on_map_lock(section);
}

/* Map a section.
 *
 * Maps @section into memory.  Mappings are use-counted.  The number of
 * pt_section_map() calls must match the number of pt_section_unmap()
 * calls.
 *
 * This function is implemented in the OS-specific section implementation.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_image if @section changed or can't be opened.
 * Returns -pte_bad_lock on any locking error.
 * Returns -pte_nomem if @section can't be mapped into memory.
 * Returns -pte_overflow if the map count would overflow.
 */
extern int pt_section_map(struct pt_section *section);

/* Share a section mapping.
 *
 * Increases the map count for @section without notifying an attached image
 * section cache.
 *
 * This function should only be used by the attached image section cache to
 * resolve a deadlock scenario when mapping a section it intends to cache.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_internal if @section->mcount is zero.
 * Returns -pte_bad_lock on any locking error.
 */
extern int pt_section_map_share(struct pt_section *section);

/* Unmap a section.
 *
 * Unmaps @section from memory.
 *
 * Returns zero on success, a negative error code otherwise.
 * Returns -pte_internal if @section is NULL.
 * Returns -pte_bad_lock on any locking error.
 * Returns -pte_internal if @section has not been mapped.
 */
extern int pt_section_unmap(struct pt_section *section);

/* Read memory from a section.
 *
 * Reads at most @size bytes from @section at @offset into @buffer.  @section
 * must be mapped.
 *
 * Returns the number of bytes read on success, a negative error code otherwise.
 * Returns -pte_internal if @section or @buffer is NULL.
 * Returns -pte_nomap if @offset is beyond the end of the section.
 */
extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
			   uint16_t size, uint64_t offset);
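
/* Example (illustrative sketch; the buffer size and @offset are made-up
 * values, and error handling is shortened):
 *
 *	uint8_t buffer[16];
 *	int status, errcode;
 *
 *	errcode = pt_section_map(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	status = pt_section_read(section, buffer, sizeof(buffer), 0x10ull);
 *
 *	errcode = pt_section_unmap(section);
 *	if (errcode < 0)
 *		return errcode;
 *
 *	return status;
 */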

#endif /* PT_SECTION_H */