/* radare2 - LGPL - Copyright 2008-2019 - condret, pancake, alvaro_fe */

#include <r_io.h>
#include <sdb.h>
#include <config.h>
#include "io_private.h"

R_LIB_VERSION (r_io);
9 
/* Adapter matching the cbOnIterMap signature: forwards a read to the
 * descriptor layer; the map and user arguments are intentionally unused. */
static int fd_read_at_wrap (RIO *io, int fd, ut64 addr, ut8 *buf, int len, RIOMap *map, void *user) {
	(void)map;
	(void)user;
	const int res = r_io_fd_read_at (io, fd, addr, buf, len);
	return res;
}
13 
/* Adapter matching the cbOnIterMap signature: forwards a write to the
 * descriptor layer; the map and user arguments are intentionally unused. */
static int fd_write_at_wrap (RIO *io, int fd, ut64 addr, ut8 *buf, int len, RIOMap *map, void *user) {
	(void)map;
	(void)user;
	const int res = r_io_fd_write_at (io, fd, addr, buf, len);
	return res;
}

typedef int (*cbOnIterMap)(RIO *io, int fd, ut64 addr, ut8 *buf, int len, RIOMap *map, void *user);

// Walk the map skyline over [vaddr, vaddr + len) and apply `op` (a read or
// write trampoline) to each covered span whose map matches `match_flg`
// (or unconditionally when io->p_cache is enabled).
// If prefix_mode is true, returns the number of bytes of operated prefix; returns < 0 on error.
// If prefix_mode is false, operates in non-stop mode and returns true iff all IO operations on overlapped maps are complete.
static st64 on_map_skyline(RIO *io, ut64 vaddr, ut8 *buf, int len, int match_flg, cbOnIterMap op, bool prefix_mode) {
	RVector *skyline = &io->map_skyline.v;
	ut64 addr = vaddr;
	size_t i;
	// wrap is set when vaddr + len overflows past UT64_MAX: the walk must
	// continue from address 0 (only meaningful in non-prefix mode)
	bool ret = true, wrap = !prefix_mode && vaddr + len < vaddr;
#define CMP(addr, part) ((addr) < r_itv_end (((RSkylineItem *)(part))->itv) - 1 ? -1 : \
			(addr) > r_itv_end (((RSkylineItem *)(part))->itv) - 1 ? 1 : 0)
	// Let i be the first skyline part whose right endpoint > addr
	if (!len) {
		i = r_vector_len (skyline);
	} else {
		r_vector_lower_bound (skyline, addr, i, CMP);
		if (i == r_vector_len (skyline) && wrap) {
			wrap = false;
			i = 0;
			addr = 0;
		}
	}
#undef CMP
	while (i < r_vector_len (skyline)) {
		const RSkylineItem *part = r_vector_index_ptr (skyline, i);
		// Right endpoint <= addr: this part is entirely behind us, skip it
		if (r_itv_end (part->itv) - 1 < addr) {
			i++;
			if (wrap && i == r_vector_len (skyline)) {
				// restart from the bottom once the address space wrapped
				wrap = false;
				i = 0;
				addr = 0;
			}
			continue;
		}
		if (addr < part->itv.addr) {
			// [addr, part->itv.addr) is a gap
			if (prefix_mode || len <= part->itv.addr - vaddr) {
				break;
			}
			addr = part->itv.addr;
		}
		// Now left endpoint <= addr < right endpoint
		ut64 len1 = R_MIN (vaddr + len - addr, r_itv_end (part->itv) - addr);
		if (len1 < 1) {
			break;
		}
		RIOMap *map = part->user;
		// The map satisfies the permission requirement or p_cache is enabled
		if (((map->perm & match_flg) == match_flg || io->p_cache)) {
			// translate the virtual address into the map-relative fd offset
			st64 result = op (io, map->fd, map->delta + addr - r_io_map_begin(map),
					buf + (addr - vaddr), len1, map, NULL);
			if (prefix_mode) {
				// prefix mode stops at the first failed or short operation
				if (result < 0) {
					return result;
				}
				addr += result;
				if (result != len1) {
					break;
				}
			} else {
				// non-stop mode records the failure but keeps going
				if (result != len1) {
					ret = false;
				}
				addr += len1;
			}
		} else if (prefix_mode) {
			break;
		} else {
			// permission mismatch: skip this part and report failure
			addr += len1;
			ret = false;
		}
		// Reaches the end
		if (addr == vaddr + len) {
			break;
		}
		// Wrap to the beginning of skyline if address wraps
		if (!addr) {
			i = 0;
		}
	}
	return prefix_mode ? addr - vaddr : ret;
}
101 
r_io_new(void)102 R_API RIO* r_io_new(void) {
103 	return r_io_init (R_NEW0 (RIO));
104 }
105 
/* Initialize an already-allocated RIO: descriptors, skyline, maps, caches,
 * plugins, undo history and the event bus. Returns io, or NULL if io is NULL. */
R_API RIO* r_io_init(RIO* io) {
	r_return_val_if_fail (io, NULL);
	io->addrbytes = 1; // one byte per address by default
	r_io_desc_init (io);
	r_skyline_init (&io->map_skyline);
	r_io_map_init (io);
	r_io_cache_init (io);
	r_io_plugin_init (io);
	r_io_undo_init (io);
	io->event = r_event_new (io);
	return io;
}
118 
r_io_free(RIO * io)119 R_API void r_io_free(RIO *io) {
120 	if (io) {
121 		r_io_fini (io);
122 		r_cache_free (io->buffer);
123 		free (io);
124 	}
125 }
126 
r_io_open_buffer(RIO * io,RBuffer * b,int perm,int mode)127 R_API RIODesc *r_io_open_buffer(RIO *io, RBuffer *b, int perm, int mode) {
128 	ut64 bufSize = r_buf_size (b);
129 	char *uri = r_str_newf ("malloc://%" PFMT64d, bufSize);
130 	RIODesc *desc = r_io_open_nomap (io, uri, perm, mode);
131 	if (desc) {
132 		const ut8 *tmp = r_buf_data (b, &bufSize);
133 		r_io_desc_write (desc, tmp, bufSize);
134 	}
135 	free (uri);
136 	return desc;
137 }
138 
r_io_open_nomap(RIO * io,const char * uri,int perm,int mode)139 R_API RIODesc *r_io_open_nomap(RIO *io, const char *uri, int perm, int mode) {
140 	r_return_val_if_fail (io && uri, NULL);
141 	RIODesc *desc = r_io_desc_open (io, uri, perm, mode);
142 	if ((io->autofd || !io->desc) && desc) {
143 		io->desc = desc;
144 	}
145 	//set desc as current if autofd or io->desc==NULL
146 	return desc;
147 }
148 
149 /* opens a file and maps it to 0x0 */
r_io_open(RIO * io,const char * uri,int perm,int mode)150 R_API RIODesc* r_io_open(RIO* io, const char* uri, int perm, int mode) {
151 	r_return_val_if_fail (io, NULL);
152 	RIODesc* desc = r_io_open_nomap (io, uri, perm, mode);
153 	if (desc) {
154 		r_io_map_new (io, desc->fd, desc->perm, 0LL, 0LL, r_io_desc_size (desc));
155 	}
156 	return desc;
157 }
158 
/* opens a file and maps it to an offset specified by the "at"-parameter */
R_API RIODesc* r_io_open_at(RIO* io, const char* uri, int perm, int mode, ut64 at) {
	r_return_val_if_fail (io && uri, NULL);

	RIODesc* desc = r_io_open_nomap (io, uri, perm, mode);
	if (!desc) {
		return NULL;
	}
	ut64 size = r_io_desc_size (desc);
	// second map: when [at, at + size) would run past UT64_MAX, map the
	// overflowing tail at vaddr 0 (its delta skips the bytes that fit below)
	if (size && ((UT64_MAX - size + 1) < at)) {
		// split map into 2 maps if only 1 big map results into integer overflow
		io_map_new (io, desc->fd, desc->perm, UT64_MAX - at + 1, 0LL, size - (UT64_MAX - at) - 1);
		// someone pls take a look at this confusing stuff
		// shrink the main map to the bytes that fit in [at, UT64_MAX]
		size = UT64_MAX - at + 1;
	}
	// skyline not updated
	r_io_map_new (io, desc->fd, desc->perm, 0LL, at, size);
	return desc;
}
179 
/* opens many files, without mapping them. This should be discussed */
R_API RList* r_io_open_many(RIO* io, const char* uri, int perm, int mode) {
	RList* desc_list;
	RListIter* iter;
	RIODesc* desc;
	r_return_val_if_fail (io && io->files && uri, NULL);
	// only plugins implementing open_many (and close, for cleanup) qualify
	RIOPlugin* plugin = r_io_plugin_resolve (io, uri, 1);
	if (!plugin || !plugin->open_many || !plugin->close) {
		return NULL;
	}
	if (!(desc_list = plugin->open_many (io, uri, perm, mode))) {
		return NULL;
	}
	r_list_foreach (desc_list, iter, desc) {
		if (desc) {
			// backfill fields the plugin may have left unset
			if (!desc->plugin) {
				desc->plugin = plugin;
			}
			if (!desc->uri) {
				desc->uri = strdup (uri);
			}
			//should autofd be honored here?
			r_io_desc_add (io, desc);
			// the first opened desc becomes the current one
			if (!io->desc) {
				io->desc = desc;
			}
		}
	}
	// ensure no double free with r_list_close and r_io_free
	desc_list->free = NULL;
	return desc_list;
}
212 
/* Reopen the descriptor identified by `fd` with new permissions/mode,
 * keeping the fd number visible to callers via desc-exchange.
 * Returns false when fd is unknown or the reopen fails. */
R_API bool r_io_reopen(RIO* io, int fd, int perm, int mode) {
	RIODesc	*old, *new;
	char *uri;
	if (!(old = r_io_desc_get (io, fd))) {
		return false;
	}
	//does this really work, or do we have to handler debuggers ugly
	uri = old->referer? old->referer: old->uri;
#if __WINDOWS__ //TODO: workaround, see https://github.com/radareorg/radare2/issues/8840
	// on Windows the file must be closed before it can be opened again
	if (old->plugin->close && old->plugin->close (old)) {
		return false; // TODO: this is an unrecoverable scenario
	}
	if (!(new = r_io_open_nomap (io, uri, perm, mode))) {
		return false;
	}
	r_io_desc_exchange (io, old->fd, new->fd);
	r_io_desc_del (io, old->fd);
	return true;
#else
	if (!(new = r_io_open_nomap (io, uri, perm, mode))) {
		return false;
	}
	// swap fd numbers so callers holding `fd` now refer to the new desc
	r_io_desc_exchange (io, old->fd, new->fd);
	return r_io_desc_close (old); // magic
#endif // __WINDOWS__
}
239 
/* Close every descriptor and drop all maps, then re-init the desc/map/plugin
 * subsystems so the io instance remains usable. */
R_API int r_io_close_all(RIO* io) { // what about undo?
	if (!io) {
		return false;
	}
	r_io_desc_fini (io);
	r_io_map_fini (io);
	ls_free (io->plugins);
	r_io_desc_init (io);
	r_io_map_init (io);
	r_io_cache_fini (io);
	r_io_plugin_init (io);
	// NOTE(review): the cache is fini'd without a matching r_io_cache_init
	// here (unlike desc/map) — confirm r_io_cache_fini leaves it reusable
	return true;
}
253 
/* Physical read at `paddr` through the current descriptor. When io->ff is
 * set, buf is prefilled with the 0xff-pattern so short reads leave a known
 * value. Returns the number of bytes read, or -1 on bad arguments. */
R_API int r_io_pread_at(RIO* io, ut64 paddr, ut8* buf, int len) {
	r_return_val_if_fail (io && buf && len >= 0, -1);
	if (io->ff) {
		memset (buf, io->Oxff, len);
	}
	return r_io_desc_read_at (io->desc, paddr, buf, len);
}
261 
/* Physical write at `paddr` through the current descriptor. Returns the
 * number of bytes written, or -1 on bad arguments. */
R_API int r_io_pwrite_at(RIO* io, ut64 paddr, const ut8* buf, int len) {
	r_return_val_if_fail (io && buf && len > 0, -1);
	return r_io_desc_write_at (io->desc, paddr, buf, len);
}
266 
267 // Returns true iff all reads on mapped regions are successful and complete.
r_io_vread_at_mapped(RIO * io,ut64 vaddr,ut8 * buf,int len)268 R_API bool r_io_vread_at_mapped(RIO* io, ut64 vaddr, ut8* buf, int len) {
269 	r_return_val_if_fail (io && buf && len > 0, false);
270 	if (io->ff) {
271 		memset (buf, io->Oxff, len);
272 	}
273 	return on_map_skyline (io, vaddr, buf, len, R_PERM_R, fd_read_at_wrap, false);
274 }
275 
r_io_vwrite_at(RIO * io,ut64 vaddr,const ut8 * buf,int len)276 static bool r_io_vwrite_at(RIO* io, ut64 vaddr, const ut8* buf, int len) {
277 	return on_map_skyline (io, vaddr, (ut8*)buf, len, R_PERM_W, fd_write_at_wrap, false);
278 }
279 
280 // Deprecated, use either r_io_read_at_mapped or r_io_nread_at instead.
281 // For virtual mode, returns true if all reads on mapped regions are successful
282 // and complete.
283 // For physical mode, the interface is broken because the actual read bytes are
284 // not available. This requires fixes in all call sites.
r_io_read_at(RIO * io,ut64 addr,ut8 * buf,int len)285 R_API bool r_io_read_at(RIO *io, ut64 addr, ut8 *buf, int len) {
286 	r_return_val_if_fail (io && buf && len >= 0, false);
287 	if (len == 0) {
288 		return false;
289 	}
290 	bool ret = (io->va)
291 		? r_io_vread_at_mapped (io, addr, buf, len)
292 		: r_io_pread_at (io, addr, buf, len) > 0;
293 	if (io->cached & R_PERM_R) {
294 		(void)r_io_cache_read (io, addr, buf, len);
295 	}
296 	return ret;
297 }
298 
299 // Returns true iff all reads on mapped regions are successful and complete.
300 // Unmapped regions are filled with io->Oxff in both physical and virtual modes.
301 // Use this function if you want to ignore gaps or do not care about the number
302 // of read bytes.
r_io_read_at_mapped(RIO * io,ut64 addr,ut8 * buf,int len)303 R_API bool r_io_read_at_mapped(RIO *io, ut64 addr, ut8 *buf, int len) {
304 	bool ret;
305 	r_return_val_if_fail (io && buf, false);
306 	if (io->ff) {
307 		memset (buf, io->Oxff, len);
308 	}
309 	if (io->va) {
310 		ret = on_map_skyline (io, addr, buf, len, R_PERM_R, fd_read_at_wrap, false);
311 	} else {
312 		ret = r_io_pread_at (io, addr, buf, len) > 0;
313 	}
314 	if (io->cached & R_PERM_R) {
315 		(void)r_io_cache_read(io, addr, buf, len);
316 	}
317 	return ret;
318 }
319 
320 // For both virtual and physical mode, returns the number of bytes of read
321 // prefix.
322 // Returns -1 on error.
r_io_nread_at(RIO * io,ut64 addr,ut8 * buf,int len)323 R_API int r_io_nread_at(RIO *io, ut64 addr, ut8 *buf, int len) {
324 	int ret;
325 	r_return_val_if_fail (io && buf && len >= 0, -1);
326 	if (len == 0) {
327 		return 0;
328 	}
329 	if (io->va) {
330 		if (io->ff) {
331 			memset (buf, io->Oxff, len);
332 		}
333 		ret = on_map_skyline (io, addr, buf, len, R_PERM_R, fd_read_at_wrap, true);
334 	} else {
335 		ret = r_io_pread_at (io, addr, buf, len);
336 	}
337 	if (ret > 0 && io->cached & R_PERM_R) {
338 		(void)r_io_cache_read (io, addr, buf, len);
339 	}
340 	return ret;
341 }
342 
r_io_write_at(RIO * io,ut64 addr,const ut8 * buf,int len)343 R_API bool r_io_write_at(RIO* io, ut64 addr, const ut8* buf, int len) {
344 	int i;
345 	bool ret = false;
346 	ut8 *mybuf = (ut8*)buf;
347 	r_return_val_if_fail (io && buf && len > 0, false);
348 	if (io->write_mask) {
349 		mybuf = r_mem_dup ((void*)buf, len);
350 		for (i = 0; i < len; i++) {
351 			//this sucks
352 			mybuf[i] &= io->write_mask[i % io->write_mask_len];
353 		}
354 	}
355 	if (io->cached & R_PERM_W) {
356 		ret = r_io_cache_write (io, addr, mybuf, len);
357 	} else if (io->va) {
358 		ret = r_io_vwrite_at (io, addr, mybuf, len);
359 	} else {
360 		ret = r_io_pwrite_at (io, addr, mybuf, len) > 0;
361 	}
362 	if (buf != mybuf) {
363 		free (mybuf);
364 	}
365 	return ret;
366 }
367 
r_io_read(RIO * io,ut8 * buf,int len)368 R_API bool r_io_read(RIO* io, ut8* buf, int len) {
369 	if (io && r_io_read_at (io, io->off, buf, len)) {
370 		io->off += len;
371 		return true;
372 	}
373 	return false;
374 }
375 
r_io_write(RIO * io,ut8 * buf,int len)376 R_API bool r_io_write(RIO* io, ut8* buf, int len) {
377 	if (io && buf && len > 0 && r_io_write_at (io, io->off, buf, len)) {
378 		io->off += len;
379 		return true;
380 	}
381 	return false;
382 }
383 
r_io_size(RIO * io)384 R_API ut64 r_io_size(RIO* io) {
385 // TODO: rethink this, maybe not needed
386 	return io? r_io_desc_size (io->desc): 0LL;
387 }
388 
r_io_is_listener(RIO * io)389 R_API bool r_io_is_listener(RIO* io) {
390 	if (io && io->desc && io->desc->plugin && io->desc->plugin->listener) {
391 		return io->desc->plugin->listener (io->desc);
392 	}
393 	return false;
394 }
395 
r_io_system(RIO * io,const char * cmd)396 R_API char *r_io_system(RIO* io, const char* cmd) {
397 	if (io && io->desc && io->desc->plugin && io->desc->plugin->system) {
398 		return io->desc->plugin->system (io, io->desc, cmd);
399 	}
400 	return NULL;
401 }
402 
r_io_resize(RIO * io,ut64 newsize)403 R_API bool r_io_resize(RIO* io, ut64 newsize) {
404 	if (io) {
405 		RList *maps = r_io_map_get_for_fd (io, io->desc->fd);
406 		RIOMap *current_map;
407 		RListIter *iter;
408 		ut64 fd_size = r_io_fd_size (io, io->desc->fd);
409 		r_list_foreach (maps, iter, current_map) {
410 			// we just resize map of the same size of its fd
411 			if (r_io_map_size (current_map) == fd_size) {
412 				r_io_map_resize (io, current_map->id, newsize);
413 			}
414 		}
415 		r_list_free (maps);
416 		return r_io_desc_resize (io->desc, newsize);
417 	}
418 	return false;
419 }
420 
r_io_close(RIO * io)421 R_API bool r_io_close(RIO *io) {
422 	return io ? r_io_desc_close (io->desc) : false;
423 }
424 
/* Insert `size` zero bytes at `addr`, growing the file: bytes previously at
 * [addr, cur_size) end up at [addr + size, ...). Uses the plugin's extend op
 * when available, otherwise resize + copy. Returns boolean success (int). */
R_API int r_io_extend_at(RIO* io, ut64 addr, ut64 size) {
	ut64 cur_size, tmp_size;
	ut8* buffer;
	if (!io || !io->desc || !io->desc->plugin || !size) {
		return false;
	}
	if (io->desc->plugin->extend) {
		int ret;
		// preserve the seek position around the plugin call
		ut64 cur_off = io->off;
		r_io_seek (io, addr, R_IO_SEEK_SET);
		ret = r_io_desc_extend (io->desc, size);
		//no need to seek here
		io->off = cur_off;
		return ret;
	}
	// the manual fallback needs both read and write permission
	if ((io->desc->perm & R_PERM_RW) != R_PERM_RW) {
		return false;
	}
	cur_size = r_io_desc_size (io->desc);
	if (addr > cur_size) {
		return false;
	}
	// reject cur_size + size overflow
	if ((UT64_MAX - size) < cur_size) {
		return false;
	}
	if (!r_io_resize (io, cur_size + size)) {
		return false;
	}
	// nothing to move when extending right at the end of the file
	if ((tmp_size = cur_size - addr) == 0LL) {
		return true;
	}
	if (!(buffer = calloc (1, (size_t) tmp_size + 1))) {
		return false;
	}
	// save the tail, zero the inserted gap, then write the tail back after it
	r_io_pread_at (io, addr, buffer, (int) tmp_size);
	/* fill with null bytes */
	ut8 *empty = calloc (1, size);
	if (empty) {
		r_io_pwrite_at (io, addr, empty, size);
		free (empty);
	}
	r_io_pwrite_at (io, addr + size, buffer, (int) tmp_size);
	free (buffer);
	return true;
}
470 
r_io_set_write_mask(RIO * io,const ut8 * mask,int len)471 R_API bool r_io_set_write_mask(RIO* io, const ut8* mask, int len) {
472 	if (!io || len < 1) {
473 		return false;
474 	}
475 	free (io->write_mask);
476 	if (!mask) {
477 		io->write_mask = NULL;
478 		io->write_mask_len = 0;
479 		return true;
480 	}
481 	io->write_mask = (ut8*) malloc (len);
482 	memcpy (io->write_mask, mask, len);
483 	io->write_mask_len = len;
484 	return true;
485 }
486 
r_io_p2v(RIO * io,ut64 pa)487 R_API ut64 r_io_p2v(RIO *io, ut64 pa) {
488 	RIOMap *map = r_io_map_get_paddr (io, pa);
489 	if (map) {
490 		return pa - map->delta + r_io_map_begin (map);
491 	}
492 	return UT64_MAX;
493 }
494 
/* Translate a virtual address to its physical counterpart. */
R_API ut64 r_io_v2p(RIO *io, ut64 va) {
	RIOMap *map = r_io_map_get (io, va);
	if (map) {
		st64 delta = va - r_io_map_begin (map);
		// NOTE(review): this evaluates to va + map->delta, which is NOT the
		// inverse of r_io_p2v above (that computes pa - delta + begin). The
		// inverse would be map->delta + delta, without the extra
		// r_io_map_begin term — confirm whether this asymmetry is intended.
		return r_io_map_begin (map) + map->delta + delta;
	}
	return UT64_MAX;
}
503 
/* Fill the RIOBind vtable with this io instance and the public IO
 * entrypoints, so consumers holding only an RIOBind can drive IO. */
R_API void r_io_bind(RIO *io, RIOBind *bnd) {
	r_return_if_fail (io && bnd);

	bnd->io = io;
	bnd->init = true;
	// descriptor management
	bnd->desc_use = r_io_use_fd;
	bnd->desc_get = r_io_desc_get;
	bnd->desc_size = r_io_desc_size;
	// address translation
	bnd->p2v = r_io_p2v;
	bnd->v2p = r_io_v2p;
	// open/close and addressed IO
	bnd->open = r_io_open_nomap;
	bnd->open_at = r_io_open_at;
	bnd->close = r_io_fd_close;
	bnd->read_at = r_io_read_at;
	bnd->write_at = r_io_write_at;
	bnd->system = r_io_system;
	// fd-level operations
	bnd->fd_open = r_io_fd_open;
	bnd->fd_close = r_io_fd_close;
	bnd->fd_seek = r_io_fd_seek;
	bnd->fd_size = r_io_fd_size;
	bnd->fd_resize = r_io_fd_resize;
	bnd->fd_read = r_io_fd_read;
	bnd->fd_write = r_io_fd_write;
	bnd->fd_read_at = r_io_fd_read_at;
	bnd->fd_write_at = r_io_fd_write_at;
	bnd->fd_is_dbg = r_io_fd_is_dbg;
	bnd->fd_get_name = r_io_fd_get_name;
	bnd->fd_get_map = r_io_map_get_for_fd;
	bnd->fd_remap = r_io_map_remap_fd;
	// map queries
	bnd->is_valid_offset = r_io_is_valid_offset;
	bnd->map_get = r_io_map_get;
	bnd->map_get_paddr = r_io_map_get_paddr;
	bnd->addr_is_mapped = r_io_addr_is_mapped;
	bnd->map_add = r_io_map_add;
#if HAVE_PTRACE
	bnd->ptrace = r_io_ptrace;
	bnd->ptrace_func = r_io_ptrace_func;
#endif
}
543 
/* moves bytes up (+) or down (-) within the specified range */
R_API bool r_io_shift(RIO* io, ut64 start, ut64 end, st64 move) {
	ut8* buf;
	ut64 chunksize = 0x10000;
	ut64 saved_off = io->off;
	ut64 src, shiftsize = r_num_abs (move);
	// the shift distance must be non-zero and strictly smaller than the range
	if (!shiftsize || (end - start) <= shiftsize) {
		return false;
	}
	ut64 rest = (end - start) - shiftsize;
	if (!(buf = calloc (1, chunksize + 1))) {
		return false;
	}
	// moving up copies chunks backwards from the end so each source chunk is
	// read before it is overwritten; moving down copies forwards
	if (move > 0) {
		src = end - shiftsize;
	} else {
		src = start + shiftsize;
	}
	while (rest > 0) {
		if (chunksize > rest) {
			chunksize = rest;
		}
		if (move > 0) {
			src -= chunksize;
		}
		r_io_read_at (io, src, buf, chunksize);
		r_io_write_at (io, src + move, buf, chunksize);
		if (move < 0) {
			src += chunksize;
		}
		rest -= chunksize;
	}
	free (buf);
	// restore the caller's seek position
	io->off = r_io_desc_seek (io->desc, saved_off, R_IO_SEEK_SET);
	return true;
}
580 
r_io_seek(RIO * io,ut64 offset,int whence)581 R_API ut64 r_io_seek(RIO* io, ut64 offset, int whence) {
582 	if (!io) {
583 		return 0LL;
584 	}
585 	switch (whence) {
586 	case R_IO_SEEK_SET:
587 		io->off = offset;
588 		break;
589 	case R_IO_SEEK_CUR:
590 		io->off += offset;
591 		break;
592 	case R_IO_SEEK_END:
593 	default:
594 		io->off = r_io_desc_seek (io->desc, offset, whence);
595 		break;
596 	}
597 	return io->off;
598 }

#if HAVE_PTRACE

#if USE_PTRACE_WRAP
#include <ptrace_wrap.h>
#include <errno.h>

io_ptrace_wrap_instance(RIO * io)606 static ptrace_wrap_instance *io_ptrace_wrap_instance(RIO *io) {
607 	if (!io->ptrace_wrap) {
608 		io->ptrace_wrap = R_NEW (ptrace_wrap_instance);
609 		if (!io->ptrace_wrap) {
610 			return NULL;
611 		}
612 		if (ptrace_wrap_instance_start (io->ptrace_wrap) < 0) {
613 			R_FREE (io->ptrace_wrap);
614 			return NULL;
615 		}
616 	}
617 	return io->ptrace_wrap;
618 }
619 #endif
620 
/* ptrace(2) front-end: routes the request through the ptrace-wrap thread
 * when USE_PTRACE_WRAP is enabled, otherwise calls ptrace directly. */
R_API long r_io_ptrace(RIO *io, r_ptrace_request_t request, pid_t pid, void *addr, r_ptrace_data_t data) {
#if USE_PTRACE_WRAP
	ptrace_wrap_instance *wrap = io_ptrace_wrap_instance (io);
	if (!wrap) {
		// no wrap instance available; report failure with errno cleared
		errno = 0;
		return -1;
	}
	return ptrace_wrap (wrap, request, pid, addr, data);
#else
	return ptrace (request, pid, addr, data);
#endif
}
633 
/* Fork and run child_callback(child_callback_user) in the child, going
 * through the ptrace-wrap thread when enabled. Returns the child pid (or 0
 * in the child on the non-wrap path), -1 on error. */
R_API pid_t r_io_ptrace_fork(RIO *io, void (*child_callback)(void *), void *child_callback_user) {
#if USE_PTRACE_WRAP
	ptrace_wrap_instance *wrap = io_ptrace_wrap_instance (io);
	if (!wrap) {
		errno = 0;
		return -1;
	}
	return ptrace_wrap_fork (wrap, child_callback, child_callback_user);
#else
	pid_t r = r_sys_fork ();
	if (r == 0) {
		// child process: run the callback
		child_callback (child_callback_user);
	}
	return r;
#endif
}
650 
/* Run func(user) on the ptrace-wrap thread when one is available, otherwise
 * call it directly on the current thread. */
R_API void *r_io_ptrace_func(RIO *io, void *(*func)(void *), void *user) {
#if USE_PTRACE_WRAP
	ptrace_wrap_instance *wrap = io_ptrace_wrap_instance (io);
	if (wrap) {
		return ptrace_wrap_func (wrap, func, user);
	}
#endif
	return func (user);
}
660 #endif
661 
662 
663 //remove all descs and maps
r_io_fini(RIO * io)664 R_API int r_io_fini(RIO* io) {
665 	if (!io) {
666 		return false;
667 	}
668 	r_io_desc_cache_fini_all (io);
669 	r_io_desc_fini (io);
670 	r_io_map_fini (io);
671 	ls_free (io->plugins);
672 	r_io_cache_fini (io);
673 	r_list_free (io->undo.w_list);
674 	if (io->runprofile) {
675 		R_FREE (io->runprofile);
676 	}
677 	r_event_free (io->event);
678 #if R_IO_USE_PTRACE_WRAP
679 	if (io->ptrace_wrap) {
680 		ptrace_wrap_instance_stop (io->ptrace_wrap);
681 		free (io->ptrace_wrap);
682 	}
683 #endif
684 	return true;
685 }
686