1 /* radare - LGPL - Copyright 2007-2018 - pancake */
2
3 #include <r_io.h>
4
#if 0
* TODO:
* - make path of indirections shorter (io->undo.foo is slow)
* - Plugin changes in write and seeks
* - Per-fd history log
#endif
11
r_io_undo_init(RIO * io)12 R_API int r_io_undo_init(RIO *io) {
13 /* seek undo */
14 r_io_sundo_reset (io);
15
16 /* write undo */
17 io->undo.w_init = 0;
18 io->undo.w_enable = 0;
19 io->undo.w_enable = 0;
20 io->undo.w_list = r_list_new ();
21
22 return true;
23 }
24
r_io_undo_enable(RIO * io,int s,int w)25 R_API void r_io_undo_enable(RIO *io, int s, int w) {
26 io->undo.s_enable = s;
27 io->undo.w_enable = w;
28 }
29
30 /* undo seekz */
31
/* Undo the last seek: step one entry back in the circular seek-history
 * table and update io->off accordingly.
 * `offset` is the caller's current seek; when no redos exist yet it is
 * stored into the current slot so a later r_io_sundo_redo() can return
 * to it.
 * Returns the entry seeked to, or NULL when seek-undo is disabled or
 * the history is empty. */
R_API RIOUndos *r_io_sundo(RIO *io, ut64 offset) {
	if (!io->undo.s_enable || !io->undo.undos) {
		return NULL;
	}

	RIOUndos *undo;
	/* No redos yet, store the current seek so we can redo to it. */
	if (!io->undo.redos) {
		undo = &io->undo.seek[io->undo.idx];
		undo->off = offset;
		undo->cursor = 0;
	}

	/* step one slot back in the circular buffer */
	io->undo.idx = (io->undo.idx - 1 + R_IO_UNDOS) % R_IO_UNDOS;
	io->undo.undos--;
	io->undo.redos++;

	undo = &io->undo.seek[io->undo.idx];
	/* translate the stored offset through the map it falls into, if any */
	RIOMap *map = r_io_map_get (io, undo->off);
	if (!map || (map->delta == r_io_map_begin (map))) {
		io->off = undo->off;
	} else {
		/* NOTE(review): r_io_sundo_redo() computes this as
		 * `undo->off - r_io_map_begin (map) + map->delta` (different
		 * parenthesization). One of the two is likely wrong — confirm
		 * the intended map-translation semantics before changing. */
		io->off = undo->off - (r_io_map_begin (map) + map->delta);
	}
	return undo;
}
58
/* Redo a previously undone seek: step one entry forward in the circular
 * seek-history table and update io->off accordingly.
 * Returns the entry seeked to, or NULL when seek-undo is disabled or
 * there is nothing to redo. */
R_API RIOUndos *r_io_sundo_redo(RIO *io) {
	RIOUndos *undo;
	RIOMap *map;

	if (!io->undo.s_enable || !io->undo.redos) {
		return NULL;
	}

	/* step one slot forward in the circular buffer */
	io->undo.idx = (io->undo.idx + 1) % R_IO_UNDOS;
	io->undo.undos++;
	io->undo.redos--;

	undo = &io->undo.seek[io->undo.idx];
	/* translate the stored offset through the map it falls into, if any */
	map = r_io_map_get (io, undo->off);
	if (!map || (map->delta == r_io_map_begin (map))) {
		io->off = undo->off;
	} else {
		/* NOTE(review): r_io_sundo() computes this as
		 * `undo->off - (r_io_map_begin (map) + map->delta)` (different
		 * parenthesization). One of the two is likely wrong — confirm
		 * the intended map-translation semantics before changing. */
		io->off = undo->off - r_io_map_begin (map) + map->delta;
	}
	return undo;
}
80
r_io_sundo_push(RIO * io,ut64 off,int cursor)81 R_API void r_io_sundo_push(RIO *io, ut64 off, int cursor) {
82 RIOUndos *undo;
83 if (!io->undo.s_enable) {
84 return;
85 }
86 // don't push duplicate seek
87 if (io->undo.undos > 0) {
88 undo = &io->undo.seek[(io->undo.idx - 1 + R_IO_UNDOS) % R_IO_UNDOS];
89 if (undo->off == off && undo->cursor == cursor) {
90 return;
91 }
92 }
93
94 undo = &io->undo.seek[io->undo.idx];
95 undo->off = off;
96 undo->cursor = cursor;
97 io->undo.idx = (io->undo.idx + 1) % R_IO_UNDOS;
98 /* Only R_IO_UNDOS - 1 undos can be used because r_io_sundo_undo () must
99 * push the current position for redo as well, which takes one entry in
100 * the table. */
101 if (io->undo.undos < R_IO_UNDOS - 1) {
102 io->undo.undos++;
103 }
104 /* We only have linear undo/redo, no tree. So after this new possible
105 * undo, all redos are lost. */
106 io->undo.redos = 0;
107 }
108
r_io_sundo_reset(RIO * io)109 R_API void r_io_sundo_reset(RIO *io) {
110 io->undo.idx = 0;
111 io->undo.undos = 0;
112 io->undo.redos = 0;
113 }
114
/* Enumerate the seek undo/redo history.
 * mode: '=' prints a compact one-line chain, '*' prints radare flag
 * commands, 'j' prints only an opening bracket in the visible code
 * (presumably the items are emitted elsewhere — TODO confirm),
 * 0 or '!' builds and returns an RList of heap-copied RIOUndos
 * (caller owns the list; entries are freed with free()).
 * Returns the list for mode 0/'!' and NULL otherwise, or NULL when
 * seek-undo is disabled. */
R_API RList *r_io_sundo_list(RIO *io, int mode) {
	int idx, undos, redos, i, j, start, end;
	RList* list = NULL;

	if (mode == '!') {
		mode = 0;
	}
	if (!io->undo.s_enable) {
		return NULL;
	}
	undos = io->undo.undos;
	redos = io->undo.redos;

	idx = io->undo.idx;
	/* start = oldest undo entry in the circular buffer */
	start = (idx - undos + R_IO_UNDOS) % R_IO_UNDOS;
	end = (idx + redos + 1 - 1) % R_IO_UNDOS; // +1 slot for current position, -1 due to inclusive end

	j = 0;
	switch (mode) {
	case 'j':
		io->cb_printf ("[");
		break;
	case 0:
		list = r_list_newf (free);
		break;
	}
	for (i = start;/* condition at the end of loop */; i = (i + 1) % R_IO_UNDOS) {
		/* label index: distance from the current position (undo side
		 * counts down towards it, redo side counts up from it);
		 * shadows the outer `idx` on purpose */
		int idx = (j < undos)? undos - j - 1: j - undos - 1;
		RIOUndos *undo = &io->undo.seek[i];
		ut64 addr = undo->off;
		bool notLast = (j + 1 < undos);
		switch (mode) {
		case '=':
			if (j < undos) {
				io->cb_printf ("0x%"PFMT64x"%s", addr, notLast? " > ": "");
			}
			break;
		case '*':
			if (j < undos) {
				io->cb_printf ("f undo_%d @ 0x%"PFMT64x"\n", idx, addr);
			} else if (j == undos && j != 0 && redos != 0) {
				io->cb_printf ("# Current undo/redo position.\n");
			} else if (j != undos) {
				io->cb_printf ("f redo_%d @ 0x%"PFMT64x"\n", idx, addr);
			}
			break;
		case 0:
			if (list) {
				RIOUndos *u = R_NEW0 (RIOUndos);
				if (u) {
					if (!(j == undos && redos == 0)) {
						// Current position gets pushed before seek, so there
						// is no valid offset when we are at the end of list.
						memcpy (u, undo, sizeof (RIOUndos));
					} else {
						u->off = io->off;
					}
					r_list_append (list, u);
				}
			}
			break;
		}
		j++;
		/* inclusive end: process `end` itself, then stop */
		if (i == end) {
			break;
		}
	}
	switch (mode) {
	case '=':
		io->cb_printf ("\n");
		break;
	}
	return list;
}
189
190 /* undo writez */
191
r_io_wundo_new(RIO * io,ut64 off,const ut8 * data,int len)192 R_API void r_io_wundo_new(RIO *io, ut64 off, const ut8 *data, int len) {
193 RIOUndoWrite *uw;
194 if (!io->undo.w_enable) {
195 return;
196 }
197 /* undo write changes */
198 uw = R_NEW0 (RIOUndoWrite);
199 if (!uw) {
200 return;
201 }
202 uw->set = true;
203 uw->off = off;
204 uw->len = len;
205 uw->n = (ut8*) malloc (len);
206 if (!uw->n) {
207 free (uw);
208 return;
209 }
210 memcpy (uw->n, data, len);
211 uw->o = (ut8*) malloc (len);
212 if (!uw->o) {
213 R_FREE (uw);
214 return;
215 }
216 memset (uw->o, 0xff, len);
217 r_io_read_at (io, off, uw->o, len);
218 r_list_append (io->undo.w_list, uw);
219 }
220
r_io_wundo_clear(RIO * io)221 R_API void r_io_wundo_clear(RIO *io) {
222 // XXX memory leak
223 io->undo.w_list = r_list_new ();
224 }
225
226 // rename to r_io_undo_length ?
r_io_wundo_size(RIO * io)227 R_API int r_io_wundo_size(RIO *io) {
228 return r_list_length (io->undo.w_list);
229 }
230
231 // TODO: Deprecate or so? iterators must be language-wide, but helpers are useful
r_io_wundo_list(RIO * io)232 R_API void r_io_wundo_list(RIO *io) {
233 #define BW 8 /* byte wrap */
234 RListIter *iter;
235 RIOUndoWrite *u;
236 int i = 0, j, len;
237
238 if (io->undo.w_init) {
239 r_list_foreach (io->undo.w_list, iter, u) {
240 io->cb_printf ("%02d %c %d %08" PFMT64x ": ", i, u->set ? '+' : '-', u->len, u->off);
241 len = (u->len > BW) ? BW : u->len;
242 for (j = 0; j < len; j++) {
243 io->cb_printf ("%02x ", u->o[j]);
244 }
245 if (len == BW) {
246 io->cb_printf (".. ");
247 }
248 io->cb_printf ("=> ");
249 for (j = 0; j < len; j++) {
250 io->cb_printf ("%02x ", u->n[j]);
251 }
252 if (len == BW) {
253 io->cb_printf (".. ");
254 }
255 io->cb_printf ("\n");
256 i++;
257 }
258 }
259 }
260
r_io_wundo_apply(RIO * io,RIOUndoWrite * u,int set)261 R_API int r_io_wundo_apply(RIO *io, RIOUndoWrite *u, int set) {
262 int orig = io->undo.w_enable;
263 io->undo.w_enable = 0;
264 if (set) {
265 r_io_write_at (io, u->off, u->n, u->len);
266 u->set = true;
267 } else {
268 r_io_write_at (io, u->off, u->o, u->len);
269 u->set = false;
270 }
271 io->undo.w_enable = orig;
272 return 0;
273 }
274
r_io_wundo_apply_all(RIO * io,int set)275 R_API void r_io_wundo_apply_all(RIO *io, int set) {
276 RListIter *iter;
277 RIOUndoWrite *u;
278
279 r_list_foreach_prev (io->undo.w_list, iter, u) {
280 r_io_wundo_apply (io, u, set); //UNDO_WRITE_UNSET);
281 eprintf ("%s 0x%08"PFMT64x"\n", set?"redo":"undo", u->off);
282 }
283 }
284
285 /* sets or unsets the writes done */
286 /* if ( set == 0 ) unset(n) */
r_io_wundo_set(RIO * io,int n,int set)287 R_API int r_io_wundo_set(RIO *io, int n, int set) {
288 RListIter *iter;
289 RIOUndoWrite *u = NULL;
290 int i = 0;
291 if (io->undo.w_init) {
292 r_list_foreach_prev (io->undo.w_list, iter, u) {
293 if (i++ == n) {
294 break;
295 }
296 }
297 if (u) { // wtf?
298 r_io_wundo_apply (io, u, set);
299 return true;
300 }
301 eprintf ("invalid undo-write index\n");
302 } else {
303 eprintf ("no writes done\n");
304 }
305 return false;
306 }
307