1 /*
2 * Copyright (c) 2013-2014 Douglas Gilbert.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 */
29
30 /*
31 * This file contains SCSI Extended copy command helper functions for ddpt.
32 */
33
34 /* Was needed for posix_fadvise() */
35 /* #define _XOPEN_SOURCE 600 */
36
37 /* Need _GNU_SOURCE for O_DIRECT */
38 #ifndef _GNU_SOURCE
39 #define _GNU_SOURCE
40 #endif
41
42 #include <unistd.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <ctype.h>
47 #include <errno.h>
48 #include <limits.h>
49 #include <sys/types.h>
50 #include <sys/stat.h>
51 #include <fcntl.h>
52 #define __STDC_FORMAT_MACROS 1
53 #include <inttypes.h>
54 #include <sys/types.h>
55 #include <sys/stat.h>
56
57 /* N.B. config.h must precede anything that depends on HAVE_* */
58 #ifdef HAVE_CONFIG_H
59 #include "config.h"
60 #endif
61
62 #include "ddpt.h" /* includes <signal.h> */
63
64 #include "sg_lib.h"
65 #include "sg_cmds_basic.h"
66 #include "sg_unaligned.h"
67
68
69 #define DEF_3PC_OUT_TIMEOUT (10 * 60) /* is 10 minutes enough? */
70 #define DEF_3PC_IN_TIMEOUT 60 /* these should be fast */
71
72 /* Target descriptor variety */
73 #define TD_FC_WWPN 1
74 #define TD_FC_PORT 2
75 #define TD_FC_WWPN_AND_PORT 4
76 #define TD_SPI 8
77 #define TD_VPD 16
78 #define TD_IPV4 32
79 #define TD_ALIAS 64
80 #define TD_RDMA 128
81 #define TD_FW 256
82 #define TD_SAS 512
83 #define TD_IPV6 1024
84 #define TD_IP_COPY_SERVICE 2048
85 #define TD_ROD 4096
86
87 #define MAX_IN_PROGRESS 10
88
89
90 #define LOCAL_ROD_TOKEN_SIZE 1024
91 static unsigned char local_rod_token[LOCAL_ROD_TOKEN_SIZE];
92
93 static const char * rec_copy_op_params_str = "Receive copy operating "
94 "parameters";
95
96 static struct val_str_t xcopy4_cpy_op_status[] = {
97 {0x1, "Operation completed without errors"},
98 {0x2, "Operation completed with errors"},
99 {0x3, "Operation completed without errors but with partial ROD token "
100 "usage"},
101 {0x4, "Operation completed without errors but with residual data"},
102 {0x10, "Operation in progress, foreground or background unknown"},
103 {0x11, "Operation in progress in foreground"},
104 {0x12, "Operation in progress in background"},
105 {0x60, "Operation terminated"},
106 {0x0, NULL},
107 };
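/* Note: these codes appear to correspond to the COPY OPERATION STATUS field
 * returned by RECEIVE ROD TOKEN INFORMATION (see do_rrti() and
 * cpy_op_status_str() below); values 0x10..0x12 are treated as "operation
 * still in progress" by the polling code. */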
108
109
110 static int
111 simplified_dt(const struct dev_info_t * dip)
112 {
113 int d_type = dip->d_type;
114
115 switch (d_type) {
116 case FT_BLOCK:
117 case FT_TAPE:
118 case FT_REG:
119 case FT_DEV_NULL:
120 case FT_FIFO:
121 case FT_ERROR:
122 return d_type;
123 default:
124 if (FT_PT & d_type) {
125 if ((0 == dip->pdt) || (0xe == dip->pdt)) /* D-A or RBC */
126 return FT_BLOCK;
127 else if (0x1 == dip->pdt)
128 return FT_TAPE;
129 }
130 return FT_OTHER;
131 }
132 }
133
134 static int
135 seg_desc_from_d_type(int in_dt, int in_off, int out_dt, int out_off)
136 {
137 int desc_type = -1;
138
139 switch (in_dt) {
140 case FT_BLOCK:
141 switch (out_dt) {
142 case FT_TAPE:
143 if (out_off)
144 break;
145 if (in_off)
146 desc_type = 0x8;
147 else
148 desc_type = 0;
149 break;
150 case FT_BLOCK:
151 if (in_off || out_off)
152 desc_type = 0xA;
153 else
154 desc_type = 2;
155 break;
156 default:
157 break;
158 }
159 break;
160 case FT_TAPE:
161 if (in_off)
162 break;
163
164 switch (out_dt) {
165 case FT_TAPE:
166 if (!out_off) {
167 desc_type = 3;
168 break;
169 }
170 break;
171 case FT_BLOCK:
172 if (out_off)
173 desc_type = 9;
174 else
175 desc_type = 3;
176 break;
177 case FT_DEV_NULL:
178 desc_type = 6;
179 break;
180 default:
181 break;
182 }
183 break;
184 default:
185 break;
186 }
187 return desc_type;
188 }
189
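/* Informal sketch of the segment descriptor built by scsi_encode_seg_desc()
 * for the block -> block case (type 0x02), offsets as used below:
 *   byte 0      : descriptor type code
 *   byte 1      : DC bit (bit 1) | CAT bit (bit 0)
 *   bytes 2-3   : descriptor length (0x18 for type 0x02)
 *   bytes 4-5   : source CSCD (target) descriptor index
 *   bytes 6-7   : destination CSCD (target) descriptor index
 *   bytes 10-11 : number of blocks
 *   bytes 12-19 : source LBA (big endian)
 *   bytes 20-27 : destination LBA (big endian) */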
190 static int
191 scsi_encode_seg_desc(struct opts_t * op, unsigned char *seg_desc,
192 int seg_desc_type, int64_t num_blk, uint64_t src_lba,
193 uint64_t dst_lba)
194 {
195 int seg_desc_len = 0;
196
197 seg_desc[0] = seg_desc_type;
198 seg_desc[1] = (op->xc_dc << 1) | op->xc_cat;
199 if (seg_desc_type == 0x02) {
200 seg_desc_len = 0x18;
201 seg_desc[4] = 0;
202 seg_desc[5] = 0; /* Source target index */
203 seg_desc[7] = 1; /* Destination target index */
204 seg_desc[10] = (num_blk >> 8) & 0xff;
205 seg_desc[11] = num_blk & 0xff;
206 seg_desc[12] = (src_lba >> 56) & 0xff;
207 seg_desc[13] = (src_lba >> 48) & 0xff;
208 seg_desc[14] = (src_lba >> 40) & 0xff;
209 seg_desc[15] = (src_lba >> 32) & 0xff;
210 seg_desc[16] = (src_lba >> 24) & 0xff;
211 seg_desc[17] = (src_lba >> 16) & 0xff;
212 seg_desc[18] = (src_lba >> 8) & 0xff;
213 seg_desc[19] = src_lba & 0xff;
214 seg_desc[20] = (dst_lba >> 56) & 0xff;
215 seg_desc[21] = (dst_lba >> 48) & 0xff;
216 seg_desc[22] = (dst_lba >> 40) & 0xff;
217 seg_desc[23] = (dst_lba >> 32) & 0xff;
218 seg_desc[24] = (dst_lba >> 24) & 0xff;
219 seg_desc[25] = (dst_lba >> 16) & 0xff;
220 seg_desc[26] = (dst_lba >> 8) & 0xff;
221 seg_desc[27] = dst_lba & 0xff;
222 }
223 seg_desc[2] = (seg_desc_len >> 8) & 0xFF;
224 seg_desc[3] = seg_desc_len & 0xFF;
225
226 return seg_desc_len + 4;
227 }
228
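/* Rough layout of the EXTENDED COPY (LID1) parameter list assembled in
 * xcopyBuff below:
 *   byte 0      : list identifier
 *   byte 1      : (list id usage << 3) | priority
 *   bytes 2-3   : target descriptor list length (src + dst lengths)
 *   bytes 8-11  : segment descriptor list length (only byte 11 set here)
 *   byte 16 on  : target descriptors (source then destination), followed by
 *                 a single segment descriptor. */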
229 static int
230 a_xcopy_lid1_cmd(struct opts_t * op, unsigned char *src_desc,
231 int src_desc_len, unsigned char *dst_desc, int dst_desc_len,
232 int seg_desc_type, int64_t num_blk)
233 {
234 unsigned char xcopyBuff[256];
235 int desc_offset = 16;
236 int seg_desc_len, verb, err_vb, fd, tmout;
237 uint64_t src_lba = op->skip;
238 uint64_t dst_lba = op->seek;
239
240 fd = (op->iflagp->xcopy) ? op->idip->fd : op->odip->fd;
241 verb = (op->verbose > 1) ? (op->verbose - 2) : 0;
242 if (op->verbose > 0)
243 err_vb = (verb > 1) ? verb : 1;
244 else
245 err_vb = 0;
246 memset(xcopyBuff, 0, 256);
247 xcopyBuff[0] = op->list_id;
248 xcopyBuff[1] = (op->id_usage << 3) | op->prio;
249 xcopyBuff[2] = 0;
250 xcopyBuff[3] = src_desc_len + dst_desc_len; /* Two target descriptors */
251 memcpy(xcopyBuff + desc_offset, src_desc, src_desc_len);
252 desc_offset += src_desc_len;
253 memcpy(xcopyBuff + desc_offset, dst_desc, dst_desc_len);
254 desc_offset += dst_desc_len;
255 seg_desc_len = scsi_encode_seg_desc(op, xcopyBuff + desc_offset,
256 seg_desc_type, num_blk,
257 src_lba, dst_lba);
258 xcopyBuff[11] = seg_desc_len; /* One segment descriptor */
259 desc_offset += seg_desc_len;
260 tmout = (op->timeout_xcopy < 1) ? DEF_3PC_OUT_TIMEOUT : op->timeout_xcopy;
261 if (verb)
262 pr2serr("xcopy(LID1) cmd; src_lba=0x%" PRIx64 ", num_blks=%" PRId64
263 "\n", src_lba, num_blk);
264 return pt_3party_copy_out(fd, SA_XCOPY_LID1, op->list_id, DEF_GROUP_NUM,
265 tmout, xcopyBuff, desc_offset, 1, verb, err_vb);
266 }
267
268 /* Returns target descriptor variety encoded into an int. There may be
269 * more than one, OR-ed together. A return value of zero or less is
270 * considered as an error. */
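/* Fields of the RECEIVE COPY OPERATING PARAMETERS response used below:
 * maximum segment length (saved as xc_max_bytes), data segment granularity
 * (saved as xc_min_bytes), held data limit (used to pick a default list_id
 * usage) and, starting at byte 44, the list of implemented descriptor type
 * codes whose count is in byte 43. */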
271 static int
272 scsi_operating_parameter(struct opts_t * op, int is_dest)
273 {
274 int res, fd, ftype, pdt, snlid, verb;
275 unsigned char rcBuff[256];
276 unsigned int rcBuffLen = 256, len, n, td_list = 0;
277 unsigned long num, max_target_num, max_segment_num, max_segment_len;
278 unsigned long max_desc_len, max_inline_data, held_data_limit;
279 int valid = 0;
280 struct dev_info_t * dip;
281
282 verb = (op->verbose ? (op->verbose - 1) : 0);
283 dip = is_dest ? op->odip : op->idip;
284 fd = dip->fd;
285 ftype = dip->d_type;
286 if (FT_PT & ftype) {
287 pdt = dip->pdt;
288 if ((0 == pdt) || (0xe == pdt)) /* direct-access or RBC */
289 ftype |= FT_BLOCK;
290 else if (0x1 == pdt)
291 ftype |= FT_TAPE;
292 } else if (FT_FIFO & ftype)
293 ftype |= FT_REG;
294 if (FT_REG & ftype) {
295 pr2serr("%s: not expecting a regular file here: %s\n", __func__,
296 dip->fn);
297 return -SG_LIB_FILE_ERROR;
298 }
299
300 /* Third Party Copy IN command; sa: RECEIVE COPY OPERATING PARAMETERS */
301 res = pt_3party_copy_in(fd, SA_COPY_OP_PARAMS, 0, DEF_3PC_IN_TIMEOUT,
302 rcBuff, rcBuffLen, 1, verb, op->verbose);
303 if (0 != res)
304 return -res;
305
306 len = ((rcBuff[0] << 24) | (rcBuff[1] << 16) | (rcBuff[2] << 8) |
307 rcBuff[3]) + 4;
308 if (len > rcBuffLen) {
309 pr2serr(" <<report len %d > %d too long for internal buffer, output "
310 "truncated\n", len, rcBuffLen);
311 }
312 if (op->verbose > 2) {
313 pr2serr("\nOutput %s response in hex:\n", rec_copy_op_params_str);
314 dStrHexErr((const char *)rcBuff, len, 1);
315 }
316 snlid = rcBuff[4] & 0x1;
317 max_target_num = rcBuff[8] << 8 | rcBuff[9];
318 max_segment_num = rcBuff[10] << 8 | rcBuff[11];
319 max_desc_len = rcBuff[12] << 24 | rcBuff[13] << 16 | rcBuff[14] << 8 |
320 rcBuff[15];
321 max_segment_len = rcBuff[16] << 24 | rcBuff[17] << 16 |
322 rcBuff[18] << 8 | rcBuff[19];
323 dip->xc_max_bytes = max_segment_len ? max_segment_len : ULONG_MAX;
324 max_inline_data = rcBuff[20] << 24 | rcBuff[21] << 16 | rcBuff[22] << 8 |
325 rcBuff[23];
326 if (op->verbose) {
327 pr2serr(" >> %s, %sput [%s]:\n", rec_copy_op_params_str,
328 (is_dest ? "out" : "in"), dip->fn);
329 pr2serr(" Support No List IDentifier (SNLID): %d\n", snlid);
330 pr2serr(" Maximum target descriptor count: %lu\n", max_target_num);
331 pr2serr(" Maximum segment descriptor count: %lu\n",
332 max_segment_num);
333 pr2serr(" Maximum descriptor list length: %lu\n", max_desc_len);
334 pr2serr(" Maximum segment length: %lu\n", max_segment_len);
335 pr2serr(" Maximum inline data length: %lu\n", max_inline_data);
336 }
337 held_data_limit = rcBuff[24] << 24 | rcBuff[25] << 16 |
338 rcBuff[26] << 8 | rcBuff[27];
339 if (op->id_usage < 0) {
340 if (! held_data_limit)
341 op->id_usage = 2;
342 else
343 op->id_usage = 0;
344 }
345 if (op->verbose) {
346 pr2serr(" Held data limit: %lu (list_id_usage: %d)\n",
347 held_data_limit, op->id_usage);
348 num = rcBuff[28] << 24 | rcBuff[29] << 16 | rcBuff[30] << 8 |
349 rcBuff[31];
350 pr2serr(" Maximum stream device transfer size: %lu\n", num);
351 pr2serr(" Maximum concurrent copies: %u\n", rcBuff[36]);
352 pr2serr(" Data segment granularity: %u bytes\n", 1 << rcBuff[37]);
353 pr2serr(" Inline data granularity: %u bytes\n", 1 << rcBuff[38]);
354 pr2serr(" Held data granularity: %u bytes\n", 1 << rcBuff[39]);
355
356 pr2serr(" Implemented descriptor list:\n");
357 }
358 dip->xc_min_bytes = 1 << rcBuff[37];
359
360 for (n = 0; n < rcBuff[43]; n++) {
361 switch(rcBuff[44 + n]) {
362 case 0x00: /* copy block to stream device */
363 if (!is_dest && (ftype & FT_BLOCK))
364 valid++;
365 if (is_dest && (ftype & FT_TAPE))
366 valid++;
367 if (op->verbose)
368 pr2serr(" Copy Block to Stream device\n");
369 break;
370 case 0x01: /* copy stream to block device */
371 if (!is_dest && (ftype & FT_TAPE))
372 valid++;
373 if (is_dest && (ftype & FT_BLOCK))
374 valid++;
375 if (op->verbose)
376 pr2serr(" Copy Stream to Block device\n");
377 break;
378 case 0x02: /* copy block to block device */
379 if (!is_dest && (ftype & FT_BLOCK))
380 valid++;
381 if (is_dest && (ftype & FT_BLOCK))
382 valid++;
383 if (op->verbose)
384 pr2serr(" Copy Block to Block device\n");
385 break;
386 case 0x03: /* copy stream to stream device */
387 if (!is_dest && (ftype & FT_TAPE))
388 valid++;
389 if (is_dest && (ftype & FT_TAPE))
390 valid++;
391 if (op->verbose)
392 pr2serr(" Copy Stream to Stream device\n");
393 break;
394 case 0x04: /* copy inline data to stream device */
395 if (!is_dest && (ftype & FT_REG))
396 valid++;
397 if (is_dest && (ftype & FT_TAPE))
398 valid++;
399 if (op->verbose)
400 pr2serr(" Copy inline data to Stream device\n");
401 break;
402 case 0x05: /* copy embedded data to stream device */
403 if (!is_dest && (ftype & FT_REG))
404 valid++;
405 if (is_dest && (ftype & FT_TAPE))
406 valid++;
407 if (op->verbose)
408 pr2serr(" Copy embedded data to Stream device\n");
409 break;
410 case 0x06: /* Read from stream device and discard */
411 if (!is_dest && (ftype & FT_TAPE))
412 valid++;
413 if (is_dest && (ftype & FT_DEV_NULL))
414 valid++;
415 if (op->verbose)
416 pr2serr(" Read from stream device and discard\n");
417 break;
418 case 0x07: /* Verify block or stream device operation */
419 if (!is_dest && (ftype & (FT_TAPE | FT_BLOCK)))
420 valid++;
421 if (is_dest && (ftype & (FT_TAPE | FT_BLOCK)))
422 valid++;
423 if (op->verbose)
424 pr2serr(" Verify block or stream device operation\n");
425 break;
426 case 0x08: /* copy block device with offset to stream device */
427 if (!is_dest && (ftype & FT_BLOCK))
428 valid++;
429 if (is_dest && (ftype & FT_TAPE))
430 valid++;
431 if (op->verbose)
432 pr2serr(" Copy block device with offset to stream "
433 "device\n");
434 break;
435 case 0x09: /* copy stream device to block device with offset */
436 if (!is_dest && (ftype & FT_TAPE))
437 valid++;
438 if (is_dest && (ftype & FT_BLOCK))
439 valid++;
440 if (op->verbose)
441 pr2serr(" Copy stream device to block device with "
442 "offset\n");
443 break;
444 case 0x0a: /* copy block device with offset to block device with
445 * offset */
446 if (!is_dest && (ftype & FT_BLOCK))
447 valid++;
448 if (is_dest && (ftype & FT_BLOCK))
449 valid++;
450 if (op->verbose)
451 pr2serr(" Copy block device with offset to block "
452 "device with offset\n");
453 break;
454 case 0x0b: /* copy block device to stream device and hold data */
455 if (!is_dest && (ftype & FT_BLOCK))
456 valid++;
457 if (is_dest && (ftype & FT_TAPE))
458 valid++;
459 if (op->verbose)
460 pr2serr(" Copy block device to stream device and hold "
461 "data\n");
462 break;
463 case 0x0c: /* copy stream device to block device and hold data */
464 if (!is_dest && (ftype & FT_TAPE))
465 valid++;
466 if (is_dest && (ftype & FT_BLOCK))
467 valid++;
468 if (op->verbose)
469 pr2serr(" Copy stream device to block device and hold "
470 "data\n");
471 break;
472 case 0x0d: /* copy block device to block device and hold data */
473 if (!is_dest && (ftype & FT_BLOCK))
474 valid++;
475 if (is_dest && (ftype & FT_BLOCK))
476 valid++;
477 if (op->verbose)
478 pr2serr(" Copy block device to block device and hold "
479 "data\n");
480 break;
481 case 0x0e: /* copy stream device to stream device and hold data */
482 if (!is_dest && (ftype & FT_TAPE))
483 valid++;
484 if (is_dest && (ftype & FT_TAPE))
485 valid++;
486 if (op->verbose)
487 pr2serr(" Copy stream device to stream device and hold "
488 "data\n");
489 break;
490 case 0x0f: /* read from stream device and hold data */
491 if (!is_dest && (ftype & FT_TAPE))
492 valid++;
493 if (is_dest && (ftype & FT_DEV_NULL))
494 valid++;
495 if (op->verbose)
496 pr2serr(" Read from stream device and hold data\n");
497 break;
498 case 0xe0: /* FC N_Port_Name */
499 if (op->verbose)
500 pr2serr(" FC N_Port_Name target descriptor\n");
501 td_list |= TD_FC_WWPN;
502 break;
503 case 0xe1: /* FC Port_ID */
504 if (op->verbose)
505 pr2serr(" FC Port_ID target descriptor\n");
506 td_list |= TD_FC_PORT;
507 break;
508 case 0xe2: /* FC N_Port_ID with N_Port_Name checking */
509 if (op->verbose)
510 pr2serr(" FC N_Port_ID with N_Port_Name target "
511 "descriptor\n");
512 td_list |= TD_FC_WWPN_AND_PORT;
513 break;
514 case 0xe3: /* Parallel Interface T_L */
515 if (op->verbose)
516 pr2serr(" SPI T_L target descriptor\n");
517 td_list |= TD_SPI;
518 break;
519 case 0xe4: /* identification descriptor */
520 if (op->verbose)
521 pr2serr(" Identification target descriptor\n");
522 td_list |= TD_VPD;
523 break;
524 case 0xe5: /* IPv4 */
525 if (op->verbose)
526 pr2serr(" IPv4 target descriptor\n");
527 td_list |= TD_IPV4;
528 break;
529 case 0xe6: /* Alias */
530 if (op->verbose)
531 pr2serr(" Alias target descriptor\n");
532 td_list |= TD_ALIAS;
533 break;
534 case 0xe7: /* RDMA */
535 if (op->verbose)
536 pr2serr(" RDMA target descriptor\n");
537 td_list |= TD_RDMA;
538 break;
539 case 0xe8: /* FireWire */
540 if (op->verbose)
541 pr2serr(" IEEE 1394 target descriptor\n");
542 td_list |= TD_FW;
543 break;
544 case 0xe9: /* SAS */
545 if (op->verbose)
546 pr2serr(" SAS target descriptor\n");
547 td_list |= TD_SAS;
548 break;
549 case 0xea: /* IPv6 */
550 if (op->verbose)
551 pr2serr(" IPv6 target descriptor\n");
552 td_list |= TD_IPV6;
553 break;
554 case 0xeb: /* IP Copy Service */
555 if (op->verbose)
556 pr2serr(" IP Copy Service target descriptor\n");
557 td_list |= TD_IP_COPY_SERVICE;
558 break;
559 case 0xfe: /* ROD */
560 if (op->verbose)
561 pr2serr(" ROD target descriptor\n");
562 td_list |= TD_ROD;
563 break;
564 default:
565 pr2serr(">> Unhandled target descriptor 0x%02x\n",
566 rcBuff[44 + n]);
567 break;
568 }
569 }
570 if (!valid) {
571 pr2serr(">> no matching target descriptor supported\n");
572 td_list = 0;
573 }
574 return td_list;
575 }
576
577 /* build xcopy(lid1) CSCD descriptor using device id VPD page */
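/* The scan below appears to prefer designators in this order: NAA (3),
 * then EUI-64 (2), then T10 vendor identification (1), then vendor
 * specific (0); designators longer than 16 bytes are skipped. */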
578 static int
579 desc_from_vpd_id(struct opts_t * op, unsigned char *desc, int desc_len,
580 int is_dest)
581 {
582 int fd, res, u, i_len, assoc, desig, verb;
583 unsigned char rcBuff[256], *ucp, *best = NULL;
584 unsigned int len = 254;
585 unsigned int block_size;
586 int off = -1;
587 int best_len = 0;
588 int f_desig = 0;
589 struct flags_t * flp;
590 struct dev_info_t * dip;
591
592 verb = (op->verbose ? (op->verbose - 1) : 0);
593 dip = is_dest ? op->odip : op->idip;
594 fd = dip->fd;
595 flp = is_dest ? op->oflagp : op->iflagp;
596 block_size = is_dest ? op->obs : op->ibs;
597 memset(rcBuff, 0xff, len);
598 res = sg_ll_inquiry(fd, 0, 1, VPD_DEVICE_ID, rcBuff, 4, 1, verb);
599 if (0 != res) {
600 if (SG_LIB_CAT_ILLEGAL_REQ == res)
601 pr2serr("Device identification VPD page not found [%s]\n",
602 dip->fn);
603 else
604 pr2serr("VPD inquiry failed with %d [%s], try again with "
605 "'-vv'\n", res, dip->fn);
606 return res;
607 } else if (rcBuff[1] != VPD_DEVICE_ID) {
608 pr2serr("invalid VPD response\n");
609 return SG_LIB_CAT_MALFORMED;
610 }
611 len = ((rcBuff[2] << 8) + rcBuff[3]) + 4;
612 res = sg_ll_inquiry(fd, 0, 1, VPD_DEVICE_ID, rcBuff, len, 1, verb);
613 if (0 != res) {
614 pr2serr("VPD inquiry failed with %d\n", res);
615 return res;
616 } else if (rcBuff[1] != VPD_DEVICE_ID) {
617 pr2serr("invalid VPD response\n");
618 return SG_LIB_CAT_MALFORMED;
619 }
620 if (op->verbose > 2) {
621 pr2serr("Output VPD_DEVICE_ID (0x83) page in hex:\n");
622 dStrHexErr((const char *)rcBuff, len, 1);
623 }
624
625 while ((u = sg_vpd_dev_id_iter(rcBuff + 4, len - 4, &off, 0, -1, -1)) ==
626 0) {
627 ucp = rcBuff + 4 + off;
628 i_len = ucp[3];
629 if (((unsigned int)off + i_len + 4) > len) {
630 pr2serr(" VPD page error: designator length %d longer than\n"
631 " remaining response length=%d\n", i_len,
632 (len - off));
633 return SG_LIB_CAT_MALFORMED;
634 }
635 assoc = ((ucp[1] >> 4) & 0x3);
636 desig = (ucp[1] & 0xf);
637 if (op->verbose > 2)
638 pr2serr(" Desc %d: assoc %u desig %u len %d\n", off, assoc,
639 desig, i_len);
640 /* Only use designators that are 16 bytes or less */
641 if (i_len > 16)
642 continue;
643 if (desig == 3) {
644 best = ucp;
645 best_len = i_len;
646 break;
647 }
648 if (desig == 2) {
649 if (!best || f_desig < 2) {
650 best = ucp;
651 best_len = i_len;
652 f_desig = 2;
653 }
654 } else if (desig == 1) {
655 if (!best || f_desig == 0) {
656 best = ucp;
657 best_len = i_len;
658 f_desig = desig;
659 }
660 } else if (desig == 0) {
661 if (!best) {
662 best = ucp;
663 best_len = i_len;
664 f_desig = desig;
665 }
666 }
667 }
668 if (best) {
669 if (op->verbose)
670 decode_designation_descriptor(best, best_len, 1, op->verbose);
671 if (best_len + 4 < desc_len) {
672 memset(desc, 0, 32);
673 desc[0] = 0xe4;
674 memcpy(desc + 4, best, best_len + 4);
675 desc[4] &= 0x1f;
676 desc[28] = flp->pad << 2;
677 desc[29] = (block_size >> 16) & 0xff;
678 desc[30] = (block_size >> 8) & 0xff;
679 desc[31] = block_size & 0xff;
680 if (op->verbose > 3) {
681 pr2serr("Descriptor in hex (bs %d):\n", block_size);
682 dStrHexErr((const char *)desc, 32, 1);
683 }
684 return 32;
685 }
686 return best_len + 8;
687 }
688 return 0;
689 }
690
691 /* Called from main() in ddpt.c . Returns 0 on success or a positive
692 * errno value if problems. This is for a xcopy(LID1) disk->disk copy. */
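/* Outline: fetch copy operating parameters from both devices, build CSCD
 * (target) descriptors from their device identification VPD pages, derive
 * a blocks-per-transfer limit from xc_max_bytes, then loop issuing one
 * EXTENDED COPY (LID1) command per segment until dd_count is exhausted. */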
693 int
694 do_xcopy_lid1(struct opts_t * op)
695 {
696 int res, ibpt, obpt, bs_same, max_bpt, blocks, oblocks;
697 int src_desc_len, dst_desc_len, seg_desc_type;
698 unsigned char src_desc[256];
699 unsigned char dst_desc[256];
700 const struct flags_t * ifp = op->iflagp;
701 const struct flags_t * ofp = op->oflagp;
702 const struct dev_info_t * idip = op->idip;
703 const struct dev_info_t * odip = op->odip;
704
705 if (op->list_id_given && (op->list_id > UCHAR_MAX)) {
706 pr2serr("list_id for xcopy(LID1) cannot exceed 255\n");
707 return SG_LIB_SYNTAX_ERROR;
708 }
709 if (op->id_usage == 3) { /* list_id usage disabled */
710 if (op->list_id_given && (0 != op->list_id)) {
711 pr2serr("list_id disabled by id_usage flag\n");
712 return SG_LIB_SYNTAX_ERROR;
713 } else
714 op->list_id = 0;
715 }
716 res = scsi_operating_parameter(op, 0);
717 if (SG_LIB_CAT_UNIT_ATTENTION == -res)
718 res = scsi_operating_parameter(op, 0);
719 if (res < 0) {
720 if (-res == SG_LIB_CAT_INVALID_OP) {
721 pr2serr("%s command not supported on %s\n",
722 rec_copy_op_params_str, idip->fn);
723 return EINVAL;
724 } else if (-res == SG_LIB_CAT_NOT_READY)
725 pr2serr("%s failed on %s - not ready\n",
726 rec_copy_op_params_str, idip->fn);
727 else {
728 pr2serr("Unable to %s on %s\n", rec_copy_op_params_str, idip->fn);
729 return -res;
730 }
731 } else if (res == 0)
732 return SG_LIB_CAT_INVALID_OP;
733 if (res & TD_VPD) {
734 if (op->verbose)
735 pr2serr(" >> using VPD identification for source %s\n",
736 op->idip->fn);
737 src_desc_len = desc_from_vpd_id(op, src_desc, sizeof(src_desc), 0);
738 if (src_desc_len > (int)sizeof(src_desc)) {
739 pr2serr("source descriptor too large (%d bytes)\n", src_desc_len);
740 return SG_LIB_CAT_MALFORMED;
741 }
742 } else
743 return SG_LIB_CAT_INVALID_OP;
744
745 res = scsi_operating_parameter(op, 1);
746 if (res < 0) {
747 if (SG_LIB_CAT_UNIT_ATTENTION == -res) {
748 pr2serr("Unit attention (%s), continuing\n",
749 rec_copy_op_params_str);
750 res = scsi_operating_parameter(op, 1);
751 } else {
752 if (-res == SG_LIB_CAT_INVALID_OP) {
753 pr2serr("%s command not supported on %s\n",
754 rec_copy_op_params_str, odip->fn);
755 return EINVAL;
756 } else if (-res == SG_LIB_CAT_NOT_READY)
757 pr2serr("%s failed on %s - not ready\n",
758 rec_copy_op_params_str, odip->fn);
759 else {
760 pr2serr("Unable to %s on %s\n", rec_copy_op_params_str,
761 odip->fn);
762 return -res;
763 }
764 }
765 } else if (res == 0)
766 return SG_LIB_CAT_INVALID_OP;
767 if (res & TD_VPD) {
768 if (op->verbose)
769 pr2serr(" >> using VPD identification for destination %s\n",
770 odip->fn);
771 dst_desc_len = desc_from_vpd_id(op, dst_desc, sizeof(dst_desc), 1);
772 if (dst_desc_len > (int)sizeof(dst_desc)) {
773 pr2serr("destination descriptor too large (%d bytes)\n", dst_desc_len);
774 return SG_LIB_CAT_MALFORMED;
775 }
776 } else
777 return SG_LIB_CAT_INVALID_OP;
778
779 bs_same = (op->ibs == op->obs);
780 max_bpt = bs_same ? MAX_XC_BPT : MAX_XC_BPT_POW2;
781 /* Beware, xc_max_bytes may be ULONG_MAX hence unsigned long division */
782 if (op->bpt_given) {
783 ibpt = op->bpt_i;
784 ibpt = (ibpt > max_bpt) ? max_bpt : ibpt;
785 obpt = bs_same ? ibpt : ((op->ibs * op->bpt_i) / op->obs);
786 if (ifp->dc || ofp->dc) {
787 if ((unsigned long)obpt * op->obs > odip->xc_max_bytes) {
788 pr2serr("bpt too large (max %lu blocks)\n",
789 odip->xc_max_bytes / op->obs);
790 return SG_LIB_SYNTAX_ERROR;
791 }
792 } else {
793 if ((unsigned long)ibpt * op->ibs > idip->xc_max_bytes) {
794 pr2serr("bpt too large (max %lu blocks)\n",
795 idip->xc_max_bytes / op->ibs);
796 return SG_LIB_SYNTAX_ERROR;
797 }
798 }
799 } else {
800 unsigned long r;
801
802 if (ifp->dc || ofp->dc) {
803 r = odip->xc_max_bytes / (unsigned long)op->obs;
804 obpt = (r > INT_MAX) ? INT_MAX : (int)r;
805 ibpt = bs_same ? obpt : ((op->obs * obpt) / op->ibs);
806 ibpt = (ibpt > max_bpt) ? max_bpt : ibpt;
807 obpt = bs_same ? ibpt : ((op->ibs * ibpt) / op->obs);
808 } else {
809 r = idip->xc_max_bytes / (unsigned long)op->ibs;
810 ibpt = (r > (unsigned long)max_bpt) ? max_bpt : (int)r;
811 obpt = bs_same ? ibpt : ((op->ibs * ibpt) / op->obs);
812 }
813 }
814 if (op->verbose > 1)
815 pr2serr("do_xcopy_lid1: xcopy->%s will use ibpt=%d, obpt=%d\n",
816 (ifp->xcopy ? idip->fn : odip->fn), ibpt, obpt);
817 seg_desc_type = seg_desc_from_d_type(simplified_dt(op->idip), 0,
818 simplified_dt(op->odip), 0);
819
820 res = 0;
821 while (op->dd_count > 0) {
822 blocks = (op->dd_count > ibpt) ? ibpt : op->dd_count;
823 oblocks = bs_same ? blocks : ((op->ibs * blocks) / op->obs);
824
825 res = a_xcopy_lid1_cmd(op, src_desc, src_desc_len, dst_desc,
826 dst_desc_len, seg_desc_type, blocks);
827 if (res != 0) {
828 if ((op->verbose > 0) && (op->verbose < 3)) {
829 pr2serr("a_xcopy_lid1_cmd: ");
830 switch (res) {
831 case SG_LIB_CAT_INVALID_OP:
832 pr2serr("invalid opcode\n");
833 break;
834 case SG_LIB_CAT_ILLEGAL_REQ:
835 pr2serr("illegal request\n");
836 break;
837 case SG_LIB_CAT_UNIT_ATTENTION:
838 pr2serr("unit attention\n");
839 break;
840 case SG_LIB_CAT_NOT_READY:
841 pr2serr("not ready\n");
842 break;
843 case SG_LIB_CAT_ABORTED_COMMAND:
844 pr2serr("aborted command\n");
845 break;
846 default:
847 pr2serr("unknown result=%d\n", res);
848 break;
849 }
850 pr2serr(" use 'verbose=3' (or '-vvv') for more "
851 "information\n");
852 }
853 break;
854 }
855 op->in_full += blocks;
856 op->out_full += oblocks;
857 op->skip += blocks;
858 op->seek += oblocks;
859 op->num_xcopy++;
860 op->dd_count -= blocks;
861 if (op->dd_count > 0)
862 signals_process_delay(op, DELAY_COPY_SEGMENT);
863 }
864 return res;
865 }
866
867 /* vvvvvvvvv ODX [SBC-3's POPULATE TOKEN + WRITE USING TOKEN] vvvvvvv */
868
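/* Open the ROD Token file (rtf). For ODX_WRITE_FROM_RODS it must already
 * exist and is opened read-only; for the other ODX requests it is created
 * if missing and opened write-only, with O_APPEND or O_TRUNC on regular
 * files depending on the rtf_append setting. Directories and block/char
 * devices are rejected. */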
869 int
870 open_rtf(struct opts_t * op)
871 {
872 int res, fd, must_exist, r_w1, flags;
873 struct stat a_st;
874
875 if (op->rtf_fd >= 0) {
876 pr2serr("%s: rtf already open\n", __func__ );
877 return -1;
878 }
879 must_exist = 0;
880 switch (op->odx_request) {
881 case ODX_COPY:
882 if (RODT_BLK_ZERO == op->rod_type) {
883 if (op->verbose)
884 pr2serr("ignoring rtf %s since token is fixed\n", op->rtf);
885 return 0;
886 }
887 r_w1 = 1;
888 break;
889 case ODX_READ_INTO_RODS:
890 r_w1 = 1;
891 break;
892 case ODX_WRITE_FROM_RODS:
893 r_w1 = 0;
894 must_exist = 1;
895 break;
896 default:
897 r_w1 = 1;
898 break;
899 }
900 if (! op->rtf[0])
901 return must_exist ? -1 : 0;
902 res = stat(op->rtf, &a_st);
903 if (res < 0) {
904 if (ENOENT == errno) {
905 if (must_exist) {
906 pr2serr("%s not found but rtf required\n", op->rtf);
907 return -1;
908 }
909 } else {
910 perror("rtf");
911 return -1;
912 }
913 fd = creat(op->rtf, 0644);
914 if (fd < 0) {
915 perror(op->rtf);
916 return -1;
917 }
918 op->rtf_fd = fd;
919 return 0;
920 }
921 if (S_ISDIR(a_st.st_mode)) {
922 pr2serr("%s: %s is a directory, expected a file\n", __func__,
923 op->rtf);
924 return -1;
925 }
926 if (S_ISBLK(a_st.st_mode) || S_ISCHR(a_st.st_mode)) {
927 pr2serr("%s: %s is a block or char device, unexpected\n", __func__,
928 op->rtf);
929 return -1;
930 }
931 flags = (r_w1 ? O_WRONLY : O_RDONLY);
932 if (S_ISREG(a_st.st_mode) && r_w1)
933 flags |= (op->rtf_append ? O_APPEND : O_TRUNC);
934 fd = open(op->rtf, flags);
935 if (fd < 0) {
936 perror(op->rtf);
937 return -1;
938 }
939 op->rtf_fd = fd;
940 return 0;
941 }
942
943 const char *
944 cpy_op_status_str(int cos, char * b, int blen)
945 {
946 const struct val_str_t * vsp;
947 const char * p = NULL;
948
949 for (vsp = xcopy4_cpy_op_status; vsp->name; ++vsp) {
950 if (cos == vsp->num) {
951 p = vsp->name;
952 break;
953 }
954 }
955 if (p)
956 snprintf(b, blen, "%s", p);
957 else
958 snprintf(b, blen, "copy operation status 0x%x not found", cos);
959 return b;
960 }
961
962 /* This is xcopy(LID4) related: "ROD" == Representation Of Data
963 * Used by VPD_3PARTY_COPY */
964 static void
965 decode_rod_descriptor(const unsigned char * buff, int len, int to_stderr)
966 {
967 const unsigned char * ucp = buff;
968 int k, bump;
969 int (*print_p)(const char *, ...);
970
971 print_p = to_stderr ? pr2serr : printf;
972 for (k = 0; k < len; k += bump, ucp += bump) {
973 bump = sg_get_unaligned_be16(ucp + 2) + 4;
974 switch (ucp[0]) {
975 case 0:
976 /* Block ROD device type specific descriptor */
977 print_p(" Optimal block ROD length granularity: %d\n",
978 sg_get_unaligned_be16(ucp + 6));
979 print_p(" Maximum Bytes in block ROD: %" PRIu64 "\n",
980 sg_get_unaligned_be64(ucp + 8));
981 print_p(" Optimal Bytes in block ROD transfer: %" PRIu64 "\n",
982 sg_get_unaligned_be64(ucp + 16));
983 print_p(" Optimal Bytes to token per segment: %" PRIu64 "\n",
984 sg_get_unaligned_be64(ucp + 24));
985 print_p(" Optimal Bytes from token per segment:"
986 " %" PRIu64 "\n", sg_get_unaligned_be64(ucp + 32));
987 break;
988 case 1:
989 /* Stream ROD device type specific descriptor */
990 print_p(" Maximum Bytes in stream ROD: %" PRIu64 "\n",
991 sg_get_unaligned_be64(ucp + 8));
992 print_p(" Optimal Bytes in stream ROD transfer:"
993 " %" PRIu64 "\n", sg_get_unaligned_be64(ucp + 16));
994 break;
995 case 3:
996 /* Copy manager ROD device type specific descriptor */
997 print_p(" Maximum Bytes in processor ROD: %" PRIu64 "\n",
998 sg_get_unaligned_be64(ucp + 8));
999 print_p(" Optimal Bytes in processor ROD transfer:"
1000 " %" PRIu64 "\n", sg_get_unaligned_be64(ucp + 16));
1001 break;
1002 default:
1003 print_p(" Unhandled descriptor (format %d, device type %d)\n",
1004 ucp[0] >> 5, ucp[0] & 0x1F);
1005 break;
1006 }
1007 }
1008 }
1009
1010 struct tpc_desc_type {
1011 unsigned char code;
1012 const char * name;
1013 };
1014
1015 static struct tpc_desc_type tpc_desc_arr[] = {
1016 {0x0, "block -> stream"},
1017 {0x1, "stream -> block"},
1018 {0x2, "block -> block"},
1019 {0x3, "stream -> stream"},
1020 {0x4, "inline -> stream"},
1021 {0x5, "embedded -> stream"},
1022 {0x6, "stream -> discard"},
1023 {0x7, "verify CSCD"},
1024 {0x8, "block<o> -> stream"},
1025 {0x9, "stream -> block<o>"},
1026 {0xa, "block<o> -> block<o>"},
1027 {0xb, "block -> stream & application_client"},
1028 {0xc, "stream -> block & application_client"},
1029 {0xd, "block -> block & application_client"},
1030 {0xe, "stream -> stream&application_client"},
1031 {0xf, "stream -> discard&application_client"},
1032 {0x10, "filemark -> tape"},
1033 {0x11, "space -> tape"},
1034 {0x12, "locate -> tape"},
1035 {0x13, "<i>tape -> <i>tape"},
1036 {0x14, "register persistent reservation key"},
1037 {0x15, "third party persistent reservation source I_T nexus"},
1038 {0x16, "<i>block -> <i>block"},
1039 {0xbe, "ROD <- block range(n)"},
1040 {0xbf, "ROD <- block range(1)"},
1041 {0xe0, "CSCD: FC N_Port_Name"},
1042 {0xe1, "CSCD: FC N_Port_ID"},
1043 {0xe2, "CSCD: FC N_Port_ID with N_Port_Name, checking"},
1044 {0xe3, "CSCD: Parallel interface: I_T"},
1045 {0xe4, "CSCD: Identification Descriptor"},
1046 {0xe5, "CSCD: IPv4"},
1047 {0xe6, "CSCD: Alias"},
1048 {0xe7, "CSCD: RDMA"},
1049 {0xe8, "CSCD: IEEE 1394 EUI-64"},
1050 {0xe9, "CSCD: SAS SSP"},
1051 {0xea, "CSCD: IPv6"},
1052 {0xeb, "CSCD: IP copy service"},
1053 {0xfe, "CSCD: ROD"},
1054 {0xff, "CSCD: extension"},
1055 {0x0, NULL},
1056 };
1057
1058 static const char *
1059 get_tpc_desc_name(unsigned char code)
1060 {
1061 const struct tpc_desc_type * dtp;
1062
1063 for (dtp = tpc_desc_arr; dtp->name; ++dtp) {
1064 if (code == dtp->code)
1065 return dtp->name;
1066 }
1067 return "";
1068 }
1069
1070 struct tpc_rod_type {
1071 uint32_t type;
1072 const char * name;
1073 };
1074
1075 static struct tpc_rod_type tpc_rod_arr[] = {
1076 {0x0, "copy manager internal"},
1077 {0x10000, "access upon reference"},
1078 {0x800000, "point in time copy - default"},
1079 {0x800001, "point in time copy - change vulnerable"},
1080 {0x800002, "point in time copy - persistent"},
1081 {0x80ffff, "point in time copy - any"},
1082 {0xffff0001, "block device zero"},
1083 {0x0, NULL},
1084 };
1085
1086 static const char *
1087 get_tpc_rod_name(uint32_t rod_type)
1088 {
1089 const struct tpc_rod_type * rtp;
1090
1091 for (rtp = tpc_rod_arr; rtp->name; ++rtp) {
1092 if (rod_type == rtp->type)
1093 return rtp->name;
1094 }
1095 return "";
1096 }
1097
1098 /* VPD_3PARTY_COPY [3PC, third party copy] */
1099 static void
1100 decode_3party_copy_vpd(unsigned char * buff, int len, int to_stderr,
1101 int verbose)
1102 {
1103 int k, j, m, bump, desc_type, desc_len, sa_len;
1104 unsigned int u;
1105 const unsigned char * ucp;
1106 const char * cp;
1107 uint64_t ull;
1108 char b[80];
1109 int (*print_p)(const char *, ...);
1110
1111 print_p = to_stderr ? pr2serr : printf;
1112 if (len < 4) {
1113 print_p("Third-party Copy VPD page length too short=%d\n", len);
1114 return;
1115 }
1116 len -= 4;
1117 ucp = buff + 4;
1118 for (k = 0; k < len; k += bump, ucp += bump) {
1119 desc_type = sg_get_unaligned_be16(ucp);
1120 desc_len = sg_get_unaligned_be16(ucp + 2);
1121 if (verbose)
1122 print_p("Descriptor type=%d, len=%d\n", desc_type, desc_len);
1123 bump = 4 + desc_len;
1124 if ((k + bump) > len) {
1125 print_p("Third-party Copy VPD page, short descriptor length=%d, "
1126 "left=%d\n", bump, (len - k));
1127 return;
1128 }
1129 if (0 == desc_len)
1130 continue;
1131 switch (desc_type) {
1132 case 0x0000: /* Required if POPULATE TOKEN (or friend) used */
1133 print_p(" Block Device ROD Token Limits:\n");
1134 print_p(" Maximum Range Descriptors: %d\n",
1135 sg_get_unaligned_be16(ucp + 10));
1136 u = sg_get_unaligned_be32(ucp + 12);
1137 print_p(" Maximum Inactivity Timeout: %u seconds\n", u);
1138 u = sg_get_unaligned_be32(ucp + 16);
1139 print_p(" Default Inactivity Timeout: %u seconds\n", u);
1140 ull = sg_get_unaligned_be64(ucp + 20);
1141 print_p(" Maximum Token Transfer Size: %" PRIu64 "\n", ull);
1142 ull = sg_get_unaligned_be64(ucp + 28);
1143 print_p(" Optimal Transfer Count: %" PRIu64 "\n", ull);
1144 break;
1145 case 0x0001: /* Mandatory (SPC-4) */
1146 print_p(" Supported Commands:\n");
1147 j = 0;
1148 while (j < ucp[4]) {
1149 sa_len = ucp[6 + j];
1150 for (m = 0; m < sa_len; ++m) {
1151 sg_get_opcode_sa_name(ucp[5 + j], ucp[7 + j + m],
1152 0, sizeof(b), b);
1153 print_p(" %s\n", b);
1154 }
1155 j += sa_len + 2;
1156 }
1157 break;
1158 case 0x0004:
1159 print_p(" Parameter Data:\n");
1160 print_p(" Maximum CSCD Descriptor Count: %d\n",
1161 sg_get_unaligned_be16(ucp + 8));
1162 print_p(" Maximum Segment Descriptor Count: %d\n",
1163 sg_get_unaligned_be16(ucp + 10));
1164 u = sg_get_unaligned_be32(ucp + 12);
1165 print_p(" Maximum Descriptor List Length: %u\n", u);
1166 u = sg_get_unaligned_be32(ucp + 16);
1167 print_p(" Maximum Inline Data Length: %u\n", u);
1168 break;
1169 case 0x0008:
1170 print_p(" Supported Descriptors:\n");
1171 for (j = 0; j < ucp[4]; j++) {
1172 cp = get_tpc_desc_name(ucp[5 + j]);
1173 if (strlen(cp) > 0)
1174 print_p(" %s [0x%x]\n", cp, ucp[5 + j]);
1175 else
1176 print_p(" 0x%x\n", ucp[5 + j]);
1177 }
1178 break;
1179 case 0x000C:
1180 print_p(" Supported CSCD IDs:\n");
1181 for (j = 0; j < (ucp[4] << 8) + ucp[5]; j += 2) {
1182 u = sg_get_unaligned_be16(ucp + 6 + j);
1183 print_p(" 0x%04x\n", u);
1184 }
1185 break;
1186 case 0x0106:
1187 print_p(" ROD Token Features:\n");
1188 print_p(" Remote Tokens: %d\n", ucp[4] & 0x0f);
1189 u = sg_get_unaligned_be32(ucp + 16);
1190 print_p(" Minimum Token Lifetime: %u seconds\n", u);
1191 u = sg_get_unaligned_be32(ucp + 20);
1192 print_p(" Maximum Token Lifetime: %u seconds\n", u);
1193 u = sg_get_unaligned_be32(ucp + 24);
1194 print_p(" Maximum Token inactivity timeout: %u\n", u);
1195 decode_rod_descriptor(ucp + 48,
1196 sg_get_unaligned_be16(ucp + 46), to_stderr);
1197 break;
1198 case 0x0108:
1199 print_p(" Supported ROD Token and ROD Types:\n");
1200 for (j = 0; j < sg_get_unaligned_be16(ucp + 6); j+= 64) {
1201 u = sg_get_unaligned_be32(ucp + 8 + j);
1202 cp = get_tpc_rod_name(u);
1203 if (strlen(cp) > 0)
1204 print_p(" ROD Type: %s [0x%x]\n", cp, u);
1205 else
1206 print_p(" ROD Type: 0x%x\n", u);
1207 print_p(" Internal: %s\n",
1208 ucp[8 + j + 4] & 0x80 ? "yes" : "no");
1209 print_p(" Token In: %s\n",
1210 ucp[8 + j + 4] & 0x02 ? "yes" : "no");
1211 print_p(" Token Out: %s\n",
1212 ucp[8 + j + 4] & 0x01 ? "yes" : "no");
1213 print_p(" Preference: %d\n",
1214 sg_get_unaligned_be16(ucp + 8 + j + 6));
1215 }
1216 break;
1217 case 0x8001: /* Mandatory (SPC-4) */
1218 print_p(" General Copy Operations:\n");
1219 u = sg_get_unaligned_be32(ucp + 4);
1220 print_p(" Total Concurrent Copies: %u\n", u);
1221 u = sg_get_unaligned_be32(ucp + 8);
1222 print_p(" Maximum Identified Concurrent Copies: %u\n", u);
1223 u = sg_get_unaligned_be32(ucp + 12);
1224 print_p(" Maximum Segment Length: %u\n", u);
1225 ull = (1 << ucp[16]); /* field is power of 2 */
1226 print_p(" Data Segment Granularity: %" PRIu64 "\n", ull);
1227 ull = (1 << ucp[17]);
1228 print_p(" Inline Data Granularity: %" PRIu64 "\n", ull);
1229 break;
1230 case 0x9101:
1231 print_p(" Stream Copy Operations:\n");
1232 u = sg_get_unaligned_be32(ucp + 4);
1233 print_p(" Maximum Stream Device Transfer Size: %u\n", u);
1234 break;
1235 case 0xC001:
1236 print_p(" Held Data:\n");
1237 u = sg_get_unaligned_be32(ucp + 4);
1238 print_p(" Held Data Limit: %u\n", u);
1239 ull = (1 << ucp[8]);
1240 print_p(" Held Data Granularity: %" PRIu64 "\n", ull);
1241 break;
1242 default:
1243 print_p("Unexpected type=%d\n", desc_type);
1244 dStrHexErr((const char *)ucp, bump, 1);
1245 break;
1246 }
1247 }
1248 }
1249
1250
1251 /* Note: if this function returns 0 and *alloc_bp != fixed_b then it has
1252 * passed back a malloc-ed buffer which the caller should free. Returns 0 on success. */
1253 static int
1254 fetch_3pc_vpd(int fd, const char * fn, unsigned char * fixed_b,
1255 int fixed_blen, unsigned char ** alloc_bp, int verb)
1256 {
1257 int res, len;
1258 unsigned char * rp;
1259
1260 rp = fixed_b;
1261 if (alloc_bp)
1262 *alloc_bp = fixed_b;
1263 res = sg_ll_inquiry(fd, 0, 1, VPD_3PARTY_COPY, rp, fixed_blen, 1, verb);
1264 if (res) {
1265 if (SG_LIB_CAT_ILLEGAL_REQ == res) {
1266 if (fn)
1267 pr2serr("Third Party Copy VPD page not found [%s]\n", fn);
1268 else
1269 pr2serr("Third Party Copy VPD page not found\n");
1270 } else
1271 pr2serr("Third Party Copy VPD inquiry failed with %d, try again "
1272 "with '-vv'\n", res);
1273 return res;
1274 } else if (rp[1] != VPD_3PARTY_COPY) {
1275 pr2serr("invalid 3PARTY_COPY VPD response\n");
1276 return SG_LIB_CAT_MALFORMED;
1277 }
1278 len = ((rp[2] << 8) + rp[3]) + 4;
1279 if (len > fixed_blen) {
1280 rp = (unsigned char *)malloc(len);
1281 if (NULL == rp) {
1282 pr2serr("Not enough user memory for %s\n", __func__);
1283 return SG_LIB_CAT_OTHER;
1284 }
1285 if (alloc_bp)
1286 *alloc_bp = rp;
1287 res = sg_ll_inquiry(fd, 0, 1, VPD_3PARTY_COPY, rp, len, 1, verb);
1288 if (res) {
1289 pr2serr("3PARTY_COPY VPD inquiry failed with %d\n", res);
1290 if (fixed_b != rp)
1291 free(rp);
1292 return res;
1293 }
1294 }
1295 return 0;
1296 }
1297
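/* Fetch the Third Party Copy VPD page and extract the Block Device ROD
 * Token Limits descriptor (type 0x0000): maximum range descriptor count,
 * maximum and default inactivity timeouts, maximum token transfer size and
 * optimal transfer count are saved in dip->odxp. The requested
 * inactivity_to is then checked against the maximum unless the 'force'
 * flag is given. */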
1298 static int
1299 get_3pc_vpd_blkdev_lims(struct opts_t * op, struct dev_info_t * dip)
1300 {
1301 unsigned char rBuff[256];
1302 unsigned char * rp;
1303 unsigned char * ucp;
1304 int res, verb, n, len, bump, desc_type, desc_len, k, j;
1305 int found = 0;
1306 uint32_t max_ito = 0;
1307 uint64_t ull;
1308
1309 verb = (op->verbose ? (op->verbose - 1) : 0);
1310 rp = rBuff;
1311 n = (int)sizeof(rBuff);
1312 res = fetch_3pc_vpd(dip->fd, dip->fn, rBuff, n, &rp, verb);
1313 if (res)
1314 return res;
1315 len = ((rp[2] << 8) + rp[3]) + 4;
1316 len -= 4;
1317 ucp = rp + 4;
1318 for (k = 0; k < len; k += bump, ucp += bump) {
1319 desc_type = (ucp[0] << 8) + ucp[1];
1320 desc_len = (ucp[2] << 8) + ucp[3];
1321 if (op->verbose > 4)
1322 pr2serr("Descriptor type=%d, len=%d\n", desc_type, desc_len);
1323 bump = 4 + desc_len;
1324 if ((k + bump) > len) {
1325 pr2serr("3PARTY_COPY Copy VPD page, short descriptor length=%d, "
1326 "left=%d\n", bump, (len - k));
1327 if (rBuff != rp)
1328 free(rp);
1329 return SG_LIB_CAT_OTHER;
1330 }
1331 if (0 == desc_len)
1332 continue;
1333 switch (desc_type) {
1334 case 0x0000: /* Block Device ROD Token Limits */
1335 ++found;
1336 if (op->verbose > 3) {
1337 pr2serr("3PARTY_COPY Copy VPD, Block Device ROD Token "
1338 "Limits descriptor:\n");
1339 dStrHexErr((const char *)ucp, desc_len, 1);
1340 }
1341 if (desc_len < 32) {
1342 pr2serr("3PARTY_COPY Copy VPD, Block Device ROD Token "
1343 "Limits descriptor, too short, want 32 got %d\n",
1344 desc_len);
1345 break;
1346 }
1347 dip->odxp->max_range_desc = (ucp[10] << 8) + ucp[11];
1348 max_ito = (ucp[12] << 24) | (ucp[13] << 16) | (ucp[14] << 8) |
1349 ucp[15];
1350 dip->odxp->max_inactivity_to = max_ito;
1351 dip->odxp->def_inactivity_to = (ucp[16] << 24) | (ucp[17] << 16) |
1352 (ucp[18] << 8) | ucp[19];
1353 ull = 0;
1354 for (j = 0; j < 8; j++) {
1355 if (j > 0)
1356 ull <<= 8;
1357 ull |= ucp[20 + j];
1358 }
1359 dip->odxp->max_tok_xfer_size = ull;
1360 ull = 0;
1361 for (j = 0; j < 8; j++) {
1362 if (j > 0)
1363 ull <<= 8;
1364 ull |= ucp[28 + j];
1365 }
1366 dip->odxp->optimal_xfer_count = ull;
1367 break;
1368 default:
1369 break;
1370 }
1371 }
1372 if (rBuff != rp)
1373 free(rp);
1374 if (! found) {
1375 pr2serr("Did not find Block Device ROD Token Limits descriptor in "
1376 "3PARTY_COPY Copy VPD page\n");
1377 return SG_LIB_CAT_OTHER;
1378 }
1379 if ((max_ito > 0) && (op->inactivity_to > max_ito)) {
1380 pr2serr("Block Device ROD Token Limits: maximum inactivity timeout "
1381 "(%" PRIu32 ") exceeded\n", max_ito);
1382 if (! op->iflagp->force) {
1383 pr2serr("... exiting; can override with 'force' flag\n");
1384 return SG_LIB_CAT_OTHER;
1385 }
1386 }
1387 return 0;
1388 }
1389
1390 int
1391 print_3pc_vpd(struct opts_t * op, int to_stderr)
1392 {
1393 unsigned char rBuff[256];
1394 unsigned char * rp;
1395 int res, verb, len;
1396
1397 verb = (op->verbose ? (op->verbose - 1) : 0);
1398 res = fetch_3pc_vpd(op->idip->fd, NULL, rBuff, (int)sizeof(rBuff),
1399 &rp, verb);
1400 if (res)
1401 return res;
1402 len = ((rp[2] << 8) + rp[3]) + 4;
1403 decode_3party_copy_vpd(rp, len, to_stderr, verb);
1404 if (rBuff != rp)
1405 free(rp);
1406 return res;
1407 }
1408
1409 uint64_t
1410 count_sgl_blocks(const struct scat_gath_elem * sglp, int elems)
1411 {
1412 int k;
1413 uint64_t num;
1414
1415 for (k = 0, num = 0; k < elems; ++k, ++sglp)
1416 num += sglp->num;
1417 return num;
1418 }
1419
1420 /* Return maximum number of blocks from the available num_blks that are
1421 * available in the scatter gather list, given several constraints. First
1422 * bypass blk_off blocks in the list. Then check that elems and
1423 * max_descriptors is not exceeded. If max_descriptors is 0 then it is not
1424 * constraining. The return value is always <= num_blks. */
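/* Hypothetical example: with a sgl of {num=8},{num=16}, blk_off=10,
 * num_blks=20 and max_descriptors=0, the first element is consumed by the
 * offset (leaving blk_off=2 within the second), so the result is
 * min(16 - 2, 20) = 14 blocks. */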
1425 static uint64_t
1426 count_restricted_sgl_blocks(const struct scat_gath_elem * sglp, int elems,
1427 uint64_t blk_off, uint32_t num_blks,
1428 uint32_t max_descriptors)
1429 {
1430 int k, j, md;
1431 uint64_t res;
1432
1433 if ((0 == max_descriptors) || (max_descriptors > INT_MAX))
1434 md = INT_MAX;
1435 else
1436 md = (int)max_descriptors;
1437 for (k = 0; k < elems; ++k, ++sglp) {
1438 if ((uint64_t)sglp->num > blk_off)
1439 break;
1440 blk_off -= sglp->num;
1441 }
1442 if (k >= elems)
1443 return 0;
1444 for (j = 0, res = 0;
1445 (k < elems) && (j < md) && (res < (uint64_t)num_blks);
1446 ++k, ++j, ++sglp) {
1447 if (0 == j)
1448 res = (uint64_t)sglp->num - blk_off;
1449 else
1450 res += (uint64_t)sglp->num;
1451 }
1452 return (res < (uint64_t)num_blks) ? res : (uint64_t)num_blks;
1453 }
1454
1455 /* Do POPULATE_TOKEN command, returns 0 on success */
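/* Informal sketch of the POPULATE TOKEN parameter list built below:
 *   bytes 0-1   : parameter data length (n - 2)
 *   byte 2      : RTV (bit 1) and IMMED (bit 0) flags
 *   bytes 4-7   : inactivity timeout (0 -> copy manager default)
 *   bytes 8-11  : ROD type (only when rod_type_given)
 *   bytes 14-15 : block device range descriptor list length
 *   byte 16 on  : one 16 byte (LBA, number of blocks) range descriptor per
 *                 scatter gather element. */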
1456 int
1457 do_pop_tok(struct opts_t * op, uint64_t blk_off, uint32_t num_blks,
1458 int walk_list_id, int vb_a)
1459 {
1460 int res, k, j, n, len, fd, tmout, sz_bdrd, elems, pl_sz, err_vb;
1461 uint64_t lba, sg0_off;
1462 uint32_t num;
1463 const struct scat_gath_elem * sglp;
1464 unsigned char * pl;
1465
1466 if (vb_a)
1467 pr2serr("%s: blk_off=%" PRIu64 ", num_blks=%" PRIu32 "\n", __func__,
1468 blk_off, num_blks);
1469 if (op->verbose == vb_a)
1470 err_vb = op->verbose;
1471 else if (op->verbose > 0)
1472 err_vb = (vb_a > 0) ? vb_a : 1;
1473 else
1474 err_vb = 0;
1475 fd = op->idip->fd;
1476 if (op->in_sgl) {
1477 sg0_off = blk_off;
1478 for (k = 0, sglp = op->in_sgl; k < op->in_sgl_elems; ++k, ++sglp) {
1479 if ((uint64_t)sglp->num >= sg0_off)
1480 break;
1481 sg0_off -= sglp->num;
1482 }
1483 if (k >= op->in_sgl_elems) {
1484 pr2serr("%s: exhausted sgl_elems [%d], miscalculation\n",
1485 __func__, op->in_sgl_elems);
1486 return SG_LIB_CAT_MALFORMED;
1487 }
1488 /* remaining sg elements is the worst case; might use fewer */
1489 elems = op->in_sgl_elems - k;
1490 pl_sz = 16 + (16 * elems);
1491 } else {
1492 sg0_off = 0; /* compilers should be smarter */
1493 sglp = NULL;
1494 elems = 1;
1495 pl_sz = 32;
1496 }
1497 pl = (unsigned char *)malloc(pl_sz);
1498 if (NULL == pl) {
1499 pr2serr("Not enough user memory for %s\n", __func__);
1500 return SG_LIB_CAT_OTHER;
1501 }
1502 memset(pl, 0, pl_sz);
1503 if (op->rod_type_given) {
1504 pl[2] = 0x2; /* RTV bit */
1505 pl[8] = (unsigned char)((op->rod_type >> 24) & 0xff);
1506 pl[9] = (unsigned char)((op->rod_type >> 16) & 0xff);
1507 pl[10] = (unsigned char)((op->rod_type >> 8) & 0xff);
1508 pl[11] = (unsigned char)(op->rod_type & 0xff);
1509 }
1510 if (op->iflagp->immed)
1511 pl[2] |= 0x1; /* IMMED bit */
1512 /* if inactivity_to=0 then cm takes default in TPC VPD page */
1513 pl[4] = (unsigned char)((op->inactivity_to >> 24) & 0xff);
1514 pl[5] = (unsigned char)((op->inactivity_to >> 16) & 0xff);
1515 pl[6] = (unsigned char)((op->inactivity_to >> 8) & 0xff);
1516 pl[7] = (unsigned char)(op->inactivity_to & 0xff);
1517
1518 if (sglp) {
1519 lba = sglp->lba + sg0_off;
1520 num = sglp->num - sg0_off;
1521 for (k = 0, n = 15; k < elems; ++k, num_blks -= num, ++sglp) {
1522 if (k > 0) {
1523 lba = sglp->lba;
1524 num = sglp->num;
1525 }
1526 if (num > num_blks)
1527 num = num_blks;
1528 if (vb_a)
1529 pr2serr(" lba=0x%" PRIx64 ", num=%" PRIu32 ", k=%d\n", lba,
1530 num, k);
1531 pl[++n] = (unsigned char)((lba >> 56) & 0xff);
1532 pl[++n] = (unsigned char)((lba >> 48) & 0xff);
1533 pl[++n] = (unsigned char)((lba >> 40) & 0xff);
1534 pl[++n] = (unsigned char)((lba >> 32) & 0xff);
1535 pl[++n] = (unsigned char)((lba >> 24) & 0xff);
1536 pl[++n] = (unsigned char)((lba >> 16) & 0xff);
1537 pl[++n] = (unsigned char)((lba >> 8) & 0xff);
1538 pl[++n] = (unsigned char)(lba & 0xff);
1539 pl[++n] = (unsigned char)((num >> 24) & 0xff);
1540 pl[++n] = (unsigned char)((num >> 16) & 0xff);
1541 pl[++n] = (unsigned char)((num >> 8) & 0xff);
1542 pl[++n] = (unsigned char)(num & 0xff);
1543 n += 4;
1544 }
1545 sz_bdrd = k * 16;
1546 pl[14] = (unsigned char)((sz_bdrd >> 8) & 0xff);
1547 pl[15] = (unsigned char)(sz_bdrd & 0xff);
1548 len = n + 1;
1549 } else { /* assume count= and possibly skip= given */
1550 sz_bdrd = 16; /* single element */
1551 pl[14] = (unsigned char)((sz_bdrd >> 8) & 0xff);
1552 pl[15] = (unsigned char)(sz_bdrd & 0xff);
1553 lba = op->skip + blk_off;
1554 if (vb_a)
1555 pr2serr(" lba=0x%" PRIx64 ", num_blks=%" PRIu32 "\n", lba,
1556 num_blks);
1557 pl[16] = (unsigned char)((lba >> 56) & 0xff);
1558 pl[17] = (unsigned char)((lba >> 48) & 0xff);
1559 pl[18] = (unsigned char)((lba >> 40) & 0xff);
1560 pl[19] = (unsigned char)((lba >> 32) & 0xff);
1561 pl[20] = (unsigned char)((lba >> 24) & 0xff);
1562 pl[21] = (unsigned char)((lba >> 16) & 0xff);
1563 pl[22] = (unsigned char)((lba >> 8) & 0xff);
1564 pl[23] = (unsigned char)(lba & 0xff);
1565 pl[24] = (unsigned char)((num_blks >> 24) & 0xff);
1566 pl[25] = (unsigned char)((num_blks >> 16) & 0xff);
1567 pl[26] = (unsigned char)((num_blks >> 8) & 0xff);
1568 pl[27] = (unsigned char)(num_blks & 0xff);
1569 len = 32;
1570 }
1571 n = len - 2;
1572 pl[0] = (unsigned char)((n >> 8) & 0xff);
1573 pl[1] = (unsigned char)(n & 0xff);
1574
1575 tmout = (op->timeout_xcopy < 1) ? DEF_3PC_OUT_TIMEOUT : op->timeout_xcopy;
1576 res = pt_3party_copy_out(fd, SA_POP_TOK, op->list_id, DEF_GROUP_NUM,
1577 tmout, pl, len, 1, vb_a - 1, err_vb);
1578 if ((DDPT_CAT_OP_IN_PROGRESS == res) && walk_list_id) {
1579 for (j = 0; j < MAX_IN_PROGRESS; ++j) {
1580 res = pt_3party_copy_out(fd, SA_POP_TOK, ++op->list_id,
1581 DEF_GROUP_NUM, tmout, pl, len, 1,
1582 vb_a - 1, err_vb);
1583 if (DDPT_CAT_OP_IN_PROGRESS != res)
1584 break;
1585 }
1586 if (MAX_IN_PROGRESS == j) {
1587 if (vb_a)
1588 pr2serr("%s: too many list_id_s 'in progress'\n", __func__);
1589 }
1590 }
1591 free(pl);
1592 return res;
1593 }
1594
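/* Issue RECEIVE ROD TOKEN INFORMATION for the current list_id and, when rrp
 * is non-NULL, decode the response: originating service action, copy
 * operation status, estimated status update delay, transfer count, any
 * sense data and (if present) the returned ROD Token. */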
1595 int
1596 do_rrti(struct opts_t * op, int in0_out1, struct rrti_resp_t * rrp, int verb)
1597 {
1598 int j, res, fd, off, err_vb;
1599 uint32_t len, rtdl;
1600 unsigned char rsp[1024];
1601 char b[400];
1602 char bb[80];
1603 const char * cp;
1604
1605 /* want to suppress 'pass-through requested n bytes ...' messages with
1606 * 'ddpt verbose=2 ...' */
1607 err_vb = op->verbose;
1608 if ((verb != op->verbose) && (err_vb > 1))
1609 --err_vb;
1610 fd = in0_out1 ? op->odip->fd : op->idip->fd;
1611 res = pt_3party_copy_in(fd, SA_ROD_TOK_INFO, op->list_id,
1612 DEF_3PC_IN_TIMEOUT, rsp, sizeof(rsp), 1, verb,
1613 err_vb);
1614 if (res)
1615 return res;
1616
1617 len = ((rsp[0] << 24) | (rsp[1] << 16) | (rsp[2] << 8) | rsp[3]) + 4;
1618 if (len > sizeof(rsp)) {
1619 pr2serr("RRTI: ROD Token info too long for internal buffer, output "
1620 "truncated\n");
1621 len = sizeof(rsp);
1622 }
1623 if (verb > 1) {
1624 pr2serr("\nRRTI response in hex:\n");
1625 dStrHexErr((const char *)rsp, len, 1);
1626 }
1627 if (NULL == rrp)
1628 return 0;
1629 rrp->for_sa = 0x1f & rsp[4];
1630 switch(rrp->for_sa) {
1631 case SA_POP_TOK:
1632 cp = "RRTI for PT";
1633 break;
1634 case SA_WR_USING_TOK:
1635 cp = "RRTI for WUT";
1636 break;
1637 case SA_XCOPY_LID1:
1638 cp = "RRTI for XCOPY(LID1)";
1639 break;
1640 case SA_XCOPY_LID4:
1641 cp = "RRTI for XCOPY(LID4)";
1642 break;
1643 default:
1644 cp = "RRTI for unknown originating xcopy command";
1645 break;
1646 }
1647 if (verb > 1)
1648 pr2serr("%s\n", cp);
1649 rrp->cstat = 0x7f & rsp[5];
1650 rrp->xc_cstatus = rsp[12];
1651 rrp->sense_len = rsp[14];
1652 rrp->esu_del = (rsp[8] << 24) | (rsp[9] << 16) | (rsp[10] << 8) | rsp[11];
1653 if (verb)
1654 pr2serr("%s: %s\n", cp, cpy_op_status_str(rrp->cstat, b, sizeof(b)));
1655 rrp->tc = 0;
1656 for (j = 0; j < 8; j++) {
1657 if (j > 0)
1658 rrp->tc <<= 8;
1659 rrp->tc |= rsp[16 + j];
1660 }
1661 if (rrp->sense_len > 0) {
1662 snprintf(bb, sizeof(bb), "%s: sense data", cp);
1663 sg_get_sense_str(bb, rsp + 32, rrp->sense_len, verb, sizeof(b), b);
1664 pr2serr("%s\n", b);
1665 }
1666 off = 32 + rsp[13];
1667 rtdl = (rsp[off] << 24) | (rsp[off + 1] << 16) | (rsp[off + 2] << 8) |
1668 rsp[off + 3];
1669 rrp->rt_len = (rtdl > 2) ? rtdl - 2 : 0;
1670 if (rtdl > 2)
1671 memcpy(rrp->rod_tok, rsp + off + 6,
1672 ((rrp->rt_len > 512) ? 512 : rrp->rt_len));
1673 return 0;
1674 }
1675
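/* Poll with RRTI until the preceding Populate Token command completes:
 * copy operation status 0x10..0x12 means "in progress", in which case we
 * sleep for the copy manager's estimated status update delay (or a default)
 * and retry. On completion the transfer count is passed back via tcp and
 * the ROD Token is written to the rtf file (if open) and to a local static
 * buffer. */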
1676 int
1677 process_after_poptok(struct opts_t * op, uint64_t * tcp, int vb_a)
1678 {
1679 int res, k, len, vb_b, err, cont;
1680 uint64_t rod_sz;
1681 uint32_t delay;
1682 struct rrti_resp_t r;
1683 char b[400];
1684 unsigned char uc[8];
1685
1686 if (op->verbose == vb_a)
1687 vb_b = op->verbose;
1688 else
1689 vb_b = (vb_a > 0) ? (vb_a - 1) : 0;
1690 do {
1691 res = do_rrti(op, DDPT_ARG_IN, &r, vb_b);
1692 if (res)
1693 return res;
1694 if (SA_POP_TOK != r.for_sa) {
1695 sg_get_opcode_sa_name(THIRD_PARTY_COPY_OUT_CMD, r.for_sa, 0,
1696 sizeof(b), b);
1697 pr2serr("Receive ROD Token info expected response for Populate "
1698 "Token\n but got response for %s\n", b);
1699 }
1700 cont = ((r.cstat >= 0x10) && (r.cstat <= 0x12));
1701 if (cont) {
1702 delay = r.esu_del;
1703 if ((delay < 0xfffffffe) && (delay > 0)) {
1704 if (vb_b > 1)
1705 pr2serr("using copy manager recommended delay of %"
1706 PRIu32 " milliseconds\n", delay);
1707 } else {
1708 delay = DEF_ODX_POLL_DELAY_MS;
1709 if (vb_b > 1)
1710 pr2serr("using default for poll delay\n");
1711 }
1712 if (delay)
1713 sleep_ms(delay);
1714 }
1715 } while (cont);
1716 if ((! ((0x1 == r.cstat) || (0x3 == r.cstat))) || (vb_b > 1))
1717 pr2serr("RRTI for PT: %s\n", cpy_op_status_str(r.cstat, b, sizeof(b)));
1718 if (vb_a)
1719 pr2serr("RRTI for PT: Transfer count=%" PRIu64 " [0x%" PRIx64 "]\n",
1720 r.tc, r.tc);
1721 if (tcp)
1722 *tcp = r.tc;
1723 if (r.rt_len > 0) {
1724 len = (r.rt_len > 512) ? 512 : r.rt_len;
1725 if (vb_a) {
1726 pr2serr("RRTI for PT: copy manager ROD Token id: %s",
1727 rt_cm_id_str(r.rod_tok, r.rt_len, b, sizeof(b)));
1728 if (512 == r.rt_len)
1729 pr2serr("\n");
1730 else
1731 pr2serr(" [rt_len=%" PRIu32 "]\n", r.rt_len);
1732 }
1733 if (op->rtf_fd >= 0) { /* write ROD Token to RTF */
1734 res = write(op->rtf_fd, r.rod_tok, len);
1735 if (res < 0) {
1736 err = errno;
1737 pr2serr("%s: unable to write to file: %s [%s]\n", __func__,
1738 op->rtf, safe_strerror(err));
1739 return SG_LIB_FILE_ERROR;
1740 }
1741 if (res < len) {
1742 pr2serr("%s: short write to file: %s, wanted %d, got %d\n",
1743 __func__, op->rtf, len, res);
1744 return SG_LIB_CAT_OTHER;
1745 }
1746 if (op->rtf_len_add) {
1747 rod_sz = r.tc * op->ibs;
1748 for (k = 7; k >= 0; --k, rod_sz >>= 8)
1749 uc[k] = rod_sz & 0xff;
1750 res = write(op->rtf_fd, uc, 8);
1751 if (res < 0) {
1752 err = errno;
1753 pr2serr("%s: unable to write length to file: %s [%s]\n",
1754 __func__, op->rtf, safe_strerror(err));
1755 return SG_LIB_FILE_ERROR;
1756 }
1757 }
1758 }
1759 /* write ROD Token to static, in any case; could be a copy */
1760 if (len > LOCAL_ROD_TOKEN_SIZE) {
1761 pr2serr("%s: ROD token too large for static storage, try "
1762 "'rtf=RTF'\n", __func__);
1763 return SG_LIB_CAT_OTHER;
1764 }
1765 memcpy(local_rod_token, r.rod_tok, len);
1766 }
1767 return 0;
1768 }
1769
1770 void
1771 get_local_rod_tok(unsigned char * tokp, int max_tok_len)
1772 {
1773 int len;
1774
1775 if (tokp && (max_tok_len > 0)) {
1776 len = (max_tok_len > 512) ? 512 : max_tok_len;
1777 memcpy(tokp, local_rod_token, len);
1778 }
1779 }
1780
1781 /* Do WRITE USING TOKEN command, returns 0 on success */
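/* Informal sketch of the WRITE USING TOKEN parameter list built below:
 *   bytes 0-1    : parameter data length
 *   byte 2       : DEL_TKN (bit 1) and IMMED (bit 0) flags
 *   bytes 8-15   : offset into ROD
 *   bytes 16-527 : the 512 byte ROD Token
 *   bytes 534-535: block device range descriptor list length
 *   byte 536 on  : 16 byte (LBA, number of blocks) range descriptors. */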
1782 int
1783 do_wut(struct opts_t * op, unsigned char * tokp, uint64_t blk_off,
1784 uint32_t num_blks, uint64_t oir, int more_left, int walk_list_id,
1785 int vb_a)
1786 {
1787 int len, k, j, n, fd, res, tmout, sz_bdrd, elems, pl_sz, rodt_blk_zero;
1788 int err_vb = 0;
1789 struct flags_t * flp;
1790 uint64_t lba, sg0_off;
1791 uint32_t num;
1792 const struct scat_gath_elem * sglp;
1793 unsigned char * pl;
1794 // unsigned char rt[512];
1795
1796 if (op->verbose == vb_a)
1797 err_vb = op->verbose;
1798 else if (op->verbose > 0)
1799 err_vb = (vb_a > 0) ? vb_a : 1;
1800 if (vb_a)
1801 pr2serr("%s: enter; blk_off=%" PRIu64 ", num_blks=%" PRIu32 ", "
1802 " oir=0x%" PRIx64 "\n", __func__, blk_off, num_blks, oir);
1803 fd = op->odip->fd;
1804 flp = op->oflagp;
1805 rodt_blk_zero = (RODT_BLK_ZERO == op->rod_type);
1806 if (op->out_sgl) {
1807 sglp = op->out_sgl;
1808 for (k = 0, sg0_off = blk_off; k < op->out_sgl_elems; ++k, ++sglp) {
1809 if ((uint64_t)sglp->num >= sg0_off)
1810 break;
1811 sg0_off -= sglp->num;
1812 }
1813 if (k >= op->out_sgl_elems) {
1814 pr2serr("%s: exhausted sgl_elems [%d], miscalculation\n",
1815 __func__, op->out_sgl_elems);
1816 return SG_LIB_CAT_MALFORMED;
1817 }
1818 /* remaining sg elements is the worst case; might use fewer */
1819 elems = op->out_sgl_elems - k;
1820 pl_sz = 540 + (16 * elems);
1821 } else {
1822 sg0_off = 0; /* compilers should be smarter */
1823 sglp = NULL;
1824 elems = 1;
1825 pl_sz = 540 + 16;
1826 }
1827 pl = (unsigned char *)malloc(pl_sz);
1828 if (NULL == pl) {
1829 pr2serr("Not enough user memory for %s\n", __func__);
1830 return SG_LIB_CAT_OTHER;
1831 }
1832 memset(pl, 0, pl_sz);
1833 if (! rodt_blk_zero) {
1834 if (flp->del_tkn) /* only from ddptctl */
1835 pl[2] = 0x2; /* DEL_TKN bit */
1836 else if ((! more_left) && (! flp->no_del_tkn))
1837 pl[2] = 0x2; /* last write from ROD which may hold more */
1838 }
1839 if (flp->immed)
1840 pl[2] |= 0x1; /* IMMED bit */
1841 if (oir) { /* Offset in ROD field */
1842 pl[8] = (unsigned char)((oir >> 56) & 0xff);
1843 pl[9] = (unsigned char)((oir >> 48) & 0xff);
1844 pl[10] = (unsigned char)((oir >> 40) & 0xff);
1845 pl[11] = (unsigned char)((oir >> 32) & 0xff);
1846 pl[12] = (unsigned char)((oir >> 24) & 0xff);
1847 pl[13] = (unsigned char)((oir >> 16) & 0xff);
1848 pl[14] = (unsigned char)((oir >> 8) & 0xff);
1849 pl[15] = (unsigned char)(oir & 0xff);
1850 }
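    /* The byte-at-a-time big endian stores above (and for the LBA and
     * number-of-blocks fields below) could equally use the helpers in
     * sg_unaligned.h, e.g. sg_put_unaligned_be64(oir, pl + 8), assuming
     * the installed sg3_utils headers provide them. */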
1851 memcpy(pl + 16, tokp, 512);
1852
1853 if (sglp) {
1854 lba = sglp->lba + sg0_off;
1855 num = sglp->num - sg0_off;
1856 for (k = 0, n = 535; k < elems; ++k, num_blks -= num, ++sglp) {
1857 if (k > 0) {
1858 lba = sglp->lba;
1859 num = sglp->num;
1860 }
1861 if (num > num_blks)
1862 num = num_blks;
1863 if (vb_a)
1864 pr2serr(" lba=0x%" PRIx64 ", num=%" PRIu32 ", k=%d\n", lba,
1865 num, k);
1866 pl[++n] = (unsigned char)((lba >> 56) & 0xff);
1867 pl[++n] = (unsigned char)((lba >> 48) & 0xff);
1868 pl[++n] = (unsigned char)((lba >> 40) & 0xff);
1869 pl[++n] = (unsigned char)((lba >> 32) & 0xff);
1870 pl[++n] = (unsigned char)((lba >> 24) & 0xff);
1871 pl[++n] = (unsigned char)((lba >> 16) & 0xff);
1872 pl[++n] = (unsigned char)((lba >> 8) & 0xff);
1873 pl[++n] = (unsigned char)(lba & 0xff);
1874 pl[++n] = (unsigned char)((num >> 24) & 0xff);
1875 pl[++n] = (unsigned char)((num >> 16) & 0xff);
1876 pl[++n] = (unsigned char)((num >> 8) & 0xff);
1877 pl[++n] = (unsigned char)(num & 0xff);
1878 n += 4;
1879 }
1880 sz_bdrd = 16 * k;
1881 pl[534] = (unsigned char)((sz_bdrd >> 8) & 0xff);
1882 pl[535] = (unsigned char)(sz_bdrd & 0xff);
1883 } else {
1884 sz_bdrd = 16; /* single element */
1885 pl[534] = (unsigned char)((sz_bdrd >> 8) & 0xff);
1886 pl[535] = (unsigned char)(sz_bdrd & 0xff);
1887 lba = op->seek + blk_off;
1888 if (vb_a)
1889 pr2serr(" lba=0x%" PRIx64 ", num_blks=%" PRIu32 "\n", lba,
1890 num_blks);
1891 pl[536] = (unsigned char)((lba >> 56) & 0xff);
1892 pl[537] = (unsigned char)((lba >> 48) & 0xff);
1893 pl[538] = (unsigned char)((lba >> 40) & 0xff);
1894 pl[539] = (unsigned char)((lba >> 32) & 0xff);
1895 pl[540] = (unsigned char)((lba >> 24) & 0xff);
1896 pl[541] = (unsigned char)((lba >> 16) & 0xff);
1897 pl[542] = (unsigned char)((lba >> 8) & 0xff);
1898 pl[543] = (unsigned char)(lba & 0xff);
1899 pl[544] = (unsigned char)((num_blks >> 24) & 0xff);
1900 pl[545] = (unsigned char)((num_blks >> 16) & 0xff);
1901 pl[546] = (unsigned char)((num_blks >> 8) & 0xff);
1902 pl[547] = (unsigned char)(num_blks & 0xff);
1903 }
1904 len = 536 + sz_bdrd;
1905 n = len - 2;
1906 pl[0] = (unsigned char)((n >> 8) & 0xff);
1907 pl[1] = (unsigned char)(n & 0xff);
1908 fd = op->odip->fd;
1909
1910 tmout = (op->timeout_xcopy < 1) ? DEF_3PC_OUT_TIMEOUT : op->timeout_xcopy;
1911 res = pt_3party_copy_out(fd, SA_WR_USING_TOK, op->list_id, DEF_GROUP_NUM,
1912 tmout, pl, len, 1, vb_a - 1, err_vb);
1913 if ((DDPT_CAT_OP_IN_PROGRESS == res) && walk_list_id) {
1914 for (j = 0; j < MAX_IN_PROGRESS; ++j) {
1915 res = pt_3party_copy_out(fd, SA_WR_USING_TOK, ++op->list_id,
1916 DEF_GROUP_NUM, tmout, pl, len, 1,
1917 vb_a - 1, err_vb);
1918 if (DDPT_CAT_OP_IN_PROGRESS != res)
1919 break;
1920 }
1921 if (MAX_IN_PROGRESS == j) {
1922 if (vb_a)
1923 pr2serr("%s: too many list_id_s 'in progress'\n", __func__);
1924 }
1925 }
1926 free(pl);
1927 return res;
1928 }
1929
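/* The function below mirrors process_after_poptok() but for the output
 * side: it polls RRTI on the destination device (DDPT_ARG_OUT), expects
 * the response to refer to WRITE USING TOKEN (SA_WR_USING_TOK) and, once
 * the copy manager reports completion, passes back the transfer count. */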
1930 int
1931 process_after_wut(struct opts_t * op, uint64_t * tcp, int vb_a)
1932 {
1933 int res, cont, vb_b;
1934 uint32_t delay;
1935 struct rrti_resp_t r;
1936 char b[80];
1937
1938 if (op->verbose == vb_a)
1939 vb_b = op->verbose;
1940 else
1941 vb_b = (vb_a > 0) ? (vb_a - 1) : 0;
1942 do {
1943 res = do_rrti(op, DDPT_ARG_OUT, &r, vb_b);
1944 if (res)
1945 return res;
1946 if (SA_WR_USING_TOK != r.for_sa) {
1947 sg_get_opcode_sa_name(THIRD_PARTY_COPY_OUT_CMD, r.for_sa, 0,
1948 sizeof(b), b);
1949 pr2serr("Receive ROD Token info expected response for Write "
1950 "Using Token\n but got response for %s\n", b);
1951 }
1952 cont = ((r.cstat >= 0x10) && (r.cstat <= 0x12));
1953 if (cont) {
1954 delay = r.esu_del;
1955 if ((delay < 0xfffffffe) && (delay > 0)) {
1956 if (vb_b > 1)
1957 pr2serr("using copy manager recommended delay of %"
1958 PRIu32 " milliseconds\n", delay);
1959 } else {
1960 delay = DEF_ODX_POLL_DELAY_MS;
1961 if (vb_b > 1)
1962 pr2serr("using default for poll delay\n");
1963 }
1964 if (delay)
1965 sleep_ms(delay);
1966 }
1967 } while (cont);
1968
1969 if ((! ((0x1 == r.cstat) || (0x3 == r.cstat))) || (vb_b > 1))
1970 pr2serr("RRTI for WUT: %s\n", cpy_op_status_str(r.cstat, b,
1971 sizeof(b)));
1972 if (tcp)
1973 *tcp = r.tc;
1974 if (vb_a)
1975 pr2serr("RRTI for WUT: Transfer count=%" PRIu64 " [0x%" PRIx64 "]\n",
1976 r.tc, r.tc);
1977 return 0;
1978 }
1979
1980 #if 0
1981 static int
1982 odx_check_sgl(struct opts_t * op, uint64_t num_blks, int in0_out1)
1983 {
1984 uint32_t allowed_descs;
1985 struct dev_info_t * dip = in0_out1 ? op->odip : op->idip;
1986 struct flags_t * flp = in0_out1 ? op->oflagp : op->iflagp;
1987 uint32_t num_elems = in0_out1 ? op->out_sgl_elems : op->in_sgl_elems;
1988 const char * sgl_nm = in0_out1 ? "scatter" : "gather";
1989
1990 if ((op->dd_count >= 0) && ((uint64_t)op->dd_count != num_blks)) {
1991 pr2serr("%s: count= value not equal to the sum of %s "
1992 "nums\n", __func__, sgl_nm);
1993 return SG_LIB_SYNTAX_ERROR;
1994 }
1995 if ((! flp->force) && dip->odxp) {
1996 allowed_descs = dip->odxp->max_range_desc;
1997 if ((allowed_descs > 0) && (num_elems > allowed_descs)) {
1998 pr2serr("%s: number of %s list elements exceeds what the "
1999 "Block Device ROD\nToken Limits descriptor in the 3PC "
2000 "VPD page permits (%d).\nCan try '%cflag=force'\n",
2001 __func__, sgl_nm, allowed_descs, (in0_out1 ? 'o' : 'i'));
2002 return SG_LIB_CAT_OTHER;
2003 }
2004 }
2005 return 0;
2006 }
2007 #endif
2008
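/* Helper used by the ODX variants below: issues READ CAPACITY on the
 * indicated side (in0_out1: 0 for input, 1 for output), retrying once on a
 * Unit Attention, and warns when the device reported block size differs
 * from the given ibs/obs (aborting unless the force flag is set). */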
2009 static int
2010 fetch_read_cap(struct opts_t * op, int in0_out1, int64_t * num_blks,
2011 int * blk_sz)
2012 {
2013 int res;
2014 int bs = in0_out1 ? op->obs : op->ibs;
2015 struct dev_info_t * dip = in0_out1 ? op->odip : op->idip;
2016 struct flags_t * flagp = in0_out1 ? op->oflagp : op->iflagp;
2017 const char * oip = in0_out1 ? "o" : "i";
2018
2019 if ((res = pt_read_capacity(op, in0_out1, num_blks, blk_sz))) {
2020 if (SG_LIB_CAT_UNIT_ATTENTION == res) {
2021 pr2serr("Unit attention (readcap(%s)), continuing\n", oip);
2022 res = pt_read_capacity(op, in0_out1, num_blks, blk_sz);
2023 }
2024 if (res)
2025 return res;
2026 }
2027 if (op->verbose) {
2028 print_blk_sizes(dip->fn, "readcap", *num_blks, *blk_sz, 1);
2029 if (dip->prot_type > 0)
2030 pr2serr(" reports Protection_type=%d, p_i_exp=%d\n",
2031 dip->prot_type, dip->p_i_exp);
2032 }
2033 if ((*num_blks > 0) && (*blk_sz != bs)) {
2034 pr2serr(">> warning: %s block size confusion: %sbs=%d, device "
2035 "claims=%d\n", dip->fn, oip, bs, *blk_sz);
2036 if (0 == flagp->force) {
2037 pr2serr(">> abort copy, use %sflag=force to override\n", oip);
2038 return -1;
2039 }
2040 }
2041 return 0;
2042 }
2043
2044 /* This is called when rod_type=zero which implies the input is a dummy
2045 * (requires 'if=/dev/null' or equivalent) and we want to write blocks of
2046 * zeros to the destination. Returns 0 when successful. */
2047 static int
2048 odx_full_zero_copy(struct opts_t * op)
2049 {
2050 int k, got_count, res, out_blk_sz, out_num_elems, vb3;
2051 struct dev_info_t * odip = op->odip;
2052 uint64_t out_blk_off, num, tc;
2053 int64_t out_num_blks, v;
2054
2055 vb3 = (op->verbose > 1) ? (op->verbose - 2) : 0;
2056 k = dd_filetype(op->idip->fn, op->verbose);
2057 got_count = (op->dd_count > 0);
2058 if (FT_DEV_NULL != k) {
2059 pr2serr("For single WUT version of ODX write blocks of zeros, "
2060 "don't give if=IFILE option\n");
2061 pr2serr("For full copy version of ODX write blocks of zeros, "
2062 "give if=/dev/null or equivalent\n");
2063 return SG_LIB_SYNTAX_ERROR;
2064 }
2065 res = fetch_read_cap(op, DDPT_ARG_OUT, &out_num_blks, &out_blk_sz);
2066 if (res)
2067 return res;
2068 v = out_num_blks;
2069 if (op->out_sgl) { /* scatter list */
2070 out_num_elems = op->out_sgl_elems;
2071 out_num_blks = count_sgl_blocks(op->out_sgl, out_num_elems);
2072 } else { /* no scatter list */
2073 out_num_elems = 1;
2074 out_num_blks = got_count ? op->dd_count : 0;
2075 }
2076 if (0 == op->dd_count) {
2077 if (op->verbose)
2078 pr2serr("%s: enough checks, count=0 given so exit\n", __func__);
2079 return 0;
2080 }
2081 if ((op->dd_count < 0) && (0 == out_num_blks)) {
2082 if (op->verbose > 1)
2083 pr2serr("%s: zero the lot after scaling for seek=\n", __func__);
2084 v -= op->seek;
2085 if (v < 0) {
2086 pr2serr("%s: seek exceeds out device size\n", __func__);
2087 return SG_LIB_SYNTAX_ERROR;
2088 }
2089 out_num_blks = v;
2090 }
2091 out_blk_off = 0;
2092 op->dd_count = out_num_blks;
2093 op->dd_count_start = op->dd_count;
2094
2095 /* Build fixed format ROD Token Block Zero; specified by SBC-3 */
2096 memset(local_rod_token, 0, sizeof(local_rod_token));
2097 local_rod_token[0] = (unsigned char)((RODT_BLK_ZERO >> 24) & 0xff);
2098 local_rod_token[1] = (unsigned char)((RODT_BLK_ZERO >> 16) & 0xff);
2099 local_rod_token[2] = (unsigned char)((RODT_BLK_ZERO >> 8) & 0xff);
2100 local_rod_token[3] = (unsigned char)(RODT_BLK_ZERO & 0xff);
2101 local_rod_token[6] = (unsigned char)((ODX_ROD_TOK_LEN_FLD >> 8) & 0xff);
2102 local_rod_token[7] = (unsigned char)(ODX_ROD_TOK_LEN_FLD & 0xff);
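    /* Only bytes 0-3 (ROD type) and 6-7 (ROD token length field) of the
     * 512 byte token are non-zero. The block zero ROD Token is a
     * pre-defined (well known) token, so no POPULATE TOKEN command is
     * needed before the WRITE USING TOKEN loop below. */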
2103
2104 if (op->verbose > 1)
2105 pr2serr("%s: about to zero %" PRIi64 " blocks\n", __func__,
2106 out_num_blks);
2107
2108 for (k = 0; out_num_blks > 0; out_num_blks -= num, ++k) {
2109 num = out_num_blks;
2110 if ((op->obpch > 0) && ((uint64_t)op->obpch < num))
2111 num = op->obpch; /* in this case BPT refers to OFILE */
2112 if ((odip->odxp->max_tok_xfer_size > 0) &&
2113 (num > odip->odxp->max_tok_xfer_size))
2114 num = odip->odxp->max_tok_xfer_size;
2115 if (op->out_sgl)
2116 num = count_restricted_sgl_blocks(op->out_sgl, out_num_elems,
2117 out_blk_off, num,
2118 odip->odxp->max_range_desc);
2119 if ((res = do_wut(op, local_rod_token, out_blk_off, num, 0, 0,
2120 ! op->list_id_given, vb3)))
2121 return res;
2122 if ((res = process_after_wut(op, &tc, vb3)))
2123 return res;
2124 if (tc != num) {
2125 pr2serr("%s: number requested differs from transfer count\n",
2126 __func__);
2127 // ouch, think about this one
2128 }
2129 op->out_full += tc;
2130 out_blk_off += num;
2131 op->dd_count -= tc;
2132 }
2133 return 0;
2134 }
2135
2136 /* This function is designed to be the reading or input side of a network
2137 * copy. Returns 0 on success. */
2138 static int
2139 odx_read_into_rods(struct opts_t * op)
2140 {
2141 int k, res, in_blk_sz, got_count, in_num_elems, vb3;
2142 uint64_t in_blk_off, num, tc_i;
2143 int64_t in_num_blks, u;
2144 struct dev_info_t * idip = op->idip;
2145
2146 vb3 = (op->verbose > 1) ? (op->verbose - 2) : 0;
2147 got_count = (op->dd_count > 0);
2148 /* need to know block size of input and output */
2149 res = fetch_read_cap(op, DDPT_ARG_IN, &in_num_blks, &in_blk_sz);
2150 if (res)
2151 return res;
2152 u = in_num_blks;
2153 if (op->in_sgl) { /* gather list */
2154 in_num_elems = op->in_sgl_elems;
2155 in_num_blks = count_sgl_blocks(op->in_sgl, in_num_elems);
2156 if (got_count && (in_num_blks != op->dd_count)) {
2157 pr2serr("%s: count= value not equal to the sum of gather nums\n",
2158 __func__);
2159 return SG_LIB_CAT_OTHER;
2160 }
2161 } else {
2162 in_num_elems = 1;
2163 in_num_blks = got_count ? op->dd_count : 0;
2164 }
2165 if (0 == op->dd_count) {
2166 if (op->verbose)
2167 pr2serr("%s: enough checks, count=0 given so exit\n", __func__);
2168 return 0;
2169 }
2170 if ((op->dd_count < 0) && (0 == in_num_blks)) {
2171 if (op->verbose > 1)
2172 pr2serr("%s: read the lot after scaling for skip=\n", __func__);
2173 u -= op->skip;
2174 if (u < 0) {
2175 pr2serr("%s: skip exceeds input device size\n", __func__);
2176 return SG_LIB_SYNTAX_ERROR;
2177 }
2178 in_num_blks = u;
2179 }
2180
2181 in_blk_off = 0;
2182 op->dd_count = in_num_blks;
2183 op->dd_count_start = op->dd_count;
2184 if (op->verbose > 1)
2185 pr2serr("%s: about to read %" PRIi64 " blocks\n", __func__,
2186 in_num_blks);
2187
2188 /* read using PT[,PT...] sequence; output separate ROD Token for each */
2189 for (k = 0; in_num_blks > 0; in_num_blks -= num, ++k) {
2190 if (k > 0)
2191 signals_process_delay(op, DELAY_COPY_SEGMENT);
2192 num = in_num_blks;
2193 if (op->bpt_given && ((uint64_t)op->bpt_i < num))
2194 num = op->bpt_i;
2195 if ((idip->odxp->max_tok_xfer_size > 0) &&
2196 (num > idip->odxp->max_tok_xfer_size))
2197 num = idip->odxp->max_tok_xfer_size;
2198 if (op->in_sgl)
2199 num = count_restricted_sgl_blocks(op->in_sgl, in_num_elems,
2200 in_blk_off, num,
2201 idip->odxp->max_range_desc);
2202 if (op->verbose > 2)
2203 pr2serr("%s: k=%d, in_blk_off=0x%" PRIx64 ", i_num=%" PRIu64 "\n",
2204 __func__, k, in_blk_off, num);
2205
2206 if ((res = do_pop_tok(op, in_blk_off, num, ! op->list_id_given, vb3)))
2207 return res;
2208 if ((res = process_after_poptok(op, &tc_i, vb3)))
2209 return res;
2210 if (tc_i != num) {
2211 pr2serr("%s: number requested (in) differs from transfer "
2212 "count\n", __func__);
2213 // ouch, think about this one
2214 }
2215 op->in_full += tc_i;
2216 in_blk_off += tc_i;
2217 op->dd_count -= tc_i;
2218 }
2219 return 0;
2220 }
2221
2222 /* This function is designed to be the writing or output side of a network
2223 * copy: it writes to the destination from ROD Tokens previously saved to
2224 * the RTF file. Returns 0 on success. */
2225 static int
2226 odx_write_from_rods(struct opts_t * op)
2227 {
2228 int k, res, n, off, out_blk_sz;
2229 int got_count, out_num_elems, err, vb3;
2230 uint64_t out_blk_off, num, o_num, r_o_num, oir, tc_o;
2231 int64_t out_num_blks, v;
2232 struct dev_info_t * odip = op->odip;
2233 unsigned char rt[520];
2234
2235 vb3 = (op->verbose > 1) ? (op->verbose - 2) : 0;
2236 got_count = (op->dd_count > 0);
2237 res = fetch_read_cap(op, DDPT_ARG_OUT, &out_num_blks, &out_blk_sz);
2238 if (res)
2239 return res;
2240 v = out_num_blks;
2241 if (op->out_sgl) { /* scatter list */
2242 out_num_elems = op->out_sgl_elems;
2243 out_num_blks = count_sgl_blocks(op->out_sgl, out_num_elems);
2244 } else { /* no scatter list */
2245 out_num_elems = 1;
2246 out_num_blks = got_count ? op->dd_count : 0;
2247 }
2248 if (0 == op->dd_count) {
2249 if (op->verbose)
2250 pr2serr("%s: enough checks, count=0 given so exit\n", __func__);
2251 return 0;
2252 }
2253 if ((op->dd_count < 0) && (0 == out_num_blks)) {
2254 if (op->verbose > 1)
2255 pr2serr("%s: write the lot after scaling for seek=\n", __func__);
2256 v -= op->seek;
2257 if (v < 0) {
2258 pr2serr("%s: seek exceeds out device size\n", __func__);
2259 return SG_LIB_SYNTAX_ERROR;
2260 }
2261 out_num_blks = v;
2262 }
2263
2264 out_blk_off = 0;
2265 op->dd_count = out_num_blks;
2266 op->dd_count_start = op->dd_count;
2267 if (op->verbose > 1)
2268 pr2serr("%s: about to write %" PRIi64 " blocks (seen from output)\n",
2269 __func__, out_num_blks);
2270
2271 /* copy using PT, WUT, [WUT, ...], PT, WUT, [WUT, ...] sequence */
2272 for (k = 0; out_num_blks > 0; out_num_blks -= num, ++k) {
2273 if (k > 0)
2274 signals_process_delay(op, DELAY_COPY_SEGMENT);
2275
2276 memset(rt, 0, sizeof(rt));
2277 n = op->rtf_len_add ? 520 : 512;
2278 res = read(op->rtf_fd, rt, n);
2279 if (res < 0) {
2280 err = errno;
2281 pr2serr("%s: could not read '%s': %s\n", __func__, op->rtf,
2282 safe_strerror(err));
2283 return SG_LIB_FILE_ERROR;
2284 }
2285 if (0 == res) {
2286 if (op->verbose)
2287 pr2serr("%s: there are no more tokens to read from RTF or,\n"
2288 "if it is a pipe or socket, the other end closed "
2289 "it\n", __func__);
2290 break;
2291 }
2292 if (res < n) {
2293 pr2serr("%s: unable to read %d bytes from '%s', only got %d "
2294 "bytes\n", __func__, n, op->rtf, res);
2295 pr2serr(" try to continue\n");
2296 }
2297 if (op->rtf_len_add)
2298 off = 512;
2299 else {
2300 /* 'number of bytes represented' is a 16 byte integer! It starts
2301 * at offset 48 and may not be present so its contents might be
2302 * random. If any of the top 8 bytes are non-zero, give up. */
2303 for (n = 0; n < 8; ++n) {
2304 if (0x0 != rt[48 + n])
2305 break;
2306 }
2307 if (n < 8) {
2308 pr2serr("%s: wild 'bytes represented' field in ROD Token so "
2309 "give up.\n Try again with conv=rtf_len\n",
2310 __func__);
2311 return SG_LIB_CAT_OTHER;
2312 }
2313 off = 56;
2314 }
2315 for (n = 0, num = 0; n < 8; ++n) {
2316 if (n > 0)
2317 num <<= 8;
2318 num += rt[off + n];
2319 }
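        /* 'num' now holds the 8 byte, big endian byte count read from the
         * token: either the low half of the 16 byte 'number of bytes
         * represented' field (which starts at offset 48, low 8 bytes at
         * offset 56) or, when conv=rtf_len was used, the 8 byte length
         * appended after the 512 byte token by process_after_poptok().
         * It is converted to whole output blocks next. */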
2320 o_num = num / (unsigned int)op->obs;
2321 if (o_num > 0xffffffffffLL) {
2322 pr2serr("%s: ROD size seems too large (%" PRIu64 " blocks "
2323 "each %d bytes)\nTry again with conv=rtf_len\n", __func__,
2324 o_num, op->obs);
2325 return SG_LIB_CAT_OTHER;
2326 }
2327 if (0 == o_num) {
2328 pr2serr("%s: ROD size is less than 1 block (%d bytes). Try "
2329 "again with conv=rtf_len\n", __func__, op->obs);
2330 return SG_LIB_CAT_OTHER;
2331 }
2332 num = o_num;
2333
2334 for (oir = 0; o_num > 0; oir += r_o_num, o_num -= r_o_num) {
2335 /* output dev might be more constrained than input, so multiple
2336 * WUT calls (latter ones using offset in ROD) may be needed */
2337 if (k > 0)
2338 signals_process_delay(op, DELAY_WRITE);
2339 r_o_num = o_num;
2340 if (op->bpt_given) {
2341 /* take either bpt argument since input is a ROD */
2342 if ((op->obpch > 0) && ((uint64_t)op->obpch < r_o_num))
2343 r_o_num = op->obpch;
2344 else if ((op->bpt_i > 0) && ((uint64_t)op->bpt_i < r_o_num))
2345 r_o_num = op->bpt_i;
2346 }
2347 if ((odip->odxp->max_tok_xfer_size > 0) &&
2348 (r_o_num > odip->odxp->max_tok_xfer_size))
2349 r_o_num = odip->odxp->max_tok_xfer_size;
2350 if (op->out_sgl)
2351 r_o_num = count_restricted_sgl_blocks(op->out_sgl,
2352 out_num_elems, out_blk_off, r_o_num,
2353 odip->odxp->max_range_desc);
2354 res = do_wut(op, rt, out_blk_off, r_o_num, oir,
2355 (r_o_num < o_num), ! op->list_id_given, vb3);
2356 if (res)
2357 return res;
2358 if ((res = process_after_wut(op, &tc_o, vb3)))
2359 return res;
2360 if (tc_o != r_o_num) {
2361 pr2serr("%s: number requested (out) differs from transfer "
2362 "count\n", __func__);
2363 // ouch, could have over-drained ROD
2364 }
2365 op->out_full += tc_o;
2366 out_blk_off += tc_o;
2367 op->dd_count -= tc_o;
2368 }
2369 }
2370 return 0;
2371 }
2372
2373 /* This function is designed to copy large amounts (terabytes) with
2374 * potentially different block sizes on input and output. Returns
2375 * 0 on success. */
2376 static int
2377 odx_full_copy(struct opts_t * op)
2378 {
2379 int k, res, ok, in_blk_sz, out_blk_sz, oneto1, in_mult, out_mult;
2380 int got_count, in_num_elems, out_num_elems, vb3;
2381 uint64_t in_blk_off, out_blk_off, num, o_num, r_o_num, oir, tc_i, tc_o;
2382 int64_t in_num_blks, out_num_blks, u, uu, v, vv;
2383 struct dev_info_t * idip = op->idip;
2384 struct dev_info_t * odip = op->odip;
2385
2386 vb3 = (op->verbose > 1) ? (op->verbose - 2) : 0;
2387 got_count = (op->dd_count > 0);
2388 /* need to know block size of input and output */
2389 res = fetch_read_cap(op, DDPT_ARG_IN, &in_num_blks, &in_blk_sz);
2390 if (res)
2391 return res;
2392 u = in_num_blks;
2393 res = fetch_read_cap(op, DDPT_ARG_OUT, &out_num_blks, &out_blk_sz);
2394 if (res)
2395 return res;
2396 v = out_num_blks;
2397 oneto1 = (in_blk_sz == out_blk_sz);
2398 in_mult = 0; /* so (in_blk_sz < out_blk_sz) */
2399 out_mult = 0;
2400 if (! oneto1) {
2401 out_mult = in_blk_sz / out_blk_sz;
2402 if (out_mult > 0)
2403 ok = (in_blk_sz == (out_mult * out_blk_sz));
2404 else {
2405 in_mult = out_blk_sz / in_blk_sz;
2406 ok = (out_blk_sz == (in_mult * in_blk_sz));
2407 }
2408 if (! ok) {
2409 pr2serr("%s: only accept different block sizes if one is a "
2410 "multiple of the other.\n input block size=%d and "
2411 "output block size=%d\n", __func__, in_blk_sz,
2412 out_blk_sz);
2413 return SG_LIB_CAT_OTHER;
2414 }
2415 }
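    /* Worked example: an input block size of 4096 with an output block
     * size of 512 gives out_mult=8 (each input block maps to 8 output
     * blocks); 512 in and 4096 out gives in_mult=8. A pairing such as 512
     * and 520 is rejected because neither size divides the other. */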
2416 if (op->in_sgl) { /* gather list */
2417 in_num_elems = op->in_sgl_elems;
2418 in_num_blks = count_sgl_blocks(op->in_sgl, in_num_elems);
2419 if (got_count && (in_num_blks != op->dd_count)) {
2420 pr2serr("%s: count= value not equal to the sum of gather nums\n",
2421 __func__);
2422 return SG_LIB_CAT_OTHER;
2423 }
2424 } else {
2425 in_num_elems = 1;
2426 in_num_blks = got_count ? op->dd_count : 0;
2427 }
2428 if (op->out_sgl) { /* scatter list */
2429 out_num_elems = op->out_sgl_elems;
2430 out_num_blks = count_sgl_blocks(op->out_sgl, out_num_elems);
2431 if (oneto1) {
2432 if (got_count && (out_num_blks != op->dd_count)) {
2433 pr2serr("%s: count= value not equal to the sum of scatter "
2434 "nums\n", __func__);
2435 return SG_LIB_SYNTAX_ERROR;
2436 }
2437 if (out_num_blks != in_num_blks) {
2438 pr2serr("%s: number of blocks in gather list differ from "
2439 "scatter list\n", __func__);
2440 if (op->iflagp->force || op->oflagp->force)
2441 pr2serr("... continuing due to force flag\n");
2442 else {
2443 pr2serr("... can be overridden with force flag\n");
2444 return SG_LIB_SYNTAX_ERROR;
2445 }
2446 }
2447 } else { /* unequal block size */
2448 u = out_blk_sz * out_num_blks;
2449 if (op->in_sgl && (u != (in_blk_sz * in_num_blks))) {
2450 pr2serr("%s: number of blocks in both lists need to reflect "
2451 "the same number of bytes, but don't\n", __func__);
2452 return SG_LIB_SYNTAX_ERROR;
2453 }
2454 if (got_count && (u != (in_blk_sz * in_num_blks))) {
2455 pr2serr("%s: number of scatter blocks and count need to "
2456 "reflect the same number of bytes, but don't\n",
2457 __func__);
2458 return SG_LIB_SYNTAX_ERROR;
2459 }
2460 }
2461 } else { /* no scatter list */
2462 out_num_elems = 1;
2463 if (got_count) {
2464 if (oneto1)
2465 out_num_blks = op->dd_count;
2466 else if (in_mult)
2467 out_num_blks = op->dd_count * out_blk_sz / in_blk_sz;
2468 else
2469 out_num_blks = op->dd_count * in_blk_sz / out_blk_sz;
2470 } else
2471 out_num_blks = 0;
2472 }
2473 if (0 == op->dd_count) {
2474 if (op->verbose)
2475 pr2serr("%s: enough checks, count=0 given so exit\n", __func__);
2476 return 0;
2477 }
2478 if ((op->dd_count < 0) && (0 == in_num_blks) && (0 == out_num_blks)) {
2479 if (op->verbose > 1)
2480 pr2serr("%s: copy the lot after scaling for skip= and seek=\n",
2481 __func__);
2482 u -= op->skip;
2483 v -= op->seek;
2484 if (u < 0) {
2485 pr2serr("%s: skip exceeds input device size\n", __func__);
2486 return SG_LIB_SYNTAX_ERROR;
2487 }
2488 if (v < 0) {
2489 pr2serr("%s: seek exceeds out device size\n", __func__);
2490 return SG_LIB_SYNTAX_ERROR;
2491 }
2492 if (oneto1) {
2493 in_num_blks = (u < v) ? u : v;
2494 out_num_blks = in_num_blks;
2495 } else {
2496 uu = u * in_blk_sz;
2497 vv = v * out_blk_sz;
2498 if (uu == vv) {
2499 in_num_blks = u;
2500 out_num_blks = v;
2501 } else if (uu < vv) {
2502 in_num_blks = u;
2503 out_num_blks = uu / out_blk_sz;
2504 } else {
2505 in_num_blks = vv / in_blk_sz;
2506 out_num_blks = v;
2507 }
2508 }
2509 }
2510
2511 in_blk_off = 0;
2512 out_blk_off = 0;
2513 op->dd_count = in_num_blks;
2514 op->dd_count_start = op->dd_count;
2515 if (op->verbose > 1)
2516 pr2serr("%s: about to copy %" PRIi64 " blocks (seen from input)\n",
2517 __func__, in_num_blks);
2518
2519 /* copy using PT, WUT, [WUT, ...], PT, WUT, [WUT, ...] sequence */
2520 for (k = 0; in_num_blks > 0; in_num_blks -= num, ++k) {
2521 if (k > 0)
2522 signals_process_delay(op, DELAY_COPY_SEGMENT);
2523 num = in_num_blks;
2524 if (op->bpt_given && ((uint64_t)op->bpt_i < num))
2525 num = op->bpt_i;
2526 if ((idip->odxp->max_tok_xfer_size > 0) &&
2527 (num > idip->odxp->max_tok_xfer_size))
2528 num = idip->odxp->max_tok_xfer_size;
2529 if (op->in_sgl)
2530 num = count_restricted_sgl_blocks(op->in_sgl, in_num_elems,
2531 in_blk_off, num,
2532 idip->odxp->max_range_desc);
2533 if (! oneto1) {
2534 if (in_mult) {
2535 o_num = num / in_mult;
2536 num = o_num * in_mult;
2537 if (0 == num) {
2538 if (in_num_blks < in_mult) {
2539 pr2serr("%s: unable to copy trailing blocks due to "
2540 "block size mismatch\n", __func__);
2541 return 0;
2542 } else {
2543 pr2serr("%s: block size mismatch problem, perhaps "
2544 "BPT value too small\n", __func__);
2545 return SG_LIB_SYNTAX_ERROR;
2546 }
2547 }
2548 } else /* out_mult must be >= 2 */
2549 o_num = num * out_mult;
2550 } else
2551 o_num = num;
2552 if (op->verbose > 2)
2553 pr2serr("%s: k=%d, in_blk_off=0x%" PRIx64 ", i_num=%" PRIu64 ", "
2554 "out_blk_off=0x%" PRIx64 ", o_num=%" PRIu64 "\n",
2555 __func__, k, in_blk_off, num, out_blk_off, o_num);
2556
2557 if ((res = do_pop_tok(op, in_blk_off, num, ! op->list_id_given, vb3)))
2558 return res;
2559 if ((res = process_after_poptok(op, &tc_i, vb3)))
2560 return res;
2561 if (tc_i != num) {
2562 pr2serr("%s: number requested (in) differs from transfer "
2563 "count\n", __func__);
2564 // ouch, think about this one
2565 }
2566 op->in_full += tc_i;
2567 in_blk_off += tc_i;
2568
2569 for (oir = 0; o_num > 0; oir += r_o_num, o_num -= r_o_num) {
2570 /* output dev might be more constrained than input, so multiple
2571 * WUT calls (latter ones using offset in ROD) may be needed */
2572 if (k > 0)
2573 signals_process_delay(op, DELAY_WRITE);
2574 r_o_num = o_num;
2575 if ((op->obpch > 0) && ((uint64_t)op->obpch < r_o_num))
2576 r_o_num = op->obpch;
2577 if ((odip->odxp->max_tok_xfer_size > 0) &&
2578 (r_o_num > odip->odxp->max_tok_xfer_size))
2579 r_o_num = odip->odxp->max_tok_xfer_size;
2580 if (op->out_sgl)
2581 r_o_num = count_restricted_sgl_blocks(op->out_sgl,
2582 out_num_elems, out_blk_off, r_o_num,
2583 odip->odxp->max_range_desc);
2584 res = do_wut(op, local_rod_token, out_blk_off, r_o_num, oir,
2585 (r_o_num < o_num), ! op->list_id_given, vb3);
2586 if (res)
2587 return res;
2588 if ((res = process_after_wut(op, &tc_o, vb3)))
2589 return res;
2590 if (tc_o != r_o_num) {
2591 pr2serr("%s: number requested (out) differs from transfer "
2592 "count\n", __func__);
2593 // ouch, could have over-drained ROD
2594 }
2595 op->out_full += tc_o;
2596 out_blk_off += tc_o;
2597 }
2598 op->dd_count -= tc_i;
2599 }
2600 return 0;
2601 }
2602
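/* Dispatcher for the ODX request types: opens the pass-through device(s)
 * the request needs, fetches the Block Device ROD Token Limits from the
 * 3PC VPD page into dip->odxp, then calls odx_read_into_rods(),
 * odx_write_from_rods(), odx_full_zero_copy() or odx_full_copy(). When
 * 'whop' is non-NULL it is set to 0, 1 or 2 to indicate which side(s) were
 * active; do_odx() passes that value to print_stats(). */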
2603 static int
2604 odx_setup_and_run(struct opts_t * op, int * whop)
2605 {
2606 int fd, res, req;
2607 struct dev_info_t * dip;
2608
2609 if (whop)
2610 *whop = 0;
2611 req = op->odx_request;
2612 if (! op->list_id_given)
2613 op->list_id = (ODX_WRITE_FROM_RODS == req) ?
2614 DEF_LID4_WR_LID : DEF_LID4_LID;
2615 if ((ODX_READ_INTO_RODS == req) ||
2616 ((ODX_COPY == req) && (RODT_BLK_ZERO != op->rod_type))) {
2617 fd = pt_open_if(op, NULL);
2618 if (-1 == fd)
2619 return SG_LIB_FILE_ERROR;
2620 else if (fd < -1)
2621 return SG_LIB_CAT_OTHER;
2622 dip = op->idip;
2623 dip->fd = fd;
2624 dip->odxp = (struct block_rodtok_vpd *)malloc(sizeof(*dip->odxp));
2625 if (NULL == dip->odxp) {
2626 pr2serr("Not enough user memory for %s\n", __func__);
2627 return SG_LIB_CAT_OTHER;
2628 }
2629 memset(dip->odxp, 0, sizeof(*dip->odxp));
2630 res = get_3pc_vpd_blkdev_lims(op, dip);
2631 if (res && (op->iflagp->force < 2))
2632 return res;
2633 }
2634 if ((ODX_WRITE_FROM_RODS == req) || (ODX_COPY == req)) {
2635 fd = pt_open_of(op, NULL);
2636 if (-1 == fd)
2637 return SG_LIB_FILE_ERROR;
2638 else if (fd < -1)
2639 return SG_LIB_CAT_OTHER;
2640 dip = op->odip;
2641 dip->fd = fd;
2642 dip->odxp = (struct block_rodtok_vpd *)malloc(sizeof(*dip->odxp));
2643 if (NULL == dip->odxp) {
2644 pr2serr("Not enough user memory for %s 2\n", __func__);
2645 return SG_LIB_CAT_OTHER;
2646 }
2647 memset(dip->odxp, 0, sizeof(*dip->odxp));
2648 res = get_3pc_vpd_blkdev_lims(op, dip);
2649 if (res && (op->oflagp->force < 2))
2650 return res;
2651 }
2652
2653 if (ODX_READ_INTO_RODS == req) {
2654 if (whop)
2655 *whop = 1;
2656 res = odx_read_into_rods(op);
2657 if (res)
2658 return res;
2659 } else if (ODX_WRITE_FROM_RODS == req) {
2660 if (whop)
2661 *whop = 2;
2662 res = odx_write_from_rods(op);
2663 if (res)
2664 return res;
2665 } else if (ODX_COPY == req) {
2666 if (op->rod_type_given && (RODT_BLK_ZERO == op->rod_type)) {
2667 if (whop)
2668 *whop = 2;
2669 return odx_full_zero_copy(op);
2670 } else
2671 return odx_full_copy(op);
2672 }
2673 return 0;
2674 }
2675
2676
2677 /* Called from main() in ddpt.c. Returns 0 on success or a positive
2678 * error code if there are problems. This is for ODX which is a subset of
2679 * xcopy(LID4) for disk->disk, disk->held and held->disk copies. */
2680 int
2681 do_odx(struct opts_t * op)
2682 {
2683 int ret, who;
2684
2685 if (op->iflagp->append || op->oflagp->append)
2686 ++op->rtf_append;
2687 if (op->iflagp->rtf_len || op->oflagp->rtf_len)
2688 ++op->rtf_len_add;
2689 if (op->rtf[0]) {
2690 ret = open_rtf(op);
2691 if (ret) {
2692 ret = SG_LIB_FILE_ERROR;
2693 goto the_end;
2694 }
2695 }
2696 if (op->do_time)
2697 calc_duration_init(op);
2698 ret = odx_setup_and_run(op, &who);
2699 if (0 == op->status_none)
2700 print_stats("", op, who);
2701 if (op->do_time)
2702 calc_duration_throughput("", 0, op);
2703 if (op->rtf_fd >= 0) {
2704 close(op->rtf_fd);
2705 op->rtf_fd = -1;
2706 }
2707 the_end:
2708 return ret;
2709 }
2710