// SPDX-License-Identifier: GPL-2.0
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

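/*
 * Vendor-specific opcodes and log pages defined by the Open-Channel SSD
 * specifications: the admin opcodes below come from OCSSD 1.2, the chunk
 * report log page from OCSSD 2.0.
 */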
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

enum nvme_nvm_log_page {
        NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

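/*
 * Every vendor-specific command mirrors the 64-byte NVMe submission queue
 * entry; _nvme_nvm_check_size() below enforces the sizes at build time.
 */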
struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sec_offset;
        __u8                    sec_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8                    tblid[4];
        __le16                  verid;
        __le16                  revid;
        __le32                  rvsd1;
        __le32                  tblks;
        __le32                  tfact;
        __le32                  tgrown;
        __le32                  tdresv;
        __le32                  thresv;
        __le32                  rsvd2[8];
        __u8                    blk[];
};

struct nvme_nvm_id20_addrf {
        __u8                    grp_len;
        __u8                    pu_len;
        __u8                    chk_len;
        __u8                    lba_len;
        __u8                    resv[4];
};

struct nvme_nvm_id20 {
        __u8                    mjr;
        __u8                    mnr;
        __u8                    resv[6];

        struct nvme_nvm_id20_addrf lbaf;

        __le32                  mccap;
        __u8                    resv2[12];

        __u8                    wit;
        __u8                    resv3[31];

        /* Geometry */
        __le16                  num_grp;
        __le16                  num_pu;
        __le32                  num_chk;
        __le32                  clba;
        __u8                    resv4[52];

        /* Write data requirements */
        __le32                  ws_min;
        __le32                  ws_opt;
        __le32                  mw_cunits;
        __le32                  maxoc;
        __le32                  maxocpu;
        __u8                    resv5[44];

        /* Performance related metrics */
        __le32                  trdt;
        __le32                  trdm;
        __le32                  twrt;
        __le32                  twrm;
        __le32                  tcrst;
        __le32                  tcrsm;
        __u8                    resv6[40];

        /* Reserved area */
        __u8                    resv7[2816];

        /* Vendor specific */
        __u8                    vs[1024];
};

struct nvme_nvm_chk_meta {
        __u8    state;
        __u8    type;
        __u8    wi;
        __u8    rsvd[5];
        __le64  slba;
        __le64  cnlb;
        __le64  wp;
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
                                                sizeof(struct nvm_chk_meta));
}

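/*
 * Expand the 1.2 PPA format descriptor into field lengths, offsets and
 * bit masks. As an illustration (values hypothetical, not from a real
 * device): ch_len = 4 at ch_offset = 8 yields ch_mask = 0xf00.
 */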
static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
                                 struct nvme_nvm_id12_addrf *src)
{
        dst->ch_len = src->ch_len;
        dst->lun_len = src->lun_len;
        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->ch_offset = src->ch_offset;
        dst->lun_offset = src->lun_offset;
        dst->blk_offset = src->blk_offset;
        dst->pg_offset = src->pg_offset;
        dst->pln_offset = src->pln_offset;
        dst->sec_offset = src->sec_offset;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

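/*
 * Unfold the 1.2 identify data into the generic geometry. The sectors per
 * chunk (clba) follow from the flash page geometry; e.g. a hypothetical
 * device with fpg_sz = 16384, csecs = 4096, num_pln = 2 and num_pg = 512
 * ends up with clba = (16384 / 4096) * 2 * 512 = 4096 sectors.
 */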
static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
                             struct nvm_geo *geo)
{
        struct nvme_nvm_id12_grp *src;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id->cgrps != 1)
                return -EINVAL;

        src = &id->grp;

        if (src->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* 1.2 spec. only reports a single version id - unfold */
        geo->major_ver_id = id->ver_id;
        geo->minor_ver_id = 2;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_12;

        geo->num_ch = src->num_ch;
        geo->num_lun = src->num_lun;
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le16_to_cpu(src->num_chk);

        geo->csecs = le16_to_cpu(src->csecs);
        geo->sos = le16_to_cpu(src->sos);

        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        geo->clba = sec_per_pl * pg_per_blk;

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = sec_per_pg;
        geo->ws_opt = sec_per_pg;
        geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */

        /* Do not impose values for maximum number of open blocks as it is
         * unspecified in 1.2. Users of 1.2 must be aware of this and, if
         * restrictions apply, specify the values through a quirk.
         */
        geo->maxoc = geo->all_luns * geo->num_chk;
        geo->maxocpu = geo->num_chk;

        geo->mccap = le32_to_cpu(src->mccap);

        geo->trdt = le32_to_cpu(src->trdt);
        geo->trdm = le32_to_cpu(src->trdm);
        geo->tprt = le32_to_cpu(src->tprt);
        geo->tprm = le32_to_cpu(src->tprm);
        geo->tbet = le32_to_cpu(src->tbet);
        geo->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        geo->vmnt = id->vmnt;
        geo->cap = le32_to_cpu(id->cap);
        geo->dom = le32_to_cpu(id->dom);

        geo->mtype = src->mtype;
        geo->fmtype = src->fmtype;

        geo->cpar = le16_to_cpu(src->cpar);
        geo->mpos = le32_to_cpu(src->mpos);

        geo->pln_mode = NVM_PLANE_SINGLE;

        if (geo->mpos & 0x020202) {
                geo->pln_mode = NVM_PLANE_DOUBLE;
                geo->ws_opt <<= 1;
        } else if (geo->mpos & 0x040404) {
                geo->pln_mode = NVM_PLANE_QUAD;
                geo->ws_opt <<= 2;
        }

        geo->num_pln = src->num_pln;
        geo->num_pg = le16_to_cpu(src->num_pg);
        geo->fpg_sz = le16_to_cpu(src->fpg_sz);

        nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);

        return 0;
}

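/*
 * The 2.0 address format packs its fields contiguously from bit 0 upwards:
 *
 *      MSB                                  LSB
 *      [ group | parallel unit | chunk | sector ]
 */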
static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
                                 struct nvme_nvm_id20_addrf *src)
{
        dst->ch_len = src->grp_len;
        dst->lun_len = src->pu_len;
        dst->chk_len = src->chk_len;
        dst->sec_len = src->lba_len;

        dst->sec_offset = 0;
        dst->chk_offset = dst->sec_len;
        dst->lun_offset = dst->chk_offset + dst->chk_len;
        dst->ch_offset = dst->lun_offset + dst->lun_len;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
                             struct nvm_geo *geo)
{
        geo->major_ver_id = id->mjr;
        geo->minor_ver_id = id->mnr;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_20;

        geo->num_ch = le16_to_cpu(id->num_grp);
        geo->num_lun = le16_to_cpu(id->num_pu);
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le32_to_cpu(id->num_chk);
        geo->clba = le32_to_cpu(id->clba);

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = le32_to_cpu(id->ws_min);
        geo->ws_opt = le32_to_cpu(id->ws_opt);
        geo->mw_cunits = le32_to_cpu(id->mw_cunits);
        geo->maxoc = le32_to_cpu(id->maxoc);
        geo->maxocpu = le32_to_cpu(id->maxocpu);

        geo->trdt = le32_to_cpu(id->trdt);
        geo->trdm = le32_to_cpu(id->trdm);
        geo->tprt = le32_to_cpu(id->twrt);
        geo->tprm = le32_to_cpu(id->twrm);
        geo->tbet = le32_to_cpu(id->tcrst);
        geo->tbem = le32_to_cpu(id->tcrsm);

        nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

        return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                   id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        /*
         * The 1.2 and 2.0 specifications share the first byte in their geometry
         * command to make it possible to know what version a device implements.
         */
        switch (id->ver_id) {
        case 1:
                ret = nvme_nvm_setup_12(id, &nvmdev->geo);
                break;
        case 2:
                ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
                                                        &nvmdev->geo);
                break;
        default:
                dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
                                                        id->ver_id);
                ret = -EINVAL;
        }

out:
        kfree(id);
        return ret;
}

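/*
 * The bad block table is a fixed header followed by one state byte per
 * block; with a 1.2 geometry that is num_chk * num_pln entries for the
 * LUN addressed by @ppa.
 */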
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                               u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->num_chk * geo->num_pln;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                   bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
            bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                        "bbt unexpected blocks returned (%u!=%u)",
                        le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
out:
        kfree(bb_tbl);
        return ret;
}

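/*
 * Mark nr_ppas blocks starting at *ppas with the given state. Note that
 * the on-wire nlb field is zero-based, hence the nr_ppas - 1 below.
 */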
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                               int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                   NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

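/*
 * The chunk report log is ordered by (group, parallel unit, chunk), so the
 * device lba is first normalized to its generic form and then linearized
 * as chk + pu * num_chk + grp * num_lun * num_chk to obtain the byte
 * offset into the log page.
 */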
/*
 * Expect the lba in device format
 */
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
                                 sector_t slba, int nchks,
                                 struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &ndev->geo;
        struct nvme_ns *ns = ndev->q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
        struct ppa_addr ppa;
        size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
        size_t log_pos, offset, len;
        int i, max_len;
        int ret = 0;

        /*
         * limit requests to maximum 256K to avoid issuing arbitrarily large
         * requests when the device does not specify a maximum transfer size.
         */
        max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);

        dev_meta = kmalloc(max_len, GFP_KERNEL);
        if (!dev_meta)
                return -ENOMEM;

        /* Normalize lba address space to obtain log offset */
        ppa.ppa = slba;
        ppa = dev_to_generic_addr(ndev, ppa);

        log_pos = ppa.m.chk;
        log_pos += ppa.m.pu * geo->num_chk;
        log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;

        offset = log_pos * sizeof(struct nvme_nvm_chk_meta);

        while (left) {
                len = min_t(unsigned int, left, max_len);

                memset(dev_meta, 0, max_len);
                dev_meta_off = dev_meta;

                ret = nvme_get_log(ctrl, ns->head->ns_id,
                                NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
                                dev_meta, len, offset);
                if (ret) {
                        dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
                        break;
                }

                for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
                        meta->state = dev_meta_off->state;
                        meta->type = dev_meta_off->type;
                        meta->wi = dev_meta_off->wi;
                        meta->slba = le64_to_cpu(dev_meta_off->slba);
                        meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
                        meta->wp = le64_to_cpu(dev_meta_off->wp);

                        meta++;
                        dev_meta_off++;
                }

                offset += len;
                left -= len;
        }

        kfree(dev_meta);

        return ret;
}

static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio)
                blk_rq_append_bio(rq, rqd->bio);
        else
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        return rq;
}

static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
                              void *buf)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;
        int ret;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                ret = PTR_ERR(rq);
                goto err_free_cmd;
        }

        if (buf) {
                ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
                                                GFP_KERNEL);
                if (ret)
                        goto err_free_cmd;
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(NULL, rq, 0, nvme_nvm_end_io);

        return 0;

err_free_cmd:
        kfree(cmd);
        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
                                      int size)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                  dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

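/*
 * Device operations handed to the lightnvm core through nvm_register();
 * the core uses them to identify the device, manage bad-block and chunk
 * state, and submit physical-address I/O.
 */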
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .get_chk_meta           = nvme_nvm_get_chk_meta,

        .submit_io              = nvme_nvm_submit_io,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};

static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        if (timeout)
                rq->timeout = timeout;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                        &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                   (void __user *)meta_buf,
                                                   meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio_set_dev(bio, ns->disk->part0);
        }

        blk_execute_rq(NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
        c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
        c.common.cdw15 = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, argp);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, argp);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, argp);
        default:
                return -ENOTTY;
        }
}
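
/*
 * The ioctls above are reached through the namespace block device. A
 * minimal user-space sketch (illustrative only: the device path is an
 * example, 0x92 is the OCSSD 1.2 physical-read opcode, and all error
 * handling is omitted):
 *
 *      struct nvm_user_vio vio = { .opcode = 0x92 };
 *      int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *      ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 */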

int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;
        struct nvm_geo *geo;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
        geo = &dev->geo;
        geo->csecs = 1 << ns->lba_shift;
        geo->sos = ns->ms;
        if (ns->features & NVME_NS_EXT_LBAS)
                geo->ext = true;
        else
                geo->ext = false;
        geo->mdts = ns->ctrl->max_hw_sectors;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                if (geo->major_ver_id == 1)
                        return scnprintf(page, PAGE_SIZE, "%u\n",
                                                geo->major_ver_id);
                else
                        return scnprintf(page, PAGE_SIZE, "%u.%u\n",
                                                geo->major_ver_id,
                                                geo->minor_ver_id);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
        } else {
                return scnprintf(page,
                        PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

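/*
 * Render the 1.2 PPA format as a sysfs string: twelve hex bytes giving
 * the offset/length pair of each field, from channel down to sector.
 */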
static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
        return scnprintf(page, PAGE_SIZE,
                "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                ppaf->ch_offset, ppaf->ch_len,
                ppaf->lun_offset, ppaf->lun_len,
                ppaf->pln_offset, ppaf->pln_len,
                ppaf->blk_offset, ppaf->blk_len,
                ppaf->pg_offset, ppaf->pg_len,
                ppaf->sec_offset, ppaf->sec_len);
}

static ssize_t nvm_dev_attr_show_12(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_20(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "groups") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "punits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "chunks") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "clba") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
        } else if (strcmp(attr->name, "ws_min") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
        } else if (strcmp(attr->name, "ws_opt") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
        } else if (strcmp(attr->name, "maxoc") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
        } else if (strcmp(attr->name, "maxocpu") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
        } else if (strcmp(attr->name, "mw_cunits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
        } else if (strcmp(attr->name, "write_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "write_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "reset_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "reset_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                  \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)                               \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)                               \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);

static struct attribute *nvm_dev_attrs[] = {
        /* version agnostic attrs */
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,

        /* 1.2 attrs */
        &dev_attr_vendor_opcode.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,
        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,

        /* 2.0 attrs */
        &dev_attr_groups.attr,
        &dev_attr_punits.attr,
        &dev_attr_chunks.attr,
        &dev_attr_clba.attr,
        &dev_attr_ws_min.attr,
        &dev_attr_ws_opt.attr,
        &dev_attr_maxoc.attr,
        &dev_attr_maxocpu.attr,
        &dev_attr_mw_cunits.attr,

        &dev_attr_write_typ.attr,
        &dev_attr_write_max.attr,
        &dev_attr_reset_typ.attr,
        &dev_attr_reset_max.attr,

        NULL,
};

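/*
 * Only expose the attributes that match the spec revision the device
 * reported; the version-agnostic attributes are always visible.
 */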
static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
                                     struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct nvme_ns *ns = disk->private_data;
        struct nvm_dev *ndev = ns->ndev;
        struct device_attribute *dev_attr =
                container_of(attr, typeof(*dev_attr), attr);

        if (!ndev)
                return 0;

        if (dev_attr->show == nvm_dev_attr_show)
                return attr->mode;

        switch (ndev->geo.major_ver_id) {
        case 1:
                if (dev_attr->show == nvm_dev_attr_show_12)
                        return attr->mode;
                break;
        case 2:
                if (dev_attr->show == nvm_dev_attr_show_20)
                        return attr->mode;
                break;
        }

        return 0;
}

const struct attribute_group nvme_nvm_attr_group = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs,
        .is_visible     = nvm_dev_attrs_visible,
};
