xref: /qemu/hw/ufs/ufs.c (revision f2c8aeb1)
1 /*
2  * QEMU Universal Flash Storage (UFS) Controller
3  *
4  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7  *
8  * SPDX-License-Identifier: GPL-2.0-or-later
9  */
10 
11 /**
12  * Reference Specs: JEDEC UFS 3.1 (https://www.jedec.org/)
13  *
14  * Usage
15  * -----
16  *
17  * Add options:
18  *      -drive file=<file>,if=none,id=<drive_id>
19  *      -device ufs,serial=<serial>,id=<bus_name>, \
20  *              nutrs=<N[optional]>,nutmrs=<N[optional]>
21  *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
22  */
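/*
 * Example invocation (a sketch only; the image path, serial and IDs below are
 * illustrative placeholders, not values taken from this file):
 *
 *      -drive file=ufs-backend.qcow2,if=none,id=ufs-drive0 \
 *      -device ufs,serial=deadbeef,id=ufs0 \
 *      -device ufs-lu,drive=ufs-drive0,bus=ufs0
 */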
23 
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "trace.h"
29 #include "ufs.h"
30 
31 /* The QEMU-UFS device follows spec version 3.1 */
32 #define UFS_SPEC_VER 0x0310
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 
36 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
37 {
38     hwaddr hi = addr + size - 1;
39 
40     if (hi < addr) {
41         return MEMTX_DECODE_ERROR;
42     }
43 
44     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
45         return MEMTX_DECODE_ERROR;
46     }
47 
48     return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
49 }
50 
51 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
52                                   int size)
53 {
54     hwaddr hi = addr + size - 1;
55     if (hi < addr) {
56         return MEMTX_DECODE_ERROR;
57     }
58 
59     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
60         return MEMTX_DECODE_ERROR;
61     }
62 
63     return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
64 }
65 
66 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
67 {
68     hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
69     hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
70 
71     return utrd_addr;
72 }
73 
74 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
75 {
76     uint32_t cmd_desc_base_addr_lo =
77         le32_to_cpu(utrd->command_desc_base_addr_lo);
78     uint32_t cmd_desc_base_addr_hi =
79         le32_to_cpu(utrd->command_desc_base_addr_hi);
80 
81     return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
82 }
83 
84 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
85 {
86     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
87     uint32_t rsp_upiu_byte_off =
88         le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
89     return req_upiu_base_addr + rsp_upiu_byte_off;
90 }
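/*
 * Rough layout of a UTP command descriptor as assumed by the helpers above.
 * The offsets stored in the UTRD are in dword units, hence the
 * "* sizeof(uint32_t)" conversions:
 *
 *   command_desc_base_addr --> +------------------------+
 *                              | Request UPIU           |
 *   + response_upiu_offset --> +------------------------+
 *                              | Response UPIU          |
 *   + prd_table_offset -->     +------------------------+
 *                              | PRDT (scatter/gather)  |
 *                              +------------------------+
 */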
91 
92 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
93 {
94     UfsHc *u = req->hc;
95     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
96     MemTxResult ret;
97 
98     ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
99     if (ret) {
100         trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
101     }
102     return ret;
103 }
104 
105 static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
106 {
107     UfsHc *u = req->hc;
108     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
109     UtpUpiuReq *req_upiu = &req->req_upiu;
110     uint32_t copy_size;
111     uint16_t data_segment_length;
112     MemTxResult ret;
113 
114     /*
115      * To know the size of the req_upiu, we need to read the
116      * data_segment_length in the header first.
117      */
118     ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
119                         sizeof(UtpUpiuHeader));
120     if (ret) {
121         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
122         return ret;
123     }
124     data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
125 
126     copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
127                 data_segment_length;
128 
129     if (copy_size > sizeof(req->req_upiu)) {
130         copy_size = sizeof(req->req_upiu);
131     }
132 
133     ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
134     if (ret) {
135         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
136     }
137     return ret;
138 }
139 
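/*
 * Read the PRDT that follows the request/response UPIUs and turn it into a
 * QEMUSGList for DMA. The "size" field of a PRDT entry is a zero-based byte
 * count, hence the "+ 1" when accumulating data_len.
 */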
140 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
141 {
142     UfsHc *u = req->hc;
143     uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
144     uint16_t prdt_byte_off =
145         le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
146     uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
147     g_autofree UfshcdSgEntry *prd_entries = NULL;
148     hwaddr req_upiu_base_addr, prdt_base_addr;
149     int err;
150 
151     assert(!req->sg);
152 
153     if (prdt_size == 0) {
154         return MEMTX_OK;
155     }
156     prd_entries = g_new(UfshcdSgEntry, prdt_len);
157 
158     req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
159     prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
160 
161     err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
162     if (err) {
163         trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
164         return err;
165     }
166 
167     req->sg = g_malloc0(sizeof(QEMUSGList));
168     pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
169     req->data_len = 0;
170 
171     for (uint16_t i = 0; i < prdt_len; ++i) {
172         hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
173         uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
174         qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
175         req->data_len += data_byte_count;
176     }
177     return MEMTX_OK;
178 }
179 
180 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
181 {
182     MemTxResult ret;
183 
184     ret = ufs_dma_read_utrd(req);
185     if (ret) {
186         return ret;
187     }
188 
189     ret = ufs_dma_read_req_upiu(req);
190     if (ret) {
191         return ret;
192     }
193 
194     ret = ufs_dma_read_prdt(req);
195     if (ret) {
196         return ret;
197     }
198 
199     return MEMTX_OK;
200 }
201 
202 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
203 {
204     UfsHc *u = req->hc;
205     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
206     MemTxResult ret;
207 
208     ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
209     if (ret) {
210         trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
211     }
212     return ret;
213 }
214 
215 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
216 {
217     UfsHc *u = req->hc;
218     hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
219     uint32_t rsp_upiu_byte_len =
220         le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
221     uint16_t data_segment_length =
222         be16_to_cpu(req->rsp_upiu.header.data_segment_length);
223     uint32_t copy_size = sizeof(UtpUpiuHeader) +
224                          UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
225                          data_segment_length;
226     MemTxResult ret;
227 
228     if (copy_size > rsp_upiu_byte_len) {
229         copy_size = rsp_upiu_byte_len;
230     }
231 
232     if (copy_size > sizeof(req->rsp_upiu)) {
233         copy_size = sizeof(req->rsp_upiu);
234     }
235 
236     ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
237     if (ret) {
238         trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
239     }
240     return ret;
241 }
242 
243 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
244 {
245     MemTxResult ret;
246 
247     ret = ufs_dma_write_rsp_upiu(req);
248     if (ret) {
249         return ret;
250     }
251 
252     return ufs_dma_write_utrd(req);
253 }
254 
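/* Assert or deassert INTx depending on whether any enabled IS bit is set. */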
255 static void ufs_irq_check(UfsHc *u)
256 {
257     PCIDevice *pci = PCI_DEVICE(u);
258 
259     if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
260         trace_ufs_irq_raise();
261         pci_irq_assert(pci);
262     } else {
263         trace_ufs_irq_lower();
264         pci_irq_deassert(pci);
265     }
266 }
267 
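/*
 * Called when the driver writes UTRLDBR. Bits that are newly set (not already
 * pending in utrldbr) mark slots to execute; the requests themselves are run
 * from the doorbell bottom half scheduled at the end.
 */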
268 static void ufs_process_db(UfsHc *u, uint32_t val)
269 {
270     DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
271     uint32_t slot;
272     uint32_t nutrs = u->params.nutrs;
273     UfsRequest *req;
274 
275     val &= ~u->reg.utrldbr;
276     if (!val) {
277         return;
278     }
279 
280     doorbell[0] = val;
281     slot = find_first_bit(doorbell, nutrs);
282 
283     while (slot < nutrs) {
284         req = &u->req_list[slot];
285         if (req->state == UFS_REQUEST_ERROR) {
286             trace_ufs_err_utrl_slot_error(req->slot);
287             return;
288         }
289 
290         if (req->state != UFS_REQUEST_IDLE) {
291             trace_ufs_err_utrl_slot_busy(req->slot);
292             return;
293         }
294 
295         trace_ufs_process_db(slot);
296         req->state = UFS_REQUEST_READY;
297         slot = find_next_bit(doorbell, nutrs, slot + 1);
298     }
299 
300     qemu_bh_schedule(u->doorbell_bh);
301 }
302 
303 static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
304 {
305     trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
306                              u->reg.ucmdarg3);
307     /*
308      * Only the essential UIC commands needed to run the Linux and Windows
309      * drivers are implemented.
310      */
311     switch (val) {
312     case UFS_UIC_CMD_DME_LINK_STARTUP:
313         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
314         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
315         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
316         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
317         break;
318     /* TODO: Revisit it when Power Management is implemented */
319     case UFS_UIC_CMD_DME_HIBER_ENTER:
320         u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
321         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
322         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
323         break;
324     case UFS_UIC_CMD_DME_HIBER_EXIT:
325         u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
326         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
327         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
328         break;
329     default:
330         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
331     }
332 
333     u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);
334 
335     ufs_irq_check(u);
336 }
337 
338 static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
339 {
340     switch (offset) {
341     case A_IS:
342         u->reg.is &= ~data;
343         ufs_irq_check(u);
344         break;
345     case A_IE:
346         u->reg.ie = data;
347         ufs_irq_check(u);
348         break;
349     case A_HCE:
350         if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
351             u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
352             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
353         } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
354                    !FIELD_EX32(data, HCE, HCE)) {
355             u->reg.hcs = 0;
356             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
357         }
358         break;
359     case A_UTRLBA:
360         u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
361         break;
362     case A_UTRLBAU:
363         u->reg.utrlbau = data;
364         break;
365     case A_UTRLDBR:
366         ufs_process_db(u, data);
367         u->reg.utrldbr |= data;
368         break;
369     case A_UTRLRSR:
370         u->reg.utrlrsr = data;
371         break;
372     case A_UTRLCNR:
373         u->reg.utrlcnr &= ~data;
374         break;
375     case A_UTMRLBA:
376         u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
377         break;
378     case A_UTMRLBAU:
379         u->reg.utmrlbau = data;
380         break;
381     case A_UICCMD:
382         ufs_process_uiccmd(u, data);
383         break;
384     case A_UCMDARG1:
385         u->reg.ucmdarg1 = data;
386         break;
387     case A_UCMDARG2:
388         u->reg.ucmdarg2 = data;
389         break;
390     case A_UCMDARG3:
391         u->reg.ucmdarg3 = data;
392         break;
393     case A_UTRLCLR:
394     case A_UTMRLDBR:
395     case A_UTMRLCLR:
396     case A_UTMRLRSR:
397         trace_ufs_err_unsupport_register_offset(offset);
398         break;
399     default:
400         trace_ufs_err_invalid_register_offset(offset);
401         break;
402     }
403 }
404 
405 static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
406 {
407     UfsHc *u = (UfsHc *)opaque;
408     uint8_t *ptr = (uint8_t *)&u->reg;
409     uint64_t value;
410 
411     if (addr > sizeof(u->reg) - size) {
412         trace_ufs_err_invalid_register_offset(addr);
413         return 0;
414     }
415 
416     value = *(uint32_t *)(ptr + addr);
417     trace_ufs_mmio_read(addr, value, size);
418     return value;
419 }
420 
421 static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
422                            unsigned size)
423 {
424     UfsHc *u = (UfsHc *)opaque;
425 
426     if (addr > sizeof(u->reg) - size) {
427         trace_ufs_err_invalid_register_offset(addr);
428         return;
429     }
430 
431     trace_ufs_mmio_write(addr, data, size);
432     ufs_write_reg(u, addr, data, size);
433 }
434 
435 static const MemoryRegionOps ufs_mmio_ops = {
436     .read = ufs_mmio_read,
437     .write = ufs_mmio_write,
438     .endianness = DEVICE_LITTLE_ENDIAN,
439     .impl = {
440         .min_access_size = 4,
441         .max_access_size = 4,
442     },
443 };
444 
445 
446 void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
447                            uint8_t response, uint8_t scsi_status,
448                            uint16_t data_segment_length)
449 {
450     memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
451     req->rsp_upiu.header.trans_type = trans_type;
452     req->rsp_upiu.header.flags = flags;
453     req->rsp_upiu.header.response = response;
454     req->rsp_upiu.header.scsi_status = scsi_status;
455     req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
456 }
457 
458 static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
459 {
460     UfsHc *u = req->hc;
461     uint8_t lun = req->req_upiu.header.lun;
462 
463     UfsLu *lu = NULL;
464 
465     trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
466 
467     if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
468         trace_ufs_err_scsi_cmd_invalid_lun(lun);
469         return UFS_REQUEST_FAIL;
470     }
471 
472     switch (lun) {
473     case UFS_UPIU_REPORT_LUNS_WLUN:
474         lu = &u->report_wlu;
475         break;
476     case UFS_UPIU_UFS_DEVICE_WLUN:
477         lu = &u->dev_wlu;
478         break;
479     case UFS_UPIU_BOOT_WLUN:
480         lu = &u->boot_wlu;
481         break;
482     case UFS_UPIU_RPMB_WLUN:
483         lu = &u->rpmb_wlu;
484         break;
485     default:
486         lu = u->lus[lun];
487     }
488 
489     return lu->scsi_op(lu, req);
490 }
491 
492 static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
493 {
494     trace_ufs_exec_nop_cmd(req->slot);
495     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
496     return UFS_REQUEST_SUCCESS;
497 }
498 
499 /*
500  * This defines the permitted operations for each flag, indexed by IDN. Some
501  * flags are declared read-only even though the UFS spec allows writing them,
502  * so that writes to features that are not yet supported return an error.
503  */
504 static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
505     [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
506     /* Write protection is not supported */
507     [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
508     [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
509     [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
510                                     UFS_QUERY_FLAG_CLEAR |
511                                     UFS_QUERY_FLAG_TOGGLE,
512     [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
513         UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
514         UFS_QUERY_FLAG_TOGGLE,
515     /* Purge Operation is not supported */
516     [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
517     /* Refresh Operation is not supported */
518     [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
519     /* Physical Resource Removal is not supported */
520     [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
521     [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
522     [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
523     /* Write Booster is not supported */
524     [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
525     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
526     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
527 };
528 
529 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
530 {
531     if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
532         return UFS_QUERY_RESULT_INVALID_IDN;
533     }
534 
535     if (!(flag_permission[idn] & op)) {
536         if (op == UFS_QUERY_FLAG_READ) {
537             trace_ufs_err_query_flag_not_readable(idn);
538             return UFS_QUERY_RESULT_NOT_READABLE;
539         }
540         trace_ufs_err_query_flag_not_writable(idn);
541         return UFS_QUERY_RESULT_NOT_WRITEABLE;
542     }
543 
544     return UFS_QUERY_RESULT_SUCCESS;
545 }
546 
547 static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
548     /* booting is not supported */
549     [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
550     [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
551     [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
552         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
553     [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
554     [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
555     [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
556     [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
557         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
558     [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
559         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
560     [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
561     [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
562         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
563     [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
564     [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
565         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
566     [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
567         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
568     [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
569     [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
570     [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
571     [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
572     [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
573     [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
574         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
575     [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
576     [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
577     [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
578     [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
579     [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
580     [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
581     [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
582     [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
583     [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
584     /* refresh operation is not supported */
585     [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
586     [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
587     [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
588 };
589 
590 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
591 {
592     if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
593         return UFS_QUERY_RESULT_INVALID_IDN;
594     }
595 
596     if (!(attr_permission[idn] & op)) {
597         if (op == UFS_QUERY_ATTR_READ) {
598             trace_ufs_err_query_attr_not_readable(idn);
599             return UFS_QUERY_RESULT_NOT_READABLE;
600         }
601         trace_ufs_err_query_attr_not_writable(idn);
602         return UFS_QUERY_RESULT_NOT_WRITEABLE;
603     }
604 
605     return UFS_QUERY_RESULT_SUCCESS;
606 }
607 
608 static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
609 {
610     UfsHc *u = req->hc;
611     uint8_t idn = req->req_upiu.qr.idn;
612     uint32_t value;
613     QueryRespCode ret;
614 
615     ret = ufs_flag_check_idn_valid(idn, op);
616     if (ret) {
617         return ret;
618     }
619 
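    /*
     * The flag storage below indexes &u->flags as a byte array using the flag
     * IDN as the offset, which assumes the Flags structure in ufs.h is laid
     * out so that each flag's byte offset equals its IDN.
     */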
620     if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
621         value = 0;
622     } else if (op == UFS_QUERY_FLAG_READ) {
623         value = *(((uint8_t *)&u->flags) + idn);
624     } else if (op == UFS_QUERY_FLAG_SET) {
625         value = 1;
626     } else if (op == UFS_QUERY_FLAG_CLEAR) {
627         value = 0;
628     } else if (op == UFS_QUERY_FLAG_TOGGLE) {
629         value = *(((uint8_t *)&u->flags) + idn);
630         value = !value;
631     } else {
632         trace_ufs_err_query_invalid_opcode(op);
633         return UFS_QUERY_RESULT_INVALID_OPCODE;
634     }
635 
636     *(((uint8_t *)&u->flags) + idn) = value;
637     req->rsp_upiu.qr.value = cpu_to_be32(value);
638     return UFS_QUERY_RESULT_SUCCESS;
639 }
640 
641 static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
642 {
643     switch (idn) {
644     case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
645         return u->attributes.boot_lun_en;
646     case UFS_QUERY_ATTR_IDN_POWER_MODE:
647         return u->attributes.current_power_mode;
648     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
649         return u->attributes.active_icc_level;
650     case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
651         return u->attributes.out_of_order_data_en;
652     case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
653         return u->attributes.background_op_status;
654     case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
655         return u->attributes.purge_status;
656     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
657         return u->attributes.max_data_in_size;
658     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
659         return u->attributes.max_data_out_size;
660     case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
661         return be32_to_cpu(u->attributes.dyn_cap_needed);
662     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
663         return u->attributes.ref_clk_freq;
664     case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
665         return u->attributes.config_descr_lock;
666     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
667         return u->attributes.max_num_of_rtt;
668     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
669         return be16_to_cpu(u->attributes.exception_event_control);
670     case UFS_QUERY_ATTR_IDN_EE_STATUS:
671         return be16_to_cpu(u->attributes.exception_event_status);
672     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
673         return be32_to_cpu(u->attributes.seconds_passed);
674     case UFS_QUERY_ATTR_IDN_CNTX_CONF:
675         return be16_to_cpu(u->attributes.context_conf);
676     case UFS_QUERY_ATTR_IDN_FFU_STATUS:
677         return u->attributes.device_ffu_status;
678     case UFS_QUERY_ATTR_IDN_PSA_STATE:
679         return be32_to_cpu(u->attributes.psa_state);
680     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
681         return be32_to_cpu(u->attributes.psa_data_size);
682     case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
683         return u->attributes.ref_clk_gating_wait_time;
684     case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
685         return u->attributes.device_case_rough_temperaure;
686     case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
687         return u->attributes.device_too_high_temp_boundary;
688     case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
689         return u->attributes.device_too_low_temp_boundary;
690     case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
691         return u->attributes.throttling_status;
692     case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
693         return u->attributes.wb_buffer_flush_status;
694     case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
695         return u->attributes.available_wb_buffer_size;
696     case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
697         return u->attributes.wb_buffer_life_time_est;
698     case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
699         return be32_to_cpu(u->attributes.current_wb_buffer_size);
700     case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
701         return u->attributes.refresh_status;
702     case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
703         return u->attributes.refresh_freq;
704     case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
705         return u->attributes.refresh_unit;
706     }
707     return 0;
708 }
709 
710 static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
711 {
712     switch (idn) {
713     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
714         u->attributes.active_icc_level = value;
715         break;
716     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
717         u->attributes.max_data_in_size = value;
718         break;
719     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
720         u->attributes.max_data_out_size = value;
721         break;
722     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
723         u->attributes.ref_clk_freq = value;
724         break;
725     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
726         u->attributes.max_num_of_rtt = value;
727         break;
728     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
729         u->attributes.exception_event_control = cpu_to_be16(value);
730         break;
731     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
732         u->attributes.seconds_passed = cpu_to_be32(value);
733         break;
734     case UFS_QUERY_ATTR_IDN_PSA_STATE:
735         u->attributes.psa_state = value;
736         break;
737     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
738         u->attributes.psa_data_size = cpu_to_be32(value);
739         break;
740     }
741 }
742 
743 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
744 {
745     UfsHc *u = req->hc;
746     uint8_t idn = req->req_upiu.qr.idn;
747     uint32_t value;
748     QueryRespCode ret;
749 
750     ret = ufs_attr_check_idn_valid(idn, op);
751     if (ret) {
752         return ret;
753     }
754 
755     if (op == UFS_QUERY_ATTR_READ) {
756         value = ufs_read_attr_value(u, idn);
757     } else {
758         value = be32_to_cpu(req->req_upiu.qr.value);
759         ufs_write_attr_value(u, idn, value);
760     }
761 
762     req->rsp_upiu.qr.value = cpu_to_be32(value);
763     return UFS_QUERY_RESULT_SUCCESS;
764 }
765 
766 static const RpmbUnitDescriptor rpmb_unit_desc = {
767     .length = sizeof(RpmbUnitDescriptor),
768     .descriptor_idn = 2,
769     .unit_index = UFS_UPIU_RPMB_WLUN,
770     .lu_enable = 0,
771 };
772 
773 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
774 {
775     UfsHc *u = req->hc;
776     uint8_t lun = req->req_upiu.qr.index;
777 
778     if (lun != UFS_UPIU_RPMB_WLUN &&
779         (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
780         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
781         return UFS_QUERY_RESULT_INVALID_INDEX;
782     }
783 
784     if (lun == UFS_UPIU_RPMB_WLUN) {
785         memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
786     } else {
787         memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
788                sizeof(u->lus[lun]->unit_desc));
789     }
790 
791     return UFS_QUERY_RESULT_SUCCESS;
792 }
793 
794 static inline StringDescriptor manufacturer_str_desc(void)
795 {
796     StringDescriptor desc = {
797         .length = 0x12,
798         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
799     };
800     desc.UC[0] = cpu_to_be16('R');
801     desc.UC[1] = cpu_to_be16('E');
802     desc.UC[2] = cpu_to_be16('D');
803     desc.UC[3] = cpu_to_be16('H');
804     desc.UC[4] = cpu_to_be16('A');
805     desc.UC[5] = cpu_to_be16('T');
806     return desc;
807 }
808 
809 static inline StringDescriptor product_name_str_desc(void)
810 {
811     StringDescriptor desc = {
812         .length = 0x22,
813         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
814     };
815     desc.UC[0] = cpu_to_be16('Q');
816     desc.UC[1] = cpu_to_be16('E');
817     desc.UC[2] = cpu_to_be16('M');
818     desc.UC[3] = cpu_to_be16('U');
819     desc.UC[4] = cpu_to_be16(' ');
820     desc.UC[5] = cpu_to_be16('U');
821     desc.UC[6] = cpu_to_be16('F');
822     desc.UC[7] = cpu_to_be16('S');
823     return desc;
824 }
825 
826 static inline StringDescriptor product_rev_level_str_desc(void)
827 {
828     StringDescriptor desc = {
829         .length = 0x0a,
830         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
831     };
832     desc.UC[0] = cpu_to_be16('0');
833     desc.UC[1] = cpu_to_be16('0');
834     desc.UC[2] = cpu_to_be16('0');
835     desc.UC[3] = cpu_to_be16('1');
836     return desc;
837 }
838 
839 static const StringDescriptor null_str_desc = {
840     .length = 0x02,
841     .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
842 };
843 
844 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
845 {
846     UfsHc *u = req->hc;
847     uint8_t index = req->req_upiu.qr.index;
848     StringDescriptor desc;
849 
850     if (index == u->device_desc.manufacturer_name) {
851         desc = manufacturer_str_desc();
852         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
853     } else if (index == u->device_desc.product_name) {
854         desc = product_name_str_desc();
855         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
856     } else if (index == u->device_desc.serial_number) {
857         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
858     } else if (index == u->device_desc.oem_id) {
859         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
860     } else if (index == u->device_desc.product_revision_level) {
861         desc = product_rev_level_str_desc();
862         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
863     } else {
864         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
865         return UFS_QUERY_RESULT_INVALID_INDEX;
866     }
867     return UFS_QUERY_RESULT_SUCCESS;
868 }
869 
870 static inline InterconnectDescriptor interconnect_desc(void)
871 {
872     InterconnectDescriptor desc = {
873         .length = sizeof(InterconnectDescriptor),
874         .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
875     };
876     desc.bcd_unipro_version = cpu_to_be16(0x180);
877     desc.bcd_mphy_version = cpu_to_be16(0x410);
878     return desc;
879 }
880 
881 static QueryRespCode ufs_read_desc(UfsRequest *req)
882 {
883     UfsHc *u = req->hc;
884     QueryRespCode status;
885     uint8_t idn = req->req_upiu.qr.idn;
886     uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
887     InterconnectDescriptor desc;
888 
889     switch (idn) {
890     case UFS_QUERY_DESC_IDN_DEVICE:
891         memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
892         status = UFS_QUERY_RESULT_SUCCESS;
893         break;
894     case UFS_QUERY_DESC_IDN_UNIT:
895         status = ufs_read_unit_desc(req);
896         break;
897     case UFS_QUERY_DESC_IDN_GEOMETRY:
898         memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
899                sizeof(u->geometry_desc));
900         status = UFS_QUERY_RESULT_SUCCESS;
901         break;
902     case UFS_QUERY_DESC_IDN_INTERCONNECT: {
903         desc = interconnect_desc();
904         memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
905         status = UFS_QUERY_RESULT_SUCCESS;
906         break;
907     }
908     case UFS_QUERY_DESC_IDN_STRING:
909         status = ufs_read_string_desc(req);
910         break;
911     case UFS_QUERY_DESC_IDN_POWER:
912         /* mocking of power descriptor is not supported */
913         memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
914         req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
915         req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
916         status = UFS_QUERY_RESULT_SUCCESS;
917         break;
918     case UFS_QUERY_DESC_IDN_HEALTH:
919         /* mocking of health descriptor is not supported */
920         memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
921         req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
922         req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
923         status = UFS_QUERY_RESULT_SUCCESS;
924         break;
925     default:
926         length = 0;
927         trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
928         status = UFS_QUERY_RESULT_INVALID_IDN;
929     }
930 
931     if (length > req->rsp_upiu.qr.data[0]) {
932         length = req->rsp_upiu.qr.data[0];
933     }
934     req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
935     req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
936     req->rsp_upiu.qr.index = req->req_upiu.qr.index;
937     req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
938     req->rsp_upiu.qr.length = cpu_to_be16(length);
939 
940     return status;
941 }
942 
943 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
944 {
945     QueryRespCode status;
946     switch (req->req_upiu.qr.opcode) {
947     case UFS_UPIU_QUERY_OPCODE_NOP:
948         status = UFS_QUERY_RESULT_SUCCESS;
949         break;
950     case UFS_UPIU_QUERY_OPCODE_READ_DESC:
951         status = ufs_read_desc(req);
952         break;
953     case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
954         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
955         break;
956     case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
957         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
958         break;
959     default:
960         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
961         status = UFS_QUERY_RESULT_INVALID_OPCODE;
962         break;
963     }
964 
965     return status;
966 }
967 
968 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
969 {
970     QueryRespCode status;
971     switch (req->req_upiu.qr.opcode) {
972     case UFS_UPIU_QUERY_OPCODE_NOP:
973         status = UFS_QUERY_RESULT_SUCCESS;
974         break;
975     case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
976         /* write descriptor is not supported */
977         status = UFS_QUERY_RESULT_NOT_WRITEABLE;
978         break;
979     case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
980         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
981         break;
982     case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
983         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
984         break;
985     case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
986         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
987         break;
988     case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
989         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
990         break;
991     default:
992         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
993         status = UFS_QUERY_RESULT_INVALID_OPCODE;
994         break;
995     }
996 
997     return status;
998 }
999 
1000 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1001 {
1002     uint8_t query_func = req->req_upiu.header.query_func;
1003     uint16_t data_segment_length;
1004     QueryRespCode status;
1005 
1006     trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1007     if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1008         status = ufs_exec_query_read(req);
1009     } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1010         status = ufs_exec_query_write(req);
1011     } else {
1012         status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1013     }
1014 
1015     data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1016     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1017                           data_segment_length);
1018 
1019     if (status != UFS_QUERY_RESULT_SUCCESS) {
1020         return UFS_REQUEST_FAIL;
1021     }
1022     return UFS_REQUEST_SUCCESS;
1023 }
1024 
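/*
 * Request lifecycle: slots start IDLE, a doorbell write makes them READY,
 * ufs_process_req() runs them (RUNNING), ufs_complete_req() marks them
 * COMPLETE, and ufs_sendback_req() writes results back and returns the slot
 * to IDLE (or ERROR if the response DMA fails).
 */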
1025 static void ufs_exec_req(UfsRequest *req)
1026 {
1027     UfsReqResult req_result;
1028 
1029     if (ufs_dma_read_upiu(req)) {
1030         return;
1031     }
1032 
1033     switch (req->req_upiu.header.trans_type) {
1034     case UFS_UPIU_TRANSACTION_NOP_OUT:
1035         req_result = ufs_exec_nop_cmd(req);
1036         break;
1037     case UFS_UPIU_TRANSACTION_COMMAND:
1038         req_result = ufs_exec_scsi_cmd(req);
1039         break;
1040     case UFS_UPIU_TRANSACTION_QUERY_REQ:
1041         req_result = ufs_exec_query_cmd(req);
1042         break;
1043     default:
1044         trace_ufs_err_invalid_trans_code(req->slot,
1045                                          req->req_upiu.header.trans_type);
1046         req_result = UFS_REQUEST_FAIL;
1047     }
1048 
1049     /*
1050      * For SCSI commands, ufs_complete_req() is invoked from the
1051      * ufs_scsi_command_complete() callback, so it is not called here in order
1052      * to avoid completing the request twice.
1053      */
1054     if (req_result != UFS_REQUEST_NO_COMPLETE) {
1055         ufs_complete_req(req, req_result);
1056     }
1057 }
1058 
1059 static void ufs_process_req(void *opaque)
1060 {
1061     UfsHc *u = opaque;
1062     UfsRequest *req;
1063     int slot;
1064 
1065     for (slot = 0; slot < u->params.nutrs; slot++) {
1066         req = &u->req_list[slot];
1067 
1068         if (req->state != UFS_REQUEST_READY) {
1069             continue;
1070         }
1071         trace_ufs_process_req(slot);
1072         req->state = UFS_REQUEST_RUNNING;
1073 
1074         ufs_exec_req(req);
1075     }
1076 }
1077 
1078 void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
1079 {
1080     UfsHc *u = req->hc;
1081     assert(req->state == UFS_REQUEST_RUNNING);
1082 
1083     if (req_result == UFS_REQUEST_SUCCESS) {
1084         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
1085     } else {
1086         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
1087     }
1088 
1089     trace_ufs_complete_req(req->slot);
1090     req->state = UFS_REQUEST_COMPLETE;
1091     qemu_bh_schedule(u->complete_bh);
1092 }
1093 
1094 static void ufs_clear_req(UfsRequest *req)
1095 {
1096     if (req->sg != NULL) {
1097         qemu_sglist_destroy(req->sg);
1098         g_free(req->sg);
1099         req->sg = NULL;
1100         req->data_len = 0;
1101     }
1102 
1103     memset(&req->utrd, 0, sizeof(req->utrd));
1104     memset(&req->req_upiu, 0, sizeof(req->req_upiu));
1105     memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
1106 }
1107 
1108 static void ufs_sendback_req(void *opaque)
1109 {
1110     UfsHc *u = opaque;
1111     UfsRequest *req;
1112     int slot;
1113 
1114     for (slot = 0; slot < u->params.nutrs; slot++) {
1115         req = &u->req_list[slot];
1116 
1117         if (req->state != UFS_REQUEST_COMPLETE) {
1118             continue;
1119         }
1120 
1121         if (ufs_dma_write_upiu(req)) {
1122             req->state = UFS_REQUEST_ERROR;
1123             continue;
1124         }
1125 
1126         /*
1127          * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
1128          * supported
1129          */
1130         if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
1131             le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
1132             u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
1133         }
1134 
1135         u->reg.utrldbr &= ~(1 << slot);
1136         u->reg.utrlcnr |= (1 << slot);
1137 
1138         trace_ufs_sendback_req(req->slot);
1139 
1140         ufs_clear_req(req);
1141         req->state = UFS_REQUEST_IDLE;
1142     }
1143 
1144     ufs_irq_check(u);
1145 }
1146 
1147 static bool ufs_check_constraints(UfsHc *u, Error **errp)
1148 {
1149     if (u->params.nutrs > UFS_MAX_NUTRS) {
1150         error_setg(errp, "nutrs must be less than or equal to %d",
1151                    UFS_MAX_NUTRS);
1152         return false;
1153     }
1154 
1155     if (u->params.nutmrs > UFS_MAX_NUTMRS) {
1156         error_setg(errp, "nutmrs must be less than or equal to %d",
1157                    UFS_MAX_NUTMRS);
1158         return false;
1159     }
1160 
1161     return true;
1162 }
1163 
1164 static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
1165 {
1166     uint8_t *pci_conf = pci_dev->config;
1167 
1168     pci_conf[PCI_INTERRUPT_PIN] = 1;
1169     pci_config_set_prog_interface(pci_conf, 0x1);
1170 
1171     memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
1172                           u->reg_size);
1173     pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
1174     u->irq = pci_allocate_irq(pci_dev);
1175 }
1176 
1177 static void ufs_init_state(UfsHc *u)
1178 {
1179     u->req_list = g_new0(UfsRequest, u->params.nutrs);
1180 
1181     for (int i = 0; i < u->params.nutrs; i++) {
1182         u->req_list[i].hc = u;
1183         u->req_list[i].slot = i;
1184         u->req_list[i].sg = NULL;
1185         u->req_list[i].state = UFS_REQUEST_IDLE;
1186     }
1187 
1188     u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
1189                                          &DEVICE(u)->mem_reentrancy_guard);
1190     u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
1191                                          &DEVICE(u)->mem_reentrancy_guard);
1192 }
1193 
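/*
 * Program the capability and descriptor defaults. CAP.NUTRS and CAP.NUTMRS
 * hold zero-based counts in UFSHCI, hence the "- 1" below.
 */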
1194 static void ufs_init_hc(UfsHc *u)
1195 {
1196     uint32_t cap = 0;
1197 
1198     u->reg_size = pow2ceil(sizeof(UfsReg));
1199 
1200     memset(&u->reg, 0, sizeof(u->reg));
1201     cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
1202     cap = FIELD_DP32(cap, CAP, RTT, 2);
1203     cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
1204     cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
1205     cap = FIELD_DP32(cap, CAP, 64AS, 1);
1206     cap = FIELD_DP32(cap, CAP, OODDS, 0);
1207     cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
1208     cap = FIELD_DP32(cap, CAP, CS, 0);
1209     u->reg.cap = cap;
1210     u->reg.ver = UFS_SPEC_VER;
1211 
1212     memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
1213     u->device_desc.length = sizeof(DeviceDescriptor);
1214     u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
1215     u->device_desc.device_sub_class = 0x01;
1216     u->device_desc.number_lu = 0x00;
1217     u->device_desc.number_wlu = 0x04;
1218     /* TODO: Revisit it when Power Management is implemented */
1219     u->device_desc.init_power_mode = 0x01; /* Active Mode */
1220     u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
1221     u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
1222     u->device_desc.manufacturer_name = 0x00;
1223     u->device_desc.product_name = 0x01;
1224     u->device_desc.serial_number = 0x02;
1225     u->device_desc.oem_id = 0x03;
1226     u->device_desc.ud_0_base_offset = 0x16;
1227     u->device_desc.ud_config_p_length = 0x1A;
1228     u->device_desc.device_rtt_cap = 0x02;
1229     u->device_desc.queue_depth = u->params.nutrs;
1230     u->device_desc.product_revision_level = 0x04;
1231 
1232     memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
1233     u->geometry_desc.length = sizeof(GeometryDescriptor);
1234     u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
1235     u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
1236     u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
1237     u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
1238     u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
1239     u->geometry_desc.max_in_buffer_size = 0x8;
1240     u->geometry_desc.max_out_buffer_size = 0x8;
1241     u->geometry_desc.rpmb_read_write_size = 0x40;
1242     u->geometry_desc.data_ordering =
1243         0x0; /* out-of-order data transfer is not supported */
1244     u->geometry_desc.max_context_id_number = 0x5;
1245     u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);
1246 
1247     memset(&u->attributes, 0, sizeof(u->attributes));
1248     u->attributes.max_data_in_size = 0x08;
1249     u->attributes.max_data_out_size = 0x08;
1250     u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
1251     /* configure descriptor is not supported */
1252     u->attributes.config_descr_lock = 0x01;
1253     u->attributes.max_num_of_rtt = 0x02;
1254 
1255     memset(&u->flags, 0, sizeof(u->flags));
1256     u->flags.permanently_disable_fw_update = 1;
1257 }
1258 
1259 static void ufs_realize(PCIDevice *pci_dev, Error **errp)
1260 {
1261     UfsHc *u = UFS(pci_dev);
1262 
1263     if (!ufs_check_constraints(u, errp)) {
1264         return;
1265     }
1266 
1267     qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
1268               u->parent_obj.qdev.id);
1269 
1270     ufs_init_state(u);
1271     ufs_init_hc(u);
1272     ufs_init_pci(u, pci_dev);
1273 
1274     ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
1275     ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
1276     ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
1277     ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
1278 }
1279 
1280 static void ufs_exit(PCIDevice *pci_dev)
1281 {
1282     UfsHc *u = UFS(pci_dev);
1283 
1284     qemu_bh_delete(u->doorbell_bh);
1285     qemu_bh_delete(u->complete_bh);
1286 
1287     for (int i = 0; i < u->params.nutrs; i++) {
1288         ufs_clear_req(&u->req_list[i]);
1289     }
1290     g_free(u->req_list);
1291 }
1292 
1293 static Property ufs_props[] = {
1294     DEFINE_PROP_STRING("serial", UfsHc, params.serial),
1295     DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
1296     DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
1297     DEFINE_PROP_END_OF_LIST(),
1298 };
1299 
1300 static const VMStateDescription ufs_vmstate = {
1301     .name = "ufs",
1302     .unmigratable = 1,
1303 };
1304 
1305 static void ufs_class_init(ObjectClass *oc, void *data)
1306 {
1307     DeviceClass *dc = DEVICE_CLASS(oc);
1308     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
1309 
1310     pc->realize = ufs_realize;
1311     pc->exit = ufs_exit;
1312     pc->vendor_id = PCI_VENDOR_ID_REDHAT;
1313     pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
1314     pc->class_id = PCI_CLASS_STORAGE_UFS;
1315 
1316     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1317     dc->desc = "Universal Flash Storage";
1318     device_class_set_props(dc, ufs_props);
1319     dc->vmsd = &ufs_vmstate;
1320 }
1321 
1322 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1323                                   Error **errp)
1324 {
1325     if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
1326         error_setg(errp, "%s cannot be connected to ufs-bus",
1327                    object_get_typename(OBJECT(qdev)));
1328         return false;
1329     }
1330 
1331     return true;
1332 }
1333 
1334 static char *ufs_bus_get_dev_path(DeviceState *dev)
1335 {
1336     BusState *bus = qdev_get_parent_bus(dev);
1337 
1338     return qdev_get_dev_path(bus->parent);
1339 }
1340 
1341 static void ufs_bus_class_init(ObjectClass *class, void *data)
1342 {
1343     BusClass *bc = BUS_CLASS(class);
1344     bc->get_dev_path = ufs_bus_get_dev_path;
1345     bc->check_address = ufs_bus_check_address;
1346 }
1347 
1348 static const TypeInfo ufs_info = {
1349     .name = TYPE_UFS,
1350     .parent = TYPE_PCI_DEVICE,
1351     .class_init = ufs_class_init,
1352     .instance_size = sizeof(UfsHc),
1353     .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
1354 };
1355 
1356 static const TypeInfo ufs_bus_info = {
1357     .name = TYPE_UFS_BUS,
1358     .parent = TYPE_BUS,
1359     .class_init = ufs_bus_class_init,
1360     .class_size = sizeof(UfsBusClass),
1361     .instance_size = sizeof(UfsBus),
1362 };
1363 
1364 static void ufs_register_types(void)
1365 {
1366     type_register_static(&ufs_info);
1367     type_register_static(&ufs_bus_info);
1368 }
1369 
1370 type_init(ufs_register_types)
1371