1 /*
2 * QEMU Universal Flash Storage (UFS) Controller
3 *
4 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5 *
6 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7 *
8 * SPDX-License-Identifier: GPL-2.0-or-later
9 */
10
11 /**
12 * Reference Specs: https://www.jedec.org/, 4.0
13 *
14 * Usage
15 * -----
16 *
17 * Add options:
18 * -drive file=<file>,if=none,id=<drive_id>
19 * -device ufs,serial=<serial>,id=<bus_name>, \
20 * nutrs=<N[optional]>,nutmrs=<N[optional]>
21 * -device ufs-lu,drive=<drive_id>,bus=<bus_name>
22 */
23
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "trace.h"
29 #include "ufs.h"
30
31 /* The QEMU-UFS device follows spec version 4.0 */
32 #define UFS_SPEC_VER 0x0400
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 #define UFS_MCQ_QCFGPTR 2
36
37 static void ufs_exec_req(UfsRequest *req);
38 static void ufs_clear_req(UfsRequest *req);
39
ufs_mcq_reg_addr(UfsHc * u,int qid)40 static inline uint64_t ufs_mcq_reg_addr(UfsHc *u, int qid)
41 {
42 /* Submission Queue MCQ Registers offset (400h) */
43 return (UFS_MCQ_QCFGPTR * 0x200) + qid * 0x40;
44 }
45
ufs_mcq_op_reg_addr(UfsHc * u,int qid)46 static inline uint64_t ufs_mcq_op_reg_addr(UfsHc *u, int qid)
47 {
48 /* MCQ Operation & Runtime Registers offset (1000h) */
49 return UFS_MCQ_OPR_START + qid * 48;
50 }
51
ufs_reg_size(UfsHc * u)52 static inline uint64_t ufs_reg_size(UfsHc *u)
53 {
54 /* Total UFS HCI Register size in bytes */
55 return ufs_mcq_op_reg_addr(u, 0) + sizeof(u->mcq_op_reg);
56 }
57
ufs_is_mcq_reg(UfsHc * u,uint64_t addr)58 static inline bool ufs_is_mcq_reg(UfsHc *u, uint64_t addr)
59 {
60 uint64_t mcq_reg_addr = ufs_mcq_reg_addr(u, 0);
61 return addr >= mcq_reg_addr && addr < mcq_reg_addr + sizeof(u->mcq_reg);
62 }
63
ufs_is_mcq_op_reg(UfsHc * u,uint64_t addr)64 static inline bool ufs_is_mcq_op_reg(UfsHc *u, uint64_t addr)
65 {
66 uint64_t mcq_op_reg_addr = ufs_mcq_op_reg_addr(u, 0);
67 return (addr >= mcq_op_reg_addr &&
68 addr < mcq_op_reg_addr + sizeof(u->mcq_op_reg));
69 }
70
ufs_addr_read(UfsHc * u,hwaddr addr,void * buf,int size)71 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
72 {
73 hwaddr hi = addr + size - 1;
74
75 if (hi < addr) {
76 return MEMTX_DECODE_ERROR;
77 }
78
79 if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
80 return MEMTX_DECODE_ERROR;
81 }
82
83 return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
84 }
85
ufs_addr_write(UfsHc * u,hwaddr addr,const void * buf,int size)86 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
87 int size)
88 {
89 hwaddr hi = addr + size - 1;
90 if (hi < addr) {
91 return MEMTX_DECODE_ERROR;
92 }
93
94 if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
95 return MEMTX_DECODE_ERROR;
96 }
97
98 return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
99 }
100
ufs_get_utrd_addr(UfsHc * u,uint32_t slot)101 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
102 {
103 hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
104 hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
105
106 return utrd_addr;
107 }
108
ufs_get_req_upiu_base_addr(const UtpTransferReqDesc * utrd)109 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
110 {
111 uint32_t cmd_desc_base_addr_lo =
112 le32_to_cpu(utrd->command_desc_base_addr_lo);
113 uint32_t cmd_desc_base_addr_hi =
114 le32_to_cpu(utrd->command_desc_base_addr_hi);
115
116 return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
117 }
118
ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc * utrd)119 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
120 {
121 hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
122 uint32_t rsp_upiu_byte_off =
123 le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
124 return req_upiu_base_addr + rsp_upiu_byte_off;
125 }
126
ufs_dma_read_utrd(UfsRequest * req)127 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
128 {
129 UfsHc *u = req->hc;
130 hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
131 MemTxResult ret;
132
133 ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
134 if (ret) {
135 trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
136 }
137 return ret;
138 }
139
/*
 * Fetch the request UPIU for @req from guest memory into req->req_upiu.
 * Returns MEMTX_OK on success, or a MemTxResult error code.
 */
static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
    UtpUpiuReq *req_upiu = &req->req_upiu;
    uint32_t copy_size;
    uint16_t data_segment_length;
    MemTxResult ret;

    /*
     * To know the size of the req_upiu, we need to read the
     * data_segment_length in the header first.
     */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
                        sizeof(UtpUpiuHeader));
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
        return ret;
    }
    /* UPIU header fields are big-endian on the wire */
    data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);

    copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
                data_segment_length;

    /* clamp so a bogus guest-supplied length cannot overflow req->req_upiu */
    if (copy_size > sizeof(req->req_upiu)) {
        copy_size = sizeof(req->req_upiu);
    }

    /* re-read the header together with the rest of the UPIU in one go */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
    }
    return ret;
}
174
ufs_dma_read_prdt(UfsRequest * req)175 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
176 {
177 UfsHc *u = req->hc;
178 uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
179 uint16_t prdt_byte_off =
180 le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
181 uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
182 g_autofree UfshcdSgEntry *prd_entries = NULL;
183 hwaddr req_upiu_base_addr, prdt_base_addr;
184 int err;
185
186 assert(!req->sg);
187
188 if (prdt_size == 0) {
189 return MEMTX_OK;
190 }
191 prd_entries = g_new(UfshcdSgEntry, prdt_size);
192
193 req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
194 prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
195
196 err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
197 if (err) {
198 trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
199 return err;
200 }
201
202 req->sg = g_malloc0(sizeof(QEMUSGList));
203 pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
204 req->data_len = 0;
205
206 for (uint16_t i = 0; i < prdt_len; ++i) {
207 hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
208 uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
209 qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
210 req->data_len += data_byte_count;
211 }
212 return MEMTX_OK;
213 }
214
ufs_dma_read_upiu(UfsRequest * req)215 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
216 {
217 MemTxResult ret;
218
219 /*
220 * In case of MCQ, UTRD has already been read from a SQ, so skip it.
221 */
222 if (!ufs_mcq_req(req)) {
223 ret = ufs_dma_read_utrd(req);
224 if (ret) {
225 return ret;
226 }
227 }
228
229 ret = ufs_dma_read_req_upiu(req);
230 if (ret) {
231 return ret;
232 }
233
234 ret = ufs_dma_read_prdt(req);
235 if (ret) {
236 return ret;
237 }
238
239 return 0;
240 }
241
ufs_dma_write_utrd(UfsRequest * req)242 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
243 {
244 UfsHc *u = req->hc;
245 hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
246 MemTxResult ret;
247
248 ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
249 if (ret) {
250 trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
251 }
252 return ret;
253 }
254
ufs_dma_write_rsp_upiu(UfsRequest * req)255 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
256 {
257 UfsHc *u = req->hc;
258 hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
259 uint32_t rsp_upiu_byte_len =
260 le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
261 uint16_t data_segment_length =
262 be16_to_cpu(req->rsp_upiu.header.data_segment_length);
263 uint32_t copy_size = sizeof(UtpUpiuHeader) +
264 UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
265 data_segment_length;
266 MemTxResult ret;
267
268 if (copy_size > rsp_upiu_byte_len) {
269 copy_size = rsp_upiu_byte_len;
270 }
271
272 if (copy_size > sizeof(req->rsp_upiu)) {
273 copy_size = sizeof(req->rsp_upiu);
274 }
275
276 ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
277 if (ret) {
278 trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
279 }
280 return ret;
281 }
282
ufs_dma_write_upiu(UfsRequest * req)283 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
284 {
285 MemTxResult ret;
286
287 ret = ufs_dma_write_rsp_upiu(req);
288 if (ret) {
289 return ret;
290 }
291
292 return ufs_dma_write_utrd(req);
293 }
294
/* Recompute the PCI interrupt level from the IS and IE registers. */
static void ufs_irq_check(UfsHc *u)
{
    PCIDevice *pci = PCI_DEVICE(u);
    bool level = (u->reg.is & UFS_INTR_MASK & u->reg.ie) != 0;

    if (level) {
        trace_ufs_irq_raise();
        pci_irq_assert(pci);
    } else {
        trace_ufs_irq_lower();
        pci_irq_deassert(pci);
    }
}
307
/*
 * Handle a write to the UTRLDBR doorbell register: mark every newly rung
 * slot READY and schedule the doorbell bottom half to execute them.
 */
static void ufs_process_db(UfsHc *u, uint32_t val)
{
    DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
    uint32_t slot;
    uint32_t nutrs = u->params.nutrs;
    UfsRequest *req;

    /* bits already set in UTRLDBR are in flight; only new ones matter */
    val &= ~u->reg.utrldbr;
    if (!val) {
        return;
    }

    /* UFS_MAX_NUTRS is 32, so the bitmap fits in its first word */
    doorbell[0] = val;
    slot = find_first_bit(doorbell, nutrs);

    while (slot < nutrs) {
        req = &u->req_list[slot];
        if (req->state == UFS_REQUEST_ERROR) {
            trace_ufs_err_utrl_slot_error(req->slot);
            return;
        }

        if (req->state != UFS_REQUEST_IDLE) {
            trace_ufs_err_utrl_slot_busy(req->slot);
            return;
        }

        trace_ufs_process_db(slot);
        req->state = UFS_REQUEST_READY;
        slot = find_next_bit(doorbell, nutrs, slot + 1);
    }

    /* actual execution happens in the doorbell bottom half */
    qemu_bh_schedule(u->doorbell_bh);
}
342
/*
 * Execute the UIC command @val written to the UICCMD register, set the
 * result in UCMDARG2, and raise the UCCS (command completion) interrupt.
 */
static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
{
    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
                             u->reg.ucmdarg3);
    /*
     * Only the essential uic commands for running drivers on Linux and Windows
     * are implemented.
     */
    switch (val) {
    case UFS_UIC_CMD_DME_LINK_STARTUP:
        /* report device present and both request lists ready */
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    /* TODO: Revisit it when Power Management is implemented */
    case UFS_UIC_CMD_DME_HIBER_ENTER:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    case UFS_UIC_CMD_DME_HIBER_EXIT:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    default:
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
    }

    /* UIC command completion status */
    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);

    ufs_irq_check(u);
}
377
/* Reset @req to a clean IDLE state bound to submission queue @sq. */
static void ufs_mcq_init_req(UfsHc *u, UfsRequest *req, UfsSq *sq)
{
    /* compound literal zeroes every field not listed explicitly */
    *req = (UfsRequest){
        .hc = u,
        .state = UFS_REQUEST_IDLE,
        .slot = UFS_INVALID_SLOT,
        .sq = sq,
    };
}
387
/*
 * Bottom half for an MCQ submission queue: drain SQ entries from guest
 * memory and execute them while both unread SQ entries and free request
 * slots are available.
 */
static void ufs_mcq_process_sq(void *opaque)
{
    UfsSq *sq = opaque;
    UfsHc *u = sq->u;
    UfsSqEntry sqe;
    UfsRequest *req;
    hwaddr addr;
    uint16_t head = ufs_mcq_sq_head(u, sq->sqid);
    int err;

    while (!(ufs_mcq_sq_empty(u, sq->sqid) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->addr + head;
        err = ufs_addr_read(sq->u, addr, (void *)&sqe, sizeof(sqe));
        if (err) {
            trace_ufs_err_dma_read_sq(sq->sqid, addr);
            return;
        }

        /* advance the head pointer with ring wrap-around */
        head = (head + sizeof(sqe)) % (sq->size * sizeof(sqe));
        ufs_mcq_update_sq_head(u, sq->sqid, head);

        /* take a free request slot and seed it with the fetched SQ entry */
        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);

        ufs_mcq_init_req(sq->u, req, sq);
        memcpy(&req->utrd, &sqe, sizeof(req->utrd));

        req->state = UFS_REQUEST_RUNNING;
        ufs_exec_req(req);
    }
}
419
/*
 * Bottom half for an MCQ completion queue: post a CQ entry to guest
 * memory for every finished request queued on this CQ, recycle the
 * request slots, and raise the CQ interrupt if entries were posted.
 */
static void ufs_mcq_process_cq(void *opaque)
{
    UfsCq *cq = opaque;
    UfsHc *u = cq->u;
    UfsRequest *req, *next;
    MemTxResult ret;
    uint32_t tail = ufs_mcq_cq_tail(u, cq->cqid);

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
    {
        /* write the response UPIU before publishing the completion */
        ufs_dma_write_rsp_upiu(req);

        /* build the CQ entry from the request's UTRD fields */
        req->cqe.utp_addr =
            ((uint64_t)req->utrd.command_desc_base_addr_hi << 32ULL) |
            req->utrd.command_desc_base_addr_lo;
        req->cqe.utp_addr |= req->sq->sqid;
        req->cqe.resp_len = req->utrd.response_upiu_length;
        req->cqe.resp_off = req->utrd.response_upiu_offset;
        req->cqe.prdt_len = req->utrd.prd_table_length;
        req->cqe.prdt_off = req->utrd.prd_table_offset;
        /* overall command status lives in the low nibble of dword_2 */
        req->cqe.status = req->utrd.header.dword_2 & 0xf;
        req->cqe.error = 0;

        ret = ufs_addr_write(u, cq->addr + tail, &req->cqe, sizeof(req->cqe));
        if (ret) {
            trace_ufs_err_dma_write_cq(cq->cqid, cq->addr + tail);
        }
        QTAILQ_REMOVE(&cq->req_list, req, entry);

        /* advance the tail pointer with ring wrap-around */
        tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
        ufs_mcq_update_cq_tail(u, cq->cqid, tail);

        /* return the request slot to its submission queue's free list */
        ufs_clear_req(req);
        QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
    }

    if (!ufs_mcq_cq_empty(u, cq->cqid)) {
        u->mcq_op_reg[cq->cqid].cq_int.is =
            FIELD_DP32(u->mcq_op_reg[cq->cqid].cq_int.is, CQIS, TEPS, 1);

        u->reg.is = FIELD_DP32(u->reg.is, IS, CQES, 1);
        ufs_irq_check(u);
    }
}
464
/*
 * Create and register submission queue @qid from the SQATTR write @attr.
 * The completion queue referenced by SQATTR.CQID must already exist.
 * Returns true on success.
 */
static bool ufs_mcq_create_sq(UfsHc *u, uint8_t qid, uint32_t attr)
{
    UfsMcqReg *reg = &u->mcq_reg[qid];
    UfsSq *sq;
    uint8_t cqid = FIELD_EX32(attr, SQATTR, CQID);

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_create_sq_invalid_sqid(qid);
        return false;
    }

    if (u->sq[qid]) {
        trace_ufs_err_mcq_create_sq_already_exists(qid);
        return false;
    }

    if (!u->cq[cqid]) {
        trace_ufs_err_mcq_create_sq_invalid_cqid(qid);
        return false;
    }

    sq = g_malloc0(sizeof(*sq));
    sq->u = u;
    sq->sqid = qid;
    sq->cq = u->cq[cqid];
    /* base address is split across the SQUBA (hi) / SQLBA (lo) registers */
    sq->addr = ((uint64_t)reg->squba << 32) | reg->sqlba;
    /* SQATTR.SIZE is in 4-byte units; convert to a number of SQ entries */
    sq->size = ((FIELD_EX32(attr, SQATTR, SIZE) + 1) << 2) / sizeof(UfsSqEntry);

    sq->bh = qemu_bh_new_guarded(ufs_mcq_process_sq, sq,
                                 &DEVICE(u)->mem_reentrancy_guard);
    sq->req = g_new0(UfsRequest, sq->size);
    QTAILQ_INIT(&sq->req_list);
    /* pre-populate the free request list, one slot per SQ entry */
    for (int i = 0; i < sq->size; i++) {
        ufs_mcq_init_req(u, &sq->req[i], sq);
        QTAILQ_INSERT_TAIL(&sq->req_list, &sq->req[i], entry);
    }

    u->sq[qid] = sq;

    trace_ufs_mcq_create_sq(sq->sqid, sq->cq->cqid, sq->addr, sq->size);
    return true;
}
507
ufs_mcq_delete_sq(UfsHc * u,uint8_t qid)508 static bool ufs_mcq_delete_sq(UfsHc *u, uint8_t qid)
509 {
510 UfsSq *sq;
511
512 if (qid >= u->params.mcq_maxq) {
513 trace_ufs_err_mcq_delete_sq_invalid_sqid(qid);
514 return false;
515 }
516
517 if (!u->sq[qid]) {
518 trace_ufs_err_mcq_delete_sq_not_exists(qid);
519 return false;
520 }
521
522 sq = u->sq[qid];
523
524 qemu_bh_delete(sq->bh);
525 g_free(sq->req);
526 g_free(sq);
527 u->sq[qid] = NULL;
528 return true;
529 }
530
/*
 * Create and register completion queue @qid from the CQATTR write @attr.
 * Returns true on success.
 */
static bool ufs_mcq_create_cq(UfsHc *u, uint8_t qid, uint32_t attr)
{
    UfsMcqReg *reg = &u->mcq_reg[qid];
    UfsCq *cq;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_create_cq_invalid_cqid(qid);
        return false;
    }

    if (u->cq[qid]) {
        trace_ufs_err_mcq_create_cq_already_exists(qid);
        return false;
    }

    cq = g_malloc0(sizeof(*cq));
    cq->u = u;
    cq->cqid = qid;
    /* base address is split across the CQUBA (hi) / CQLBA (lo) registers */
    cq->addr = ((uint64_t)reg->cquba << 32) | reg->cqlba;
    /* CQATTR.SIZE is in 4-byte units; convert to a number of CQ entries */
    cq->size = ((FIELD_EX32(attr, CQATTR, SIZE) + 1) << 2) / sizeof(UfsCqEntry);

    cq->bh = qemu_bh_new_guarded(ufs_mcq_process_cq, cq,
                                 &DEVICE(u)->mem_reentrancy_guard);
    QTAILQ_INIT(&cq->req_list);

    u->cq[qid] = cq;

    trace_ufs_mcq_create_cq(cq->cqid, cq->addr, cq->size);
    return true;
}
561
ufs_mcq_delete_cq(UfsHc * u,uint8_t qid)562 static bool ufs_mcq_delete_cq(UfsHc *u, uint8_t qid)
563 {
564 UfsCq *cq;
565
566 if (qid >= u->params.mcq_maxq) {
567 trace_ufs_err_mcq_delete_cq_invalid_cqid(qid);
568 return false;
569 }
570
571 if (!u->cq[qid]) {
572 trace_ufs_err_mcq_delete_cq_not_exists(qid);
573 return false;
574 }
575
576 for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
577 if (u->sq[i] && u->sq[i]->cq->cqid == qid) {
578 trace_ufs_err_mcq_delete_cq_sq_not_deleted(i, qid);
579 return false;
580 }
581 }
582
583 cq = u->cq[qid];
584
585 qemu_bh_delete(cq->bh);
586 g_free(cq);
587 u->cq[qid] = NULL;
588 return true;
589 }
590
/*
 * Handle a 32-bit write to the main UFSHCI register file at @offset.
 */
static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
{
    switch (offset) {
    case A_IS:
        /* interrupt status is write-1-to-clear */
        u->reg.is &= ~data;
        ufs_irq_check(u);
        break;
    case A_IE:
        u->reg.ie = data;
        ufs_irq_check(u);
        break;
    case A_HCE:
        /* enabling the controller reports UIC command ready */
        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
                   !FIELD_EX32(data, HCE, HCE)) {
            /* disabling clears the whole host controller status */
            u->reg.hcs = 0;
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
        }
        break;
    case A_UTRLBA:
        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
        break;
    case A_UTRLBAU:
        u->reg.utrlbau = data;
        break;
    case A_UTRLDBR:
        ufs_process_db(u, data);
        u->reg.utrldbr |= data;
        break;
    case A_UTRLRSR:
        u->reg.utrlrsr = data;
        break;
    case A_UTRLCNR:
        /* completion notification is write-1-to-clear */
        u->reg.utrlcnr &= ~data;
        break;
    case A_UTMRLBA:
        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
        break;
    case A_UTMRLBAU:
        u->reg.utmrlbau = data;
        break;
    case A_UICCMD:
        ufs_process_uiccmd(u, data);
        break;
    case A_UCMDARG1:
        u->reg.ucmdarg1 = data;
        break;
    case A_UCMDARG2:
        u->reg.ucmdarg2 = data;
        break;
    case A_UCMDARG3:
        u->reg.ucmdarg3 = data;
        break;
    case A_CONFIG:
        u->reg.config = data;
        break;
    case A_MCQCONFIG:
        u->reg.mcqconfig = data;
        break;
    case A_UTRLCLR:
    case A_UTMRLDBR:
    case A_UTMRLCLR:
    case A_UTMRLRSR:
        /* task management list registers are not implemented */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
663
/*
 * Handle a write to the per-queue MCQ configuration registers.
 * @offset is relative to the start of the MCQ register block; the queue
 * id follows from the fixed per-queue register stride.
 */
static void ufs_write_mcq_reg(UfsHc *u, hwaddr offset, uint32_t data,
                              unsigned size)
{
    int qid = offset / sizeof(UfsMcqReg);
    UfsMcqReg *reg = &u->mcq_reg[qid];

    switch (offset % sizeof(UfsMcqReg)) {
    case A_SQATTR:
        /* SQEN rising edge creates the SQ, falling edge deletes it */
        if (!FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
            FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_create_sq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
                   !FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_delete_sq(u, qid)) {
                break;
            }
        }
        reg->sqattr = data;
        break;
    case A_SQLBA:
        reg->sqlba = data;
        break;
    case A_SQUBA:
        reg->squba = data;
        break;
    case A_SQCFG:
        reg->sqcfg = data;
        break;
    case A_CQATTR:
        /* CQEN rising edge creates the CQ, falling edge deletes it */
        if (!FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
            FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_create_cq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
                   !FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_delete_cq(u, qid)) {
                break;
            }
        }
        reg->cqattr = data;
        break;
    case A_CQLBA:
        reg->cqlba = data;
        break;
    case A_CQUBA:
        reg->cquba = data;
        break;
    case A_CQCFG:
        reg->cqcfg = data;
        break;
    case A_SQDAO:
    case A_SQISAO:
    case A_CQDAO:
    case A_CQISAO:
        /* the doorbell/interrupt address-offset registers are read-only */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
728
/*
 * Handle a write of @db to the SQ tail doorbell of queue @qid:
 * validate it, publish the new tail, and kick the SQ bottom half.
 */
static void ufs_mcq_process_db(UfsHc *u, uint8_t qid, uint32_t db)
{
    UfsSq *sq;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
        return;
    }

    sq = u->sq[qid];
    /*
     * Guard against a doorbell write before the SQ has been created;
     * the original code dereferenced sq->size unconditionally, which a
     * guest could turn into a NULL-pointer dereference.
     */
    if (!sq) {
        trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
        return;
    }

    /* the tail is a byte offset and must lie inside the ring */
    if (sq->size * sizeof(UfsSqEntry) <= db) {
        trace_ufs_err_mcq_db_wr_invalid_db(qid, db);
        return;
    }

    ufs_mcq_update_sq_tail(u, sq->sqid, db);
    qemu_bh_schedule(sq->bh);
}
747
/*
 * Handle a write to the per-queue MCQ operation & runtime registers.
 * @offset is relative to the start of the MCQ operation register block.
 */
static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
                                 unsigned size)
{
    /* each queue owns one UfsMcqOpReg-sized register window */
    int qid = offset / sizeof(UfsMcqOpReg);
    UfsMcqOpReg *opr = &u->mcq_op_reg[qid];

    switch (offset % sizeof(UfsMcqOpReg)) {
    case offsetof(UfsMcqOpReg, sq.tp):
        /* SQ tail doorbell: only process when the tail actually moved */
        if (opr->sq.tp != data) {
            ufs_mcq_process_db(u, qid, data);
        }
        opr->sq.tp = data;
        break;
    case offsetof(UfsMcqOpReg, cq.hp):
        opr->cq.hp = data;
        ufs_mcq_update_cq_head(u, qid, data);
        break;
    case offsetof(UfsMcqOpReg, cq_int.is):
        /* CQ interrupt status is write-1-to-clear */
        opr->cq_int.is &= ~data;
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
773
/*
 * MMIO read dispatcher: return the 32-bit register at @addr from the
 * main, MCQ configuration, or MCQ operation register block.
 */
static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;
    uint8_t *ptr;
    uint64_t value;
    uint64_t offset;

    if (addr < sizeof(u->reg)) {
        /* main UFSHCI register file */
        offset = addr;
        ptr = (uint8_t *)&u->reg;
    } else if (ufs_is_mcq_reg(u, addr)) {
        offset = addr - ufs_mcq_reg_addr(u, 0);
        ptr = (uint8_t *)&u->mcq_reg;
    } else if (ufs_is_mcq_op_reg(u, addr)) {
        offset = addr - ufs_mcq_op_reg_addr(u, 0);
        ptr = (uint8_t *)&u->mcq_op_reg;
    } else {
        trace_ufs_err_invalid_register_offset(addr);
        return 0;
    }

    /* ufs_mmio_ops restricts accesses to aligned 32-bit words */
    value = *(uint32_t *)(ptr + offset);
    trace_ufs_mmio_read(addr, value, size);
    return value;
}
799
/*
 * MMIO write dispatcher: route the access to the main, MCQ configuration,
 * or MCQ operation register handler depending on @addr.
 */
static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;

    trace_ufs_mmio_write(addr, data, size);

    if (addr < sizeof(u->reg)) {
        ufs_write_reg(u, addr, data, size);
        return;
    }
    if (ufs_is_mcq_reg(u, addr)) {
        ufs_write_mcq_reg(u, addr - ufs_mcq_reg_addr(u, 0), data, size);
        return;
    }
    if (ufs_is_mcq_op_reg(u, addr)) {
        ufs_write_mcq_op_reg(u, addr - ufs_mcq_op_reg_addr(u, 0), data, size);
        return;
    }
    trace_ufs_err_invalid_register_offset(addr);
}
817
/* MMIO ops for the UFS HCI register space; accesses are 32-bit only. */
static const MemoryRegionOps ufs_mmio_ops = {
    .read = ufs_mmio_read,
    .write = ufs_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
827
828
/*
 * Build the response UPIU header for @req from the given fields,
 * inheriting the remaining header fields from the request UPIU.
 */
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
                           uint8_t response, uint8_t scsi_status,
                           uint16_t data_segment_length)
{
    UtpUpiuHeader *hdr = &req->rsp_upiu.header;

    /* start from the request header so task tag, lun, etc. carry over */
    memcpy(hdr, &req->req_upiu.header, sizeof(*hdr));
    hdr->trans_type = trans_type;
    hdr->flags = flags;
    hdr->response = response;
    hdr->scsi_status = scsi_status;
    /* UPIU header fields are big-endian on the wire */
    hdr->data_segment_length = cpu_to_be16(data_segment_length);
}
840
/*
 * Dispatch the SCSI command UPIU of @req to the target logical unit,
 * resolving well-known LUNs to the controller's built-in units.
 */
static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t lun = req->req_upiu.header.lun;

    UfsLu *lu = NULL;

    trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);

    /* ordinary LUNs must be in range and actually attached */
    if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
        trace_ufs_err_scsi_cmd_invalid_lun(lun);
        return UFS_REQUEST_FAIL;
    }

    switch (lun) {
    case UFS_UPIU_REPORT_LUNS_WLUN:
        lu = &u->report_wlu;
        break;
    case UFS_UPIU_UFS_DEVICE_WLUN:
        lu = &u->dev_wlu;
        break;
    case UFS_UPIU_BOOT_WLUN:
        lu = &u->boot_wlu;
        break;
    case UFS_UPIU_RPMB_WLUN:
        lu = &u->rpmb_wlu;
        break;
    default:
        lu = u->lus[lun];
    }

    return lu->scsi_op(lu, req);
}
874
ufs_exec_nop_cmd(UfsRequest * req)875 static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
876 {
877 trace_ufs_exec_nop_cmd(req->slot);
878 ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
879 return UFS_REQUEST_SUCCESS;
880 }
881
/*
 * This defines the permission of flags based on their IDN. There are some
 * things that are declared read-only, which is inconsistent with the ufs spec,
 * because we want to return an error for features that are not yet supported.
 */
/* Checked by ufs_flag_check_idn_valid(); unlisted IDNs default to 0 (none). */
static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
    /* Write protection is not supported */
    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
                                    UFS_QUERY_FLAG_CLEAR |
                                    UFS_QUERY_FLAG_TOGGLE,
    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
        UFS_QUERY_FLAG_TOGGLE,
    /* Purge Operation is not supported */
    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Refresh Operation is not supported */
    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Physical Resource Removal is not supported */
    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
    /* Write Booster is not supported */
    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
};
911
ufs_flag_check_idn_valid(uint8_t idn,int op)912 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
913 {
914 if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
915 return UFS_QUERY_RESULT_INVALID_IDN;
916 }
917
918 if (!(flag_permission[idn] & op)) {
919 if (op == UFS_QUERY_FLAG_READ) {
920 trace_ufs_err_query_flag_not_readable(idn);
921 return UFS_QUERY_RESULT_NOT_READABLE;
922 }
923 trace_ufs_err_query_flag_not_writable(idn);
924 return UFS_QUERY_RESULT_NOT_WRITEABLE;
925 }
926
927 return UFS_QUERY_RESULT_SUCCESS;
928 }
929
/* Read/write permission mask per attribute IDN; see ufs_attr_check_idn_valid */
static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
    /* booting is not supported */
    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    /* refresh operation is not supported */
    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
};
972
ufs_attr_check_idn_valid(uint8_t idn,int op)973 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
974 {
975 if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
976 return UFS_QUERY_RESULT_INVALID_IDN;
977 }
978
979 if (!(attr_permission[idn] & op)) {
980 if (op == UFS_QUERY_ATTR_READ) {
981 trace_ufs_err_query_attr_not_readable(idn);
982 return UFS_QUERY_RESULT_NOT_READABLE;
983 }
984 trace_ufs_err_query_attr_not_writable(idn);
985 return UFS_QUERY_RESULT_NOT_WRITEABLE;
986 }
987
988 return UFS_QUERY_RESULT_SUCCESS;
989 }
990
/*
 * Execute a Query Request on a flag (read/set/clear/toggle) and place the
 * resulting value in the response UPIU.  Flags are stored one byte per
 * IDN inside u->flags and indexed by raw byte offset.
 */
static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
{
    UfsHc *u = req->hc;
    uint8_t idn = req->req_upiu.qr.idn;
    uint32_t value;
    QueryRespCode ret;

    ret = ufs_flag_check_idn_valid(idn, op);
    if (ret) {
        return ret;
    }

    /* fDeviceInit clears itself immediately: init completes instantly here */
    if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_READ) {
        value = *(((uint8_t *)&u->flags) + idn);
    } else if (op == UFS_QUERY_FLAG_SET) {
        value = 1;
    } else if (op == UFS_QUERY_FLAG_CLEAR) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_TOGGLE) {
        value = *(((uint8_t *)&u->flags) + idn);
        value = !value;
    } else {
        trace_ufs_err_query_invalid_opcode(op);
        return UFS_QUERY_RESULT_INVALID_OPCODE;
    }

    /* store back (a read rewrites the same value) and report it */
    *(((uint8_t *)&u->flags) + idn) = value;
    req->rsp_upiu.qr.value = cpu_to_be32(value);
    return UFS_QUERY_RESULT_SUCCESS;
}
1023
/*
 * Return the host-endian value of the attribute identified by @idn.
 *
 * Multi-byte attributes are stored big-endian (wire format) inside
 * u->attributes, so they are converted with be16_to_cpu()/be32_to_cpu()
 * here; single-byte attributes are returned as-is.  An unknown IDN reads
 * as 0 (the caller is expected to have validated @idn beforehand).
 */
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        return u->attributes.device_case_rough_temperaure;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}
1092
/*
 * Store the host-endian @value into the writable attribute selected by
 * @idn.  Multi-byte attributes are kept big-endian inside u->attributes
 * (the read path converts them back with be16/be32_to_cpu), so they are
 * converted with cpu_to_be16()/cpu_to_be32() before being stored.
 * IDNs not listed here are read-only or unsupported and are silently
 * ignored; the caller validates writability via ufs_attr_check_idn_valid().
 */
static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        u->attributes.active_icc_level = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        u->attributes.max_data_in_size = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        u->attributes.max_data_out_size = value;
        break;
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        u->attributes.ref_clk_freq = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        u->attributes.max_num_of_rtt = value;
        break;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        u->attributes.exception_event_control = cpu_to_be16(value);
        break;
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        u->attributes.seconds_passed = cpu_to_be32(value);
        break;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        /*
         * Fix: psa_state is a 32-bit big-endian field (the read path uses
         * be32_to_cpu), so convert on write as well; previously the native
         * value was stored, which corrupted reads on little-endian hosts.
         */
        u->attributes.psa_state = cpu_to_be32(value);
        break;
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        u->attributes.psa_data_size = cpu_to_be32(value);
        break;
    }
}
1125
ufs_exec_query_attr(UfsRequest * req,int op)1126 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
1127 {
1128 UfsHc *u = req->hc;
1129 uint8_t idn = req->req_upiu.qr.idn;
1130 uint32_t value;
1131 QueryRespCode ret;
1132
1133 ret = ufs_attr_check_idn_valid(idn, op);
1134 if (ret) {
1135 return ret;
1136 }
1137
1138 if (op == UFS_QUERY_ATTR_READ) {
1139 value = ufs_read_attr_value(u, idn);
1140 } else {
1141 value = be32_to_cpu(req->req_upiu.qr.value);
1142 ufs_write_attr_value(u, idn, value);
1143 }
1144
1145 req->rsp_upiu.qr.value = cpu_to_be32(value);
1146 return UFS_QUERY_RESULT_SUCCESS;
1147 }
1148
/*
 * Canned unit descriptor for the RPMB well-known LU.  The RPMB LU is not
 * backed by a drive here (lu_enable = 0); descriptor_idn 2 identifies a
 * unit descriptor.
 */
static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};
1155
ufs_read_unit_desc(UfsRequest * req)1156 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
1157 {
1158 UfsHc *u = req->hc;
1159 uint8_t lun = req->req_upiu.qr.index;
1160
1161 if (lun != UFS_UPIU_RPMB_WLUN &&
1162 (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
1163 trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
1164 return UFS_QUERY_RESULT_INVALID_INDEX;
1165 }
1166
1167 if (lun == UFS_UPIU_RPMB_WLUN) {
1168 memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
1169 } else {
1170 memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
1171 sizeof(u->lus[lun]->unit_desc));
1172 }
1173
1174 return UFS_QUERY_RESULT_SUCCESS;
1175 }
1176
manufacturer_str_desc(void)1177 static inline StringDescriptor manufacturer_str_desc(void)
1178 {
1179 StringDescriptor desc = {
1180 .length = 0x12,
1181 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1182 };
1183 desc.UC[0] = cpu_to_be16('R');
1184 desc.UC[1] = cpu_to_be16('E');
1185 desc.UC[2] = cpu_to_be16('D');
1186 desc.UC[3] = cpu_to_be16('H');
1187 desc.UC[4] = cpu_to_be16('A');
1188 desc.UC[5] = cpu_to_be16('T');
1189 return desc;
1190 }
1191
product_name_str_desc(void)1192 static inline StringDescriptor product_name_str_desc(void)
1193 {
1194 StringDescriptor desc = {
1195 .length = 0x22,
1196 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1197 };
1198 desc.UC[0] = cpu_to_be16('Q');
1199 desc.UC[1] = cpu_to_be16('E');
1200 desc.UC[2] = cpu_to_be16('M');
1201 desc.UC[3] = cpu_to_be16('U');
1202 desc.UC[4] = cpu_to_be16(' ');
1203 desc.UC[5] = cpu_to_be16('U');
1204 desc.UC[6] = cpu_to_be16('F');
1205 desc.UC[7] = cpu_to_be16('S');
1206 return desc;
1207 }
1208
product_rev_level_str_desc(void)1209 static inline StringDescriptor product_rev_level_str_desc(void)
1210 {
1211 StringDescriptor desc = {
1212 .length = 0x0a,
1213 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1214 };
1215 desc.UC[0] = cpu_to_be16('0');
1216 desc.UC[1] = cpu_to_be16('0');
1217 desc.UC[2] = cpu_to_be16('0');
1218 desc.UC[3] = cpu_to_be16('1');
1219 return desc;
1220 }
1221
/* Empty string descriptor (header only) returned for unpopulated strings. */
static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};
1226
ufs_read_string_desc(UfsRequest * req)1227 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
1228 {
1229 UfsHc *u = req->hc;
1230 uint8_t index = req->req_upiu.qr.index;
1231 StringDescriptor desc;
1232
1233 if (index == u->device_desc.manufacturer_name) {
1234 desc = manufacturer_str_desc();
1235 memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1236 } else if (index == u->device_desc.product_name) {
1237 desc = product_name_str_desc();
1238 memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1239 } else if (index == u->device_desc.serial_number) {
1240 memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1241 } else if (index == u->device_desc.oem_id) {
1242 memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1243 } else if (index == u->device_desc.product_revision_level) {
1244 desc = product_rev_level_str_desc();
1245 memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1246 } else {
1247 trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
1248 return UFS_QUERY_RESULT_INVALID_INDEX;
1249 }
1250 return UFS_QUERY_RESULT_SUCCESS;
1251 }
1252
interconnect_desc(void)1253 static inline InterconnectDescriptor interconnect_desc(void)
1254 {
1255 InterconnectDescriptor desc = {
1256 .length = sizeof(InterconnectDescriptor),
1257 .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
1258 };
1259 desc.bcd_unipro_version = cpu_to_be16(0x180);
1260 desc.bcd_mphy_version = cpu_to_be16(0x410);
1261 return desc;
1262 }
1263
/*
 * Handle a READ DESCRIPTOR query: copy the descriptor selected by IDN
 * into the response data area and fill in the response query fields.
 *
 * The requested length is clamped to the descriptor's own length byte
 * (data[0]), so a guest asking for more than the descriptor holds gets
 * only the valid bytes; on an invalid IDN the length is forced to 0.
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;

    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /* data[0] is the descriptor's own length byte; never return more. */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    /* Echo the query fields back so the guest can match the response. */
    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1325
ufs_exec_query_read(UfsRequest * req)1326 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1327 {
1328 QueryRespCode status;
1329 switch (req->req_upiu.qr.opcode) {
1330 case UFS_UPIU_QUERY_OPCODE_NOP:
1331 status = UFS_QUERY_RESULT_SUCCESS;
1332 break;
1333 case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1334 status = ufs_read_desc(req);
1335 break;
1336 case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1337 status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1338 break;
1339 case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1340 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1341 break;
1342 default:
1343 trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1344 status = UFS_QUERY_RESULT_INVALID_OPCODE;
1345 break;
1346 }
1347
1348 return status;
1349 }
1350
ufs_exec_query_write(UfsRequest * req)1351 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1352 {
1353 QueryRespCode status;
1354 switch (req->req_upiu.qr.opcode) {
1355 case UFS_UPIU_QUERY_OPCODE_NOP:
1356 status = UFS_QUERY_RESULT_SUCCESS;
1357 break;
1358 case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1359 /* write descriptor is not supported */
1360 status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1361 break;
1362 case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1363 status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1364 break;
1365 case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1366 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1367 break;
1368 case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1369 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1370 break;
1371 case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1372 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1373 break;
1374 default:
1375 trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1376 status = UFS_QUERY_RESULT_INVALID_OPCODE;
1377 break;
1378 }
1379
1380 return status;
1381 }
1382
ufs_exec_query_cmd(UfsRequest * req)1383 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1384 {
1385 uint8_t query_func = req->req_upiu.header.query_func;
1386 uint16_t data_segment_length;
1387 QueryRespCode status;
1388
1389 trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1390 if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1391 status = ufs_exec_query_read(req);
1392 } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1393 status = ufs_exec_query_write(req);
1394 } else {
1395 status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1396 }
1397
1398 data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1399 ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1400 data_segment_length);
1401
1402 if (status != UFS_QUERY_RESULT_SUCCESS) {
1403 return UFS_REQUEST_FAIL;
1404 }
1405 return UFS_REQUEST_SUCCESS;
1406 }
1407
/*
 * Fetch a request UPIU via DMA and execute it according to its
 * transaction type.  Completion is signalled here except for SCSI
 * commands, which complete asynchronously.
 */
static void ufs_exec_req(UfsRequest *req)
{
    UfsReqResult result;

    if (ufs_dma_read_upiu(req)) {
        return;
    }

    switch (req->req_upiu.header.trans_type) {
    case UFS_UPIU_TRANSACTION_NOP_OUT:
        result = ufs_exec_nop_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_COMMAND:
        result = ufs_exec_scsi_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_QUERY_REQ:
        result = ufs_exec_query_cmd(req);
        break;
    default:
        trace_ufs_err_invalid_trans_code(req->slot,
                                         req->req_upiu.header.trans_type);
        result = UFS_REQUEST_FAIL;
        break;
    }

    /*
     * SCSI commands are completed by the ufs_scsi_command_complete()
     * callback instead; skipping ufs_complete_req() here avoids a
     * duplicate completion.
     */
    if (result == UFS_REQUEST_NO_COMPLETE) {
        return;
    }
    ufs_complete_req(req, result);
}
1441
ufs_process_req(void * opaque)1442 static void ufs_process_req(void *opaque)
1443 {
1444 UfsHc *u = opaque;
1445 UfsRequest *req;
1446 int slot;
1447
1448 for (slot = 0; slot < u->params.nutrs; slot++) {
1449 req = &u->req_list[slot];
1450
1451 if (req->state != UFS_REQUEST_READY) {
1452 continue;
1453 }
1454 trace_ufs_process_req(slot);
1455 req->state = UFS_REQUEST_RUNNING;
1456
1457 ufs_exec_req(req);
1458 }
1459 }
1460
/*
 * Mark a running request complete: record the Overall Command Status in
 * the UTRD, then schedule delivery — via the per-CQ bottom half for MCQ
 * requests, or the legacy doorbell completion bottom half otherwise.
 */
void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
{
    UfsHc *u = req->hc;
    uint32_t ocs = (req_result == UFS_REQUEST_SUCCESS) ?
                       UFS_OCS_SUCCESS :
                       UFS_OCS_INVALID_CMD_TABLE_ATTR;

    assert(req->state == UFS_REQUEST_RUNNING);

    req->utrd.header.dword_2 = cpu_to_le32(ocs);
    req->state = UFS_REQUEST_COMPLETE;

    if (ufs_mcq_req(req)) {
        trace_ufs_mcq_complete_req(req->sq->sqid);
        QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
        qemu_bh_schedule(req->sq->cq->bh);
    } else {
        trace_ufs_complete_req(req->slot);
        qemu_bh_schedule(u->complete_bh);
    }
}
1483
/*
 * Reset a request slot for reuse: release any scatter-gather list and
 * zero the descriptor and both UPIU buffers.
 */
static void ufs_clear_req(UfsRequest *req)
{
    if (req->sg) {
        qemu_sglist_destroy(req->sg);
        g_free(req->sg);
        req->sg = NULL;
        req->data_len = 0;
    }

    memset(&req->utrd, 0, sizeof(req->utrd));
    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
}
1497
/*
 * Completion bottom-half (legacy doorbell mode): for every COMPLETE slot,
 * DMA the response UPIU back to the guest, raise the UTRCS interrupt when
 * appropriate, update the doorbell/notification registers, and recycle
 * the slot.  Finally re-evaluate the interrupt line.
 */
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        /* DMA write-back failed: park the slot in ERROR, keep doorbell set */
        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        /* Interrupt on failure (OCS != SUCCESS) or when the UTRD asked for
         * an interrupt command. */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        /* Clear the doorbell bit and set the completion-notification bit. */
        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}
1536
/*
 * Validate user-configurable device properties against hardware limits
 * before realize.  Returns false (with @errp set) on the first violation.
 * Note nutrs/nutmrs are inclusive limits, while mcq-maxq is exclusive.
 */
static bool ufs_check_constraints(UfsHc *u, Error **errp)
{
    if (u->params.nutrs > UFS_MAX_NUTRS) {
        error_setg(errp, "nutrs must be less than or equal to %d",
                   UFS_MAX_NUTRS);
        return false;
    }

    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
        error_setg(errp, "nutmrs must be less than or equal to %d",
                   UFS_MAX_NUTMRS);
        return false;
    }

    if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
        error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
        return false;
    }

    return true;
}
1558
/*
 * Wire the controller into PCI: set up config space (INTA, prog-if 1),
 * create the MMIO region covering the whole register file, map it at
 * BAR 0, and allocate the legacy interrupt.  u->reg_size must already be
 * set (done in ufs_init_hc()).
 */
static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}
1571
/*
 * Allocate and initialize run-time controller state: the transfer-request
 * slot array, the doorbell/completion bottom halves, and (in MCQ mode)
 * the submission/completion queue tables.
 */
static void ufs_init_state(UfsHc *u)
{
    int i;

    u->req_list = g_new0(UfsRequest, u->params.nutrs);

    for (i = 0; i < u->params.nutrs; i++) {
        UfsRequest *req = &u->req_list[i];

        req->hc = u;
        req->slot = i;
        req->sg = NULL;
        req->state = UFS_REQUEST_IDLE;
    }

    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);

    if (u->params.mcq) {
        memset(u->sq, 0, sizeof(u->sq));
        memset(u->cq, 0, sizeof(u->cq));
    }
}
1593
/*
 * Initialize the emulated host controller: compute the register-file
 * size, program the capability/MCQ registers, and populate the device,
 * geometry, attribute, and flag state to their power-on defaults.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    /* BAR size must be a power of two for PCI mapping. */
    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    /* NUTRS/NUTMRS are encoded as (count - 1) per the UFSHCI spec. */
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);        /* 64-bit addressing */
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 1);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        /*
         * Lay out the per-queue operation registers contiguously:
         * SQ doorbell, SQ interrupt-status, CQ doorbell, CQ interrupt-status.
         */
        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* String-descriptor indices, matched in ufs_read_string_desc(). */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;
}
1684
/*
 * PCI realize hook: validate properties, create the UFS bus for LU
 * children, initialize controller state/registers/PCI resources, and
 * attach the four well-known logical units.
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    /* Order matters: state and register defaults before the BAR goes live. */
    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}
1705
/*
 * PCI exit hook: tear down in reverse dependency order — stop the bottom
 * halves first so no more requests run, release per-slot resources, then
 * destroy any remaining MCQ submission/completion queues.
 */
static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);

    for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i]) {
            ufs_mcq_delete_sq(u, i);
        }
    }
    for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
        if (u->cq[i]) {
            ufs_mcq_delete_cq(u, i);
        }
    }
}
1729
/* User-configurable device properties (see file-header usage comment). */
static Property ufs_props[] = {
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    /* Number of UTP transfer request slots (max UFS_MAX_NUTRS). */
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    /* Number of UTP task management request slots (max UFS_MAX_NUTMRS). */
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    /* Enable Multi-Circular Queue mode. */
    DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
    /* Maximum number of MCQ queues (must be < UFS_MAX_MCQ_QNUM). */
    DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
    DEFINE_PROP_END_OF_LIST(),
};
1738
/* Migration is not supported yet; mark the device unmigratable. */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
1743
/* QOM class initializer: hook up PCI identity, lifecycle, and qdev bits. */
static void ufs_class_init(ObjectClass *oc, void *data)
{
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* PCI lifecycle and identification */
    pc->realize = ufs_realize;
    pc->exit = ufs_exit;
    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pc->class_id = PCI_CLASS_STORAGE_UFS;

    /* Generic device metadata */
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Universal Flash Storage";
    device_class_set_props(dc, ufs_props);
    dc->vmsd = &ufs_vmstate;
}
1760
ufs_bus_check_address(BusState * qbus,DeviceState * qdev,Error ** errp)1761 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1762 Error **errp)
1763 {
1764 if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
1765 error_setg(errp, "%s cannot be connected to ufs-bus",
1766 object_get_typename(OBJECT(qdev)));
1767 return false;
1768 }
1769
1770 return true;
1771 }
1772
/* Device path of a UFS LU is the path of its parent controller. */
static char *ufs_bus_get_dev_path(DeviceState *dev)
{
    return qdev_get_dev_path(qdev_get_parent_bus(dev)->parent);
}
1779
/* Bus class initializer: path lookup and hot-plug address validation. */
static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bc = BUS_CLASS(class);

    bc->get_dev_path = ufs_bus_get_dev_path;
    bc->check_address = ufs_bus_check_address;
}
1786
/* QOM type registration for the UFS controller (a PCIe device). */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
1794
/* QOM type registration for the UFS bus that hosts ufs-lu devices. */
static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};
1802
/* Register both QOM types with the type system at module-init time. */
static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)
1810