// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY UCLASS_SPI

#include <common.h>
#include <bootstage.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <pch.h>
#include <pci.h>
#include <pci_ids.h>
#include <spi.h>
#include <spi_flash.h>
#include <spi-mem.h>
#include <spl.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <dm/uclass-internal.h>
#include <asm/mtrr.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/sizes.h>

#include "ich.h"

#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...) debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif

static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
	u8 value = readb(priv->base + reg);

	debug_trace("read %2.2x from %4.4x\n", value, reg);

	return value;
}

static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
	u16 value = readw(priv->base + reg);

	debug_trace("read %4.4x from %4.4x\n", value, reg);

	return value;
}

static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
	u32 value = readl(priv->base + reg);

	debug_trace("read %8.8x from %4.4x\n", value, reg);

	return value;
}

static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
	writeb(value, priv->base + reg);
	debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
	writew(value, priv->base + reg);
	debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
	writel(value, priv->base + reg);
	debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}

static void write_reg(struct ich_spi_priv *priv, const void *value,
		      int dest_reg, uint32_t size)
{
	memcpy_toio(priv->base + dest_reg, value, size);
}

static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
		     uint32_t size)
{
	memcpy_fromio(value, priv->base + src_reg, size);
}

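/* Set the minimum allowed address in the BIOS Base Address Register (BBAR), if present */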
static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
	const uint32_t bbar_mask = 0x00ffff00;
	uint32_t ichspi_bbar;

	if (ctlr->bbar) {
		minaddr &= bbar_mask;
		ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
		ichspi_bbar |= minaddr;
		ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
	}
}

/* @return 1 if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
	struct ich_spi_priv *priv = dev_get_priv(dev);
	u32 fdod, speed;

	if (!CONFIG_IS_ENABLED(PCI))
		return false;
	/* Observe SPI Descriptor Component Section 0 */
	dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

	/* Extract the Write/Erase SPI Frequency from descriptor */
	dm_pci_read_config32(priv->pch, 0xb4, &fdod);

	/* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
	speed = (fdod >> 21) & 7;

	return speed == 1;
}

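/* Set the lock-down bit so the opcode configuration can no longer be changed */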
static void spi_lock_down(struct ich_spi_plat *plat, void *sbase)
{
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		setbits_le16(&ich7_spi->spis, SPIS_LOCK);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
	}
}

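/* @return true if the controller configuration is locked down */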
static bool spi_lock_status(struct ich_spi_plat *plat, void *sbase)
{
	int lock = 0;

	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		lock = readw(&ich7_spi->spis) & SPIS_LOCK;
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
	}

	return lock != 0;
}

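/**
 * spi_setup_opcode() - Select the opcode-menu entry to use for a transaction
 *
 * If the controller is not locked down, the opcode is simply programmed into
 * slot 0 of the opcode menu. If it is locked down, the opcode must already be
 * present in the menu with a matching transaction type.
 *
 * @ctlr: ICH SPI controller state
 * @trans: Transaction whose opcode and type are to be used
 * @lock: true if the controller configuration is locked down
 * @return opcode-menu index to use (0 when unlocked or for WREN), -EINVAL if
 *	the opcode is not in the menu, -ENOSPC if the menu entry's type does
 *	not match the transaction
 */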
static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
			    bool lock)
{
	uint16_t optypes;
	uint8_t opmenu[ctlr->menubytes];

	if (!lock) {
		/* The lock is off, so just use index 0. */
		ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
		optypes = ich_readw(ctlr, ctlr->optype);
		optypes = (optypes & 0xfffc) | (trans->type & 0x3);
		ich_writew(ctlr, optypes, ctlr->optype);
		return 0;
	} else {
		/* The lock is on. See if what we need is on the menu. */
		uint8_t optype;
		uint16_t opcode_index;

		/* Write Enable is handled as atomic prefix */
		if (trans->opcode == SPI_OPCODE_WREN)
			return 0;

		read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
		for (opcode_index = 0; opcode_index < ctlr->menubytes;
				opcode_index++) {
			if (opmenu[opcode_index] == trans->opcode)
				break;
		}

		if (opcode_index == ctlr->menubytes) {
			debug("ICH SPI: Opcode %x not found\n", trans->opcode);
			return -EINVAL;
		}

		optypes = ich_readw(ctlr, ctlr->optype);
		optype = (optypes >> (opcode_index * 2)) & 0x3;

		if (optype != trans->type) {
			debug("ICH SPI: Transaction doesn't fit type %d\n",
			      optype);
			return -ENOSPC;
		}
		return opcode_index;
	}
}

/*
 * Wait for up to 6s until the status register bit(s) turn 1 (if wait_til_set
 * is true) or 0. If the wait was for the bit(s) to be set, write those bits
 * back, which has the effect of clearing them.
 *
 * Return the last read status value on success or -ETIMEDOUT on failure.
 */
static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
			   int wait_til_set)
{
	int timeout = 600000; /* This will result in 6s */
	u16 status = 0;

	while (timeout--) {
		status = ich_readw(ctlr, ctlr->status);
		if (wait_til_set ^ ((status & bitmask) == 0)) {
			if (wait_til_set) {
				ich_writew(ctlr, status & bitmask,
					   ctlr->status);
			}
			return status;
		}
		udelay(10);
	}
	debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
	      status, bitmask, wait_til_set, status & bitmask);

	return -ETIMEDOUT;
}

static void ich_spi_config_opcode(struct udevice *dev)
{
	struct ich_spi_priv *ctlr = dev_get_priv(dev);

	/*
	 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
	 * to prevent accidental or intentional writes. Before they get
	 * locked down, these registers should be initialized properly.
	 */
	ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
	ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
	ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
	ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
}

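/*
 * Execute a spi-mem operation using the software sequencer: select an opcode
 * slot, write the address and control registers, then move data through the
 * FDATA FIFO in databytes-sized chunks
 */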
static int ich_spi_exec_op_swseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	struct ich_spi_priv *ctlr = dev_get_priv(bus);
	uint16_t control;
	int16_t opcode_index;
	int with_address = 0;
	int status;
	struct spi_trans *trans = &ctlr->trans;
	bool lock = spi_lock_status(plat, ctlr->base);
	int ret = 0;

	trans->in = NULL;
	trans->out = NULL;
	trans->type = 0xFF;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			trans->in = op->data.buf.in;
			trans->bytesin = op->data.nbytes;
		} else {
			trans->out = op->data.buf.out;
			trans->bytesout = op->data.nbytes;
		}
	}

	if (trans->opcode != op->cmd.opcode)
		trans->opcode = op->cmd.opcode;

	if (lock && trans->opcode == SPI_OPCODE_WRDIS)
		return 0;

	if (trans->opcode == SPI_OPCODE_WREN) {
		/*
		 * Treat Write Enable as Atomic Pre-Op if possible
		 * in order to prevent the Management Engine from
		 * issuing a transaction between WREN and DATA.
		 */
		if (!lock)
			ich_writew(ctlr, trans->opcode, ctlr->preop);
		return 0;
	}

	ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
	if (ret < 0)
		return ret;

	if (plat->ich_version == ICHV_7)
		ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
	else
		ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);

	/* Try to guess spi transaction type */
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
	} else {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
	}
	/* Special erase case handling */
	if (op->addr.nbytes && !op->data.buswidth)
		trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;

	opcode_index = spi_setup_opcode(ctlr, trans, lock);
	if (opcode_index < 0)
		return -EINVAL;

	if (op->addr.nbytes) {
		trans->offset = op->addr.val;
		with_address = 1;
	}

	if (ctlr->speed && ctlr->max_speed >= 33000000) {
		int byte;

		byte = ich_readb(ctlr, ctlr->speed);
		if (ctlr->cur_speed >= 33000000)
			byte |= SSFC_SCF_33MHZ;
		else
			byte &= ~SSFC_SCF_33MHZ;
		ich_writeb(ctlr, byte, ctlr->speed);
	}

	/* Preset control fields */
	control = SPIC_SCGO | ((opcode_index & 0x07) << 4);

	/* Issue atomic preop cycle if needed */
	if (ich_readw(ctlr, ctlr->preop))
		control |= SPIC_ACS;

	if (!trans->bytesout && !trans->bytesin) {
		/* SPI addresses are 24 bit only */
		if (with_address) {
			ich_writel(ctlr, trans->offset & 0x00FFFFFF,
				   ctlr->addr);
		}
		/*
		 * This is a 'no data' command (like Write Enable), its
		 * bytesout size was 1, decremented to zero while executing
		 * spi_setup_opcode() above. Tell the chip to send the
		 * command.
		 */
		ich_writew(ctlr, control, ctlr->control);

		/* wait for the result */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Command transaction error\n");
			return -EIO;
		}

		return 0;
	}

	while (trans->bytesout || trans->bytesin) {
		uint32_t data_length;

		/* SPI addresses are 24 bit only */
		ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);

		if (trans->bytesout)
			data_length = min(trans->bytesout, ctlr->databytes);
		else
			data_length = min(trans->bytesin, ctlr->databytes);

		/* Program data into FDATA0 to N */
		if (trans->bytesout) {
			write_reg(ctlr, trans->out, ctlr->data, data_length);
			trans->bytesout -= data_length;
		}

		/* Add proper control fields' values */
		control &= ~((ctlr->databytes - 1) << 8);
		control |= SPIC_DS;
		control |= (data_length - 1) << 8;

		/* write it */
		ich_writew(ctlr, control, ctlr->control);

		/* Wait for Cycle Done Status or Flash Cycle Error */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Data transaction error %x\n", status);
			return -EIO;
		}

		if (trans->bytesin) {
			read_reg(ctlr, ctlr->data, trans->in, data_length);
			trans->bytesin -= data_length;
		}
	}

	/* Clear atomic preop now that xfer is done */
	if (!lock)
		ich_writew(ctlr, 0, ctlr->preop);

	return 0;
}

/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross a page boundary.
 */
static uint get_xfer_len(u32 offset, int len, int page_size)
{
	uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	uint bytes_left = ALIGN(offset, page_size) - offset;

	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}

/* Fill FDATAn FIFO in preparation for a write transaction */
static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
			   uint len)
{
	memcpy(regs->fdata, data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data */
static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
{
	memcpy(dest, regs->fdata, len);
}

/* Fire up a transfer using the hardware sequencer */
static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
			     uint offset, uint len)
{
	/* Make sure all W1C status bits get cleared */
	u32 hsfsts;

	hsfsts = readl(&regs->hsfsts_ctl);
	hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
	hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;

	/* Set up transaction parameters */
	hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
	hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
	hsfsts |= HSFSTS_FGO;

	writel(offset, &regs->faddr);
	writel(hsfsts, &regs->hsfsts_ctl);
}

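/*
 * Wait for the hardware sequencer to finish the current cycle
 *
 * @return 0 if done, -EIO on flash-cycle error, -EPERM on access error,
 *	-ETIMEDOUT if the cycle did not complete in time
 */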
static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
{
	ulong start;
	u32 hsfsts;

	start = get_timer(0);
	do {
		hsfsts = readl(&regs->hsfsts_ctl);
		if (hsfsts & HSFSTS_FCERR) {
			debug("SPI transaction error at offset %x HSFSTS = %08x\n",
			      offset, hsfsts);
			return -EIO;
		}
		if (hsfsts & HSFSTS_AEL)
			return -EPERM;

		if (hsfsts & HSFSTS_FDONE)
			return 0;
	} while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

	debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
	      offset, hsfsts, (uint)get_timer(start));

	return -ETIMEDOUT;
}

/**
 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
 *
 * This waits until complete or timeout
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *	(AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
				uint offset, uint len)
{
	start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

	return wait_for_hwseq_xfer(regs, offset);
}

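/*
 * Execute a spi-mem operation using the hardware sequencer: map the SPI-NOR
 * opcode to a hardware cycle type, then loop over the transfer in FIFO-sized,
 * page-bounded chunks
 */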
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct fast_spi_regs *regs = priv->base;
	uint page_size;
	uint offset;
	int cycle;
	uint len;
	bool out;
	int ret;
	u8 *buf;

	offset = op->addr.val;
	len = op->data.nbytes;

	switch (op->cmd.opcode) {
	case SPINOR_OP_RDID:
		cycle = HSFSTS_CYCLE_RDID;
		break;
	case SPINOR_OP_READ_FAST:
		cycle = HSFSTS_CYCLE_READ;
		break;
	case SPINOR_OP_PP:
		cycle = HSFSTS_CYCLE_WRITE;
		break;
	case SPINOR_OP_WREN:
		/* Nothing needs to be done */
		return 0;
	case SPINOR_OP_WRSR:
		cycle = HSFSTS_CYCLE_WR_STATUS;
		break;
	case SPINOR_OP_RDSR:
		cycle = HSFSTS_CYCLE_RD_STATUS;
		break;
	case SPINOR_OP_WRDI:
		return 0;  /* ignore */
	case SPINOR_OP_BE_4K:
		cycle = HSFSTS_CYCLE_4K_ERASE;
		ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
		return ret;
	default:
		debug("Unknown cycle %x\n", op->cmd.opcode);
		return -EINVAL;
	}

	out = op->data.dir == SPI_MEM_DATA_OUT;
	buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
	page_size = flash->page_size ? : 256;

	while (len) {
		uint xfer_len = get_xfer_len(offset, len, page_size);

		if (out)
			fill_xfer_fifo(regs, buf, xfer_len);

		ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
		if (ret)
			return ret;

		if (!out)
			drain_xfer_fifo(regs, buf, xfer_len);

		offset += xfer_len;
		buf += xfer_len;
		len -= xfer_len;
	}

	return 0;
}

static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	int ret;

	bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
	if (plat->hwseq)
		ret = ich_spi_exec_op_hwseq(slave, op);
	else
		ret = ich_spi_exec_op_swseq(slave, op);
	bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

	return ret;
}

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
/**
 * ich_spi_get_basics() - Get basic information about the ICH device
 *
 * This works without probing any devices if requested.
 *
 * @bus: SPI controller to use
 * @can_probe: true if this function is allowed to probe the PCH
 * @pchp: Returns a pointer to the pch, or NULL if not found
 * @ich_versionp: Returns ICH version detected on success
 * @mmio_basep: Returns the address of the SPI registers on success
 * @return 0 if OK, -EPROTOTYPE if the PCH could not be found, -EAGAIN if
 *	the function cannot succeed without probing, or possibly another error
 *	if pch_get_spi_base() fails
 */
static int ich_spi_get_basics(struct udevice *bus, bool can_probe,
			      struct udevice **pchp,
			      enum ich_version *ich_versionp, ulong *mmio_basep)
{
	struct udevice *pch = NULL;
	int ret = 0;

	/* Find a PCH if there is one */
	if (can_probe) {
		pch = dev_get_parent(bus);
		if (device_get_uclass_id(pch) != UCLASS_PCH) {
			uclass_first_device(UCLASS_PCH, &pch);
			if (!pch)
				return log_msg_ret("uclass", -EPROTOTYPE);
		}
	}

	*ich_versionp = dev_get_driver_data(bus);
	if (*ich_versionp == ICHV_APL)
		*mmio_basep = dm_pci_read_bar32(bus, 0);
	else if (pch)
		ret = pch_get_spi_base(pch, mmio_basep);
	else
		return -EAGAIN;
	*pchp = pch;

	return ret;
}
#endif

/**
 * ich_get_mmap_bus() - Handle the get_mmap() method for a bus
 *
 * There are several cases to consider:
 * 1. Using of-platdata, in which case we have the BDF and can access the
 *    registers by reading the BAR
 * 2. Not using of-platdata, but still with a SPI controller that is on its own
 *    PCI BDF. In this case we read the BDF from the parent plat and again get
 *    the registers by reading the BAR
 * 3. Using a SPI controller that is a child of the PCH, in which case we try
 *    to find the registers by asking the PCH. This only works if the PCH has
 *    been probed (which it will be if the bus is probed since parents are
 *    probed before children), since the PCH may not have a PCI address until
 *    its parent (the PCI bus itself) has been probed. If you are using this
 *    method then you should make sure the SPI bus is probed.
 *
 * The first two cases are useful in early init. The last one is more useful
 * afterwards.
 */
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
			    uint *map_sizep, uint *offsetp)
{
	pci_dev_t spi_bdf;
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	if (device_is_on_pci_bus(bus)) {
		struct pci_child_plat *pplat;

		pplat = dev_get_parent_plat(bus);
		spi_bdf = pplat->devfn;
	} else {
		enum ich_version ich_version;
		struct fast_spi_regs *regs;
		struct udevice *pch;
		ulong mmio_base;
		int ret;

		ret = ich_spi_get_basics(bus, device_active(bus), &pch,
					 &ich_version, &mmio_base);
		if (ret)
			return log_msg_ret("basics", ret);
		regs = (struct fast_spi_regs *)mmio_base;

		return fast_spi_get_bios_mmap_regs(regs, map_basep, map_sizep,
						   offsetp);
	}
#else
	struct ich_spi_plat *plat = dev_get_plat(bus);

	/*
	 * We cannot rely on plat->bdf being set up yet since this method can
	 * be called before the device is probed. Use the of-platdata directly
	 * instead.
	 */
	spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

	return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}

static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
			uint *offsetp)
{
	struct udevice *bus = dev_get_parent(dev);

	return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}

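/*
 * Clip the spi-mem data length so that a single transfer does not cross an
 * ICH_BOUNDARY boundary and stays within the slave's maximum read/write size
 */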
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	unsigned int page_offset;
	int addr = op->addr.val;
	unsigned int byte_count = op->data.nbytes;

	if (hweight32(ICH_BOUNDARY) == 1) {
		page_offset = addr & (ICH_BOUNDARY - 1);
	} else {
		u64 aux = addr;

		page_offset = do_div(aux, ICH_BOUNDARY);
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size) {
			op->data.nbytes = min(ICH_BOUNDARY - page_offset,
					      slave->max_read_size);
		}
	} else if (slave->max_write_size) {
		op->data.nbytes = min(ICH_BOUNDARY - page_offset,
				      slave->max_write_size);
	}

	op->data.nbytes = min(op->data.nbytes, byte_count);

	return 0;
}

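/*
 * Disable the BIOS write protection and, if requested in the devicetree,
 * lock down the opcode configuration
 */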
static int ich_protect_lockdown(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret = -ENOSYS;

	/* Disable the BIOS write protect so write commands are allowed */
	if (priv->pch)
		ret = pch_set_spi_protect(priv->pch, false);
	if (ret == -ENOSYS) {
		u8 bios_cntl;

		bios_cntl = ich_readb(priv, priv->bcr);
		bios_cntl &= ~BIT(5);	/* clear Enable InSMM_STS (EISS) */
		bios_cntl |= 1;		/* Write Protect Disable (WPD) */
		ich_writeb(priv, bios_cntl, priv->bcr);
	} else if (ret) {
		debug("%s: Failed to disable write-protect: err=%d\n",
		      __func__, ret);
		return ret;
	}

	/* Lock down SPI controller settings if required */
	if (plat->lockdown) {
		ich_spi_config_opcode(dev);
		spi_lock_down(plat, priv->base);
	}

	return 0;
}

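/*
 * Work out the register layout for the detected controller generation and
 * record the register offsets and maximum speed in @ctlr
 */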
static int ich_init_controller(struct udevice *dev,
			       struct ich_spi_plat *plat,
			       struct ich_spi_priv *ctlr)
{
	if (spl_phase() == PHASE_TPL) {
		int ret;

		ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
		if (ret)
			return ret;
	}

	ctlr->base = (void *)plat->mmio_base;
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich7_spi->opmenu);
		ctlr->optype = offsetof(struct ich7_spi_regs, optype);
		ctlr->addr = offsetof(struct ich7_spi_regs, spia);
		ctlr->data = offsetof(struct ich7_spi_regs, spid);
		ctlr->databytes = sizeof(ich7_spi->spid);
		ctlr->status = offsetof(struct ich7_spi_regs, spis);
		ctlr->control = offsetof(struct ich7_spi_regs, spic);
		ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich7_spi_regs, preop);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich9_spi->opmenu);
		ctlr->optype = offsetof(struct ich9_spi_regs, optype);
		ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
		ctlr->data = offsetof(struct ich9_spi_regs, fdata);
		ctlr->databytes = sizeof(ich9_spi->fdata);
		ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
		ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
		ctlr->speed = ctlr->control + 2;
		ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich9_spi_regs, preop);
		ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
		ctlr->pr = &ich9_spi->pr[0];
	} else if (plat->ich_version == ICHV_APL) {
	} else {
		debug("ICH SPI: Unrecognised ICH version %d\n",
		      plat->ich_version);
		return -EINVAL;
	}

	/* Work out the maximum speed we can support */
	ctlr->max_speed = 20000000;
	if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
		ctlr->max_speed = 33000000;
	debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
	      plat->ich_version, plat->mmio_base, ctlr->max_speed);

	ich_set_bbar(ctlr, 0);

	return 0;
}

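/* Set up an MTRR to cache the memory-mapped BIOS region (write-protect type) */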
static int ich_cache_bios_region(struct udevice *dev)
{
	ulong map_base;
	uint map_size;
	uint offset;
	ulong base;
	int ret;

	ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
	if (ret)
		return ret;

	/* Don't use WRBACK since we are not supposed to write to SPI flash */
	base = SZ_4G - map_size;
	mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
	log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

	return 0;
}

static int ich_spi_probe(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_init_controller(dev, plat, priv);
	if (ret)
		return ret;

	if (spl_phase() == PHASE_TPL) {
		/* Cache the BIOS to speed things up */
		ret = ich_cache_bios_region(dev);
		if (ret)
			return ret;
	} else {
		ret = ich_protect_lockdown(dev);
		if (ret)
			return ret;
	}
	priv->cur_speed = priv->max_speed;

	return 0;
}

static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}

static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
	struct ich_spi_priv *priv = dev_get_priv(bus);

	priv->cur_speed = speed;

	return 0;
}

static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
	debug("%s: mode=%d\n", __func__, mode);

	return 0;
}

static int ich_spi_child_pre_probe(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct ich_spi_plat *plat = dev_get_plat(bus);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/*
	 * Yes this controller can only write a small number of bytes at
	 * once! The limit is typically 64 bytes. For hardware sequencing a
	 * loop is used to get around this.
	 */
	if (!plat->hwseq)
		slave->max_write_size = priv->databytes;
	/*
	 * ICH 7 SPI controller only supports array read command
	 * and byte program command for SST flash
	 */
	if (plat->ich_version == ICHV_7)
		slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

	return 0;
}

static int ich_spi_of_to_plat(struct udevice *dev)
{
	struct ich_spi_plat *plat = dev_get_plat(dev);

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_spi_get_basics(dev, true, &priv->pch, &plat->ich_version,
				 &plat->mmio_base);
	if (ret)
		return log_msg_ret("basics", ret);
	plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
	/*
	 * Use an int so that the property is present in of-platdata even
	 * when false.
	 */
	plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
	plat->ich_version = ICHV_APL;
	plat->mmio_base = plat->dtplat.early_regs[0];
	plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
	plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
	debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

	return 0;
}

static const struct spi_controller_mem_ops ich_controller_mem_ops = {
	.adjust_op_size	= ich_spi_adjust_size,
	.supports_op	= NULL,
	.exec_op	= ich_spi_exec_op,
};

static const struct dm_spi_ops ich_spi_ops = {
	/* xfer is not supported */
	.set_speed	= ich_spi_set_speed,
	.set_mode	= ich_spi_set_mode,
	.mem_ops	= &ich_controller_mem_ops,
	.get_mmap	= ich_get_mmap,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

static const struct udevice_id ich_spi_ids[] = {
	{ .compatible = "intel,ich7-spi", ICHV_7 },
	{ .compatible = "intel,ich9-spi", ICHV_9 },
	{ .compatible = "intel,fast-spi", ICHV_APL },
	{ }
};

U_BOOT_DRIVER(intel_fast_spi) = {
	.name		= "intel_fast_spi",
	.id		= UCLASS_SPI,
	.of_match	= ich_spi_ids,
	.ops		= &ich_spi_ops,
	.of_to_plat	= ich_spi_of_to_plat,
	.plat_auto	= sizeof(struct ich_spi_plat),
	.priv_auto	= sizeof(struct ich_spi_priv),
	.child_pre_probe = ich_spi_child_pre_probe,
	.probe		= ich_spi_probe,
	.remove		= ich_spi_remove,
	.flags		= DM_FLAG_OS_PREPARE,
};