// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <pci_ids.h>
#include <phy.h>
#include <asm/io.h>
#include <linux/delay.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq(nic->reg_base + offset);
}

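/* Per-queue register accessors: qidx selects the queue instance within the Qset */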
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void *addr = nic->reg_base + offset;

	writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void *addr = nic->reg_base + offset;

	return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}

static void nicvf_handle_mbx_intr(struct nicvf *nic);

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_PF_VF_MBX_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	nic_handle_mbx_intr(nic->nicpf, nic->vf_id);

	/* Wait for the message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -1;
		mdelay(sleep);
		nicvf_handle_mbx_intr(nic);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			printf("PF didn't ack mbox msg %d from VF%d\n",
			       (mbx->msg.msg & 0xFF), nic->vf_id);
			return -1;
		}
	}

	return 0;
}

/* Checks if the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		printf("PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

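/* Read the PF-to-VF mailbox registers and act on the message received */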
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct eth_pdata *pdata = dev_get_plat(nic->dev);
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			memcpy(pdata->enetaddr,
			       mbx.nic_cfg.mac_addr, 6);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			printf("%s: Link is Up %d Mbps %s\n",
			       nic->dev->name, nic->speed,
			       nic->duplex == 1 ?
			       "Full duplex" : "Half duplex");
		} else {
			printf("%s: Link is Down\n", nic->dev->name);
		}
		break;
	default:
		printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}

	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

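/* Ask the PF to program this VF's MAC address into the hardware */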
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev)
{
	union nic_mbx mbx = {};
	struct eth_pdata *pdata = dev_get_plat(dev);

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

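/* Request the PF to configure the CPI algorithm for this VF's receive queues */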
static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

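/* Enable the Qset and initialize the queues for data transfer */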
static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	nic->num_qs = 1;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		printf("Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}

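/* Handle a send completion: check for TX errors and release the SQ descriptors */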
static void nicvf_snd_pkt_handler(struct nicvf *nic,
				  struct cmp_queue *cq,
				  void *cq_desc, int cqe_type)
{
	struct cqe_send_t *cqe_tx;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	cqe_tx = (struct cqe_send_t *)cq_desc;
	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	nicvf_check_cqe_tx_errs(nic, cq, cq_desc);
	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}

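/* Handle a receive completion: check for RX errors, return the packet and its length */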
static int nicvf_rcv_pkt_handler(struct nicvf *nic,
				 struct cmp_queue *cq, void *cq_desc,
				 void **ppkt, int cqe_type)
{
	void *pkt;
	size_t pkt_len;
	struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc;
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc);
	if (err && !cqe_rx->rb_cnt)
		return -1;

	pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len);
	if (!pkt) {
		debug("Packet not received\n");
		return -1;
	}

	*ppkt = pkt;

	return pkt_len;
}

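/* Process one pending completion queue entry (RX or TX) and ring the CQ doorbell */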
int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
{
	int cq_qnum = 0;
	int processed_sq_cqe = 0;
	int processed_rq_cqe = 0;
	int processed_cqe = 0;

	unsigned long cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_qnum];
	struct cqe_rx_t *cq_desc;

	/* Get num of valid CQ entries; the next one is expected to be an SQ completion */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
	cqe_count &= 0xFFFF;
	if (!cqe_count)
		return 0;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
	cqe_head >>= 9;
	cqe_head &= 0xFFFF;

	if (cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			debug("%s: Got Rx CQE\n", nic->dev->name);
			*pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
							 ppkt, CQE_TYPE_RX);
			processed_rq_cqe++;
			break;
		case CQE_TYPE_SEND:
			debug("%s: Got Tx CQE\n", nic->dev->name);
			nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
			processed_sq_cqe++;
			break;
		default:
			debug("%s: Got CQ type %u\n", nic->dev->name,
			      cq_desc->cqe_type);
			break;
		}
		processed_cqe++;
	}

	/* Dequeue CQE */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_qnum, processed_cqe);

	asm volatile ("dsb sy");

	return (processed_sq_cqe | processed_rq_cqe);
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
void nicvf_handle_qs_err(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_sq_disable(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
	}
}

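/* .free_pkt callback: free the receive buffer and refill the RBDR */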
static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
	struct nicvf *nic = dev_get_priv(dev);

	if (pkt && pkt_len)
		free(pkt);
	nicvf_refill_rbdr(nic);
	return 0;
}

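/* .send callback: append the packet to the SQ and poll the CQ until the send completes */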
static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
	struct nicvf *nic = dev_get_priv(dev);
	int ret = 0;
	int rcv_len = 0;
	unsigned int timeout = 5000;
	void *rpkt = NULL;

	if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) {
		printf("VF%d: TX ring full\n", nic->vf_id);
		return -1;
	}

	/* Check and update CQ for pkt sent */
	while (!ret && timeout--) {
		ret = nicvf_cq_handler(nic, &rpkt, &rcv_len);
		if (!ret) {
			debug("%s: %d, Not sent\n", __func__, __LINE__);
			udelay(10);
		}
	}

	return 0;
}

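/* .recv callback: poll the CQ for a received packet */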
static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct nicvf *nic = dev_get_priv(dev);
	void *pkt;
	int pkt_len = 0;
#ifdef DEBUG
	u8 *dpkt;
	int i, j;
#endif

	nicvf_cq_handler(nic, &pkt, &pkt_len);

	if (pkt_len) {
#ifdef DEBUG
		dpkt = pkt;
		printf("RX packet contents:\n");
		for (i = 0; i < 8; i++) {
			puts("\t");
			for (j = 0; j < 10; j++)
				printf("%02x ", dpkt[i * 10 + j]);
			puts("\n");
		}
#endif
		*packetp = pkt;
	}

	return pkt_len;
}

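/* .stop callback: tear down the queues and disable the hardware Qset */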
void nicvf_stop(struct udevice *dev)
{
	struct nicvf *nic = dev_get_priv(dev);

	if (!nic->open)
		return;

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	nic->open = false;
}

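/* .start callback: program the MAC, configure CPI, bring up the queues and check that the PF is ready */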
int nicvf_open(struct udevice *dev)
{
	int err;
	struct nicvf *nic = dev_get_priv(dev);

	nicvf_hw_set_mac_addr(nic, dev);

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	nicvf_config_cpi(nic);

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		return -1;

	if (!nicvf_check_pf_ready(nic))
		return -1;

	nic->open = true;

	/* Make sure queue initialization is written */
	asm volatile("dsb sy");

	return 0;
}

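/* .write_hwaddr callback: keep the environment and device MAC address in sync */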
int nicvf_write_hwaddr(struct udevice *dev)
{
	unsigned char ethaddr[ARP_HLEN];
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct nicvf *nic = dev_get_priv(dev);

	/* If the lower-level firmware fails to set a proper MAC, the
	 * U-Boot framework updates the MAC to a random address.
	 * Use this hook to update the MAC address in the environment.
	 */
	if (!eth_env_get_enetaddr_by_index("eth", dev_seq(dev), ethaddr)) {
		eth_env_set_enetaddr_by_index("eth", dev_seq(dev),
					      pdata->enetaddr);
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
	}
	eth_env_get_enetaddr_by_index("eth", dev_seq(dev), ethaddr);
	if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
		nicvf_hw_set_mac_addr(nic, dev);
	}
	return 0;
}

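/* Locate the Cavium SMI (MDIO) PCI device once so it gets probed */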
static void nicvf_probe_mdio_devices(void)
{
	struct udevice *pdev;
	int err;
	static int probed;

	if (probed)
		return;

	err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
				 PCI_DEVICE_ID_CAVIUM_SMI, 0,
				 &pdev);
	if (err)
		debug("%s couldn't find SMI device\n", __func__);
	probed = 1;
}

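/* One-time VF setup: locate the PF, map BAR0, allocate Qset resources and set the MAC from the board FDT */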
int nicvf_initialize(struct udevice *dev)
{
	struct nicvf *nicvf = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	int    ret = 0, bgx, lmac;
	char   name[16];
	unsigned char ethaddr[ARP_HLEN];
	struct udevice *pfdev;
	struct nicpf *pf;
	static int vfid;

	if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
		printf("%s NIC PF device not found..VF probe failed\n",
		       __func__);
		return -1;
	}
	pf = dev_get_priv(pfdev);
	nicvf->vf_id = vfid++;
	nicvf->dev = dev;
	nicvf->nicpf = pf;

	nicvf_probe_mdio_devices();

	/* Enable TSO support */
	nicvf->hw_tso = true;

	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
					 PCI_REGION_MEM);

	debug("nicvf->reg_base: %p\n", nicvf->reg_base);

	if (!nicvf->reg_base) {
		printf("Cannot map config register space, aborting\n");
		ret = -1;
		goto fail;
	}

	ret = nicvf_set_qset_resources(nicvf);
	if (ret)
		return -1;

	sprintf(name, "vnic%u", nicvf->vf_id);
	debug("%s name %s\n", __func__, name);
	device_set_name(dev, name);

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
	debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
	debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
	      __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
	      pdata);

	fdt_board_get_ethaddr(bgx, lmac, ethaddr);

	debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);

	if (is_valid_ethaddr(ethaddr)) {
		memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
		eth_env_set_enetaddr_by_index("eth", dev_seq(dev), ethaddr);
	}
	debug("%s enetaddr %pM ethaddr %pM\n", __func__,
	      pdata->enetaddr, ethaddr);

fail:
	return ret;
}

int octeontx_vnic_probe(struct udevice *dev)
{
	return nicvf_initialize(dev);
}

static const struct eth_ops octeontx_vnic_ops = {
	.start = nicvf_open,
	.stop  = nicvf_stop,
	.send  = nicvf_xmit,
	.recv  = nicvf_recv,
	.free_pkt = nicvf_free_pkt,
	.write_hwaddr = nicvf_write_hwaddr,
};

U_BOOT_DRIVER(octeontx_vnic) = {
	.name	= "vnic",
	.id	= UCLASS_ETH,
	.probe	= octeontx_vnic_probe,
	.ops	= &octeontx_vnic_ops,
	.priv_auto	= sizeof(struct nicvf),
	.plat_auto	= sizeof(struct eth_pdata),
};

static struct pci_device_id octeontx_vnic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);