1 /*
2  * Copyright (C) 2011 Michael Brown <mbrown@fensystems.co.uk>.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  *
19  * You can also choose to distribute this program under the terms of
20  * the Unmodified Binary Distribution Licence (as given in the file
21  * COPYING.UBDL), provided that you have satisfied its requirements.
22  */
23 
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25 
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>
#include <ipxe/profile.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "vmxnet3.h"
39 
40 /**
41  * @file
42  *
43  * VMware vmxnet3 virtual NIC driver
44  *
45  */
46 
/* Per-operation profilers wrapped around the device register accesses
 * below (see the profile_start/profile_stop call sites).
 */

/** VM command profiler */
static struct profiler vmxnet3_vm_command_profiler __profiler =
	{ .name = "vmxnet3.vm_command" };

/** VM transmit profiler */
static struct profiler vmxnet3_vm_tx_profiler __profiler =
	{ .name = "vmxnet3.vm_tx" };

/** VM receive refill profiler */
static struct profiler vmxnet3_vm_refill_profiler __profiler =
	{ .name = "vmxnet3.vm_refill" };

/** VM event profiler */
static struct profiler vmxnet3_vm_event_profiler __profiler =
	{ .name = "vmxnet3.vm_event" };
62 
63 /**
64  * Issue command
65  *
66  * @v vmxnet		vmxnet3 NIC
67  * @v command		Command to issue
68  * @ret result		Command result
69  */
vmxnet3_command(struct vmxnet3_nic * vmxnet,uint32_t command)70 static inline uint32_t vmxnet3_command ( struct vmxnet3_nic *vmxnet,
71 					 uint32_t command ) {
72 	uint32_t result;
73 
74 	/* Issue command */
75 	profile_start ( &vmxnet3_vm_command_profiler );
76 	writel ( command, ( vmxnet->vd + VMXNET3_VD_CMD ) );
77 	result = readl ( vmxnet->vd + VMXNET3_VD_CMD );
78 	profile_stop ( &vmxnet3_vm_command_profiler );
79 	profile_exclude ( &vmxnet3_vm_command_profiler );
80 
81 	return result;
82 }
83 
84 /**
85  * Transmit packet
86  *
87  * @v netdev		Network device
88  * @v iobuf		I/O buffer
89  * @ret rc		Return status code
90  */
vmxnet3_transmit(struct net_device * netdev,struct io_buffer * iobuf)91 static int vmxnet3_transmit ( struct net_device *netdev,
92 			      struct io_buffer *iobuf ) {
93 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
94 	struct vmxnet3_tx_desc *tx_desc;
95 	unsigned int fill;
96 	unsigned int desc_idx;
97 	unsigned int generation;
98 
99 	/* Check that we have a free transmit descriptor */
100 	fill = ( vmxnet->count.tx_prod - vmxnet->count.tx_cons );
101 	if ( fill >= VMXNET3_TX_FILL ) {
102 		DBGC ( vmxnet, "VMXNET3 %p out of transmit descriptors\n",
103 		       vmxnet );
104 		return -ENOBUFS;
105 	}
106 
107 	/* Locate transmit descriptor */
108 	desc_idx = ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC );
109 	generation = ( ( vmxnet->count.tx_prod & VMXNET3_NUM_TX_DESC ) ?
110 		       0 : cpu_to_le32 ( VMXNET3_TXF_GEN ) );
111 	assert ( vmxnet->tx_iobuf[desc_idx] == NULL );
112 
113 	/* Increment producer counter */
114 	vmxnet->count.tx_prod++;
115 
116 	/* Store I/O buffer for later completion */
117 	vmxnet->tx_iobuf[desc_idx] = iobuf;
118 
119 	/* Populate transmit descriptor */
120 	tx_desc = &vmxnet->dma->tx_desc[desc_idx];
121 	tx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
122 	tx_desc->flags[0] = ( generation | cpu_to_le32 ( iob_len ( iobuf ) ) );
123 	tx_desc->flags[1] = cpu_to_le32 ( VMXNET3_TXF_CQ | VMXNET3_TXF_EOP );
124 
125 	/* Hand over descriptor to NIC */
126 	wmb();
127 	profile_start ( &vmxnet3_vm_tx_profiler );
128 	writel ( ( vmxnet->count.tx_prod % VMXNET3_NUM_TX_DESC ),
129 		 ( vmxnet->pt + VMXNET3_PT_TXPROD ) );
130 	profile_stop ( &vmxnet3_vm_tx_profiler );
131 	profile_exclude ( &vmxnet3_vm_tx_profiler );
132 
133 	return 0;
134 }
135 
136 /**
137  * Poll for completed transmissions
138  *
139  * @v netdev		Network device
140  */
vmxnet3_poll_tx(struct net_device * netdev)141 static void vmxnet3_poll_tx ( struct net_device *netdev ) {
142 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
143 	struct vmxnet3_tx_comp *tx_comp;
144 	struct io_buffer *iobuf;
145 	unsigned int comp_idx;
146 	unsigned int desc_idx;
147 	unsigned int generation;
148 
149 	while ( 1 ) {
150 
151 		/* Look for completed descriptors */
152 		comp_idx = ( vmxnet->count.tx_cons % VMXNET3_NUM_TX_COMP );
153 		generation = ( ( vmxnet->count.tx_cons & VMXNET3_NUM_TX_COMP ) ?
154 			       0 : cpu_to_le32 ( VMXNET3_TXCF_GEN ) );
155 		tx_comp = &vmxnet->dma->tx_comp[comp_idx];
156 		if ( generation != ( tx_comp->flags &
157 				     cpu_to_le32 ( VMXNET3_TXCF_GEN ) ) ) {
158 			break;
159 		}
160 
161 		/* Increment consumer counter */
162 		vmxnet->count.tx_cons++;
163 
164 		/* Locate corresponding transmit descriptor */
165 		desc_idx = ( le32_to_cpu ( tx_comp->index ) %
166 			     VMXNET3_NUM_TX_DESC );
167 		iobuf = vmxnet->tx_iobuf[desc_idx];
168 		if ( ! iobuf ) {
169 			DBGC ( vmxnet, "VMXNET3 %p completed on empty transmit "
170 			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
171 			netdev_tx_err ( netdev, NULL, -ENOTTY );
172 			continue;
173 		}
174 
175 		/* Remove I/O buffer from transmit queue */
176 		vmxnet->tx_iobuf[desc_idx] = NULL;
177 
178 		/* Report transmission completion to network layer */
179 		DBGC2 ( vmxnet, "VMXNET3 %p completed TX %#x/%#x (len %#zx)\n",
180 			vmxnet, comp_idx, desc_idx, iob_len ( iobuf ) );
181 		netdev_tx_complete ( netdev, iobuf );
182 	}
183 }
184 
185 /**
186  * Flush any uncompleted transmit buffers
187  *
188  * @v netdev		Network device
189  */
vmxnet3_flush_tx(struct net_device * netdev)190 static void vmxnet3_flush_tx ( struct net_device *netdev ) {
191 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
192 	unsigned int i;
193 
194 	for ( i = 0 ; i < VMXNET3_NUM_TX_DESC ; i++ ) {
195 		if ( vmxnet->tx_iobuf[i] ) {
196 			netdev_tx_complete_err ( netdev, vmxnet->tx_iobuf[i],
197 						 -ECANCELED );
198 			vmxnet->tx_iobuf[i] = NULL;
199 		}
200 	}
201 }
202 
203 /**
204  * Refill receive ring
205  *
206  * @v netdev		Network device
207  */
vmxnet3_refill_rx(struct net_device * netdev)208 static void vmxnet3_refill_rx ( struct net_device *netdev ) {
209 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
210 	struct vmxnet3_rx_desc *rx_desc;
211 	struct io_buffer *iobuf;
212 	unsigned int orig_rx_prod = vmxnet->count.rx_prod;
213 	unsigned int desc_idx;
214 	unsigned int generation;
215 
216 	/* Fill receive ring to specified fill level */
217 	while ( vmxnet->count.rx_fill < VMXNET3_RX_FILL ) {
218 
219 		/* Locate receive descriptor */
220 		desc_idx = ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC );
221 		generation = ( ( vmxnet->count.rx_prod & VMXNET3_NUM_RX_DESC ) ?
222 			       0 : cpu_to_le32 ( VMXNET3_RXF_GEN ) );
223 		assert ( vmxnet->rx_iobuf[desc_idx] == NULL );
224 
225 		/* Allocate I/O buffer */
226 		iobuf = alloc_iob ( VMXNET3_MTU + NET_IP_ALIGN );
227 		if ( ! iobuf ) {
228 			/* Non-fatal low memory condition */
229 			break;
230 		}
231 		iob_reserve ( iobuf, NET_IP_ALIGN );
232 
233 		/* Increment producer counter and fill level */
234 		vmxnet->count.rx_prod++;
235 		vmxnet->count.rx_fill++;
236 
237 		/* Store I/O buffer for later completion */
238 		vmxnet->rx_iobuf[desc_idx] = iobuf;
239 
240 		/* Populate receive descriptor */
241 		rx_desc = &vmxnet->dma->rx_desc[desc_idx];
242 		rx_desc->address = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
243 		rx_desc->flags = ( generation | cpu_to_le32 ( VMXNET3_MTU ) );
244 
245 	}
246 
247 	/* Hand over any new descriptors to NIC */
248 	if ( vmxnet->count.rx_prod != orig_rx_prod ) {
249 		wmb();
250 		profile_start ( &vmxnet3_vm_refill_profiler );
251 		writel ( ( vmxnet->count.rx_prod % VMXNET3_NUM_RX_DESC ),
252 			 ( vmxnet->pt + VMXNET3_PT_RXPROD ) );
253 		profile_stop ( &vmxnet3_vm_refill_profiler );
254 		profile_exclude ( &vmxnet3_vm_refill_profiler );
255 	}
256 }
257 
258 /**
259  * Poll for received packets
260  *
261  * @v netdev		Network device
262  */
vmxnet3_poll_rx(struct net_device * netdev)263 static void vmxnet3_poll_rx ( struct net_device *netdev ) {
264 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
265 	struct vmxnet3_rx_comp *rx_comp;
266 	struct io_buffer *iobuf;
267 	unsigned int comp_idx;
268 	unsigned int desc_idx;
269 	unsigned int generation;
270 	size_t len;
271 
272 	while ( 1 ) {
273 
274 		/* Look for completed descriptors */
275 		comp_idx = ( vmxnet->count.rx_cons % VMXNET3_NUM_RX_COMP );
276 		generation = ( ( vmxnet->count.rx_cons & VMXNET3_NUM_RX_COMP ) ?
277 			       0 : cpu_to_le32 ( VMXNET3_RXCF_GEN ) );
278 		rx_comp = &vmxnet->dma->rx_comp[comp_idx];
279 		if ( generation != ( rx_comp->flags &
280 				     cpu_to_le32 ( VMXNET3_RXCF_GEN ) ) ) {
281 			break;
282 		}
283 
284 		/* Increment consumer counter */
285 		vmxnet->count.rx_cons++;
286 
287 		/* Locate corresponding receive descriptor */
288 		desc_idx = ( le32_to_cpu ( rx_comp->index ) %
289 			     VMXNET3_NUM_RX_DESC );
290 		iobuf = vmxnet->rx_iobuf[desc_idx];
291 		if ( ! iobuf ) {
292 			DBGC ( vmxnet, "VMXNET3 %p completed on empty receive "
293 			       "buffer %#x/%#x\n", vmxnet, comp_idx, desc_idx );
294 			netdev_rx_err ( netdev, NULL, -ENOTTY );
295 			continue;
296 		}
297 
298 		/* Remove I/O buffer from receive queue */
299 		vmxnet->rx_iobuf[desc_idx] = NULL;
300 		vmxnet->count.rx_fill--;
301 
302 		/* Deliver packet to network layer */
303 		len = ( le32_to_cpu ( rx_comp->len ) &
304 			( VMXNET3_MAX_PACKET_LEN - 1 ) );
305 		DBGC2 ( vmxnet, "VMXNET3 %p completed RX %#x/%#x (len %#zx)\n",
306 			vmxnet, comp_idx, desc_idx, len );
307 		iob_put ( iobuf, len );
308 		netdev_rx ( netdev, iobuf );
309 	}
310 }
311 
312 /**
313  * Flush any uncompleted receive buffers
314  *
315  * @v netdev		Network device
316  */
vmxnet3_flush_rx(struct net_device * netdev)317 static void vmxnet3_flush_rx ( struct net_device *netdev ) {
318 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
319 	struct io_buffer *iobuf;
320 	unsigned int i;
321 
322 	for ( i = 0 ; i < VMXNET3_NUM_RX_DESC ; i++ ) {
323 		if ( ( iobuf = vmxnet->rx_iobuf[i] ) != NULL ) {
324 			netdev_rx_err ( netdev, iobuf, -ECANCELED );
325 			vmxnet->rx_iobuf[i] = NULL;
326 		}
327 	}
328 }
329 
330 /**
331  * Check link state
332  *
333  * @v netdev		Network device
334  */
vmxnet3_check_link(struct net_device * netdev)335 static void vmxnet3_check_link ( struct net_device *netdev ) {
336 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
337 	uint32_t state;
338 	int link_up;
339 	unsigned int link_speed;
340 
341 	/* Get link state */
342 	state = vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_LINK );
343 	link_up = ( state & 1 );
344 	link_speed = ( state >> 16 );
345 
346 	/* Report link state to network device */
347 	if ( link_up ) {
348 		DBGC ( vmxnet, "VMXNET3 %p link is up at %d Mbps\n",
349 		       vmxnet, link_speed );
350 		netdev_link_up ( netdev );
351 	} else {
352 		DBGC ( vmxnet, "VMXNET3 %p link is down\n", vmxnet );
353 		netdev_link_down ( netdev );
354 	}
355 }
356 
357 /**
358  * Poll for events
359  *
360  * @v netdev		Network device
361  */
vmxnet3_poll_events(struct net_device * netdev)362 static void vmxnet3_poll_events ( struct net_device *netdev ) {
363 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
364 	uint32_t events;
365 
366 	/* Do nothing unless there are events to process */
367 	if ( ! vmxnet->dma->shared.ecr )
368 		return;
369 	events = le32_to_cpu ( vmxnet->dma->shared.ecr );
370 
371 	/* Acknowledge these events */
372 	profile_start ( &vmxnet3_vm_event_profiler );
373 	writel ( events, ( vmxnet->vd + VMXNET3_VD_ECR ) );
374 	profile_stop ( &vmxnet3_vm_event_profiler );
375 	profile_exclude ( &vmxnet3_vm_event_profiler );
376 
377 	/* Check for link state change */
378 	if ( events & VMXNET3_ECR_LINK ) {
379 		vmxnet3_check_link ( netdev );
380 		events &= ~VMXNET3_ECR_LINK;
381 	}
382 
383 	/* Check for queue errors */
384 	if ( events & ( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR ) ) {
385 		vmxnet3_command ( vmxnet, VMXNET3_CMD_GET_QUEUE_STATUS );
386 		DBGC ( vmxnet, "VMXNET3 %p queue error status (TX %08x, RX "
387 		       "%08x)\n", vmxnet,
388 		       le32_to_cpu ( vmxnet->dma->queues.tx.status.error ),
389 		       le32_to_cpu ( vmxnet->dma->queues.rx.status.error ) );
390 		/* Report errors to allow for visibility via "ifstat" */
391 		if ( events & VMXNET3_ECR_TQERR )
392 			netdev_tx_err ( netdev, NULL, -EPIPE );
393 		if ( events & VMXNET3_ECR_RQERR )
394 			netdev_rx_err ( netdev, NULL, -EPIPE );
395 		events &= ~( VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR );
396 	}
397 
398 	/* Check for unknown events */
399 	if ( events ) {
400 		DBGC ( vmxnet, "VMXNET3 %p unknown events %08x\n",
401 		       vmxnet, events );
402 		/* Report error to allow for visibility via "ifstat" */
403 		netdev_rx_err ( netdev, NULL, -ENODEV );
404 	}
405 }
406 
/**
 * Poll network device
 *
 * @v netdev		Network device
 *
 * Processes device events first, then reaps transmit and receive
 * completions, and finally tops up the receive ring.
 */
static void vmxnet3_poll ( struct net_device *netdev ) {

	vmxnet3_poll_events ( netdev );
	vmxnet3_poll_tx ( netdev );
	vmxnet3_poll_rx ( netdev );
	vmxnet3_refill_rx ( netdev );
}
419 
/**
 * Enable/disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 *
 * Interrupt support is not implemented for this driver; the request is
 * merely logged.
 */
static void vmxnet3_irq ( struct net_device *netdev, int enable ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	const char *action = ( enable ? "enable" : "disable" );

	DBGC ( vmxnet, "VMXNET3 %p %s IRQ not implemented\n",
	       vmxnet, action );
}
432 
433 /**
434  * Set MAC address
435  *
436  * @v vmxnet		vmxnet3 NIC
437  * @v ll_addr		Link-layer address to set
438  */
vmxnet3_set_ll_addr(struct vmxnet3_nic * vmxnet,const void * ll_addr)439 static void vmxnet3_set_ll_addr ( struct vmxnet3_nic *vmxnet,
440 				  const void *ll_addr ) {
441 	struct {
442 		uint32_t low;
443 		uint32_t high;
444 	} __attribute__ (( packed )) mac;
445 
446 	memset ( &mac, 0, sizeof ( mac ) );
447 	memcpy ( &mac, ll_addr, ETH_ALEN );
448 	writel ( cpu_to_le32 ( mac.low ), ( vmxnet->vd + VMXNET3_VD_MACL ) );
449 	writel ( cpu_to_le32 ( mac.high ), ( vmxnet->vd + VMXNET3_VD_MACH ) );
450 }
451 
/**
 * Open NIC
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 *
 * Allocates the DMA area holding the shared state, queue descriptors
 * and rings, describes them to the device, and activates the device.
 */
static int vmxnet3_open ( struct net_device *netdev ) {
	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
	struct vmxnet3_shared *shared;
	struct vmxnet3_queues *queues;
	uint64_t shared_bus;
	uint64_t queues_bus;
	uint32_t status;
	int rc;

	/* Allocate DMA areas */
	vmxnet->dma = malloc_phys ( sizeof ( *vmxnet->dma ),
				    VMXNET3_DMA_ALIGN );
	if ( ! vmxnet->dma ) {
		DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
		       vmxnet );
		rc = -ENOMEM;
		goto err_alloc_dma;
	}
	memset ( vmxnet->dma, 0, sizeof ( *vmxnet->dma ) );

	/* Populate queue descriptors (single TX and single RX queue) */
	queues = &vmxnet->dma->queues;
	queues->tx.cfg.desc_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_desc ) );
	queues->tx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->tx_comp ) );
	queues->tx.cfg.num_desc = cpu_to_le32 ( VMXNET3_NUM_TX_DESC );
	queues->tx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_TX_COMP );
	queues->rx.cfg.desc_address[0] =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_desc ) );
	queues->rx.cfg.comp_address =
		cpu_to_le64 ( virt_to_bus ( &vmxnet->dma->rx_comp ) );
	queues->rx.cfg.num_desc[0] = cpu_to_le32 ( VMXNET3_NUM_RX_DESC );
	queues->rx.cfg.num_comp = cpu_to_le32 ( VMXNET3_NUM_RX_COMP );
	queues_bus = virt_to_bus ( queues );
	DBGC ( vmxnet, "VMXNET3 %p queue descriptors at %08llx+%zx\n",
	       vmxnet, queues_bus, sizeof ( *queues ) );

	/* Populate shared area (magic, versions, queue pointers, MTU,
	 * interrupt control, and receive filter mode)
	 */
	shared = &vmxnet->dma->shared;
	shared->magic = cpu_to_le32 ( VMXNET3_SHARED_MAGIC );
	shared->misc.version = cpu_to_le32 ( VMXNET3_VERSION_MAGIC );
	shared->misc.version_support = cpu_to_le32 ( VMXNET3_VERSION_SELECT );
	shared->misc.upt_version_support =
		cpu_to_le32 ( VMXNET3_UPT_VERSION_SELECT );
	shared->misc.queue_desc_address = cpu_to_le64 ( queues_bus );
	shared->misc.queue_desc_len = cpu_to_le32 ( sizeof ( *queues ) );
	shared->misc.mtu = cpu_to_le32 ( VMXNET3_MTU );
	shared->misc.num_tx_queues = 1;
	shared->misc.num_rx_queues = 1;
	shared->interrupt.num_intrs = 1;
	shared->interrupt.control = cpu_to_le32 ( VMXNET3_IC_DISABLE_ALL );
	shared->rx_filter.mode = cpu_to_le32 ( VMXNET3_RXM_UCAST |
					       VMXNET3_RXM_BCAST |
					       VMXNET3_RXM_ALL_MULTI );
	shared_bus = virt_to_bus ( shared );
	DBGC ( vmxnet, "VMXNET3 %p shared area at %08llx+%zx\n",
	       vmxnet, shared_bus, sizeof ( *shared ) );

	/* Zero counters (producer/consumer/fill state starts fresh) */
	memset ( &vmxnet->count, 0, sizeof ( vmxnet->count ) );

	/* Set MAC address */
	vmxnet3_set_ll_addr ( vmxnet, &netdev->ll_addr );

	/* Pass shared area to device (low then high 32 bits of address) */
	writel ( ( shared_bus >> 0 ), ( vmxnet->vd + VMXNET3_VD_DSAL ) );
	writel ( ( shared_bus >> 32 ), ( vmxnet->vd + VMXNET3_VD_DSAH ) );

	/* Activate device; a non-zero status indicates failure */
	if ( ( status = vmxnet3_command ( vmxnet,
					  VMXNET3_CMD_ACTIVATE_DEV ) ) != 0 ) {
		DBGC ( vmxnet, "VMXNET3 %p could not activate (status %#x)\n",
		       vmxnet, status );
		rc = -EIO;
		goto err_activate;
	}

	/* Fill receive ring */
	vmxnet3_refill_rx ( netdev );

	return 0;

	/* NOTE(review): the following two commands are unreachable on the
	 * success path; they appear to document the quiesce/reset step of
	 * the err_activate teardown sequence — confirm intent before
	 * restructuring.
	 */
	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 err_activate:
	vmxnet3_flush_tx ( netdev );
	vmxnet3_flush_rx ( netdev );
	free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 err_alloc_dma:
	return rc;
}
550 
551 /**
552  * Close NIC
553  *
554  * @v netdev		Network device
555  */
vmxnet3_close(struct net_device * netdev)556 static void vmxnet3_close ( struct net_device *netdev ) {
557 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
558 
559 	vmxnet3_command ( vmxnet, VMXNET3_CMD_QUIESCE_DEV );
560 	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
561 	vmxnet3_flush_tx ( netdev );
562 	vmxnet3_flush_rx ( netdev );
563 	free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
564 }
565 
/** vmxnet3 net device operations
 *
 * Note that interrupt support is a no-op (see vmxnet3_irq); the driver
 * operates purely by polling.
 */
static struct net_device_operations vmxnet3_operations = {
	.open		= vmxnet3_open,
	.close		= vmxnet3_close,
	.transmit	= vmxnet3_transmit,
	.poll		= vmxnet3_poll,
	.irq		= vmxnet3_irq,
};
574 
575 /**
576  * Check version
577  *
578  * @v vmxnet		vmxnet3 NIC
579  * @ret rc		Return status code
580  */
vmxnet3_check_version(struct vmxnet3_nic * vmxnet)581 static int vmxnet3_check_version ( struct vmxnet3_nic *vmxnet ) {
582 	uint32_t version;
583 	uint32_t upt_version;
584 
585 	/* Read version */
586 	version = readl ( vmxnet->vd + VMXNET3_VD_VRRS );
587 	upt_version = readl ( vmxnet->vd + VMXNET3_VD_UVRS );
588 	DBGC ( vmxnet, "VMXNET3 %p is version %d (UPT version %d)\n",
589 	       vmxnet, version, upt_version );
590 
591 	/* Inform NIC of driver version */
592 	writel ( VMXNET3_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_VRRS ) );
593 	writel ( VMXNET3_UPT_VERSION_SELECT, ( vmxnet->vd + VMXNET3_VD_UVRS ) );
594 
595 	return 0;
596 }
597 
598 /**
599  * Get permanent MAC address
600  *
601  * @v vmxnet		vmxnet3 NIC
602  * @v hw_addr		Hardware address to fill in
603  */
vmxnet3_get_hw_addr(struct vmxnet3_nic * vmxnet,void * hw_addr)604 static void vmxnet3_get_hw_addr ( struct vmxnet3_nic *vmxnet, void *hw_addr ) {
605 	struct {
606 		uint32_t low;
607 		uint32_t high;
608 	} __attribute__ (( packed )) mac;
609 
610 	mac.low = le32_to_cpu ( vmxnet3_command ( vmxnet,
611 					       VMXNET3_CMD_GET_PERM_MAC_LO ) );
612 	mac.high = le32_to_cpu ( vmxnet3_command ( vmxnet,
613 					       VMXNET3_CMD_GET_PERM_MAC_HI ) );
614 	memcpy ( hw_addr, &mac, ETH_ALEN );
615 }
616 
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 *
 * Allocates and registers the network device, maps both PCI BARs,
 * performs the version handshake, resets the device, and reads the
 * initial MAC address.
 */
static int vmxnet3_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct vmxnet3_nic *vmxnet;
	int rc;

	/* Allocate network device */
	netdev = alloc_etherdev ( sizeof ( *vmxnet ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc_etherdev;
	}
	netdev_init ( netdev, &vmxnet3_operations );
	vmxnet = netdev_priv ( netdev );
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( vmxnet, 0, sizeof ( *vmxnet ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map PCI BARs: "PT" (doorbells) and "VD" (device registers) */
	vmxnet->pt = pci_ioremap ( pci, pci_bar_start ( pci, VMXNET3_PT_BAR ),
				   VMXNET3_PT_LEN );
	if ( ! vmxnet->pt ) {
		rc = -ENODEV;
		goto err_ioremap_pt;
	}
	vmxnet->vd = pci_ioremap ( pci, pci_bar_start ( pci, VMXNET3_VD_BAR ),
				   VMXNET3_VD_LEN );
	if ( ! vmxnet->vd ) {
		rc = -ENODEV;
		goto err_ioremap_vd;
	}

	/* Version check */
	if ( ( rc = vmxnet3_check_version ( vmxnet ) ) != 0 )
		goto err_check_version;

	/* Reset device */
	if ( ( rc = vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV ) ) != 0 )
		goto err_reset;

	/* Read initial MAC address */
	vmxnet3_get_hw_addr ( vmxnet, &netdev->hw_addr );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
		DBGC ( vmxnet, "VMXNET3 %p could not register net device: "
		       "%s\n", vmxnet, strerror ( rc ) );
		goto err_register_netdev;
	}

	/* Get initial link state */
	vmxnet3_check_link ( netdev );

	return 0;

	/* NOTE(review): unreachable on the success path; appears to
	 * document the first step of the teardown sequence — confirm
	 * intent before restructuring.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_reset:
 err_check_version:
	iounmap ( vmxnet->vd );
 err_ioremap_vd:
	iounmap ( vmxnet->pt );
 err_ioremap_pt:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc_etherdev:
	return rc;
}
694 
695 /**
696  * Remove PCI device
697  *
698  * @v pci		PCI device
699  */
vmxnet3_remove(struct pci_device * pci)700 static void vmxnet3_remove ( struct pci_device *pci ) {
701 	struct net_device *netdev = pci_get_drvdata ( pci );
702 	struct vmxnet3_nic *vmxnet = netdev_priv ( netdev );
703 
704 	unregister_netdev ( netdev );
705 	iounmap ( vmxnet->vd );
706 	iounmap ( vmxnet->pt );
707 	netdev_nullify ( netdev );
708 	netdev_put ( netdev );
709 }
710 
/** vmxnet3 PCI IDs (VMware vendor ID 0x15ad, vmxnet3 device ID 0x07b0) */
static struct pci_device_id vmxnet3_nics[] = {
	PCI_ROM ( 0x15ad, 0x07b0, "vmxnet3", "vmxnet3 virtual NIC", 0 ),
};
715 
/** vmxnet3 PCI driver */
struct pci_driver vmxnet3_driver __pci_driver = {
	.ids = vmxnet3_nics,
	/* Element count of the ID table */
	.id_count = ( sizeof ( vmxnet3_nics ) / sizeof ( vmxnet3_nics[0] ) ),
	.probe = vmxnet3_probe,
	.remove = vmxnet3_remove,
};
723