/*
 * vxge-config.c: iPXE driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference.  Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <stdlib.h>
#include <stdio.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/ethernet.h>
#include <byteswap.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->rxmac_vcfg0);
	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	writeq(val64, &vp_reg->rxmac_vcfg0);
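	/* Read back to flush the posted write; the value read below is
	 * intentionally unused */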
	val64 = readq(&vp_reg->rxmac_vcfg0);
	return;
}

enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
		u64 vp_id,
		u32 action,
		u32 offset,
		u64 data0,
		u64 data1)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	u64 val64;
	u32 fw_memo = VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO;

	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];

	writeq(data0, &vp_reg->rts_access_steer_data0);
	writeq(data1, &vp_reg->rts_access_steer_data1);

	wmb();

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;

	writeq(val64, &vp_reg->rts_access_steer_ctrl);

	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->rts_access_steer_ctrl,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
			WAIT_FACTOR *
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		return VXGE_HW_FAIL;

	val64 = readq(&vp_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
		status = VXGE_HW_OK;
	else
		status = VXGE_HW_FAIL;

	return status;
}
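
/*
 * Note on the handshake above: the firmware "steer" interface takes its
 * arguments in rts_access_steer_data0/1, is kicked by writing the control
 * word with the STROBE bit set, and signals completion by clearing STROBE.
 * Success is then indicated by the RMACJ_STATUS bit. Illustrative call
 * (mirrors the real caller, vxge_hw_get_func_mode(), below):
 *
 *	status = vxge_hw_set_fw_api(hldev, vp_id,
 *			VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
 */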

/* Get function mode */
enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	u64 val64;
	int vp_id;

	/* get the first vpath number assigned to this function */
	vp_id = hldev->first_vp_id;

	vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];

	status = vxge_hw_set_fw_api(hldev, vp_id,
				VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);

	if (status == VXGE_HW_OK) {
		val64 = readq(&vp_reg->rts_access_steer_data0);
		*func_mode = VXGE_HW_GET_FUNC_MODE_VAL(val64);
	}

	return status;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;
	struct pci_device *pdev = hldev->pdev;

	vxge_trace();

	/* Set the PErr Response and SERR bits in the PCI command
	 * register (PCI_COMMAND_PARITY | PCI_COMMAND_SERR = 0x140). */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	return;
}

/*
 * __vxge_hw_device_register_poll
 * Polls the given register until the masked bits are cleared, or until
 * the specified timeout expires.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
	u64 val64;
	u32 i = 0;
	enum vxge_hw_status ret = VXGE_HW_FAIL;

	udelay(10);

	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(100);
	} while (++i <= 9);

	i = 0;
	do {
		val64 = readq(reg);
		if (!(val64 & mask))
			return VXGE_HW_OK;
		udelay(1000);
	} while (++i <= max_millis);

	return ret;
}
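
/*
 * Polling schedule used above: an initial 10us settling delay, then ten
 * polls at 100us intervals (~1ms), then millisecond-interval polls up to
 * max_millis. Worst-case wait is therefore roughly (max_millis + 1) ms.
 */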

/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * is in progress
 * This routine checks whether the vpath reset-in-progress register has
 * cleared to zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
	enum vxge_hw_status status;

	vxge_trace();

	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	return status;
}

/*
 * __vxge_hw_device_get_legacy_reg
 * This routine gets the legacy register section's memory mapped address
 * and sets the swapper.
 */
static struct vxge_hw_legacy_reg __iomem *
__vxge_hw_device_get_legacy_reg(struct pci_device *pdev, void __iomem *bar0)
{
	enum vxge_hw_status status;
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	/*
	 * If the length of Bar0 is 16MB, then assume that we are configured
	 * in MF8P_VP2 mode and then add 8MB to the legacy_reg offsets
	 */
	if (pci_bar_size(pdev, PCI_BASE_ADDRESS_0) == 0x1000000)
		legacy_reg = (struct vxge_hw_legacy_reg __iomem *)
				(bar0 + 0x800000);
	else
		legacy_reg = (struct vxge_hw_legacy_reg __iomem *)bar0;

	status = __vxge_hw_legacy_swapper_set(legacy_reg);
	if (status != VXGE_HW_OK)
		return NULL;

	return legacy_reg;
}

/*
 * __vxge_hw_device_toc_get
 * This routine reads the TOC pointer from the legacy section and returns
 * the memory mapped address of the TOC
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0,
	struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc = NULL;

	val64 = readq(&legacy_reg->toc_first_pointer);
	toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);

	return toc;
}
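
/*
 * toc_first_pointer holds the byte offset of the TOC within BAR0; the TOC
 * in turn holds the per-section byte offsets (common, mrpcim, srpcim,
 * vpmgmt, vpath and kdfc) that __vxge_hw_device_reg_addr_get() below uses
 * to populate the device object.
 */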

/*
 * __vxge_hw_device_reg_addr_get
 * This routine obtains the legacy and TOC register mappings and initializes
 * the register location pointers in the device object. It then waits until
 * the adapter reports that no vpath reset is in progress, i.e. that the
 * registers have finished initializing.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	hldev->legacy_reg = __vxge_hw_device_get_legacy_reg(hldev->pdev,
					hldev->bar0);
	if (hldev->legacy_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0,
					hldev->legacy_reg);
	if (hldev->toc_reg == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
		(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

	switch (host_type) {
	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
		if (func_id == 0) {
			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		}
		break;
	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
		break;
	case VXGE_HW_SR_VH_FUNCTION0:
	case VXGE_HW_VH_NORMAL_FUNCTION:
		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
		break;
	}

	return access_rights;
}
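
/*
 * For example, per the switch above: function 0 of a non-virtualized
 * adapter (VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION) is granted
 * VPATH | MRPCIM | SRPCIM, whereas a virtual function such as
 * VXGE_HW_SR_VH_VIRTUAL_FUNCTION keeps only the default VPATH right.
 */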

/*
 * __vxge_hw_device_host_info_get
 * This routine reads the host type assignments and records the function
 * id, access rights and first vpath id in the device object
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;

	val64 = readq(&hldev->common_reg->host_type_assignments);

	hldev->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
			continue;

		hldev->func_id =
			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);

		hldev->access_rights = __vxge_hw_device_access_rights_get(
			hldev->host_type, hldev->func_id);

		hldev->first_vp_id = i;
		break;
	}

	return;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has a bit set for each vpath allocated to
 * the driver, the FW version information and the first MAC address for
 * each vpath
 */
enum vxge_hw_status
vxge_hw_device_hw_info_get(struct pci_device *pdev, void __iomem *bar0,
				struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	struct vxge_hw_legacy_reg __iomem *legacy_reg;
	enum vxge_hw_status status;

	vxge_trace();

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	legacy_reg = __vxge_hw_device_get_legacy_reg(pdev, bar0);
	if (legacy_reg == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	toc = __vxge_hw_device_toc_get(bar0, legacy_reg);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_fw_ver_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_addr_get(vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @devh. The driver cooperates with the OS to find the
 * new Titan device and locate its PCI and memory spaces.
 *
 * This routine allocates sizeof(struct __vxge_hw_device) bytes for the HW
 * object through which the subsequent Titan hardware initialization is
 * performed.
 */
enum vxge_hw_status
vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	void *bar0,
	struct pci_device *pdev,
	u8 titan1)
{
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	hldev = (struct __vxge_hw_device *)
			zalloc(sizeof(struct __vxge_hw_device));
	if (hldev == NULL) {
		vxge_debug(VXGE_ERR, "hldev allocation failed\n");
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}

	hldev->magic = VXGE_HW_DEVICE_MAGIC;

	hldev->bar0 = bar0;
	hldev->pdev = pdev;
	hldev->titan1 = titan1;

	__vxge_hw_device_pci_e_init(hldev);

	status = __vxge_hw_device_reg_addr_get(hldev);
	if (status != VXGE_HW_OK) {
		vxge_debug(VXGE_ERR, "%s:%d __vxge_hw_device_reg_addr_get "
			"failed\n", __func__, __LINE__);
		vxge_hw_device_terminate(hldev);
		goto exit;
	}

	__vxge_hw_device_host_info_get(hldev);

	*devh = hldev;
exit:
	return status;
}

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_trace();

	assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	hldev->magic = VXGE_HW_DEVICE_DEAD;
	free(hldev);
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work
 * array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	struct __vxge_hw_device *hldev;
	struct vxge_hw_ring_rxd_1 *rxd;
	enum vxge_hw_status status = VXGE_HW_OK;
	u8 offset = 0;
	struct __vxge_hw_ring_block *block;
	u8 i, iob_off;

	vxge_trace();

	hldev = ring->vpathh->hldev;
	/*
	 * We allocate all the DMA buffers first and then share these
	 * buffers among all the Rx descriptors in the block.
	 */
	for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
		ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
		if (!ring->iobuf[i]) {
			while (i) {
				free_iob(ring->iobuf[--i]);
				ring->iobuf[i] = NULL;
			}
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto iobuf_err;
		}
	}

	for (offset = 0; offset < VXGE_HW_MAX_RXDS_PER_BLOCK_1; offset++) {

		rxd = &ring->rxdl->rxd[offset];
		if (offset == (VXGE_HW_MAX_RXDS_PER_BLOCK_1 - 1))
			iob_off = VXGE_HW_RING_BUF_PER_BLOCK;
		else
			iob_off = offset % ring->buf_per_block;

		rxd->control_0 = rxd->control_1 = 0;
		vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
				VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	/* Link the block to itself, as we use only one Rx block */
	block = ring->rxdl;
	block->reserved_2_pNext_RxD_block = (unsigned long) block;
	block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);

	ring->rxd_offset = 0;
iobuf_err:
	return status;
}
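
/*
 * Buffer sharing scheme used above: descriptors share the iobuf pool
 * modulo ring->buf_per_block, except the last RxD of the block, which is
 * pinned to the spare buffer at index VXGE_HW_RING_BUF_PER_BLOCK.
 */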

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a ring and initializes it.
 *
 */
enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
		      struct __vxge_hw_ring *ring)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vxge_trace();

	hldev = vpath->hldev;
	vp_id = vpath->vp_id;

	ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
			sizeof(struct __vxge_hw_ring_block));
	if (!ring->rxdl) {
		vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
				__func__, __LINE__);
		status = VXGE_HW_ERR_OUT_OF_MEMORY;
		goto exit;
	}
	ring->rxd_offset = 0;
	ring->vpathh = vpath;
	ring->buf_per_block = VXGE_HW_RING_BUF_PER_BLOCK;
	ring->rx_poll_weight = VXGE_HW_RING_RX_POLL_WEIGHT;
	ring->vp_id = vp_id;
	ring->vp_reg = vpath->vp_reg;
	ring->common_reg = hldev->common_reg;

	ring->rxd_qword_limit = VXGE_HW_RING_RXD_QWORD_LIMIT;

	status = vxge_hw_ring_replenish(ring);
	if (status != VXGE_HW_OK) {
		__vxge_hw_ring_delete(ring);
		goto exit;
	}
exit:
	return status;
}

/*
 * __vxge_hw_ring_delete - Removes the ring
 * This function frees up the memory pool and removes the ring
 */
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
{
	u8 i;

	vxge_trace();

	for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
		free_iob(ring->iobuf[i]);
		ring->iobuf[i] = NULL;
	}

	if (ring->rxdl) {
		free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
		ring->rxdl = NULL;
	}
	ring->rxd_offset = 0;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy
 * section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	val64 = readq(&legacy_reg->toc_swapper_fb);

	wmb();

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	val64 = readq(&legacy_reg->toc_swapper_fb);
	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
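
/*
 * toc_swapper_fb holds a fixed signature. Reading it raw and comparing
 * against the byte-swapped and/or bit-flipped encodings of
 * VXGE_HW_SWAPPER_INITIAL_VALUE reveals how host accesses are being
 * mangled, and the matching PIF read/write swap controls are then
 * enabled. The final read-back must yield the signature unmodified.
 */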

/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	vxge_trace();

#if (__BYTE_ORDER != __BIG_ENDIAN)
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
 * Set the swapper bits appropriately for the kdfc.
 */
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
	struct vxge_hw_legacy_reg __iomem *legacy_reg,
	struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u64 val64;

	vxge_trace();

	val64 = readq(&legacy_reg->pifm_wr_swap_en);

	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
		val64 = readq(&vpath_reg->kdfcctl_cfg0);
		wmb();

		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;

		writeq(val64, &vpath_reg->kdfcctl_cfg0);
		wmb();
	}

	return VXGE_HW_OK;
}

/*
 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
 */
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	int i = 0, j = 0;

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		vpmgmt_reg = hldev->vpmgmt_reg[i];
		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
				return VXGE_HW_FAIL;
		}
	}
	return status;
}

/*
 * __vxge_hw_fifo_create - Create a FIFO
 * This function creates a FIFO and initializes it.
 */
enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
			struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	fifo->vpathh = vpath;
	fifo->depth = VXGE_HW_FIFO_TXD_DEPTH;
	fifo->hw_offset = fifo->sw_offset = 0;
	fifo->nofl_db = vpath->nofl_db;
	fifo->vp_id = vpath->vp_id;
	fifo->vp_reg = vpath->vp_reg;
	fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
				+ VXGE_HW_VPATH_INTR_TX;

	fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
				* fifo->depth, fifo->depth);
	if (!fifo->txdl) {
		vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
				__func__, __LINE__);
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	}
	memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
	return status;
}

/*
 * __vxge_hw_fifo_delete - Removes the FIFO
 * This function frees up the memory pool and removes the FIFO
 */
enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
{
	vxge_trace();

	if (fifo->txdl)
		free_dma(fifo->txdl,
			sizeof(struct vxge_hw_fifo_txd) * fifo->depth);

	fifo->txdl = NULL;
	fifo->hw_offset = fifo->sw_offset = 0;

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                          in pci config space.
 * Read from the vpath pci config space.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}
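
/*
 * This register-based access path lets the driver read its own function's
 * PCI config space without host config cycles; __vxge_hw_vpath_initialize()
 * below uses it to fetch the PCIe Device Control register (config offset
 * 0x78) for the Max Read Request Size field.
 */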

/*
 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
 * Returns the function number of the vpath.
 */
u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
	u64 val64;

	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);

	return
	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
}

/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static inline void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
				u64 dta_struct_sel)
{
	writeq(0, &vpath_reg->rts_access_steer_ctrl);
	wmb();
	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
	writeq(0, &vpath_reg->rts_access_steer_data1);
	wmb();
	return;
}

/*
 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
 * part number and product description.
 */
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info)
{
	u32 i, j;
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;
	u8 *serial_number = hw_info->serial_number;
	u8 *part_number = hw_info->part_number;
	u8 *product_desc = hw_info->product_desc;

	__vxge_hw_read_rts_ds(vpath_reg,
		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
		data1 = readq(&vpath_reg->rts_access_steer_data0);
		((u64 *)serial_number)[0] = be64_to_cpu(data1);

		data2 = readq(&vpath_reg->rts_access_steer_data1);
		((u64 *)serial_number)[1] = be64_to_cpu(data2);
		status = VXGE_HW_OK;
	} else
		*serial_number = 0;

	__vxge_hw_read_rts_ds(vpath_reg,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		data1 = readq(&vpath_reg->rts_access_steer_data0);
		((u64 *)part_number)[0] = be64_to_cpu(data1);

		data2 = readq(&vpath_reg->rts_access_steer_data1);
		((u64 *)part_number)[1] = be64_to_cpu(data2);

		status = VXGE_HW_OK;

	} else
		*part_number = 0;

	j = 0;

	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {

		__vxge_hw_read_rts_ds(vpath_reg, i);

		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

		status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

		if (status != VXGE_HW_OK)
			return status;

		val64 = readq(&vpath_reg->rts_access_steer_ctrl);

		if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

			data1 = readq(&vpath_reg->rts_access_steer_data0);
			((u64 *)product_desc)[j++] = be64_to_cpu(data1);

			data2 = readq(&vpath_reg->rts_access_steer_data1);
			((u64 *)product_desc)[j++] = be64_to_cpu(data2);

			status = VXGE_HW_OK;
		} else
			*product_desc = 0;
	}

	return status;
}
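
/*
 * Each successful READ_MEMO_ENTRY above returns 16 bytes of string data:
 * 8 bytes in rts_access_steer_data0 and 8 in data1, stored big-endian
 * (hence the be64_to_cpu() conversions into the destination buffers).
 */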

/*
 * __vxge_hw_vpath_fw_ver_get - Get the fw version
 * Returns FW Version
 */
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info)
{
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	enum vxge_hw_status status = VXGE_HW_OK;

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		data1 = readq(&vpath_reg->rts_access_steer_data0);
		data2 = readq(&vpath_reg->rts_access_steer_data1);

		fw_date->day =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
						data1);
		fw_date->month =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
						data1);
		fw_date->year =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
						data1);

		snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
			fw_date->month, fw_date->day, fw_date->year);

		fw_version->major =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
		fw_version->minor =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
		fw_version->build =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);

		snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		    fw_version->major, fw_version->minor, fw_version->build);

		flash_date->day =
		  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
		flash_date->month =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
		flash_date->year =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);

		snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
			flash_date->month, flash_date->day, flash_date->year);

		flash_version->major =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
		flash_version->minor =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
		flash_version->build =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);

		snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
			flash_version->major, flash_version->minor,
			flash_version->build);

		status = VXGE_HW_OK;

	} else
		status = VXGE_HW_FAIL;
exit:
	return status;
}

/*
 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 *               from MAC address table.
 */
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
	struct vxge_hw_vpath_reg *vpath_reg,
	u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY;
	enum vxge_hw_status status = VXGE_HW_OK;

	while (1) {
		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

		status = __vxge_hw_pio_mem_write64(val64,
					&vpath_reg->rts_access_steer_ctrl,
					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
					VXGE_HW_DEF_DEVICE_POLL_MILLIS);

		if (status != VXGE_HW_OK)
			break;

		val64 = readq(&vpath_reg->rts_access_steer_ctrl);

		if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

			data1 = readq(&vpath_reg->rts_access_steer_data0);
			data2 = readq(&vpath_reg->rts_access_steer_data1);

			data1 =
			 VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
			data2 =
			 VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
								data2);

			for (i = ETH_ALEN; i > 0; i--) {
				macaddr[i-1] = (u8)(data1 & 0xFF);
				data1 >>= 8;

				macaddr_mask[i-1] = (u8)(data2 & 0xFF);
				data2 >>= 8;
			}
			if (is_valid_ether_addr(macaddr)) {
				status = VXGE_HW_OK;
				break;
			}
			action =
			  VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
		} else {
			/* Bail out instead of retrying forever on a
			 * failed read */
			status = VXGE_HW_FAIL;
			break;
		}
	}

	return status;
}
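
/*
 * Byte-order illustration for the extraction loop above (hypothetical
 * value): if GET_DA_MAC_ADDR(data1) yields 0x0000001122334455, the loop
 * stores macaddr[] = 00:11:22:33:44:55, i.e. the least significant byte
 * becomes the last octet.
 */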

/*
 * __vxge_hw_vpath_mgmt_read
 * This routine reads the vpath_mgmt registers
 */
static enum vxge_hw_status
__vxge_hw_vpath_mgmt_read(
	struct __vxge_hw_virtualpath *vpath)
{
	u32 i, mtu = 0, max_pyld = 0;
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

		val64 = readq(&vpath->vpmgmt_reg->
				rxmac_cfg0_port_vpmgmt_clone[i]);
		max_pyld =
			(u32)
			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
			(val64);
		if (mtu < max_pyld)
			mtu = max_pyld;
	}

	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;

	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);

	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
	else
		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);

	return status;
}

/*
 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
 * This routine checks the vpath_rst_in_prog register to see if the
 * adapter has completed the reset process for this vpath
 */
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;

	vxge_trace();

	status = __vxge_hw_device_register_poll(
			&vpath->hldev->common_reg->vpath_rst_in_prog,
			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
				1 << (16 - vpath->vp_id)),
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	return status;
}

/*
 * __vxge_hw_vpath_reset
 * This routine resets the vpath on the device
 */
enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->cmn_rsthdlr_cfg0);

	return status;
}

/*
 * __vxge_hw_vpath_prc_configure
 * This routine configures the prc registers of the virtual path using
 * the config passed
 */
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
	val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
	val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
			(u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
			&vp_reg->prc_cfg5);

	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
	val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
	return;
}
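
/*
 * Summary of the PRC setup above: the RTI traffic interrupt (TINT) is
 * disabled, doorbell mode is enabled with CRXDT/SPAT thresholds of 3 and
 * 0xf, the ring base is the bus address of the single RxD block (stored
 * right-shifted by 3, i.e. in 8-byte units), and the ring runs in
 * one-buffer mode with RTH disabled.
 */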

/*
 * __vxge_hw_vpath_kdfc_configure
 * This routine configures the kdfc registers of the virtual path using
 * the config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64+1)/2;

	vpath->max_nofl_db = vpath->max_kdfc_db;

	val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db*2)-1);

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
		&vp_reg->kdfc_fifo_trpl_ctrl);

	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#if (__BYTE_ORDER != __BIG_ENDIAN)
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
	wmb();
	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}

/*
 * __vxge_hw_vpath_mac_configure
 * This routine configures the mac of the virtual path using the config
 * passed
 */
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
			vpath->vsport_number), &vp_reg->xmac_vsport_choice);

	val64 = readq(&vp_reg->rxmac_vcfg1);

	val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
		VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);

	writeq(val64, &vp_reg->rxmac_vcfg1);
	return status;
}

/*
 * __vxge_hw_vpath_tim_configure
 * This routine configures the tim registers of the virtual path using
 * the config passed
 */
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	writeq((u64)0, &vp_reg->tim_dest_addr);
	writeq((u64)0, &vp_reg->tim_vpath_map);
	writeq((u64)0, &vp_reg->tim_bitmap);
	writeq((u64)0, &vp_reg->tim_remap);

	writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
		(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
		VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

	val64 = readq(&vp_reg->tim_pci_cfg);
	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
	writeq(val64, &vp_reg->tim_pci_cfg);

	/* TX configuration */
	val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
			(VXGE_TTI_BTIMER_VAL * 1000) / 272);
	val64 |= (VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC |
			VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI |
			VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN);
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(TTI_TX_URANGE_A) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(TTI_TX_URANGE_B) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(TTI_TX_URANGE_C);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

	val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(TTI_TX_UFC_A) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(TTI_TX_UFC_B) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(TTI_TX_UFC_C) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(TTI_TX_UFC_D);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

	val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL);
	val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
			(VXGE_TTI_LTIMER_VAL * 1000) / 272);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

	/* RX configuration */
	val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
			(VXGE_RTI_BTIMER_VAL * 1000) / 272);
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(RTI_RX_URANGE_A) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(RTI_RX_URANGE_B) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(RTI_RX_URANGE_C);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

	val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(RTI_RX_UFC_A) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(RTI_RX_UFC_B) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(RTI_RX_UFC_C) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(RTI_RX_UFC_D);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

	val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
			VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL);
	val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
			(VXGE_RTI_LTIMER_VAL * 1000) / 272);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	return status;
}
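
/*
 * The (val * 1000) / 272 conversions above suggest that the
 * VXGE_?TI_?TIMER_VAL constants are expressed in microseconds and that
 * the TIM counts in ~272ns hardware ticks; this is an inference from the
 * arithmetic, not from documentation quoted here.
 */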

/*
 * __vxge_hw_vpath_initialize
 * This routine is the final phase of init which initializes the
 * registers of the vpath using the configuration passed.
 */
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;
	status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;
	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	status = __vxge_hw_vpath_mac_configure(hldev);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);

	if (status == VXGE_HW_OK) {
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}

/*
 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
 * This routine is the initial phase of init which resets the vpath and
 * initializes the software support structures.
 */
enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK) {
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);

	if (status != VXGE_HW_OK) {
		__vxge_hw_vp_terminate(hldev, vpath);
		goto exit;
	}

	status = __vxge_hw_vpath_mgmt_read(vpath);
exit:
	return status;
}

/*
 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 * This routine closes all channels it opened and frees up memory
 */
void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
			struct __vxge_hw_virtualpath *vpath)
{
	vxge_trace();

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
		return;

	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vpath->vp_id);

	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
}

/*
 * vxge_hw_vpath_mtu_set - Set MTU.
 * Set new MTU value. Example, to use jumbo frames:
 * vxge_hw_vpath_mtu_set(vpath, 9600);
 */
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;

	/* Reject out-of-range values rather than programming them */
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
		return VXGE_HW_ERR_INVALID_MTU_SIZE;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);

	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);

	return status;
}
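
/*
 * Note that callers pass the L2 payload MTU; the MAC header allowance
 * (VXGE_HW_MAC_HEADER_MAX_SIZE) is added here before the value is
 * range-checked against max_mtu (from __vxge_hw_vpath_mgmt_read()) and
 * programmed as the maximum receive frame length.
 */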

/*
 * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload, GRO operations. This function returns
 * synchronously.
 */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath)
{
	struct __vxge_hw_virtualpath *vpathh;
	enum vxge_hw_status status;

	vxge_trace();

	vpathh = &hldev->virtual_path;

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	status = __vxge_hw_vp_initialize(hldev, hldev->first_vp_id, vpathh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	status = __vxge_hw_fifo_create(vpathh, &vpathh->fifoh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit2;

	status = __vxge_hw_ring_create(vpathh, &vpathh->ringh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit3;

	__vxge_hw_vpath_prc_configure(hldev);

	return VXGE_HW_OK;

vpath_open_exit3:
	__vxge_hw_fifo_delete(&vpathh->fifoh);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, vpathh);
vpath_open_exit1:
	return status;
}

/*
 * vxge_hw_vpath_rx_doorbell_init - Post the count of the refreshed region
 * of the RxD list
 * @vpath: vpath handle
 *
 * This function decides on the RxD replenish count depending on the
 * descriptor memory that has been allocated to this vpath.
 */
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath)
{
	u64 new_count, val64;

	vxge_trace();

	if (vpath->hldev->titan1) {
		new_count = readq(&vpath->vp_reg->rxdmem_size);
		new_count &= 0x1fff;
	} else
		new_count = VXGE_HW_RING_RXD_QWORDS_MODE_1 * 4;

	val64 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));

	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val64),
		&vpath->vp_reg->prc_rxd_doorbell);
}
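
/*
 * Titan1 reports the RxD memory actually provisioned for the vpath in the
 * low 13 bits of rxdmem_size; later hardware revisions use a fixed count
 * of 4 * VXGE_HW_RING_RXD_QWORDS_MODE_1 qwords instead.
 */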

/*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath
 * open
 * This function is used to close access to a virtual path opened earlier.
 */
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath)
{
	struct __vxge_hw_device *devh = NULL;
	u32 vp_id = vpath->vp_id;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	devh = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto vpath_close_exit;
	}

	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);

	__vxge_hw_ring_delete(&vpath->ringh);

	__vxge_hw_fifo_delete(&vpath->fifoh);

	__vxge_hw_vp_terminate(devh, vpath);

	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;

vpath_close_exit:
	return status;
}

/*
 * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath
 */
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;
	u32 vp_id;

	vxge_trace();

	vp_id = vpath->vp_id;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
exit:
	return status;
}

/*
 * vxge_hw_vpath_recover_from_reset - Poll for reset complete
 * and re-initialize.
 * This function polls for vpath reset completion and re-initializes
 * the vpath.
 */
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status;
	struct __vxge_hw_device *hldev;
	u32 vp_id;

	vxge_trace();

	vp_id = vpath->vp_id;
	hldev = vpath->hldev;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_initialize(hldev, vp_id);
	if (status != VXGE_HW_OK)
		goto exit;

	__vxge_hw_vpath_prc_configure(hldev);

exit:
	return status;
}

/*
 * vxge_hw_vpath_enable - Enable vpath.
 * This routine clears the vpath reset thereby enabling a vpath
 * to start forwarding frames and generating interrupts.
 */
void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath)
{
	struct __vxge_hw_device *hldev;
	u64 val64;

	vxge_trace();

	hldev = vpath->hldev;

	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
		1 << (16 - vpath->vp_id));

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
		&hldev->common_reg->cmn_rsthdlr_cfg1);
}