xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_hw.c (revision d6b92ffa)
/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_hw.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"

#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET	(OSAL_CPU_TO_LE32(-1))

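/* Each PTT entry controls one sliding window in the external PCI BAR.
 * Writing a GRC address (stored in dwords) into the entry's 'offset'
 * field maps that window onto the internal address range, letting the
 * driver reach register space that the BAR cannot cover directly.
 */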
struct ecore_ptt {
	osal_list_entry_t	list_entry;
	unsigned int		idx;
	struct pxp_ptt_entry	pxp;
	u8			hwfn_id;
};

struct ecore_ptt_pool {
	osal_list_t		free_list;
	osal_spinlock_t		lock; /* ptt synchronized access */
	struct ecore_ptt	ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
						   GFP_KERNEL,
						   sizeof(*p_pool));
	int i;

	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

		/* There are special PTT entries that are taken only by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);

	return ECORE_SUCCESS;
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
	}
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}

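/* Acquire a free PTT window from the pool. If none is available, retry
 * once per millisecond, giving up after ECORE_BAR_ACQUIRE_TIMEOUT
 * attempts; the caller must check for an OSAL_NULL return.
 */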
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);

		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
						      struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	DP_NOTICE(p_hwfn, true, "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}

void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */
	/* TODO - add some pretend sanity checks, to make sure pretend isn't
	 * set on this ptt
	 */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}

u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
	/* The HW is using DWORDS and we need to translate it to Bytes */
	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in the admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset),
	       OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}

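/* Translate an absolute GRC address into an address within the external
 * BAR, sliding the PTT window first if the requested address falls
 * outside the range that is currently mapped. After a slide the window
 * base equals hw_addr, so the returned offset within the window is 0.
 */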
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn, true,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
					 enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn, true,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return OSAL_NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	bool is_empty = true;
	u32 bar_addr;

	if (!p_hwfn->p_dev->chk_reg_fifo)
		goto out;

	/* ecore_rd() cannot be used here since it calls this function */
	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
	is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

out:
	return is_empty;
}

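/* ecore_wr()/ecore_rd() below are the slow-path register accessors: they
 * position a PTT window over hw_addr and then issue a direct BAR access.
 * When chk_reg_fifo is set, the GRC trace FIFO is sampled before and
 * after each access so that a newly raised reg_fifo error can be
 * attributed to the exact offending read or write.
 */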
void ecore_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr,
	      u32 val)
{
	bool prev_fifo_err;
	u32 bar_addr;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_wr(0x%x, 0x%x)\n",
		  hw_addr, val);
}

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	bool prev_fifo_err;
	u32 bar_addr, val;

	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
		  hw_addr);

	return val;
}

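/* Copy between host memory and the BAR in dword units, one PTT window at
 * a time: each iteration maps at most one window's worth of data
 * (PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE bytes) and streams it with
 * direct register accesses. VFs bypass the windowing and use hw_addr
 * directly.
 */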
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr,
			    osal_size_t n,
			    bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       void *dest, u32 hw_addr, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u32 hw_addr, void *src, osal_size_t n)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

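/* "Pretending" reprograms a PTT entry so that subsequent accesses through
 * it are issued on behalf of another function (fid) or port. Any pretend
 * cancels all previous pretend state on that PTT, so callers must undo a
 * port pretend explicitly via ecore_port_unpretend() when done.
 */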
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}

#if 0
/* Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
#define HW_LOCK_MAX_RETRIES 1000
enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u8 resource,
				   bool block)
{
	u32 cnt, lock_status, hw_lock_cntr_reg;
	enum _ecore_status_t ecore_status;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is not already taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (lock_status & resource) {
		DP_NOTICE(p_hwfn, true,
			  "Resource already locked: lock_status=0x%x resource=0x%x\n",
			  lock_status, resource);

		return ECORE_BUSY;
	}

	/* Register for the lock */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
		lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

		if (lock_status & resource)
			return ECORE_SUCCESS;

		if (!block) {
			ecore_status = ECORE_BUSY;
			break;
		}

		OSAL_MSLEEP(5);
	}

	if (cnt == HW_LOCK_MAX_RETRIES) {
		DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
			  resource);
		ecore_status = ECORE_TIMEOUT;
	}

	/* Clear the pending request */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);

	return ecore_status;
}

enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u8 resource)
{
	u32 lock_status, hw_lock_cntr_reg;

	/* Locate the proper lock register for this function.
	 * Note: this code assumes all the H/W lock registers are sequential
	 * in memory.
	 */
	hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
			   p_hwfn->rel_pf_id *
			   MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);

	/* Validate that the resource is currently taken */
	lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);

	if (!(lock_status & resource)) {
		DP_NOTICE(p_hwfn, true,
			  "resource 0x%x was not taken (lock status 0x%x)\n",
			  resource, lock_status);

		return ECORE_NODEV;
	}

	/* clear lock for resource */
	ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
	return ECORE_SUCCESS;
}
#endif /* HW locks logic */

/* DMAE */
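/* The DMAE ("DMA engine") offloads bulk copies between host memory and
 * GRC. The helpers below build the per-PF command, post it to the
 * engine's command memory, kick the matching GO register and poll a
 * completion word in host memory.
 */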
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
			      const u8 is_src_type_grc,
			      const u8 is_dst_type_grc,
			      struct ecore_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		  DMAE_CMD_SRC_PF_ID_SHIFT;

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		  DMAE_CMD_DST_PF_ID_SHIFT;

	/* DMAE_E4_TODO need to check which value to specify here. */
	/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT;*/

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

	/* Swapping mode 3 - big endian. There should be a define for this
	 * ifdefed in the HSI somewhere; since there currently isn't one,
	 * the mode is set here.
	 */
	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;

	/* reset source address in next go */
	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK <<
		  DMAE_CMD_SRC_ADDR_RESET_SHIFT;

	/* reset dest address in next go */
	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK <<
		  DMAE_CMD_DST_ADDR_RESET_SHIFT;

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
	} else {
		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
			     DMAE_CMD_SRC_VF_ID_SHIFT);
	}
	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK <<
			    DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) !=
			  31 * 4);

	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}

static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	/* verify address is not OSAL_NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  OSAL_LE32_TO_CPU(p_command->opcode),
			  OSAL_LE16_TO_CPU(p_command->opcode_b),
			  OSAL_LE16_TO_CPU(p_command->length_dw),
			  OSAL_LE32_TO_CPU(p_command->src_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->src_addr_lo),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   OSAL_LE32_TO_CPU(p_command->opcode),
		   OSAL_LE16_TO_CPU(p_command->opcode_b),
		   OSAL_LE16_TO_CPU(p_command->length_dw),
		   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

	/* Copy the command to the DMAE - this needs to be done before every
	 * call since the source/dest addresses are auto-reset after each run.
	 * The number of command DWs has been increased to 16 (previously 14).
	 * The first 9 DWs are the command registers, the 10th DW is the GO
	 * register, and the rest are result registers (which are read only
	 * by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			    *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd),
		 DMAE_GO_VALUE);

	return ecore_status;
}

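/* Allocate the per-hwfn DMAE resources: a coherent completion word the
 * engine writes back, the command structure itself, and an intermediate
 * bounce buffer of DMAE_MAX_RW_SIZE dwords used when callers pass host
 * virtual addresses.
 */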
enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
	if (*p_comp == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					 sizeof(struct dmae_cmd));
	if (*p_cmd == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
					  sizeof(u32) * DMAE_MAX_RW_SIZE);
	if (*p_buff == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return ECORE_SUCCESS;
err:
	ecore_dmae_info_free(p_hwfn);
	return ECORE_NOMEM;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}

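/* Poll the completion word until the engine writes DMAE_COMPLETION_VAL
 * into it, delaying DMAE_MIN_WAIT_TIME between samples. Emulation and
 * FPGA platforms scale the retry budget up since they run much slower
 * than real silicon.
 */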
static enum _ecore_status_t
ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
		      ECORE_EMUL_FACTOR :
		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
		       ECORE_FPGA_FACTOR : 1));

	wait_cnt_limit *= factor;
#endif

	/* DMAE_E4_TODO : TODO check if we have to call any other function
	 * other than BARRIER to sync the completion_word since we are not
	 * using the volatile keyword for this
	 */
	OSAL_BARRIER(p_hwfn->p_dev);
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->p_dev, false,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *(p_hwfn->dmae_info.p_completion_word),
				  DMAE_COMPLETION_VAL);
			ecore_status = ECORE_TIMEOUT;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		OSAL_BARRIER(p_hwfn->p_dev);
	}

	if (ecore_status == ECORE_SUCCESS)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return ecore_status;
}

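/* Run a single DMAE transaction of at most DMAE_MAX_RW_SIZE dwords.
 * GRC and physical host addresses are programmed directly; host virtual
 * addresses are staged through the pre-allocated intermediate buffer,
 * which is why callers must split larger requests beforehand.
 */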
static enum _ecore_status_t ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
							     struct ecore_ptt *p_ptt,
							     u64 src_addr,
							     u64 dst_addr,
							     u8 src_type,
							     u8 dst_type,
							     u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	switch (src_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		OSAL_MEMCPY(&(p_hwfn->dmae_info.p_intermediate_buffer[0]),
			    (void *)(osal_uintptr_t)src_addr,
			    length_dw * sizeof(u32));
		break;
	default:
		return ECORE_INVAL;
	}

	switch (dst_type) {
	case ECORE_DMAE_ADDRESS_GRC:
	case ECORE_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case ECORE_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
		break;
	default:
		return ECORE_INVAL;
	}

	cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), false);

	ecore_dmae_post_command(p_hwfn, p_ptt);

	ecore_status = ecore_dmae_operation_wait(p_hwfn);

	/* TODO - is it true ? */
	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
		OSAL_DMA_SYNC(p_hwfn->p_dev,
			      (void *)HILO_U64(cmd->src_addr_hi,
					       cmd->src_addr_lo),
			      length_dw * sizeof(u32), true);

	if (ecore_status != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Wait Failed. source_addr 0x%llx, dest_addr 0x%llx, size_in_dwords 0x%x, intermediate buffer 0x%llx.\n",
			  (unsigned long long)src_addr,
			  (unsigned long long)dst_addr, length_dw,
			  (unsigned long long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
		return ecore_status;
	}

	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
			    length_dw * sizeof(u32));

	return ECORE_SUCCESS;
}

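/* Split one logical copy into sub-operations of at most DMAE_MAX_RW_SIZE
 * dwords. GRC offsets advance in dwords while host addresses advance in
 * bytes, hence the offset*4 adjustment on the non-GRC side. The final
 * iteration carries the remainder and is skipped when the size divides
 * evenly.
 */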
static enum _ecore_status_t ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       u64 src_addr, u64 dst_addr,
						       u8 src_type, u8 dst_type,
						       u32 size_in_dwords,
						       struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 offset = 0;

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
			   (unsigned long long)src_addr, src_type, (unsigned long long)dst_addr, dst_type,
			   size_in_dwords);
		/* Return success to let the flow complete successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC),
			  p_params);

	cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
	cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
	cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid, e.g. < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  ecore_status, (unsigned long long)src_addr, (unsigned long long)dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}

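/* The three wrappers below serialize DMAE use under dmae_info.mutex and
 * pick the address types for the common cases. A minimal usage sketch
 * (p_buf and buf_len are hypothetical caller state):
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)p_buf,
 *				 grc_dest, buf_len / sizeof(u32), 0);
 *	if (rc != ECORE_SUCCESS)
 *		... handle the failure ...
 */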
enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u64 source_addr,
					 u32 grc_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					grc_addr_in_dw,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					ECORE_DMAE_ADDRESS_GRC,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 grc_addr,
					 dma_addr_t dest_addr,
					 u32 size_in_dwords,
					 u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = flags;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
					dest_addr, ECORE_DMAE_ADDRESS_GRC,
					ECORE_DMAE_ADDRESS_HOST_VIRT,
					size_in_dwords, &params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

enum _ecore_status_t ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  dma_addr_t source_addr,
					  dma_addr_t dest_addr,
					  u32 size_in_dwords,
					  struct ecore_dmae_params *p_params)
{
	enum _ecore_status_t rc;

	OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);

	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
					dest_addr,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					ECORE_DMAE_ADDRESS_HOST_PHYS,
					size_in_dwords,
					p_params);

	OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);

	return rc;
}

void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
			 enum ecore_hw_err_type err_type)
{
	/* Fan failure cannot be masked by handling of another HW error */
	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
			   "Recovery is in progress. Avoid notifying about HW error %d.\n",
			   err_type);
		return;
	}

	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}
1055