1*d14abf15SRobert Mustacchi #ifndef ECORE_ERASE
2*d14abf15SRobert Mustacchi #ifdef __LINUX
3*d14abf15SRobert Mustacchi 
4*d14abf15SRobert Mustacchi #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5*d14abf15SRobert Mustacchi 
6*d14abf15SRobert Mustacchi #include <linux/kernel.h>
7*d14abf15SRobert Mustacchi #include <linux/types.h>
8*d14abf15SRobert Mustacchi #include <asm/byteorder.h>
9*d14abf15SRobert Mustacchi #include <linux/version.h>
10*d14abf15SRobert Mustacchi #include <linux/module.h>
11*d14abf15SRobert Mustacchi #include <linux/crc32.h>
12*d14abf15SRobert Mustacchi #include <linux/etherdevice.h>
13*d14abf15SRobert Mustacchi 
14*d14abf15SRobert Mustacchi #define ECORE_ALIGN(x, a) ALIGN(x, a)
15*d14abf15SRobert Mustacchi #endif
16*d14abf15SRobert Mustacchi 
17*d14abf15SRobert Mustacchi /* Always define ECORE_OOO for VBD */
18*d14abf15SRobert Mustacchi #define ECORE_OOO
19*d14abf15SRobert Mustacchi 
20*d14abf15SRobert Mustacchi #include "bcmtype.h"
21*d14abf15SRobert Mustacchi #include "utils.h"
22*d14abf15SRobert Mustacchi #include "lm5710.h"
23*d14abf15SRobert Mustacchi #include "ecore_sp_verbs.h"
24*d14abf15SRobert Mustacchi #include "command.h"
25*d14abf15SRobert Mustacchi #include "debug.h"
26*d14abf15SRobert Mustacchi #include "ecore_common.h"
27*d14abf15SRobert Mustacchi 
28*d14abf15SRobert Mustacchi /************************ Debug print macros **********************************/
29*d14abf15SRobert Mustacchi #if !defined(UEFI) && defined(DBG)
30*d14abf15SRobert Mustacchi #define ECORE_MSG(pdev, m, ...) \
31*d14abf15SRobert Mustacchi 	DbgMessage(pdev, WARNi, m, ##__VA_ARGS__)
32*d14abf15SRobert Mustacchi #else
33*d14abf15SRobert Mustacchi #define ECORE_MSG
34*d14abf15SRobert Mustacchi #endif
35*d14abf15SRobert Mustacchi 
36*d14abf15SRobert Mustacchi /************************ Error prints ****************************************/
37*d14abf15SRobert Mustacchi #if !defined(UEFI) && defined(DBG)
38*d14abf15SRobert Mustacchi #define ECORE_ERR(str, ...) DbgMessage(pdev, FATAL, str, ##__VA_ARGS__)
39*d14abf15SRobert Mustacchi #else
40*d14abf15SRobert Mustacchi #define ECORE_ERR
41*d14abf15SRobert Mustacchi #endif
42*d14abf15SRobert Mustacchi 
43*d14abf15SRobert Mustacchi 
44*d14abf15SRobert Mustacchi /***********************  ECORE WRAPPER MACROS ********************************/
45*d14abf15SRobert Mustacchi 
46*d14abf15SRobert Mustacchi #define ECORE_RET_PENDING(pending_bit, pending) \
47*d14abf15SRobert Mustacchi 	(ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
48*d14abf15SRobert Mustacchi 
49*d14abf15SRobert Mustacchi #define ECORE_ZALLOC(_size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, _size)
50*d14abf15SRobert Mustacchi #define ECORE_CALLOC(_len, _size, _flags, _pdev) mm_rt_zalloc_mem(_pdev, _len * _size)
51*d14abf15SRobert Mustacchi #define ECORE_FREE(_pdev, _buf, _size) mm_rt_free_mem(_pdev, _buf, _size, 0)
52*d14abf15SRobert Mustacchi 
53*d14abf15SRobert Mustacchi /*
54*d14abf15SRobert Mustacchi  *  Ecore implementation of set/get flag
55*d14abf15SRobert Mustacchi  *  (differs from VBD set_flags, get_flags)
56*d14abf15SRobert Mustacchi  */
57*d14abf15SRobert Mustacchi #define ECORE_SET_FLAG(value, mask, flag) \
58*d14abf15SRobert Mustacchi 	do {\
59*d14abf15SRobert Mustacchi 		(value) &= ~(mask);\
60*d14abf15SRobert Mustacchi 		(value) |= ((flag) << (mask##_SHIFT));\
61*d14abf15SRobert Mustacchi 	} while (0)
62*d14abf15SRobert Mustacchi 
63*d14abf15SRobert Mustacchi #define ECORE_GET_FLAG(value, mask) \
64*d14abf15SRobert Mustacchi 	(((value) &= (mask)) >> (mask##_SHIFT))
65*d14abf15SRobert Mustacchi 
66*d14abf15SRobert Mustacchi #define ecore_sp_post(_pdev, _cmd , _cid, _data, _con_type) \
67*d14abf15SRobert Mustacchi 	lm_sq_post(_pdev, _cid, (u8)(_cmd), CMD_PRIORITY_NORMAL, _con_type, \
68*d14abf15SRobert Mustacchi 	_data)
69*d14abf15SRobert Mustacchi 
70*d14abf15SRobert Mustacchi #define ECORE_SET_CTX_VALIDATION(_pdev, _cxt, _cid) \
71*d14abf15SRobert Mustacchi 	lm_set_cdu_validation_data(_pdev, _cid, FALSE) /* context? type? */
72*d14abf15SRobert Mustacchi /************************ TODO for LM people!!! *******************************/
73*d14abf15SRobert Mustacchi #define ECORE_TODO_UPDATE_COALESCE_SB_INDEX(a1, a2, a3, a4, a5)
74*d14abf15SRobert Mustacchi #define ECORE_TODO_LINK_REPORT(pdev)
75*d14abf15SRobert Mustacchi #define ECORE_TODO_FW_COMMAND(_pdev, _drv_msg_code, _val) (-1)
76*d14abf15SRobert Mustacchi 
77*d14abf15SRobert Mustacchi /************************ Lists ***********************************************/
78*d14abf15SRobert Mustacchi #define ECORE_LIST_FOR_EACH_ENTRY(pos, _head, _link, cast) \
79*d14abf15SRobert Mustacchi 	for (pos = (cast *)d_list_peek_head(_head); \
80*d14abf15SRobert Mustacchi 	     pos; \
81*d14abf15SRobert Mustacchi 	     pos = (cast *)d_list_next_entry(&pos->_link))
82*d14abf15SRobert Mustacchi 
83*d14abf15SRobert Mustacchi /**
84*d14abf15SRobert Mustacchi  * ECORE_LIST_FOR_EACH_ENTRY_SAFE - iterate over list of given type
85*d14abf15SRobert Mustacchi  * @pos:        the type * to use as a loop cursor.
86*d14abf15SRobert Mustacchi  * @n:          another type * to use as temporary storage
87*d14abf15SRobert Mustacchi  * @head:       the head for your list.
88*d14abf15SRobert Mustacchi  * @member:     the name of the list_struct within the struct.
89*d14abf15SRobert Mustacchi  *
90*d14abf15SRobert Mustacchi  * iterate over list of given type safe against removal of list entry
91*d14abf15SRobert Mustacchi  */
92*d14abf15SRobert Mustacchi #define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, member, cast)   \
93*d14abf15SRobert Mustacchi 	 for (pos = (cast *)d_list_peek_head(head), \
94*d14abf15SRobert Mustacchi 	      n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL; \
95*d14abf15SRobert Mustacchi 	      pos != NULL;  \
96*d14abf15SRobert Mustacchi 	      pos = (cast *)n, \
97*d14abf15SRobert Mustacchi 	      n = (pos) ? (cast *)d_list_next_entry(&pos->member) : NULL)
98*d14abf15SRobert Mustacchi 
99*d14abf15SRobert Mustacchi #define ECORE_LIST_IS_LAST(_link, _list)                (_link == (_list)->tail)
100*d14abf15SRobert Mustacchi 
101*d14abf15SRobert Mustacchi #define ECORE_LIST_IS_EMPTY(head)                       \
102*d14abf15SRobert Mustacchi 	d_list_is_empty(head)
103*d14abf15SRobert Mustacchi 
104*d14abf15SRobert Mustacchi #define ECORE_LIST_FIRST_ENTRY(head, cast, link)	\
105*d14abf15SRobert Mustacchi 	(cast *)d_list_peek_head(head)
106*d14abf15SRobert Mustacchi 
107*d14abf15SRobert Mustacchi #define ECORE_LIST_NEXT(pos, link, cast)	\
108*d14abf15SRobert Mustacchi 	(cast *)d_list_next_entry(&((pos)->link))
109*d14abf15SRobert Mustacchi 
110*d14abf15SRobert Mustacchi #define ECORE_LIST_INIT(head)				\
111*d14abf15SRobert Mustacchi do { \
112*d14abf15SRobert Mustacchi 	d_list_clear(head); \
113*d14abf15SRobert Mustacchi } while (0)
114*d14abf15SRobert Mustacchi 
115*d14abf15SRobert Mustacchi #define ECORE_LIST_PUSH_TAIL(link, head)		\
116*d14abf15SRobert Mustacchi do { \
117*d14abf15SRobert Mustacchi 	d_list_push_tail(head, link); \
118*d14abf15SRobert Mustacchi } while (0)
119*d14abf15SRobert Mustacchi 
120*d14abf15SRobert Mustacchi #define ECORE_LIST_PUSH_HEAD(link, head)		\
121*d14abf15SRobert Mustacchi do { \
122*d14abf15SRobert Mustacchi 	d_list_push_head(head, link); \
123*d14abf15SRobert Mustacchi } while (0)
124*d14abf15SRobert Mustacchi 
125*d14abf15SRobert Mustacchi #define ECORE_LIST_REMOVE_ENTRY(link, head)		\
126*d14abf15SRobert Mustacchi do { \
127*d14abf15SRobert Mustacchi 	d_list_remove_entry(head, link); \
128*d14abf15SRobert Mustacchi } while (0)
129*d14abf15SRobert Mustacchi 
130*d14abf15SRobert Mustacchi #define ECORE_LIST_SPLICE_INIT(new_head, head) \
131*d14abf15SRobert Mustacchi do { \
132*d14abf15SRobert Mustacchi 	d_list_add_head(head, new_head); \
133*d14abf15SRobert Mustacchi 	d_list_clear(new_head); \
134*d14abf15SRobert Mustacchi } while (0)
135*d14abf15SRobert Mustacchi 
136*d14abf15SRobert Mustacchi static __inline u32_t ecore_crc32_le(u32_t seed, u8_t *mac, u32_t len)
137*d14abf15SRobert Mustacchi {
138*d14abf15SRobert Mustacchi 	u32_t packet_buf[2] = {0};
139*d14abf15SRobert Mustacchi 
140*d14abf15SRobert Mustacchi 	memcpy(((u8_t *)(&packet_buf[0]))+2, &mac[0], 2);
141*d14abf15SRobert Mustacchi 	memcpy(&packet_buf[1], &mac[2], 4);
142*d14abf15SRobert Mustacchi 	return SWAP_BYTES32(calc_crc32((u8_t *)packet_buf, 8, seed, 0));
143*d14abf15SRobert Mustacchi }
144*d14abf15SRobert Mustacchi 
145*d14abf15SRobert Mustacchi /************************ Per compilation target ******************************/
146*d14abf15SRobert Mustacchi #ifdef __LINUX
147*d14abf15SRobert Mustacchi 
148*d14abf15SRobert Mustacchi #define ECORE_UNLIKELY	unlikely
149*d14abf15SRobert Mustacchi #define ECORE_LIKELY	likely
150*d14abf15SRobert Mustacchi 
151*d14abf15SRobert Mustacchi #define ecore_atomic_read		mm_atomic_read
152*d14abf15SRobert Mustacchi #define ecore_atomic_cmpxchg		mm_atomic_cmpxchg
153*d14abf15SRobert Mustacchi #define ecore_atomic_set(a, v)		mm_atomic_set((u32_t *)(a), v)
154*d14abf15SRobert Mustacchi #define smp_mb__before_atomic() mm_barrier()
155*d14abf15SRobert Mustacchi #define smp_mb__after_atomic()  mm_barrier()
156*d14abf15SRobert Mustacchi 
157*d14abf15SRobert Mustacchi /* Other */
158*d14abf15SRobert Mustacchi #define ECORE_IS_VALID_ETHER_ADDR(_mac)               is_valid_ether_addr(_mac)
159*d14abf15SRobert Mustacchi #define ECORE_SET_WAIT_COUNT(_cnt)
160*d14abf15SRobert Mustacchi #define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us)
161*d14abf15SRobert Mustacchi 
162*d14abf15SRobert Mustacchi /* Mutex related */
163*d14abf15SRobert Mustacchi #define ECORE_MUTEX_INIT(_mutex)	mutex_init(_mutex)
164*d14abf15SRobert Mustacchi #define ECORE_MUTEX_LOCK(_mutex)	mutex_lock(_mutex)
165*d14abf15SRobert Mustacchi #define ECORE_MUTEX_UNLOCK(_mutex)	mutex_unlock(_mutex)
166*d14abf15SRobert Mustacchi 
167*d14abf15SRobert Mustacchi #define ECORE_MIGHT_SLEEP() ediag_might_sleep()
168*d14abf15SRobert Mustacchi #define ECORE_TEST_BIT(bit, var)  test_bit(bit, var)
169*d14abf15SRobert Mustacchi #define ECORE_TEST_AND_CLEAR_BIT(bit, var) test_and_clear_bit(bit, var)
170*d14abf15SRobert Mustacchi 
171*d14abf15SRobert Mustacchi #else /* ! LINUX */
172*d14abf15SRobert Mustacchi 
173*d14abf15SRobert Mustacchi typedef u16 __le16;
174*d14abf15SRobert Mustacchi 
175*d14abf15SRobert Mustacchi #define ecore_atomic_read		mm_atomic_read
176*d14abf15SRobert Mustacchi #define ecore_atomic_cmpxchg		mm_atomic_cmpxchg
177*d14abf15SRobert Mustacchi #define ecore_atomic_set(a, val)	mm_atomic_set((u32_t *)(a), val)
178*d14abf15SRobert Mustacchi 
179*d14abf15SRobert Mustacchi #define ECORE_UNLIKELY(x)	(x)
180*d14abf15SRobert Mustacchi #define ECORE_LIKELY(x)		(x)
181*d14abf15SRobert Mustacchi #define BUG() DbgBreakMsg("Bug")
182*d14abf15SRobert Mustacchi #define smp_mb()                   mm_barrier()
183*d14abf15SRobert Mustacchi #define smp_mb__before_atomic() mm_barrier()
184*d14abf15SRobert Mustacchi #define smp_mb__after_atomic()  mm_barrier()
185*d14abf15SRobert Mustacchi #define mb()                       mm_barrier()
186*d14abf15SRobert Mustacchi #define wmb()                      mm_barrier()
187*d14abf15SRobert Mustacchi #define mmiowb()		   mm_barrier()
188*d14abf15SRobert Mustacchi 
189*d14abf15SRobert Mustacchi #define ECORE_MIGHT_SLEEP() /* IRQL_PASSIVE_CODE() */
190*d14abf15SRobert Mustacchi 
191*d14abf15SRobert Mustacchi /* Mutex related */
192*d14abf15SRobert Mustacchi #define ECORE_MUTEX_INIT(_mutex)
193*d14abf15SRobert Mustacchi #define ECORE_MUTEX_LOCK(_mutex)
194*d14abf15SRobert Mustacchi #define ECORE_MUTEX_UNLOCK(_mutex)
195*d14abf15SRobert Mustacchi 
196*d14abf15SRobert Mustacchi /* Atomic Bit Manipulation */
197*d14abf15SRobert Mustacchi #define ECORE_TEST_BIT(_bit, _var) \
198*d14abf15SRobert Mustacchi 	(mm_atomic_long_read(_var) & (1 << (_bit)))
199*d14abf15SRobert Mustacchi 
200*d14abf15SRobert Mustacchi /* Other */
201*d14abf15SRobert Mustacchi #define ECORE_IS_VALID_ETHER_ADDR(_mac)         TRUE
202*d14abf15SRobert Mustacchi #define ECORE_SET_WAIT_DELAY_US(_cnt, _delay_us) \
203*d14abf15SRobert Mustacchi do { \
204*d14abf15SRobert Mustacchi 	_delay_us = (_cnt >= 2360) ? 100 : 25000; \
205*d14abf15SRobert Mustacchi } while (0)
206*d14abf15SRobert Mustacchi 
207*d14abf15SRobert Mustacchi /*
208*d14abf15SRobert Mustacchi  * In VBD We'll wait 10,000 times 100us (1 second) +
209*d14abf15SRobert Mustacchi  * 2360 times 25000us (59sec) = total 60 sec
 * (Windows only note) the 25000 wait will cause
211*d14abf15SRobert Mustacchi  * wait to be without CPU stall (look in win_util.c)
212*d14abf15SRobert Mustacchi  */
213*d14abf15SRobert Mustacchi #define ECORE_SET_WAIT_COUNT(_cnt) \
214*d14abf15SRobert Mustacchi do { \
215*d14abf15SRobert Mustacchi 	_cnt = 10000 + 2360; \
216*d14abf15SRobert Mustacchi } while (0)
217*d14abf15SRobert Mustacchi 
218*d14abf15SRobert Mustacchi static __inline BOOL ECORE_TEST_AND_CLEAR_BIT(int bit, unsigned long *vec)
219*d14abf15SRobert Mustacchi {
220*d14abf15SRobert Mustacchi 	BOOL set = ECORE_TEST_BIT(bit, vec);
221*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT(bit, vec);
222*d14abf15SRobert Mustacchi 
223*d14abf15SRobert Mustacchi 	return set;
224*d14abf15SRobert Mustacchi }
225*d14abf15SRobert Mustacchi 
226*d14abf15SRobert Mustacchi #endif /* END if "per LM target type" */
227*d14abf15SRobert Mustacchi 
228*d14abf15SRobert Mustacchi /* Spin lock related */
229*d14abf15SRobert Mustacchi #define ECORE_SPIN_LOCK_INIT(_spin, _pdev)	mm_init_lock(_pdev, _spin)
230*d14abf15SRobert Mustacchi #define ECORE_SPIN_LOCK_BH(_spin)		mm_acquire_lock(_spin)
231*d14abf15SRobert Mustacchi #define ECORE_SPIN_UNLOCK_BH(_spin)		mm_release_lock(_spin)
232*d14abf15SRobert Mustacchi 
233*d14abf15SRobert Mustacchi #endif /* not ECORE_ERASE */
234*d14abf15SRobert Mustacchi #if defined(__FreeBSD__) && !defined(NOT_LINUX)
235*d14abf15SRobert Mustacchi #include "bxe.h"
236*d14abf15SRobert Mustacchi #include "ecore_init.h"
237*d14abf15SRobert Mustacchi #elif !defined(EDIAG)
238*d14abf15SRobert Mustacchi #ifdef ECORE_ERASE
239*d14abf15SRobert Mustacchi #include <linux/version.h>
240*d14abf15SRobert Mustacchi #include <linux/module.h>
241*d14abf15SRobert Mustacchi #include <linux/crc32.h>
242*d14abf15SRobert Mustacchi #include <linux/netdevice.h>
243*d14abf15SRobert Mustacchi #include <linux/etherdevice.h>
244*d14abf15SRobert Mustacchi #if (LINUX_VERSION_CODE >= 0x02061b) && !defined(BNX2X_DRIVER_DISK) && !defined(__VMKLNX__) /* BNX2X_UPSTREAM */
245*d14abf15SRobert Mustacchi #include <linux/crc32c.h>
246*d14abf15SRobert Mustacchi #endif
247*d14abf15SRobert Mustacchi #include "bnx2x.h"
248*d14abf15SRobert Mustacchi #include "bnx2x_cmn.h"
249*d14abf15SRobert Mustacchi #include "bnx2x_sp.h"
250*d14abf15SRobert Mustacchi 
251*d14abf15SRobert Mustacchi #define ECORE_MAX_EMUL_MULTI		16
252*d14abf15SRobert Mustacchi #endif
253*d14abf15SRobert Mustacchi #endif
254*d14abf15SRobert Mustacchi 
255*d14abf15SRobert Mustacchi /**** Exe Queue interfaces ****/
256*d14abf15SRobert Mustacchi 
257*d14abf15SRobert Mustacchi /**
258*d14abf15SRobert Mustacchi  * ecore_exe_queue_init - init the Exe Queue object
259*d14abf15SRobert Mustacchi  *
260*d14abf15SRobert Mustacchi  * @o:		pointer to the object
261*d14abf15SRobert Mustacchi  * @exe_len:	length
262*d14abf15SRobert Mustacchi  * @owner:	pointer to the owner
263*d14abf15SRobert Mustacchi  * @validate:	validate function pointer
264*d14abf15SRobert Mustacchi  * @optimize:	optimize function pointer
265*d14abf15SRobert Mustacchi  * @exec:	execute function pointer
266*d14abf15SRobert Mustacchi  * @get:	get function pointer
267*d14abf15SRobert Mustacchi  */
268*d14abf15SRobert Mustacchi static INLINE void ecore_exe_queue_init(struct _lm_device_t *pdev,
269*d14abf15SRobert Mustacchi 					struct ecore_exe_queue_obj *o,
270*d14abf15SRobert Mustacchi 					int exe_len,
271*d14abf15SRobert Mustacchi 					union ecore_qable_obj *owner,
272*d14abf15SRobert Mustacchi 					exe_q_validate validate,
273*d14abf15SRobert Mustacchi 					exe_q_remove remove,
274*d14abf15SRobert Mustacchi 					exe_q_optimize optimize,
275*d14abf15SRobert Mustacchi 					exe_q_execute exec,
276*d14abf15SRobert Mustacchi 					exe_q_get get)
277*d14abf15SRobert Mustacchi {
278*d14abf15SRobert Mustacchi 	mm_memset(o, 0, sizeof(*o));
279*d14abf15SRobert Mustacchi 
280*d14abf15SRobert Mustacchi 	ECORE_LIST_INIT(&o->exe_queue);
281*d14abf15SRobert Mustacchi 	ECORE_LIST_INIT(&o->pending_comp);
282*d14abf15SRobert Mustacchi 
283*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_INIT(&o->lock, pdev);
284*d14abf15SRobert Mustacchi 
285*d14abf15SRobert Mustacchi 	o->exe_chunk_len = exe_len;
286*d14abf15SRobert Mustacchi 	o->owner         = owner;
287*d14abf15SRobert Mustacchi 
288*d14abf15SRobert Mustacchi 	/* Owner specific callbacks */
289*d14abf15SRobert Mustacchi 	o->validate      = validate;
290*d14abf15SRobert Mustacchi 	o->remove        = remove;
291*d14abf15SRobert Mustacchi 	o->optimize      = optimize;
292*d14abf15SRobert Mustacchi 	o->execute       = exec;
293*d14abf15SRobert Mustacchi 	o->get           = get;
294*d14abf15SRobert Mustacchi 
295*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Setup the execution queue with the chunk length of %d\n",
296*d14abf15SRobert Mustacchi 		  exe_len);
297*d14abf15SRobert Mustacchi }
298*d14abf15SRobert Mustacchi 
/**
 * ecore_exe_queue_free_elem - release one execution-queue element
 *
 * @pdev:	driver handle
 * @elem:	element to free (allocated by ecore_exe_queue_alloc_elem)
 */
static INLINE void ecore_exe_queue_free_elem(struct _lm_device_t *pdev,
					     struct ecore_exeq_elem *elem)
{
	ECORE_MSG(pdev, "Deleting an exe_queue element\n");
	ECORE_FREE(pdev, elem, sizeof(*elem));
}
305*d14abf15SRobert Mustacchi 
306*d14abf15SRobert Mustacchi static INLINE int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
307*d14abf15SRobert Mustacchi {
308*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem;
309*d14abf15SRobert Mustacchi 	int cnt = 0;
310*d14abf15SRobert Mustacchi 
311*d14abf15SRobert Mustacchi #ifdef ECORE_ERASE
312*d14abf15SRobert Mustacchi 	spin_lock_bh(&o->lock);
313*d14abf15SRobert Mustacchi #endif
314*d14abf15SRobert Mustacchi 
315*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
316*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem)
317*d14abf15SRobert Mustacchi 		cnt++;
318*d14abf15SRobert Mustacchi 
319*d14abf15SRobert Mustacchi #ifdef ECORE_ERASE
320*d14abf15SRobert Mustacchi 	spin_unlock_bh(&o->lock);
321*d14abf15SRobert Mustacchi #endif
322*d14abf15SRobert Mustacchi 
323*d14abf15SRobert Mustacchi 	return cnt;
324*d14abf15SRobert Mustacchi }
325*d14abf15SRobert Mustacchi 
326*d14abf15SRobert Mustacchi /**
327*d14abf15SRobert Mustacchi  * ecore_exe_queue_add - add a new element to the execution queue
328*d14abf15SRobert Mustacchi  *
329*d14abf15SRobert Mustacchi  * @pdev:	driver handle
330*d14abf15SRobert Mustacchi  * @o:		queue
331*d14abf15SRobert Mustacchi  * @cmd:	new command to add
332*d14abf15SRobert Mustacchi  * @restore:	true - do not optimize the command
333*d14abf15SRobert Mustacchi  *
334*d14abf15SRobert Mustacchi  * If the element is optimized or is illegal, frees it.
335*d14abf15SRobert Mustacchi  */
336*d14abf15SRobert Mustacchi static INLINE int ecore_exe_queue_add(struct _lm_device_t *pdev,
337*d14abf15SRobert Mustacchi 				      struct ecore_exe_queue_obj *o,
338*d14abf15SRobert Mustacchi 				      struct ecore_exeq_elem *elem,
339*d14abf15SRobert Mustacchi 				      BOOL restore)
340*d14abf15SRobert Mustacchi {
341*d14abf15SRobert Mustacchi 	int rc;
342*d14abf15SRobert Mustacchi 
343*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->lock);
344*d14abf15SRobert Mustacchi 
345*d14abf15SRobert Mustacchi 	if (!restore) {
346*d14abf15SRobert Mustacchi 		/* Try to cancel this element queue */
347*d14abf15SRobert Mustacchi 		rc = o->optimize(pdev, o->owner, elem);
348*d14abf15SRobert Mustacchi 		if (rc)
349*d14abf15SRobert Mustacchi 			goto free_and_exit;
350*d14abf15SRobert Mustacchi 
351*d14abf15SRobert Mustacchi 		/* Check if this request is ok */
352*d14abf15SRobert Mustacchi 		rc = o->validate(pdev, o->owner, elem);
353*d14abf15SRobert Mustacchi 		if (rc) {
354*d14abf15SRobert Mustacchi 			ECORE_MSG(pdev, "Preamble failed: %d\n", rc);
355*d14abf15SRobert Mustacchi 			goto free_and_exit;
356*d14abf15SRobert Mustacchi 		}
357*d14abf15SRobert Mustacchi 	}
358*d14abf15SRobert Mustacchi 
359*d14abf15SRobert Mustacchi 	/* If so, add it to the execution queue */
360*d14abf15SRobert Mustacchi 	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
361*d14abf15SRobert Mustacchi 
362*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->lock);
363*d14abf15SRobert Mustacchi 
364*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
365*d14abf15SRobert Mustacchi 
366*d14abf15SRobert Mustacchi free_and_exit:
367*d14abf15SRobert Mustacchi 	ecore_exe_queue_free_elem(pdev, elem);
368*d14abf15SRobert Mustacchi 
369*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->lock);
370*d14abf15SRobert Mustacchi 
371*d14abf15SRobert Mustacchi 	return rc;
372*d14abf15SRobert Mustacchi }
373*d14abf15SRobert Mustacchi 
374*d14abf15SRobert Mustacchi static INLINE void __ecore_exe_queue_reset_pending(
375*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev,
376*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *o)
377*d14abf15SRobert Mustacchi {
378*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem;
379*d14abf15SRobert Mustacchi 
380*d14abf15SRobert Mustacchi 	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
381*d14abf15SRobert Mustacchi 		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
382*d14abf15SRobert Mustacchi 					      struct ecore_exeq_elem,
383*d14abf15SRobert Mustacchi 					      link);
384*d14abf15SRobert Mustacchi 
385*d14abf15SRobert Mustacchi 		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
386*d14abf15SRobert Mustacchi 		ecore_exe_queue_free_elem(pdev, elem);
387*d14abf15SRobert Mustacchi 	}
388*d14abf15SRobert Mustacchi }
389*d14abf15SRobert Mustacchi 
390*d14abf15SRobert Mustacchi /**
391*d14abf15SRobert Mustacchi  * ecore_exe_queue_step - execute one execution chunk atomically
392*d14abf15SRobert Mustacchi  *
393*d14abf15SRobert Mustacchi  * @pdev:		driver handle
394*d14abf15SRobert Mustacchi  * @o:			queue
395*d14abf15SRobert Mustacchi  * @ramrod_flags:	flags
396*d14abf15SRobert Mustacchi  *
397*d14abf15SRobert Mustacchi  * (Should be called while holding the exe_queue->lock).
398*d14abf15SRobert Mustacchi  */
static INLINE int ecore_exe_queue_step(struct _lm_device_t *pdev,
				       struct ecore_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	/* 'spacer' is a dummy element used below so that pending_comp is
	 * never observed empty while an element migrates from exe_queue.
	 */
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	mm_memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(pdev, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__ecore_exe_queue_reset_pending(pdev, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk: move elements whose combined cmd_len still fits
	 * within exe_chunk_len from exe_queue onto pending_comp.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem,
					      link);
		DbgBreakIf(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check: nothing was moved, so there is nothing to execute */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(pdev, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 *  and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(pdev, o);

	return rc;
}
465*d14abf15SRobert Mustacchi 
/* Lockless "is the queue fully idle?" check: TRUE only when both the
 * execution queue and the pending-completion list are empty.
 */
static INLINE BOOL ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	BOOL empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! The exe_queue check must be ordered before the
	 * pending_comp check; ecore_exe_queue_step() uses a spacer element
	 * to guarantee both lists are never simultaneously empty while an
	 * element is being moved, which is what makes this check safe
	 * without taking o->lock.
	 */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}
475*d14abf15SRobert Mustacchi 
/* Allocate a zeroed execution-queue element from the runtime pool
 * (mm_rt_zalloc_mem via ECORE_ZALLOC; presumably returns NULL on
 * failure - callers should check).
 */
static INLINE struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
	struct _lm_device_t *pdev)
{
	ECORE_MSG(pdev, "Allocating a new exe_queue element\n");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
			    pdev);
}
483*d14abf15SRobert Mustacchi 
484*d14abf15SRobert Mustacchi /************************ raw_obj functions ***********************************/
/* Return TRUE if the raw object's pending-state bit is currently set. */
static BOOL ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/* The !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of how BOOL is
	 * defined.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */

	return !!ECORE_TEST_BIT(o->state, o->pstate);
}
498*d14abf15SRobert Mustacchi 
/* Clear the object's pending-state bit; the surrounding barriers order
 * the bit update relative to neighbouring memory accesses.
 */
static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	smp_mb__before_atomic();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	smp_mb__after_atomic();
}
505*d14abf15SRobert Mustacchi 
/* Set the object's pending-state bit; the surrounding barriers order
 * the bit update relative to neighbouring memory accesses.
 */
static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	smp_mb__before_atomic();
	ECORE_SET_BIT(o->state, o->pstate);
	smp_mb__after_atomic();
}
512*d14abf15SRobert Mustacchi 
513*d14abf15SRobert Mustacchi /**
514*d14abf15SRobert Mustacchi  * ecore_state_wait - wait until the given bit(state) is cleared
515*d14abf15SRobert Mustacchi  *
516*d14abf15SRobert Mustacchi  * @pdev:	device handle
517*d14abf15SRobert Mustacchi  * @state:	state which is to be cleared
518*d14abf15SRobert Mustacchi  * @state_p:	state buffer
519*d14abf15SRobert Mustacchi  *
520*d14abf15SRobert Mustacchi  */
static INLINE int ecore_state_wait(struct _lm_device_t *pdev, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

#ifndef ECORE_ERASE
	int delay_us = 1000;

	/* In VBD We'll wait 10,000 times 100us (1 second) +
	* 2360 times 25000us (59sec) = total 60 sec
	* (Windows only note) the 25000 wait will cause wait
	* to be without CPU stall (look in win_util.c)
	*/
	cnt = 10000 + 2360;
#endif

	/* Emulation is far slower - allow 20x the wait budget */
	if (CHIP_REV_IS_EMUL(pdev))
		cnt *= 20;

	ECORE_MSG(pdev, "waiting for state to become %d\n", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			/* NOTE(review): "5000 - cnt" assumes the ECORE_ERASE
			 * initial count of 5000; with the VBD count (12360)
			 * the printed iteration number is off. Debug print
			 * only - no functional impact.
			 */
			ECORE_MSG(pdev, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

#ifndef ECORE_ERASE
		/* in case reset is in progress we won't get completion */
		if (lm_reset_is_inprogress(pdev))
			return 0;

		/* First 10,000 iterations poll fast (100us each); the
		 * remaining 2360 use the long 25ms wait.
		 */
		delay_us = (cnt >= 2360) ? 100 : 25000;
#endif
		mm_wait(pdev, delay_us);

		if (pdev->panic)
			return ECORE_IO;
	}

	/* timeout! */
	ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}
573*d14abf15SRobert Mustacchi 
574*d14abf15SRobert Mustacchi static int ecore_raw_wait(struct _lm_device_t *pdev, struct ecore_raw_obj *raw)
575*d14abf15SRobert Mustacchi {
576*d14abf15SRobert Mustacchi 	return ecore_state_wait(pdev, raw->state, raw->pstate);
577*d14abf15SRobert Mustacchi }
578*d14abf15SRobert Mustacchi 
579*d14abf15SRobert Mustacchi /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
580*d14abf15SRobert Mustacchi /* credit handling callbacks */
581*d14abf15SRobert Mustacchi static BOOL ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
582*d14abf15SRobert Mustacchi {
583*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
584*d14abf15SRobert Mustacchi 
585*d14abf15SRobert Mustacchi 	DbgBreakIf(!mp);
586*d14abf15SRobert Mustacchi 
587*d14abf15SRobert Mustacchi 	return mp->get_entry(mp, offset);
588*d14abf15SRobert Mustacchi }
589*d14abf15SRobert Mustacchi 
590*d14abf15SRobert Mustacchi static BOOL ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
591*d14abf15SRobert Mustacchi {
592*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
593*d14abf15SRobert Mustacchi 
594*d14abf15SRobert Mustacchi 	DbgBreakIf(!mp);
595*d14abf15SRobert Mustacchi 
596*d14abf15SRobert Mustacchi 	return mp->get(mp, 1);
597*d14abf15SRobert Mustacchi }
598*d14abf15SRobert Mustacchi 
599*d14abf15SRobert Mustacchi static BOOL ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
600*d14abf15SRobert Mustacchi {
601*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
602*d14abf15SRobert Mustacchi 
603*d14abf15SRobert Mustacchi 	DbgBreakIf(!vp);
604*d14abf15SRobert Mustacchi 
605*d14abf15SRobert Mustacchi 	return vp->get_entry(vp, offset);
606*d14abf15SRobert Mustacchi }
607*d14abf15SRobert Mustacchi 
608*d14abf15SRobert Mustacchi static BOOL ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
609*d14abf15SRobert Mustacchi {
610*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
611*d14abf15SRobert Mustacchi 
612*d14abf15SRobert Mustacchi 	DbgBreakIf(!vp);
613*d14abf15SRobert Mustacchi 
614*d14abf15SRobert Mustacchi 	return vp->get(vp, 1);
615*d14abf15SRobert Mustacchi }
616*d14abf15SRobert Mustacchi 
617*d14abf15SRobert Mustacchi static BOOL ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
618*d14abf15SRobert Mustacchi {
619*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
620*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
621*d14abf15SRobert Mustacchi 
622*d14abf15SRobert Mustacchi 	if (!mp->get(mp, 1))
623*d14abf15SRobert Mustacchi 		return FALSE;
624*d14abf15SRobert Mustacchi 
625*d14abf15SRobert Mustacchi 	if (!vp->get(vp, 1)) {
626*d14abf15SRobert Mustacchi 		mp->put(mp, 1);
627*d14abf15SRobert Mustacchi 		return FALSE;
628*d14abf15SRobert Mustacchi 	}
629*d14abf15SRobert Mustacchi 
630*d14abf15SRobert Mustacchi 	return TRUE;
631*d14abf15SRobert Mustacchi }
632*d14abf15SRobert Mustacchi 
633*d14abf15SRobert Mustacchi static BOOL ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
634*d14abf15SRobert Mustacchi {
635*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
636*d14abf15SRobert Mustacchi 
637*d14abf15SRobert Mustacchi 	return mp->put_entry(mp, offset);
638*d14abf15SRobert Mustacchi }
639*d14abf15SRobert Mustacchi 
640*d14abf15SRobert Mustacchi static BOOL ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
641*d14abf15SRobert Mustacchi {
642*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
643*d14abf15SRobert Mustacchi 
644*d14abf15SRobert Mustacchi 	return mp->put(mp, 1);
645*d14abf15SRobert Mustacchi }
646*d14abf15SRobert Mustacchi 
647*d14abf15SRobert Mustacchi static BOOL ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
648*d14abf15SRobert Mustacchi {
649*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
650*d14abf15SRobert Mustacchi 
651*d14abf15SRobert Mustacchi 	return vp->put_entry(vp, offset);
652*d14abf15SRobert Mustacchi }
653*d14abf15SRobert Mustacchi 
654*d14abf15SRobert Mustacchi static BOOL ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
655*d14abf15SRobert Mustacchi {
656*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
657*d14abf15SRobert Mustacchi 
658*d14abf15SRobert Mustacchi 	return vp->put(vp, 1);
659*d14abf15SRobert Mustacchi }
660*d14abf15SRobert Mustacchi 
661*d14abf15SRobert Mustacchi static BOOL ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
662*d14abf15SRobert Mustacchi {
663*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *mp = o->macs_pool;
664*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vp = o->vlans_pool;
665*d14abf15SRobert Mustacchi 
666*d14abf15SRobert Mustacchi 	if (!mp->put(mp, 1))
667*d14abf15SRobert Mustacchi 		return FALSE;
668*d14abf15SRobert Mustacchi 
669*d14abf15SRobert Mustacchi 	if (!vp->put(vp, 1)) {
670*d14abf15SRobert Mustacchi 		mp->get(mp, 1);
671*d14abf15SRobert Mustacchi 		return FALSE;
672*d14abf15SRobert Mustacchi 	}
673*d14abf15SRobert Mustacchi 
674*d14abf15SRobert Mustacchi 	return TRUE;
675*d14abf15SRobert Mustacchi }
676*d14abf15SRobert Mustacchi 
677*d14abf15SRobert Mustacchi /**
678*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
679*d14abf15SRobert Mustacchi  * head list.
680*d14abf15SRobert Mustacchi  *
681*d14abf15SRobert Mustacchi  * @pdev:	device handle
682*d14abf15SRobert Mustacchi  * @o:		vlan_mac object
683*d14abf15SRobert Mustacchi  *
684*d14abf15SRobert Mustacchi  * @details: Non-blocking implementation; should be called under execution
685*d14abf15SRobert Mustacchi  *           queue lock.
686*d14abf15SRobert Mustacchi  */
687*d14abf15SRobert Mustacchi static int __ecore_vlan_mac_h_write_trylock(struct _lm_device_t *pdev,
688*d14abf15SRobert Mustacchi 					    struct ecore_vlan_mac_obj *o)
689*d14abf15SRobert Mustacchi {
690*d14abf15SRobert Mustacchi 	if (o->head_reader) {
691*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "vlan_mac_lock writer - There are readers; Busy\n");
692*d14abf15SRobert Mustacchi 		return ECORE_BUSY;
693*d14abf15SRobert Mustacchi 	}
694*d14abf15SRobert Mustacchi 
695*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_lock writer - Taken\n");
696*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
697*d14abf15SRobert Mustacchi }
698*d14abf15SRobert Mustacchi 
699*d14abf15SRobert Mustacchi /**
700*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
701*d14abf15SRobert Mustacchi  * which wasn't able to run due to a taken lock on vlan mac head list.
702*d14abf15SRobert Mustacchi  *
703*d14abf15SRobert Mustacchi  * @pdev:	device handle
704*d14abf15SRobert Mustacchi  * @o:		vlan_mac object
705*d14abf15SRobert Mustacchi  *
706*d14abf15SRobert Mustacchi  * @details Should be called under execution queue lock; notice it might release
707*d14abf15SRobert Mustacchi  *          and reclaim it during its run.
708*d14abf15SRobert Mustacchi  */
709*d14abf15SRobert Mustacchi static void __ecore_vlan_mac_h_exec_pending(struct _lm_device_t *pdev,
710*d14abf15SRobert Mustacchi 					    struct ecore_vlan_mac_obj *o)
711*d14abf15SRobert Mustacchi {
712*d14abf15SRobert Mustacchi 	int rc;
713*d14abf15SRobert Mustacchi 	unsigned long ramrod_flags = o->saved_ramrod_flags;
714*d14abf15SRobert Mustacchi 
715*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
716*d14abf15SRobert Mustacchi 		  ramrod_flags);
717*d14abf15SRobert Mustacchi 	o->head_exe_request = FALSE;
718*d14abf15SRobert Mustacchi 	o->saved_ramrod_flags = 0;
719*d14abf15SRobert Mustacchi 	rc = ecore_exe_queue_step(pdev, &o->exe_queue, &ramrod_flags);
720*d14abf15SRobert Mustacchi 	if (rc != ECORE_SUCCESS) {
721*d14abf15SRobert Mustacchi 		ECORE_ERR("execution of pending commands failed with rc %d\n",
722*d14abf15SRobert Mustacchi 			  rc);
723*d14abf15SRobert Mustacchi #ifdef ECORE_STOP_ON_ERROR
724*d14abf15SRobert Mustacchi 		ecore_panic();
725*d14abf15SRobert Mustacchi #endif
726*d14abf15SRobert Mustacchi 	}
727*d14abf15SRobert Mustacchi }
728*d14abf15SRobert Mustacchi 
729*d14abf15SRobert Mustacchi /**
730*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
731*d14abf15SRobert Mustacchi  * called due to vlan mac head list lock being taken.
732*d14abf15SRobert Mustacchi  *
733*d14abf15SRobert Mustacchi  * @pdev:		device handle
734*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
735*d14abf15SRobert Mustacchi  * @ramrod_flags:	ramrod flags of missed execution
736*d14abf15SRobert Mustacchi  *
737*d14abf15SRobert Mustacchi  * @details Should be called under execution queue lock.
738*d14abf15SRobert Mustacchi  */
739*d14abf15SRobert Mustacchi static void __ecore_vlan_mac_h_pend(struct _lm_device_t *pdev,
740*d14abf15SRobert Mustacchi 				    struct ecore_vlan_mac_obj *o,
741*d14abf15SRobert Mustacchi 				    unsigned long ramrod_flags)
742*d14abf15SRobert Mustacchi {
743*d14abf15SRobert Mustacchi 	o->head_exe_request = TRUE;
744*d14abf15SRobert Mustacchi 	o->saved_ramrod_flags = ramrod_flags;
745*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Placing pending execution with ramrod flags %lu\n",
746*d14abf15SRobert Mustacchi 		  ramrod_flags);
747*d14abf15SRobert Mustacchi }
748*d14abf15SRobert Mustacchi 
749*d14abf15SRobert Mustacchi /**
750*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
751*d14abf15SRobert Mustacchi  *
752*d14abf15SRobert Mustacchi  * @pdev:		device handle
753*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
754*d14abf15SRobert Mustacchi  *
755*d14abf15SRobert Mustacchi  * @details Should be called under execution queue lock. Notice if a pending
756*d14abf15SRobert Mustacchi  *          execution exists, it would perform it - possibly releasing and
757*d14abf15SRobert Mustacchi  *          reclaiming the execution queue lock.
758*d14abf15SRobert Mustacchi  */
759*d14abf15SRobert Mustacchi static void __ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev,
760*d14abf15SRobert Mustacchi 					    struct ecore_vlan_mac_obj *o)
761*d14abf15SRobert Mustacchi {
762*d14abf15SRobert Mustacchi 	/* It's possible a new pending execution was added since this writer
763*d14abf15SRobert Mustacchi 	 * executed. If so, execute again. [Ad infinitum]
764*d14abf15SRobert Mustacchi 	 */
765*d14abf15SRobert Mustacchi 	while(o->head_exe_request) {
766*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "vlan_mac_lock - writer release encountered a pending request\n");
767*d14abf15SRobert Mustacchi 		__ecore_vlan_mac_h_exec_pending(pdev, o);
768*d14abf15SRobert Mustacchi 	}
769*d14abf15SRobert Mustacchi }
770*d14abf15SRobert Mustacchi 
771*d14abf15SRobert Mustacchi /**
772*d14abf15SRobert Mustacchi  * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
773*d14abf15SRobert Mustacchi  *
774*d14abf15SRobert Mustacchi  * @pdev:		device handle
775*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
776*d14abf15SRobert Mustacchi  *
777*d14abf15SRobert Mustacchi  * @details Notice if a pending execution exists, it would perform it -
778*d14abf15SRobert Mustacchi  *          possibly releasing and reclaiming the execution queue lock.
779*d14abf15SRobert Mustacchi  */
780*d14abf15SRobert Mustacchi void ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev,
781*d14abf15SRobert Mustacchi 				   struct ecore_vlan_mac_obj *o)
782*d14abf15SRobert Mustacchi {
783*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
784*d14abf15SRobert Mustacchi 	__ecore_vlan_mac_h_write_unlock(pdev, o);
785*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
786*d14abf15SRobert Mustacchi }
787*d14abf15SRobert Mustacchi 
788*d14abf15SRobert Mustacchi /**
789*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
790*d14abf15SRobert Mustacchi  *
791*d14abf15SRobert Mustacchi  * @pdev:		device handle
792*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
793*d14abf15SRobert Mustacchi  *
794*d14abf15SRobert Mustacchi  * @details Should be called under the execution queue lock. May sleep. May
795*d14abf15SRobert Mustacchi  *          release and reclaim execution queue lock during its run.
796*d14abf15SRobert Mustacchi  */
797*d14abf15SRobert Mustacchi static int __ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev,
798*d14abf15SRobert Mustacchi 					struct ecore_vlan_mac_obj *o)
799*d14abf15SRobert Mustacchi {
800*d14abf15SRobert Mustacchi 	/* If we got here, we're holding lock --> no WRITER exists */
801*d14abf15SRobert Mustacchi 	o->head_reader++;
802*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_lock - locked reader - number %d\n",
803*d14abf15SRobert Mustacchi 		  o->head_reader);
804*d14abf15SRobert Mustacchi 
805*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
806*d14abf15SRobert Mustacchi }
807*d14abf15SRobert Mustacchi 
808*d14abf15SRobert Mustacchi /**
809*d14abf15SRobert Mustacchi  * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
810*d14abf15SRobert Mustacchi  *
811*d14abf15SRobert Mustacchi  * @pdev:		device handle
812*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
813*d14abf15SRobert Mustacchi  *
814*d14abf15SRobert Mustacchi  * @details May sleep. Claims and releases execution queue lock during its run.
815*d14abf15SRobert Mustacchi  */
816*d14abf15SRobert Mustacchi int ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev,
817*d14abf15SRobert Mustacchi 			       struct ecore_vlan_mac_obj *o)
818*d14abf15SRobert Mustacchi {
819*d14abf15SRobert Mustacchi 	int rc;
820*d14abf15SRobert Mustacchi 
821*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
822*d14abf15SRobert Mustacchi 	rc = __ecore_vlan_mac_h_read_lock(pdev, o);
823*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
824*d14abf15SRobert Mustacchi 
825*d14abf15SRobert Mustacchi 	return rc;
826*d14abf15SRobert Mustacchi }
827*d14abf15SRobert Mustacchi 
828*d14abf15SRobert Mustacchi /**
829*d14abf15SRobert Mustacchi  * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
830*d14abf15SRobert Mustacchi  *
831*d14abf15SRobert Mustacchi  * @pdev:		device handle
832*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
833*d14abf15SRobert Mustacchi  *
834*d14abf15SRobert Mustacchi  * @details Should be called under execution queue lock. Notice if a pending
835*d14abf15SRobert Mustacchi  *          execution exists, it would be performed if this was the last
836*d14abf15SRobert Mustacchi  *          reader. possibly releasing and reclaiming the execution queue lock.
837*d14abf15SRobert Mustacchi  */
838*d14abf15SRobert Mustacchi static void __ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev,
839*d14abf15SRobert Mustacchi 					  struct ecore_vlan_mac_obj *o)
840*d14abf15SRobert Mustacchi {
841*d14abf15SRobert Mustacchi 	if (!o->head_reader) {
842*d14abf15SRobert Mustacchi 		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
843*d14abf15SRobert Mustacchi #ifdef ECORE_STOP_ON_ERROR
844*d14abf15SRobert Mustacchi 		ecore_panic();
845*d14abf15SRobert Mustacchi #endif
846*d14abf15SRobert Mustacchi 	} else {
847*d14abf15SRobert Mustacchi 		o->head_reader--;
848*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "vlan_mac_lock - decreased readers to %d\n",
849*d14abf15SRobert Mustacchi 			  o->head_reader);
850*d14abf15SRobert Mustacchi 	}
851*d14abf15SRobert Mustacchi 
852*d14abf15SRobert Mustacchi 	/* It's possible a new pending execution was added, and that this reader
853*d14abf15SRobert Mustacchi 	 * was last - if so we need to execute the command.
854*d14abf15SRobert Mustacchi 	 */
855*d14abf15SRobert Mustacchi 	if (!o->head_reader && o->head_exe_request) {
856*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "vlan_mac_lock - reader release encountered a pending request\n");
857*d14abf15SRobert Mustacchi 
858*d14abf15SRobert Mustacchi 		/* Writer release will do the trick */
859*d14abf15SRobert Mustacchi 		__ecore_vlan_mac_h_write_unlock(pdev, o);
860*d14abf15SRobert Mustacchi 	}
861*d14abf15SRobert Mustacchi }
862*d14abf15SRobert Mustacchi 
863*d14abf15SRobert Mustacchi /**
864*d14abf15SRobert Mustacchi  * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
865*d14abf15SRobert Mustacchi  *
866*d14abf15SRobert Mustacchi  * @pdev:		device handle
867*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
868*d14abf15SRobert Mustacchi  *
869*d14abf15SRobert Mustacchi  * @details Notice if a pending execution exists, it would be performed if this
870*d14abf15SRobert Mustacchi  *          was the last reader. Claims and releases the execution queue lock
871*d14abf15SRobert Mustacchi  *          during its run.
872*d14abf15SRobert Mustacchi  */
873*d14abf15SRobert Mustacchi void ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev,
874*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_obj *o)
875*d14abf15SRobert Mustacchi {
876*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
877*d14abf15SRobert Mustacchi 	__ecore_vlan_mac_h_read_unlock(pdev, o);
878*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
879*d14abf15SRobert Mustacchi }
880*d14abf15SRobert Mustacchi 
881*d14abf15SRobert Mustacchi /**
882*d14abf15SRobert Mustacchi  * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
883*d14abf15SRobert Mustacchi  *
884*d14abf15SRobert Mustacchi  * @pdev:		device handle
885*d14abf15SRobert Mustacchi  * @o:			vlan_mac object
886*d14abf15SRobert Mustacchi  * @n:			number of elements to get
887*d14abf15SRobert Mustacchi  * @base:		base address for element placement
888*d14abf15SRobert Mustacchi  * @stride:		stride between elements (in bytes)
889*d14abf15SRobert Mustacchi  */
890*d14abf15SRobert Mustacchi static int ecore_get_n_elements(struct _lm_device_t *pdev, struct ecore_vlan_mac_obj *o,
891*d14abf15SRobert Mustacchi 				 int n, u8 *base, u8 stride, u8 size)
892*d14abf15SRobert Mustacchi {
893*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
894*d14abf15SRobert Mustacchi 	u8 *next = base;
895*d14abf15SRobert Mustacchi 	int counter = 0;
896*d14abf15SRobert Mustacchi 	int read_lock;
897*d14abf15SRobert Mustacchi 
898*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "get_n_elements - taking vlan_mac_lock (reader)\n");
899*d14abf15SRobert Mustacchi 	read_lock = ecore_vlan_mac_h_read_lock(pdev, o);
900*d14abf15SRobert Mustacchi 	if (read_lock != ECORE_SUCCESS)
901*d14abf15SRobert Mustacchi 		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
902*d14abf15SRobert Mustacchi 
903*d14abf15SRobert Mustacchi 	/* traverse list */
904*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
905*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem) {
906*d14abf15SRobert Mustacchi 		if (counter < n) {
907*d14abf15SRobert Mustacchi 			mm_memcpy(next, &pos->u, size);
908*d14abf15SRobert Mustacchi 			counter++;
909*d14abf15SRobert Mustacchi 			ECORE_MSG(pdev, "copied element number %d to address %p element was:\n",
910*d14abf15SRobert Mustacchi 				  counter, next);
911*d14abf15SRobert Mustacchi 			next += stride + size;
912*d14abf15SRobert Mustacchi 		}
913*d14abf15SRobert Mustacchi 	}
914*d14abf15SRobert Mustacchi 
915*d14abf15SRobert Mustacchi 	if (read_lock == ECORE_SUCCESS) {
916*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "get_n_elements - releasing vlan_mac_lock (reader)\n");
917*d14abf15SRobert Mustacchi 		ecore_vlan_mac_h_read_unlock(pdev, o);
918*d14abf15SRobert Mustacchi 	}
919*d14abf15SRobert Mustacchi 
920*d14abf15SRobert Mustacchi 	return counter * ETH_ALEN;
921*d14abf15SRobert Mustacchi }
922*d14abf15SRobert Mustacchi 
923*d14abf15SRobert Mustacchi /* check_add() callbacks */
924*d14abf15SRobert Mustacchi static int ecore_check_mac_add(struct _lm_device_t *pdev,
925*d14abf15SRobert Mustacchi 			       struct ecore_vlan_mac_obj *o,
926*d14abf15SRobert Mustacchi 			       union ecore_classification_ramrod_data *data)
927*d14abf15SRobert Mustacchi {
928*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
929*d14abf15SRobert Mustacchi 
930*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
931*d14abf15SRobert Mustacchi 
932*d14abf15SRobert Mustacchi 	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
933*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
934*d14abf15SRobert Mustacchi 
935*d14abf15SRobert Mustacchi 	/* Check if a requested MAC already exists */
936*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
937*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
938*d14abf15SRobert Mustacchi 		if (mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
939*d14abf15SRobert Mustacchi 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
940*d14abf15SRobert Mustacchi 			return ECORE_EXISTS;
941*d14abf15SRobert Mustacchi 
942*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
943*d14abf15SRobert Mustacchi }
944*d14abf15SRobert Mustacchi 
945*d14abf15SRobert Mustacchi static int ecore_check_vlan_add(struct _lm_device_t *pdev,
946*d14abf15SRobert Mustacchi 				struct ecore_vlan_mac_obj *o,
947*d14abf15SRobert Mustacchi 				union ecore_classification_ramrod_data *data)
948*d14abf15SRobert Mustacchi {
949*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
950*d14abf15SRobert Mustacchi 
951*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
952*d14abf15SRobert Mustacchi 
953*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
954*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
955*d14abf15SRobert Mustacchi 		if (data->vlan.vlan == pos->u.vlan.vlan)
956*d14abf15SRobert Mustacchi 			return ECORE_EXISTS;
957*d14abf15SRobert Mustacchi 
958*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
959*d14abf15SRobert Mustacchi }
960*d14abf15SRobert Mustacchi 
961*d14abf15SRobert Mustacchi static int ecore_check_vlan_mac_add(struct _lm_device_t *pdev,
962*d14abf15SRobert Mustacchi 				    struct ecore_vlan_mac_obj *o,
963*d14abf15SRobert Mustacchi 				   union ecore_classification_ramrod_data *data)
964*d14abf15SRobert Mustacchi {
965*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
966*d14abf15SRobert Mustacchi 
967*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
968*d14abf15SRobert Mustacchi 		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
969*d14abf15SRobert Mustacchi 
970*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
971*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
972*d14abf15SRobert Mustacchi 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
973*d14abf15SRobert Mustacchi 		    (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
974*d14abf15SRobert Mustacchi 				  ETH_ALEN)) &&
975*d14abf15SRobert Mustacchi 		    (data->vlan_mac.is_inner_mac ==
976*d14abf15SRobert Mustacchi 		     pos->u.vlan_mac.is_inner_mac))
977*d14abf15SRobert Mustacchi 			return ECORE_EXISTS;
978*d14abf15SRobert Mustacchi 
979*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
980*d14abf15SRobert Mustacchi }
981*d14abf15SRobert Mustacchi 
982*d14abf15SRobert Mustacchi /* check_del() callbacks */
983*d14abf15SRobert Mustacchi static struct ecore_vlan_mac_registry_elem *
984*d14abf15SRobert Mustacchi 	ecore_check_mac_del(struct _lm_device_t *pdev,
985*d14abf15SRobert Mustacchi 			    struct ecore_vlan_mac_obj *o,
986*d14abf15SRobert Mustacchi 			    union ecore_classification_ramrod_data *data)
987*d14abf15SRobert Mustacchi {
988*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
989*d14abf15SRobert Mustacchi 
990*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
991*d14abf15SRobert Mustacchi 
992*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
993*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
994*d14abf15SRobert Mustacchi 		if ((mm_memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
995*d14abf15SRobert Mustacchi 		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
996*d14abf15SRobert Mustacchi 			return pos;
997*d14abf15SRobert Mustacchi 
998*d14abf15SRobert Mustacchi 	return NULL;
999*d14abf15SRobert Mustacchi }
1000*d14abf15SRobert Mustacchi 
1001*d14abf15SRobert Mustacchi static struct ecore_vlan_mac_registry_elem *
1002*d14abf15SRobert Mustacchi 	ecore_check_vlan_del(struct _lm_device_t *pdev,
1003*d14abf15SRobert Mustacchi 			     struct ecore_vlan_mac_obj *o,
1004*d14abf15SRobert Mustacchi 			     union ecore_classification_ramrod_data *data)
1005*d14abf15SRobert Mustacchi {
1006*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
1007*d14abf15SRobert Mustacchi 
1008*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
1009*d14abf15SRobert Mustacchi 
1010*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1011*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
1012*d14abf15SRobert Mustacchi 		if (data->vlan.vlan == pos->u.vlan.vlan)
1013*d14abf15SRobert Mustacchi 			return pos;
1014*d14abf15SRobert Mustacchi 
1015*d14abf15SRobert Mustacchi 	return NULL;
1016*d14abf15SRobert Mustacchi }
1017*d14abf15SRobert Mustacchi 
1018*d14abf15SRobert Mustacchi static struct ecore_vlan_mac_registry_elem *
1019*d14abf15SRobert Mustacchi 	ecore_check_vlan_mac_del(struct _lm_device_t *pdev,
1020*d14abf15SRobert Mustacchi 				 struct ecore_vlan_mac_obj *o,
1021*d14abf15SRobert Mustacchi 				 union ecore_classification_ramrod_data *data)
1022*d14abf15SRobert Mustacchi {
1023*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
1024*d14abf15SRobert Mustacchi 
1025*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
1026*d14abf15SRobert Mustacchi 		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
1027*d14abf15SRobert Mustacchi 
1028*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1029*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem)
1030*d14abf15SRobert Mustacchi 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
1031*d14abf15SRobert Mustacchi 		    (mm_memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
1032*d14abf15SRobert Mustacchi 			     ETH_ALEN)) &&
1033*d14abf15SRobert Mustacchi 		    (data->vlan_mac.is_inner_mac ==
1034*d14abf15SRobert Mustacchi 		     pos->u.vlan_mac.is_inner_mac))
1035*d14abf15SRobert Mustacchi 			return pos;
1036*d14abf15SRobert Mustacchi 
1037*d14abf15SRobert Mustacchi 	return NULL;
1038*d14abf15SRobert Mustacchi }
1039*d14abf15SRobert Mustacchi 
1040*d14abf15SRobert Mustacchi /* check_move() callback */
1041*d14abf15SRobert Mustacchi static BOOL ecore_check_move(struct _lm_device_t *pdev,
1042*d14abf15SRobert Mustacchi 			     struct ecore_vlan_mac_obj *src_o,
1043*d14abf15SRobert Mustacchi 			     struct ecore_vlan_mac_obj *dst_o,
1044*d14abf15SRobert Mustacchi 			     union ecore_classification_ramrod_data *data)
1045*d14abf15SRobert Mustacchi {
1046*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
1047*d14abf15SRobert Mustacchi 	int rc;
1048*d14abf15SRobert Mustacchi 
1049*d14abf15SRobert Mustacchi 	/* Check if we can delete the requested configuration from the first
1050*d14abf15SRobert Mustacchi 	 * object.
1051*d14abf15SRobert Mustacchi 	 */
1052*d14abf15SRobert Mustacchi 	pos = src_o->check_del(pdev, src_o, data);
1053*d14abf15SRobert Mustacchi 
1054*d14abf15SRobert Mustacchi 	/*  check if configuration can be added */
1055*d14abf15SRobert Mustacchi 	rc = dst_o->check_add(pdev, dst_o, data);
1056*d14abf15SRobert Mustacchi 
1057*d14abf15SRobert Mustacchi 	/* If this classification can not be added (is already set)
1058*d14abf15SRobert Mustacchi 	 * or can't be deleted - return an error.
1059*d14abf15SRobert Mustacchi 	 */
1060*d14abf15SRobert Mustacchi 	if (rc || !pos)
1061*d14abf15SRobert Mustacchi 		return FALSE;
1062*d14abf15SRobert Mustacchi 
1063*d14abf15SRobert Mustacchi 	return TRUE;
1064*d14abf15SRobert Mustacchi }
1065*d14abf15SRobert Mustacchi 
1066*d14abf15SRobert Mustacchi static BOOL ecore_check_move_always_err(
1067*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev,
1068*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *src_o,
1069*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *dst_o,
1070*d14abf15SRobert Mustacchi 	union ecore_classification_ramrod_data *data)
1071*d14abf15SRobert Mustacchi {
1072*d14abf15SRobert Mustacchi 	return FALSE;
1073*d14abf15SRobert Mustacchi }
1074*d14abf15SRobert Mustacchi 
1075*d14abf15SRobert Mustacchi static INLINE u8 ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
1076*d14abf15SRobert Mustacchi {
1077*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1078*d14abf15SRobert Mustacchi 	u8 rx_tx_flag = 0;
1079*d14abf15SRobert Mustacchi 
1080*d14abf15SRobert Mustacchi 	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
1081*d14abf15SRobert Mustacchi 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
1082*d14abf15SRobert Mustacchi 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
1083*d14abf15SRobert Mustacchi 
1084*d14abf15SRobert Mustacchi 	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
1085*d14abf15SRobert Mustacchi 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
1086*d14abf15SRobert Mustacchi 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
1087*d14abf15SRobert Mustacchi 
1088*d14abf15SRobert Mustacchi 	return rx_tx_flag;
1089*d14abf15SRobert Mustacchi }
1090*d14abf15SRobert Mustacchi 
1091*d14abf15SRobert Mustacchi void ecore_set_mac_in_nig(struct _lm_device_t *pdev,
1092*d14abf15SRobert Mustacchi 			  BOOL add, unsigned char *dev_addr, int index)
1093*d14abf15SRobert Mustacchi {
1094*d14abf15SRobert Mustacchi 	u32 wb_data[2];
1095*d14abf15SRobert Mustacchi 	u32 reg_offset = PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM :
1096*d14abf15SRobert Mustacchi 			 NIG_REG_LLH0_FUNC_MEM;
1097*d14abf15SRobert Mustacchi 
1098*d14abf15SRobert Mustacchi 	if (!IS_MF_SI_MODE(pdev) && !IS_MF_AFEX(pdev))
1099*d14abf15SRobert Mustacchi 		return;
1100*d14abf15SRobert Mustacchi 
1101*d14abf15SRobert Mustacchi 	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
1102*d14abf15SRobert Mustacchi 		return;
1103*d14abf15SRobert Mustacchi 
1104*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Going to %s LLH configuration at entry %d\n",
1105*d14abf15SRobert Mustacchi 		  (add ? "ADD" : "DELETE"), index);
1106*d14abf15SRobert Mustacchi 
1107*d14abf15SRobert Mustacchi 	if (add) {
1108*d14abf15SRobert Mustacchi 		/* LLH_FUNC_MEM is a u64 WB register */
1109*d14abf15SRobert Mustacchi 		reg_offset += 8*index;
1110*d14abf15SRobert Mustacchi 
1111*d14abf15SRobert Mustacchi 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
1112*d14abf15SRobert Mustacchi 			      (dev_addr[4] <<  8) |  dev_addr[5]);
1113*d14abf15SRobert Mustacchi 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
1114*d14abf15SRobert Mustacchi 
1115*d14abf15SRobert Mustacchi 		REG_WR_DMAE_LEN(pdev, reg_offset, wb_data, 2);
1116*d14abf15SRobert Mustacchi 	}
1117*d14abf15SRobert Mustacchi 
1118*d14abf15SRobert Mustacchi 	REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
1119*d14abf15SRobert Mustacchi 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
1120*d14abf15SRobert Mustacchi }
1121*d14abf15SRobert Mustacchi 
1122*d14abf15SRobert Mustacchi /**
1123*d14abf15SRobert Mustacchi  * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
1124*d14abf15SRobert Mustacchi  *
1125*d14abf15SRobert Mustacchi  * @pdev:	device handle
1126*d14abf15SRobert Mustacchi  * @o:		queue for which we want to configure this rule
1127*d14abf15SRobert Mustacchi  * @add:	if TRUE the command is an ADD command, DEL otherwise
1128*d14abf15SRobert Mustacchi  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
1129*d14abf15SRobert Mustacchi  * @hdr:	pointer to a header to setup
1130*d14abf15SRobert Mustacchi  *
1131*d14abf15SRobert Mustacchi  */
1132*d14abf15SRobert Mustacchi static INLINE void ecore_vlan_mac_set_cmd_hdr_e2(struct _lm_device_t *pdev,
1133*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o, BOOL add, int opcode,
1134*d14abf15SRobert Mustacchi 	struct eth_classify_cmd_header *hdr)
1135*d14abf15SRobert Mustacchi {
1136*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1137*d14abf15SRobert Mustacchi 
1138*d14abf15SRobert Mustacchi 	hdr->client_id = raw->cl_id;
1139*d14abf15SRobert Mustacchi 	hdr->func_id = raw->func_id;
1140*d14abf15SRobert Mustacchi 
1141*d14abf15SRobert Mustacchi 	/* Rx or/and Tx (internal switching) configuration ? */
1142*d14abf15SRobert Mustacchi 	hdr->cmd_general_data |=
1143*d14abf15SRobert Mustacchi 		ecore_vlan_mac_get_rx_tx_flag(o);
1144*d14abf15SRobert Mustacchi 
1145*d14abf15SRobert Mustacchi 	if (add)
1146*d14abf15SRobert Mustacchi 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
1147*d14abf15SRobert Mustacchi 
1148*d14abf15SRobert Mustacchi 	hdr->cmd_general_data |=
1149*d14abf15SRobert Mustacchi 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
1150*d14abf15SRobert Mustacchi }
1151*d14abf15SRobert Mustacchi 
1152*d14abf15SRobert Mustacchi /**
1153*d14abf15SRobert Mustacchi  * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
1154*d14abf15SRobert Mustacchi  *
1155*d14abf15SRobert Mustacchi  * @cid:	connection id
1156*d14abf15SRobert Mustacchi  * @type:	ECORE_FILTER_XXX_PENDING
1157*d14abf15SRobert Mustacchi  * @hdr:	pointer to header to setup
1158*d14abf15SRobert Mustacchi  * @rule_cnt:
1159*d14abf15SRobert Mustacchi  *
1160*d14abf15SRobert Mustacchi  * currently we always configure one rule and echo field to contain a CID and an
1161*d14abf15SRobert Mustacchi  * opcode type.
1162*d14abf15SRobert Mustacchi  */
1163*d14abf15SRobert Mustacchi static INLINE void ecore_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
1164*d14abf15SRobert Mustacchi 				struct eth_classify_header *hdr, int rule_cnt)
1165*d14abf15SRobert Mustacchi {
1166*d14abf15SRobert Mustacchi 	hdr->echo = mm_cpu_to_le32((cid & ECORE_SWCID_MASK) |
1167*d14abf15SRobert Mustacchi 				(type << ECORE_SWCID_SHIFT));
1168*d14abf15SRobert Mustacchi 	hdr->rule_cnt = (u8)rule_cnt;
1169*d14abf15SRobert Mustacchi }
1170*d14abf15SRobert Mustacchi 
1171*d14abf15SRobert Mustacchi /* hw_config() callbacks */
1172*d14abf15SRobert Mustacchi static void ecore_set_one_mac_e2(struct _lm_device_t *pdev,
1173*d14abf15SRobert Mustacchi 				 struct ecore_vlan_mac_obj *o,
1174*d14abf15SRobert Mustacchi 				 struct ecore_exeq_elem *elem, int rule_idx,
1175*d14abf15SRobert Mustacchi 				 int cam_offset)
1176*d14abf15SRobert Mustacchi {
1177*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1178*d14abf15SRobert Mustacchi 	struct eth_classify_rules_ramrod_data *data =
1179*d14abf15SRobert Mustacchi 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1180*d14abf15SRobert Mustacchi 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
1181*d14abf15SRobert Mustacchi 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1182*d14abf15SRobert Mustacchi 	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1183*d14abf15SRobert Mustacchi 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
1184*d14abf15SRobert Mustacchi 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
1185*d14abf15SRobert Mustacchi 
1186*d14abf15SRobert Mustacchi 	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
1187*d14abf15SRobert Mustacchi 	 * relevant. In addition, current implementation is tuned for a
1188*d14abf15SRobert Mustacchi 	 * single ETH MAC.
1189*d14abf15SRobert Mustacchi 	 *
1190*d14abf15SRobert Mustacchi 	 * When multiple unicast ETH MACs PF configuration in switch
1191*d14abf15SRobert Mustacchi 	 * independent mode is required (NetQ, multiple netdev MACs,
1192*d14abf15SRobert Mustacchi 	 * etc.), consider better utilisation of 8 per function MAC
1193*d14abf15SRobert Mustacchi 	 * entries in the LLH register. There is also
1194*d14abf15SRobert Mustacchi 	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
1195*d14abf15SRobert Mustacchi 	 * total number of CAM entries to 16.
1196*d14abf15SRobert Mustacchi 	 *
1197*d14abf15SRobert Mustacchi 	 * Currently we won't configure NIG for MACs other than a primary ETH
1198*d14abf15SRobert Mustacchi 	 * MAC and iSCSI L2 MAC.
1199*d14abf15SRobert Mustacchi 	 *
1200*d14abf15SRobert Mustacchi 	 * If this MAC is moving from one Queue to another, no need to change
1201*d14abf15SRobert Mustacchi 	 * NIG configuration.
1202*d14abf15SRobert Mustacchi 	 */
1203*d14abf15SRobert Mustacchi 	if (cmd != ECORE_VLAN_MAC_MOVE) {
1204*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
1205*d14abf15SRobert Mustacchi 			ecore_set_mac_in_nig(pdev, add, mac,
1206*d14abf15SRobert Mustacchi 					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
1207*d14abf15SRobert Mustacchi 		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
1208*d14abf15SRobert Mustacchi 			ecore_set_mac_in_nig(pdev, add, mac,
1209*d14abf15SRobert Mustacchi 					     ECORE_LLH_CAM_ETH_LINE);
1210*d14abf15SRobert Mustacchi 	}
1211*d14abf15SRobert Mustacchi 
1212*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer for the first rule */
1213*d14abf15SRobert Mustacchi 	if (rule_idx == 0)
1214*d14abf15SRobert Mustacchi 		mm_memset(data, 0, sizeof(*data));
1215*d14abf15SRobert Mustacchi 
1216*d14abf15SRobert Mustacchi 	/* Setup a command header */
1217*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_MAC,
1218*d14abf15SRobert Mustacchi 				      &rule_entry->mac.header);
1219*d14abf15SRobert Mustacchi 
1220*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
1221*d14abf15SRobert Mustacchi 		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
1222*d14abf15SRobert Mustacchi 
1223*d14abf15SRobert Mustacchi 	/* Set a MAC itself */
1224*d14abf15SRobert Mustacchi 	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1225*d14abf15SRobert Mustacchi 			      &rule_entry->mac.mac_mid,
1226*d14abf15SRobert Mustacchi 			      &rule_entry->mac.mac_lsb, mac);
1227*d14abf15SRobert Mustacchi 	rule_entry->mac.inner_mac =
1228*d14abf15SRobert Mustacchi 		mm_cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
1229*d14abf15SRobert Mustacchi 
1230*d14abf15SRobert Mustacchi 	/* MOVE: Add a rule that will add this MAC to the target Queue */
1231*d14abf15SRobert Mustacchi 	if (cmd == ECORE_VLAN_MAC_MOVE) {
1232*d14abf15SRobert Mustacchi 		rule_entry++;
1233*d14abf15SRobert Mustacchi 		rule_cnt++;
1234*d14abf15SRobert Mustacchi 
1235*d14abf15SRobert Mustacchi 		/* Setup ramrod data */
1236*d14abf15SRobert Mustacchi 		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
1237*d14abf15SRobert Mustacchi 					elem->cmd_data.vlan_mac.target_obj,
1238*d14abf15SRobert Mustacchi 					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
1239*d14abf15SRobert Mustacchi 					      &rule_entry->mac.header);
1240*d14abf15SRobert Mustacchi 
1241*d14abf15SRobert Mustacchi 		/* Set a MAC itself */
1242*d14abf15SRobert Mustacchi 		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1243*d14abf15SRobert Mustacchi 				      &rule_entry->mac.mac_mid,
1244*d14abf15SRobert Mustacchi 				      &rule_entry->mac.mac_lsb, mac);
1245*d14abf15SRobert Mustacchi 		rule_entry->mac.inner_mac =
1246*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(elem->cmd_data.vlan_mac.
1247*d14abf15SRobert Mustacchi 				       u.mac.is_inner_mac);
1248*d14abf15SRobert Mustacchi 	}
1249*d14abf15SRobert Mustacchi 
1250*d14abf15SRobert Mustacchi 	/* Set the ramrod data header */
1251*d14abf15SRobert Mustacchi 	/* TODO: take this to the higher level in order to prevent multiple
1252*d14abf15SRobert Mustacchi 		 writing */
1253*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1254*d14abf15SRobert Mustacchi 					rule_cnt);
1255*d14abf15SRobert Mustacchi }
1256*d14abf15SRobert Mustacchi 
1257*d14abf15SRobert Mustacchi /**
1258*d14abf15SRobert Mustacchi  * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1259*d14abf15SRobert Mustacchi  *
1260*d14abf15SRobert Mustacchi  * @pdev:	device handle
1261*d14abf15SRobert Mustacchi  * @o:		queue
1262*d14abf15SRobert Mustacchi  * @type:
1263*d14abf15SRobert Mustacchi  * @cam_offset:	offset in cam memory
1264*d14abf15SRobert Mustacchi  * @hdr:	pointer to a header to setup
1265*d14abf15SRobert Mustacchi  *
1266*d14abf15SRobert Mustacchi  * E1/E1H
1267*d14abf15SRobert Mustacchi  */
1268*d14abf15SRobert Mustacchi static INLINE void ecore_vlan_mac_set_rdata_hdr_e1x(struct _lm_device_t *pdev,
1269*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1270*d14abf15SRobert Mustacchi 	struct mac_configuration_hdr *hdr)
1271*d14abf15SRobert Mustacchi {
1272*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
1273*d14abf15SRobert Mustacchi 
1274*d14abf15SRobert Mustacchi 	hdr->length = 1;
1275*d14abf15SRobert Mustacchi 	hdr->offset = (u8)cam_offset;
1276*d14abf15SRobert Mustacchi 	hdr->client_id = mm_cpu_to_le16(0xff);
1277*d14abf15SRobert Mustacchi 	hdr->echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) |
1278*d14abf15SRobert Mustacchi 				(type << ECORE_SWCID_SHIFT));
1279*d14abf15SRobert Mustacchi }
1280*d14abf15SRobert Mustacchi 
1281*d14abf15SRobert Mustacchi static INLINE void ecore_vlan_mac_set_cfg_entry_e1x(struct _lm_device_t *pdev,
1282*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o, BOOL add, int opcode, u8 *mac,
1283*d14abf15SRobert Mustacchi 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
1284*d14abf15SRobert Mustacchi {
1285*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
1286*d14abf15SRobert Mustacchi 	u32 cl_bit_vec = (1 << r->cl_id);
1287*d14abf15SRobert Mustacchi 
1288*d14abf15SRobert Mustacchi 	cfg_entry->clients_bit_vector = mm_cpu_to_le32(cl_bit_vec);
1289*d14abf15SRobert Mustacchi 	cfg_entry->pf_id = r->func_id;
1290*d14abf15SRobert Mustacchi 	cfg_entry->vlan_id = mm_cpu_to_le16(vlan_id);
1291*d14abf15SRobert Mustacchi 
1292*d14abf15SRobert Mustacchi 	if (add) {
1293*d14abf15SRobert Mustacchi 		ECORE_SET_FLAG(cfg_entry->flags,
1294*d14abf15SRobert Mustacchi 			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1295*d14abf15SRobert Mustacchi 			       T_ETH_MAC_COMMAND_SET);
1296*d14abf15SRobert Mustacchi 		ECORE_SET_FLAG(cfg_entry->flags,
1297*d14abf15SRobert Mustacchi 			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1298*d14abf15SRobert Mustacchi 			       opcode);
1299*d14abf15SRobert Mustacchi 
1300*d14abf15SRobert Mustacchi 		/* Set a MAC in a ramrod data */
1301*d14abf15SRobert Mustacchi 		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1302*d14abf15SRobert Mustacchi 				      &cfg_entry->middle_mac_addr,
1303*d14abf15SRobert Mustacchi 				      &cfg_entry->lsb_mac_addr, mac);
1304*d14abf15SRobert Mustacchi 	} else
1305*d14abf15SRobert Mustacchi 		ECORE_SET_FLAG(cfg_entry->flags,
1306*d14abf15SRobert Mustacchi 			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1307*d14abf15SRobert Mustacchi 			       T_ETH_MAC_COMMAND_INVALIDATE);
1308*d14abf15SRobert Mustacchi }
1309*d14abf15SRobert Mustacchi 
1310*d14abf15SRobert Mustacchi static INLINE void ecore_vlan_mac_set_rdata_e1x(struct _lm_device_t *pdev,
1311*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o, int type, int cam_offset, BOOL add,
1312*d14abf15SRobert Mustacchi 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
1313*d14abf15SRobert Mustacchi {
1314*d14abf15SRobert Mustacchi 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1315*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1316*d14abf15SRobert Mustacchi 
1317*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_hdr_e1x(pdev, o, type, cam_offset,
1318*d14abf15SRobert Mustacchi 					 &config->hdr);
1319*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_cfg_entry_e1x(pdev, o, add, opcode, mac, vlan_id,
1320*d14abf15SRobert Mustacchi 					 cfg_entry);
1321*d14abf15SRobert Mustacchi 
1322*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1323*d14abf15SRobert Mustacchi 		  (add ? "setting" : "clearing"),
1324*d14abf15SRobert Mustacchi 		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1325*d14abf15SRobert Mustacchi }
1326*d14abf15SRobert Mustacchi 
1327*d14abf15SRobert Mustacchi /**
1328*d14abf15SRobert Mustacchi  * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1329*d14abf15SRobert Mustacchi  *
1330*d14abf15SRobert Mustacchi  * @pdev:	device handle
1331*d14abf15SRobert Mustacchi  * @o:		ecore_vlan_mac_obj
1332*d14abf15SRobert Mustacchi  * @elem:	ecore_exeq_elem
1333*d14abf15SRobert Mustacchi  * @rule_idx:	rule_idx
1334*d14abf15SRobert Mustacchi  * @cam_offset: cam_offset
1335*d14abf15SRobert Mustacchi  */
1336*d14abf15SRobert Mustacchi static void ecore_set_one_mac_e1x(struct _lm_device_t *pdev,
1337*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_obj *o,
1338*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem *elem, int rule_idx,
1339*d14abf15SRobert Mustacchi 				  int cam_offset)
1340*d14abf15SRobert Mustacchi {
1341*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1342*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *config =
1343*d14abf15SRobert Mustacchi 		(struct mac_configuration_cmd *)(raw->rdata);
1344*d14abf15SRobert Mustacchi 	/* 57710 and 57711 do not support MOVE command,
1345*d14abf15SRobert Mustacchi 	 * so it's either ADD or DEL
1346*d14abf15SRobert Mustacchi 	 */
1347*d14abf15SRobert Mustacchi 	BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1348*d14abf15SRobert Mustacchi 		TRUE : FALSE;
1349*d14abf15SRobert Mustacchi 
1350*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer */
1351*d14abf15SRobert Mustacchi 	mm_memset(config, 0, sizeof(*config));
1352*d14abf15SRobert Mustacchi 
1353*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_e1x(pdev, o, raw->state,
1354*d14abf15SRobert Mustacchi 				     cam_offset, add,
1355*d14abf15SRobert Mustacchi 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1356*d14abf15SRobert Mustacchi 				     ETH_VLAN_FILTER_ANY_VLAN, config);
1357*d14abf15SRobert Mustacchi }
1358*d14abf15SRobert Mustacchi 
1359*d14abf15SRobert Mustacchi static void ecore_set_one_vlan_e2(struct _lm_device_t *pdev,
1360*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_obj *o,
1361*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem *elem, int rule_idx,
1362*d14abf15SRobert Mustacchi 				  int cam_offset)
1363*d14abf15SRobert Mustacchi {
1364*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1365*d14abf15SRobert Mustacchi 	struct eth_classify_rules_ramrod_data *data =
1366*d14abf15SRobert Mustacchi 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1367*d14abf15SRobert Mustacchi 	int rule_cnt = rule_idx + 1;
1368*d14abf15SRobert Mustacchi 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1369*d14abf15SRobert Mustacchi 	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1370*d14abf15SRobert Mustacchi 	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1371*d14abf15SRobert Mustacchi 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1372*d14abf15SRobert Mustacchi 
1373*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer for the first rule */
1374*d14abf15SRobert Mustacchi 	if (rule_idx == 0)
1375*d14abf15SRobert Mustacchi 		mm_memset(data, 0, sizeof(*data));
1376*d14abf15SRobert Mustacchi 
1377*d14abf15SRobert Mustacchi 	/* Set a rule header */
1378*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1379*d14abf15SRobert Mustacchi 				      &rule_entry->vlan.header);
1380*d14abf15SRobert Mustacchi 
1381*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1382*d14abf15SRobert Mustacchi 		  vlan);
1383*d14abf15SRobert Mustacchi 
1384*d14abf15SRobert Mustacchi 	/* Set a VLAN itself */
1385*d14abf15SRobert Mustacchi 	rule_entry->vlan.vlan = mm_cpu_to_le16(vlan);
1386*d14abf15SRobert Mustacchi 
1387*d14abf15SRobert Mustacchi 	/* MOVE: Add a rule that will add this MAC to the target Queue */
1388*d14abf15SRobert Mustacchi 	if (cmd == ECORE_VLAN_MAC_MOVE) {
1389*d14abf15SRobert Mustacchi 		rule_entry++;
1390*d14abf15SRobert Mustacchi 		rule_cnt++;
1391*d14abf15SRobert Mustacchi 
1392*d14abf15SRobert Mustacchi 		/* Setup ramrod data */
1393*d14abf15SRobert Mustacchi 		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
1394*d14abf15SRobert Mustacchi 					elem->cmd_data.vlan_mac.target_obj,
1395*d14abf15SRobert Mustacchi 					      TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1396*d14abf15SRobert Mustacchi 					      &rule_entry->vlan.header);
1397*d14abf15SRobert Mustacchi 
1398*d14abf15SRobert Mustacchi 		/* Set a VLAN itself */
1399*d14abf15SRobert Mustacchi 		rule_entry->vlan.vlan = mm_cpu_to_le16(vlan);
1400*d14abf15SRobert Mustacchi 	}
1401*d14abf15SRobert Mustacchi 
1402*d14abf15SRobert Mustacchi 	/* Set the ramrod data header */
1403*d14abf15SRobert Mustacchi 	/* TODO: take this to the higher level in order to prevent multiple
1404*d14abf15SRobert Mustacchi 		 writing */
1405*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1406*d14abf15SRobert Mustacchi 					rule_cnt);
1407*d14abf15SRobert Mustacchi }
1408*d14abf15SRobert Mustacchi 
1409*d14abf15SRobert Mustacchi static void ecore_set_one_vlan_mac_e2(struct _lm_device_t *pdev,
1410*d14abf15SRobert Mustacchi 				      struct ecore_vlan_mac_obj *o,
1411*d14abf15SRobert Mustacchi 				      struct ecore_exeq_elem *elem,
1412*d14abf15SRobert Mustacchi 				      int rule_idx, int cam_offset)
1413*d14abf15SRobert Mustacchi {
1414*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1415*d14abf15SRobert Mustacchi 	struct eth_classify_rules_ramrod_data *data =
1416*d14abf15SRobert Mustacchi 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1417*d14abf15SRobert Mustacchi 	int rule_cnt = rule_idx + 1;
1418*d14abf15SRobert Mustacchi 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1419*d14abf15SRobert Mustacchi 	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1420*d14abf15SRobert Mustacchi 	BOOL add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1421*d14abf15SRobert Mustacchi 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1422*d14abf15SRobert Mustacchi 	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1423*d14abf15SRobert Mustacchi 
1424*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer for the first rule */
1425*d14abf15SRobert Mustacchi 	if (rule_idx == 0)
1426*d14abf15SRobert Mustacchi 		mm_memset(data, 0, sizeof(*data));
1427*d14abf15SRobert Mustacchi 
1428*d14abf15SRobert Mustacchi 	/* Set a rule header */
1429*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_cmd_hdr_e2(pdev, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1430*d14abf15SRobert Mustacchi 				      &rule_entry->pair.header);
1431*d14abf15SRobert Mustacchi 
1432*d14abf15SRobert Mustacchi 	/* Set VLAN and MAC themselves */
1433*d14abf15SRobert Mustacchi 	rule_entry->pair.vlan = mm_cpu_to_le16(vlan);
1434*d14abf15SRobert Mustacchi 	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1435*d14abf15SRobert Mustacchi 			      &rule_entry->pair.mac_mid,
1436*d14abf15SRobert Mustacchi 			      &rule_entry->pair.mac_lsb, mac);
1437*d14abf15SRobert Mustacchi 	rule_entry->pair.inner_mac =
1438*d14abf15SRobert Mustacchi 			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1439*d14abf15SRobert Mustacchi 	/* MOVE: Add a rule that will add this MAC to the target Queue */
1440*d14abf15SRobert Mustacchi 	if (cmd == ECORE_VLAN_MAC_MOVE) {
1441*d14abf15SRobert Mustacchi 		rule_entry++;
1442*d14abf15SRobert Mustacchi 		rule_cnt++;
1443*d14abf15SRobert Mustacchi 
1444*d14abf15SRobert Mustacchi 		/* Setup ramrod data */
1445*d14abf15SRobert Mustacchi 		ecore_vlan_mac_set_cmd_hdr_e2(pdev,
1446*d14abf15SRobert Mustacchi 					elem->cmd_data.vlan_mac.target_obj,
1447*d14abf15SRobert Mustacchi 					      TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1448*d14abf15SRobert Mustacchi 					      &rule_entry->pair.header);
1449*d14abf15SRobert Mustacchi 
1450*d14abf15SRobert Mustacchi 		/* Set a VLAN itself */
1451*d14abf15SRobert Mustacchi 		rule_entry->pair.vlan = mm_cpu_to_le16(vlan);
1452*d14abf15SRobert Mustacchi 		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1453*d14abf15SRobert Mustacchi 				      &rule_entry->pair.mac_mid,
1454*d14abf15SRobert Mustacchi 				      &rule_entry->pair.mac_lsb, mac);
1455*d14abf15SRobert Mustacchi 		rule_entry->pair.inner_mac =
1456*d14abf15SRobert Mustacchi 			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1457*d14abf15SRobert Mustacchi 	}
1458*d14abf15SRobert Mustacchi 
1459*d14abf15SRobert Mustacchi 	/* Set the ramrod data header */
1460*d14abf15SRobert Mustacchi 	/* TODO: take this to the higher level in order to prevent multiple
1461*d14abf15SRobert Mustacchi 		 writing */
1462*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1463*d14abf15SRobert Mustacchi 					rule_cnt);
1464*d14abf15SRobert Mustacchi }
1465*d14abf15SRobert Mustacchi 
1466*d14abf15SRobert Mustacchi /**
1467*d14abf15SRobert Mustacchi  * ecore_set_one_vlan_mac_e1h -
1468*d14abf15SRobert Mustacchi  *
1469*d14abf15SRobert Mustacchi  * @pdev:	device handle
1470*d14abf15SRobert Mustacchi  * @o:		ecore_vlan_mac_obj
1471*d14abf15SRobert Mustacchi  * @elem:	ecore_exeq_elem
1472*d14abf15SRobert Mustacchi  * @rule_idx:	rule_idx
1473*d14abf15SRobert Mustacchi  * @cam_offset:	cam_offset
1474*d14abf15SRobert Mustacchi  */
1475*d14abf15SRobert Mustacchi static void ecore_set_one_vlan_mac_e1h(struct _lm_device_t *pdev,
1476*d14abf15SRobert Mustacchi 				       struct ecore_vlan_mac_obj *o,
1477*d14abf15SRobert Mustacchi 				       struct ecore_exeq_elem *elem,
1478*d14abf15SRobert Mustacchi 				       int rule_idx, int cam_offset)
1479*d14abf15SRobert Mustacchi {
1480*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1481*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *config =
1482*d14abf15SRobert Mustacchi 		(struct mac_configuration_cmd *)(raw->rdata);
1483*d14abf15SRobert Mustacchi 	/* 57710 and 57711 do not support MOVE command,
1484*d14abf15SRobert Mustacchi 	 * so it's either ADD or DEL
1485*d14abf15SRobert Mustacchi 	 */
1486*d14abf15SRobert Mustacchi 	BOOL add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1487*d14abf15SRobert Mustacchi 		TRUE : FALSE;
1488*d14abf15SRobert Mustacchi 
1489*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer */
1490*d14abf15SRobert Mustacchi 	mm_memset(config, 0, sizeof(*config));
1491*d14abf15SRobert Mustacchi 
1492*d14abf15SRobert Mustacchi 	ecore_vlan_mac_set_rdata_e1x(pdev, o, ECORE_FILTER_VLAN_MAC_PENDING,
1493*d14abf15SRobert Mustacchi 				     cam_offset, add,
1494*d14abf15SRobert Mustacchi 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1495*d14abf15SRobert Mustacchi 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1496*d14abf15SRobert Mustacchi 				     ETH_VLAN_FILTER_CLASSIFY, config);
1497*d14abf15SRobert Mustacchi }
1498*d14abf15SRobert Mustacchi 
1499*d14abf15SRobert Mustacchi #define list_next_entry(pos, member) \
1500*d14abf15SRobert Mustacchi 	list_entry((pos)->member.next, typeof(*(pos)), member)
1501*d14abf15SRobert Mustacchi 
1502*d14abf15SRobert Mustacchi /**
1503*d14abf15SRobert Mustacchi  * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1504*d14abf15SRobert Mustacchi  *
1505*d14abf15SRobert Mustacchi  * @pdev:	device handle
1506*d14abf15SRobert Mustacchi  * @p:		command parameters
1507*d14abf15SRobert Mustacchi  * @ppos:	pointer to the cookie
1508*d14abf15SRobert Mustacchi  *
1509*d14abf15SRobert Mustacchi  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1510*d14abf15SRobert Mustacchi  * previously configured elements list.
1511*d14abf15SRobert Mustacchi  *
1512*d14abf15SRobert Mustacchi  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1513*d14abf15SRobert Mustacchi  * into an account
1514*d14abf15SRobert Mustacchi  *
1515*d14abf15SRobert Mustacchi  * pointer to the cookie  - that should be given back in the next call to make
1516*d14abf15SRobert Mustacchi  * function handle the next element. If *ppos is set to NULL it will restart the
1517*d14abf15SRobert Mustacchi  * iterator. If returned *ppos == NULL this means that the last element has been
1518*d14abf15SRobert Mustacchi  * handled.
1519*d14abf15SRobert Mustacchi  *
1520*d14abf15SRobert Mustacchi  */
1521*d14abf15SRobert Mustacchi static int ecore_vlan_mac_restore(struct _lm_device_t *pdev,
1522*d14abf15SRobert Mustacchi 			   struct ecore_vlan_mac_ramrod_params *p,
1523*d14abf15SRobert Mustacchi 			   struct ecore_vlan_mac_registry_elem **ppos)
1524*d14abf15SRobert Mustacchi {
1525*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
1526*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1527*d14abf15SRobert Mustacchi 
1528*d14abf15SRobert Mustacchi 	/* If list is empty - there is nothing to do here */
1529*d14abf15SRobert Mustacchi 	if (ECORE_LIST_IS_EMPTY(&o->head)) {
1530*d14abf15SRobert Mustacchi 		*ppos = NULL;
1531*d14abf15SRobert Mustacchi 		return 0;
1532*d14abf15SRobert Mustacchi 	}
1533*d14abf15SRobert Mustacchi 
1534*d14abf15SRobert Mustacchi 	/* make a step... */
1535*d14abf15SRobert Mustacchi 	if (*ppos == NULL)
1536*d14abf15SRobert Mustacchi 		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1537*d14abf15SRobert Mustacchi 					    struct ecore_vlan_mac_registry_elem,
1538*d14abf15SRobert Mustacchi 					       link);
1539*d14abf15SRobert Mustacchi 	else
1540*d14abf15SRobert Mustacchi 		*ppos = ECORE_LIST_NEXT(*ppos, link,
1541*d14abf15SRobert Mustacchi 					struct ecore_vlan_mac_registry_elem);
1542*d14abf15SRobert Mustacchi 
1543*d14abf15SRobert Mustacchi 	pos = *ppos;
1544*d14abf15SRobert Mustacchi 
1545*d14abf15SRobert Mustacchi 	/* If it's the last step - return NULL */
1546*d14abf15SRobert Mustacchi 	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1547*d14abf15SRobert Mustacchi 		*ppos = NULL;
1548*d14abf15SRobert Mustacchi 
1549*d14abf15SRobert Mustacchi 	/* Prepare a 'user_req' */
1550*d14abf15SRobert Mustacchi 	mm_memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1551*d14abf15SRobert Mustacchi 
1552*d14abf15SRobert Mustacchi 	/* Set the command */
1553*d14abf15SRobert Mustacchi 	p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1554*d14abf15SRobert Mustacchi 
1555*d14abf15SRobert Mustacchi 	/* Set vlan_mac_flags */
1556*d14abf15SRobert Mustacchi 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1557*d14abf15SRobert Mustacchi 
1558*d14abf15SRobert Mustacchi 	/* Set a restore bit */
1559*d14abf15SRobert Mustacchi 	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1560*d14abf15SRobert Mustacchi 
1561*d14abf15SRobert Mustacchi 	return ecore_config_vlan_mac(pdev, p);
1562*d14abf15SRobert Mustacchi }
1563*d14abf15SRobert Mustacchi 
1564*d14abf15SRobert Mustacchi /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1565*d14abf15SRobert Mustacchi  * pointer to an element with a specific criteria and NULL if such an element
1566*d14abf15SRobert Mustacchi  * hasn't been found.
1567*d14abf15SRobert Mustacchi  */
1568*d14abf15SRobert Mustacchi static struct ecore_exeq_elem *ecore_exeq_get_mac(
1569*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *o,
1570*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem)
1571*d14abf15SRobert Mustacchi {
1572*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *pos;
1573*d14abf15SRobert Mustacchi 	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1574*d14abf15SRobert Mustacchi 
1575*d14abf15SRobert Mustacchi 	/* Check pending for execution commands */
1576*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1577*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem)
1578*d14abf15SRobert Mustacchi 		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1579*d14abf15SRobert Mustacchi 			      sizeof(*data)) &&
1580*d14abf15SRobert Mustacchi 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1581*d14abf15SRobert Mustacchi 			return pos;
1582*d14abf15SRobert Mustacchi 
1583*d14abf15SRobert Mustacchi 	return NULL;
1584*d14abf15SRobert Mustacchi }
1585*d14abf15SRobert Mustacchi 
1586*d14abf15SRobert Mustacchi static struct ecore_exeq_elem *ecore_exeq_get_vlan(
1587*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *o,
1588*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem)
1589*d14abf15SRobert Mustacchi {
1590*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *pos;
1591*d14abf15SRobert Mustacchi 	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1592*d14abf15SRobert Mustacchi 
1593*d14abf15SRobert Mustacchi 	/* Check pending for execution commands */
1594*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1595*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem)
1596*d14abf15SRobert Mustacchi 		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1597*d14abf15SRobert Mustacchi 			      sizeof(*data)) &&
1598*d14abf15SRobert Mustacchi 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1599*d14abf15SRobert Mustacchi 			return pos;
1600*d14abf15SRobert Mustacchi 
1601*d14abf15SRobert Mustacchi 	return NULL;
1602*d14abf15SRobert Mustacchi }
1603*d14abf15SRobert Mustacchi 
1604*d14abf15SRobert Mustacchi static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1605*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *o,
1606*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem)
1607*d14abf15SRobert Mustacchi {
1608*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *pos;
1609*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_ramrod_data *data =
1610*d14abf15SRobert Mustacchi 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1611*d14abf15SRobert Mustacchi 
1612*d14abf15SRobert Mustacchi 	/* Check pending for execution commands */
1613*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1614*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem)
1615*d14abf15SRobert Mustacchi 		if (mm_memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1616*d14abf15SRobert Mustacchi 			      sizeof(*data)) &&
1617*d14abf15SRobert Mustacchi 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1618*d14abf15SRobert Mustacchi 			return pos;
1619*d14abf15SRobert Mustacchi 
1620*d14abf15SRobert Mustacchi 	return NULL;
1621*d14abf15SRobert Mustacchi }
1622*d14abf15SRobert Mustacchi 
1623*d14abf15SRobert Mustacchi /**
1624*d14abf15SRobert Mustacchi  * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1625*d14abf15SRobert Mustacchi  *
1626*d14abf15SRobert Mustacchi  * @pdev:	device handle
1627*d14abf15SRobert Mustacchi  * @qo:		ecore_qable_obj
1628*d14abf15SRobert Mustacchi  * @elem:	ecore_exeq_elem
1629*d14abf15SRobert Mustacchi  *
1630*d14abf15SRobert Mustacchi  * Checks that the requested configuration can be added. If yes and if
1631*d14abf15SRobert Mustacchi  * requested, consume CAM credit.
1632*d14abf15SRobert Mustacchi  *
1633*d14abf15SRobert Mustacchi  * The 'validate' is run after the 'optimize'.
1634*d14abf15SRobert Mustacchi  *
1635*d14abf15SRobert Mustacchi  */
1636*d14abf15SRobert Mustacchi static INLINE int ecore_validate_vlan_mac_add(struct _lm_device_t *pdev,
1637*d14abf15SRobert Mustacchi 					      union ecore_qable_obj *qo,
1638*d14abf15SRobert Mustacchi 					      struct ecore_exeq_elem *elem)
1639*d14abf15SRobert Mustacchi {
1640*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1641*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1642*d14abf15SRobert Mustacchi 	int rc;
1643*d14abf15SRobert Mustacchi 
1644*d14abf15SRobert Mustacchi 	/* Check the registry */
1645*d14abf15SRobert Mustacchi 	rc = o->check_add(pdev, o, &elem->cmd_data.vlan_mac.u);
1646*d14abf15SRobert Mustacchi 	if (rc) {
1647*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "ADD command is not allowed considering current registry state.\n");
1648*d14abf15SRobert Mustacchi 		return rc;
1649*d14abf15SRobert Mustacchi 	}
1650*d14abf15SRobert Mustacchi 
1651*d14abf15SRobert Mustacchi 	/* Check if there is a pending ADD command for this
1652*d14abf15SRobert Mustacchi 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1653*d14abf15SRobert Mustacchi 	 */
1654*d14abf15SRobert Mustacchi 	if (exeq->get(exeq, elem)) {
1655*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "There is a pending ADD command already\n");
1656*d14abf15SRobert Mustacchi 		return ECORE_EXISTS;
1657*d14abf15SRobert Mustacchi 	}
1658*d14abf15SRobert Mustacchi 
1659*d14abf15SRobert Mustacchi 	/* TODO: Check the pending MOVE from other objects where this
1660*d14abf15SRobert Mustacchi 	 * object is a destination object.
1661*d14abf15SRobert Mustacchi 	 */
1662*d14abf15SRobert Mustacchi 
1663*d14abf15SRobert Mustacchi 	/* Consume the credit if not requested not to */
1664*d14abf15SRobert Mustacchi 	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1665*d14abf15SRobert Mustacchi 			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1666*d14abf15SRobert Mustacchi 	    o->get_credit(o)))
1667*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1668*d14abf15SRobert Mustacchi 
1669*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
1670*d14abf15SRobert Mustacchi }
1671*d14abf15SRobert Mustacchi 
1672*d14abf15SRobert Mustacchi /**
1673*d14abf15SRobert Mustacchi  * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1674*d14abf15SRobert Mustacchi  *
1675*d14abf15SRobert Mustacchi  * @pdev:	device handle
1676*d14abf15SRobert Mustacchi  * @qo:		quable object to check
1677*d14abf15SRobert Mustacchi  * @elem:	element that needs to be deleted
1678*d14abf15SRobert Mustacchi  *
1679*d14abf15SRobert Mustacchi  * Checks that the requested configuration can be deleted. If yes and if
1680*d14abf15SRobert Mustacchi  * requested, returns a CAM credit.
1681*d14abf15SRobert Mustacchi  *
1682*d14abf15SRobert Mustacchi  * The 'validate' is run after the 'optimize'.
1683*d14abf15SRobert Mustacchi  */
1684*d14abf15SRobert Mustacchi static INLINE int ecore_validate_vlan_mac_del(struct _lm_device_t *pdev,
1685*d14abf15SRobert Mustacchi 					      union ecore_qable_obj *qo,
1686*d14abf15SRobert Mustacchi 					      struct ecore_exeq_elem *elem)
1687*d14abf15SRobert Mustacchi {
1688*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1689*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos;
1690*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1691*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem query_elem;
1692*d14abf15SRobert Mustacchi 
1693*d14abf15SRobert Mustacchi 	/* If this classification can not be deleted (doesn't exist)
1694*d14abf15SRobert Mustacchi 	 * - return a ECORE_EXIST.
1695*d14abf15SRobert Mustacchi 	 */
1696*d14abf15SRobert Mustacchi 	pos = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u);
1697*d14abf15SRobert Mustacchi 	if (!pos) {
1698*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "DEL command is not allowed considering current registry state\n");
1699*d14abf15SRobert Mustacchi 		return ECORE_EXISTS;
1700*d14abf15SRobert Mustacchi 	}
1701*d14abf15SRobert Mustacchi 
1702*d14abf15SRobert Mustacchi 	/* Check if there are pending DEL or MOVE commands for this
1703*d14abf15SRobert Mustacchi 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1704*d14abf15SRobert Mustacchi 	 */
1705*d14abf15SRobert Mustacchi 	mm_memcpy(&query_elem, elem, sizeof(query_elem));
1706*d14abf15SRobert Mustacchi 
1707*d14abf15SRobert Mustacchi 	/* Check for MOVE commands */
1708*d14abf15SRobert Mustacchi 	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1709*d14abf15SRobert Mustacchi 	if (exeq->get(exeq, &query_elem)) {
1710*d14abf15SRobert Mustacchi 		ECORE_ERR("There is a pending MOVE command already\n");
1711*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1712*d14abf15SRobert Mustacchi 	}
1713*d14abf15SRobert Mustacchi 
1714*d14abf15SRobert Mustacchi 	/* Check for DEL commands */
1715*d14abf15SRobert Mustacchi 	if (exeq->get(exeq, elem)) {
1716*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "There is a pending DEL command already\n");
1717*d14abf15SRobert Mustacchi 		return ECORE_EXISTS;
1718*d14abf15SRobert Mustacchi 	}
1719*d14abf15SRobert Mustacchi 
1720*d14abf15SRobert Mustacchi 	/* Return the credit to the credit pool if not requested not to */
1721*d14abf15SRobert Mustacchi 	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1722*d14abf15SRobert Mustacchi 			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1723*d14abf15SRobert Mustacchi 	    o->put_credit(o))) {
1724*d14abf15SRobert Mustacchi 		ECORE_ERR("Failed to return a credit\n");
1725*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1726*d14abf15SRobert Mustacchi 	}
1727*d14abf15SRobert Mustacchi 
1728*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
1729*d14abf15SRobert Mustacchi }
1730*d14abf15SRobert Mustacchi 
1731*d14abf15SRobert Mustacchi /**
1732*d14abf15SRobert Mustacchi  * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1733*d14abf15SRobert Mustacchi  *
1734*d14abf15SRobert Mustacchi  * @pdev:	device handle
1735*d14abf15SRobert Mustacchi  * @qo:		quable object to check (source)
1736*d14abf15SRobert Mustacchi  * @elem:	element that needs to be moved
1737*d14abf15SRobert Mustacchi  *
1738*d14abf15SRobert Mustacchi  * Checks that the requested configuration can be moved. If yes and if
1739*d14abf15SRobert Mustacchi  * requested, returns a CAM credit.
1740*d14abf15SRobert Mustacchi  *
1741*d14abf15SRobert Mustacchi  * The 'validate' is run after the 'optimize'.
1742*d14abf15SRobert Mustacchi  */
1743*d14abf15SRobert Mustacchi static INLINE int ecore_validate_vlan_mac_move(struct _lm_device_t *pdev,
1744*d14abf15SRobert Mustacchi 					       union ecore_qable_obj *qo,
1745*d14abf15SRobert Mustacchi 					       struct ecore_exeq_elem *elem)
1746*d14abf15SRobert Mustacchi {
1747*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1748*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1749*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem query_elem;
1750*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1751*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1752*d14abf15SRobert Mustacchi 
1753*d14abf15SRobert Mustacchi 	/* Check if we can perform this operation based on the current registry
1754*d14abf15SRobert Mustacchi 	 * state.
1755*d14abf15SRobert Mustacchi 	 */
1756*d14abf15SRobert Mustacchi 	if (!src_o->check_move(pdev, src_o, dest_o,
1757*d14abf15SRobert Mustacchi 			       &elem->cmd_data.vlan_mac.u)) {
1758*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "MOVE command is not allowed considering current registry state\n");
1759*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1760*d14abf15SRobert Mustacchi 	}
1761*d14abf15SRobert Mustacchi 
1762*d14abf15SRobert Mustacchi 	/* Check if there is an already pending DEL or MOVE command for the
1763*d14abf15SRobert Mustacchi 	 * source object or ADD command for a destination object. Return an
1764*d14abf15SRobert Mustacchi 	 * error if so.
1765*d14abf15SRobert Mustacchi 	 */
1766*d14abf15SRobert Mustacchi 	mm_memcpy(&query_elem, elem, sizeof(query_elem));
1767*d14abf15SRobert Mustacchi 
1768*d14abf15SRobert Mustacchi 	/* Check DEL on source */
1769*d14abf15SRobert Mustacchi 	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1770*d14abf15SRobert Mustacchi 	if (src_exeq->get(src_exeq, &query_elem)) {
1771*d14abf15SRobert Mustacchi 		ECORE_ERR("There is a pending DEL command on the source queue already\n");
1772*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1773*d14abf15SRobert Mustacchi 	}
1774*d14abf15SRobert Mustacchi 
1775*d14abf15SRobert Mustacchi 	/* Check MOVE on source */
1776*d14abf15SRobert Mustacchi 	if (src_exeq->get(src_exeq, elem)) {
1777*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "There is a pending MOVE command already\n");
1778*d14abf15SRobert Mustacchi 		return ECORE_EXISTS;
1779*d14abf15SRobert Mustacchi 	}
1780*d14abf15SRobert Mustacchi 
1781*d14abf15SRobert Mustacchi 	/* Check ADD on destination */
1782*d14abf15SRobert Mustacchi 	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1783*d14abf15SRobert Mustacchi 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1784*d14abf15SRobert Mustacchi 		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1785*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1786*d14abf15SRobert Mustacchi 	}
1787*d14abf15SRobert Mustacchi 
1788*d14abf15SRobert Mustacchi 	/* Consume the credit if not requested not to */
1789*d14abf15SRobert Mustacchi 	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1790*d14abf15SRobert Mustacchi 			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1791*d14abf15SRobert Mustacchi 	    dest_o->get_credit(dest_o)))
1792*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1793*d14abf15SRobert Mustacchi 
1794*d14abf15SRobert Mustacchi 	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1795*d14abf15SRobert Mustacchi 			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1796*d14abf15SRobert Mustacchi 	    src_o->put_credit(src_o))) {
1797*d14abf15SRobert Mustacchi 		/* return the credit taken from dest... */
1798*d14abf15SRobert Mustacchi 		dest_o->put_credit(dest_o);
1799*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1800*d14abf15SRobert Mustacchi 	}
1801*d14abf15SRobert Mustacchi 
1802*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
1803*d14abf15SRobert Mustacchi }
1804*d14abf15SRobert Mustacchi 
1805*d14abf15SRobert Mustacchi static int ecore_validate_vlan_mac(struct _lm_device_t *pdev,
1806*d14abf15SRobert Mustacchi 				   union ecore_qable_obj *qo,
1807*d14abf15SRobert Mustacchi 				   struct ecore_exeq_elem *elem)
1808*d14abf15SRobert Mustacchi {
1809*d14abf15SRobert Mustacchi 	switch (elem->cmd_data.vlan_mac.cmd) {
1810*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_ADD:
1811*d14abf15SRobert Mustacchi 		return ecore_validate_vlan_mac_add(pdev, qo, elem);
1812*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_DEL:
1813*d14abf15SRobert Mustacchi 		return ecore_validate_vlan_mac_del(pdev, qo, elem);
1814*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_MOVE:
1815*d14abf15SRobert Mustacchi 		return ecore_validate_vlan_mac_move(pdev, qo, elem);
1816*d14abf15SRobert Mustacchi 	default:
1817*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1818*d14abf15SRobert Mustacchi 	}
1819*d14abf15SRobert Mustacchi }
1820*d14abf15SRobert Mustacchi 
1821*d14abf15SRobert Mustacchi static int ecore_remove_vlan_mac(struct _lm_device_t *pdev,
1822*d14abf15SRobert Mustacchi 				  union ecore_qable_obj *qo,
1823*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem *elem)
1824*d14abf15SRobert Mustacchi {
1825*d14abf15SRobert Mustacchi 	int rc = 0;
1826*d14abf15SRobert Mustacchi 
1827*d14abf15SRobert Mustacchi 	/* If consumption wasn't required, nothing to do */
1828*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1829*d14abf15SRobert Mustacchi 			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
1830*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
1831*d14abf15SRobert Mustacchi 
1832*d14abf15SRobert Mustacchi 	switch (elem->cmd_data.vlan_mac.cmd) {
1833*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_ADD:
1834*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_MOVE:
1835*d14abf15SRobert Mustacchi 		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1836*d14abf15SRobert Mustacchi 		break;
1837*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_DEL:
1838*d14abf15SRobert Mustacchi 		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1839*d14abf15SRobert Mustacchi 		break;
1840*d14abf15SRobert Mustacchi 	default:
1841*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1842*d14abf15SRobert Mustacchi 	}
1843*d14abf15SRobert Mustacchi 
1844*d14abf15SRobert Mustacchi 	if (rc != TRUE)
1845*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1846*d14abf15SRobert Mustacchi 
1847*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
1848*d14abf15SRobert Mustacchi }
1849*d14abf15SRobert Mustacchi 
1850*d14abf15SRobert Mustacchi /**
1851*d14abf15SRobert Mustacchi  * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1852*d14abf15SRobert Mustacchi  *
1853*d14abf15SRobert Mustacchi  * @pdev:	device handle
1854*d14abf15SRobert Mustacchi  * @o:		ecore_vlan_mac_obj
1855*d14abf15SRobert Mustacchi  *
1856*d14abf15SRobert Mustacchi  */
1857*d14abf15SRobert Mustacchi static int ecore_wait_vlan_mac(struct _lm_device_t *pdev,
1858*d14abf15SRobert Mustacchi 			       struct ecore_vlan_mac_obj *o)
1859*d14abf15SRobert Mustacchi {
1860*d14abf15SRobert Mustacchi 	int cnt = 5000, rc;
1861*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1862*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
1863*d14abf15SRobert Mustacchi 
1864*d14abf15SRobert Mustacchi 	while (cnt--) {
1865*d14abf15SRobert Mustacchi 		/* Wait for the current command to complete */
1866*d14abf15SRobert Mustacchi 		rc = raw->wait_comp(pdev, raw);
1867*d14abf15SRobert Mustacchi 		if (rc)
1868*d14abf15SRobert Mustacchi 			return rc;
1869*d14abf15SRobert Mustacchi 
1870*d14abf15SRobert Mustacchi 		/* Wait until there are no pending commands */
1871*d14abf15SRobert Mustacchi 		if (!ecore_exe_queue_empty(exeq))
1872*d14abf15SRobert Mustacchi 			mm_wait(pdev, 1000);
1873*d14abf15SRobert Mustacchi 		else
1874*d14abf15SRobert Mustacchi 			return ECORE_SUCCESS;
1875*d14abf15SRobert Mustacchi 	}
1876*d14abf15SRobert Mustacchi 
1877*d14abf15SRobert Mustacchi 	return ECORE_TIMEOUT;
1878*d14abf15SRobert Mustacchi }
1879*d14abf15SRobert Mustacchi 
1880*d14abf15SRobert Mustacchi static int __ecore_vlan_mac_execute_step(struct _lm_device_t *pdev,
1881*d14abf15SRobert Mustacchi 					 struct ecore_vlan_mac_obj *o,
1882*d14abf15SRobert Mustacchi 					 unsigned long *ramrod_flags)
1883*d14abf15SRobert Mustacchi {
1884*d14abf15SRobert Mustacchi 	int rc = ECORE_SUCCESS;
1885*d14abf15SRobert Mustacchi 
1886*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1887*d14abf15SRobert Mustacchi 
1888*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_execute_step - trying to take writer lock\n");
1889*d14abf15SRobert Mustacchi 	rc = __ecore_vlan_mac_h_write_trylock(pdev, o);
1890*d14abf15SRobert Mustacchi 
1891*d14abf15SRobert Mustacchi 	if (rc != ECORE_SUCCESS) {
1892*d14abf15SRobert Mustacchi 		__ecore_vlan_mac_h_pend(pdev, o, *ramrod_flags);
1893*d14abf15SRobert Mustacchi 
1894*d14abf15SRobert Mustacchi 		/** Calling function should not diffrentiate between this case
1895*d14abf15SRobert Mustacchi 		 *  and the case in which there is already a pending ramrod
1896*d14abf15SRobert Mustacchi 		 */
1897*d14abf15SRobert Mustacchi 		rc = ECORE_PENDING;
1898*d14abf15SRobert Mustacchi 	} else {
1899*d14abf15SRobert Mustacchi 		rc = ecore_exe_queue_step(pdev, &o->exe_queue, ramrod_flags);
1900*d14abf15SRobert Mustacchi 	}
1901*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1902*d14abf15SRobert Mustacchi 
1903*d14abf15SRobert Mustacchi 	return rc;
1904*d14abf15SRobert Mustacchi }
1905*d14abf15SRobert Mustacchi 
1906*d14abf15SRobert Mustacchi /**
1907*d14abf15SRobert Mustacchi  * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1908*d14abf15SRobert Mustacchi  *
1909*d14abf15SRobert Mustacchi  * @pdev:	device handle
1910*d14abf15SRobert Mustacchi  * @o:		ecore_vlan_mac_obj
1911*d14abf15SRobert Mustacchi  * @cqe:
1912*d14abf15SRobert Mustacchi  * @cont:	if TRUE schedule next execution chunk
1913*d14abf15SRobert Mustacchi  *
1914*d14abf15SRobert Mustacchi  */
1915*d14abf15SRobert Mustacchi static int ecore_complete_vlan_mac(struct _lm_device_t *pdev,
1916*d14abf15SRobert Mustacchi 				   struct ecore_vlan_mac_obj *o,
1917*d14abf15SRobert Mustacchi 				   union event_ring_elem *cqe,
1918*d14abf15SRobert Mustacchi 				   unsigned long *ramrod_flags)
1919*d14abf15SRobert Mustacchi {
1920*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
1921*d14abf15SRobert Mustacchi 	int rc;
1922*d14abf15SRobert Mustacchi 
1923*d14abf15SRobert Mustacchi 	/* Clearing the pending list & raw state should be made
1924*d14abf15SRobert Mustacchi 	 * atomically (as execution flow assumes they represent the same)
1925*d14abf15SRobert Mustacchi 	 */
1926*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1927*d14abf15SRobert Mustacchi 
1928*d14abf15SRobert Mustacchi 	/* Reset pending list */
1929*d14abf15SRobert Mustacchi 	__ecore_exe_queue_reset_pending(pdev, &o->exe_queue);
1930*d14abf15SRobert Mustacchi 
1931*d14abf15SRobert Mustacchi 	/* Clear pending */
1932*d14abf15SRobert Mustacchi 	r->clear_pending(r);
1933*d14abf15SRobert Mustacchi 
1934*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1935*d14abf15SRobert Mustacchi 
1936*d14abf15SRobert Mustacchi 	/* If ramrod failed this is most likely a SW bug */
1937*d14abf15SRobert Mustacchi 	if (cqe->message.error)
1938*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
1939*d14abf15SRobert Mustacchi 
1940*d14abf15SRobert Mustacchi 	/* Run the next bulk of pending commands if requested */
1941*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1942*d14abf15SRobert Mustacchi 		rc = __ecore_vlan_mac_execute_step(pdev, o, ramrod_flags);
1943*d14abf15SRobert Mustacchi 		if (rc < 0)
1944*d14abf15SRobert Mustacchi 			return rc;
1945*d14abf15SRobert Mustacchi 	}
1946*d14abf15SRobert Mustacchi 
1947*d14abf15SRobert Mustacchi 	/* If there is more work to do return PENDING */
1948*d14abf15SRobert Mustacchi 	if (!ecore_exe_queue_empty(&o->exe_queue))
1949*d14abf15SRobert Mustacchi 		return ECORE_PENDING;
1950*d14abf15SRobert Mustacchi 
1951*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
1952*d14abf15SRobert Mustacchi }
1953*d14abf15SRobert Mustacchi 
1954*d14abf15SRobert Mustacchi /**
1955*d14abf15SRobert Mustacchi  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1956*d14abf15SRobert Mustacchi  *
1957*d14abf15SRobert Mustacchi  * @pdev:	device handle
1958*d14abf15SRobert Mustacchi  * @o:		ecore_qable_obj
1959*d14abf15SRobert Mustacchi  * @elem:	ecore_exeq_elem
1960*d14abf15SRobert Mustacchi  */
1961*d14abf15SRobert Mustacchi static int ecore_optimize_vlan_mac(struct _lm_device_t *pdev,
1962*d14abf15SRobert Mustacchi 				   union ecore_qable_obj *qo,
1963*d14abf15SRobert Mustacchi 				   struct ecore_exeq_elem *elem)
1964*d14abf15SRobert Mustacchi {
1965*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem query, *pos;
1966*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1967*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1968*d14abf15SRobert Mustacchi 
1969*d14abf15SRobert Mustacchi 	mm_memcpy(&query, elem, sizeof(query));
1970*d14abf15SRobert Mustacchi 
1971*d14abf15SRobert Mustacchi 	switch (elem->cmd_data.vlan_mac.cmd) {
1972*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_ADD:
1973*d14abf15SRobert Mustacchi 		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1974*d14abf15SRobert Mustacchi 		break;
1975*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_DEL:
1976*d14abf15SRobert Mustacchi 		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1977*d14abf15SRobert Mustacchi 		break;
1978*d14abf15SRobert Mustacchi 	default:
1979*d14abf15SRobert Mustacchi 		/* Don't handle anything other than ADD or DEL */
1980*d14abf15SRobert Mustacchi 		return 0;
1981*d14abf15SRobert Mustacchi 	}
1982*d14abf15SRobert Mustacchi 
1983*d14abf15SRobert Mustacchi 	/* If we found the appropriate element - delete it */
1984*d14abf15SRobert Mustacchi 	pos = exeq->get(exeq, &query);
1985*d14abf15SRobert Mustacchi 	if (pos) {
1986*d14abf15SRobert Mustacchi 
1987*d14abf15SRobert Mustacchi 		/* Return the credit of the optimized command */
1988*d14abf15SRobert Mustacchi 		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1989*d14abf15SRobert Mustacchi 				     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1990*d14abf15SRobert Mustacchi 			if ((query.cmd_data.vlan_mac.cmd ==
1991*d14abf15SRobert Mustacchi 			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1992*d14abf15SRobert Mustacchi 				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1993*d14abf15SRobert Mustacchi 				return ECORE_INVAL;
1994*d14abf15SRobert Mustacchi 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1995*d14abf15SRobert Mustacchi 				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1996*d14abf15SRobert Mustacchi 				return ECORE_INVAL;
1997*d14abf15SRobert Mustacchi 			}
1998*d14abf15SRobert Mustacchi 		}
1999*d14abf15SRobert Mustacchi 
2000*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Optimizing %s command\n",
2001*d14abf15SRobert Mustacchi 			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
2002*d14abf15SRobert Mustacchi 			  "ADD" : "DEL");
2003*d14abf15SRobert Mustacchi 
2004*d14abf15SRobert Mustacchi 		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
2005*d14abf15SRobert Mustacchi 		ecore_exe_queue_free_elem(pdev, pos);
2006*d14abf15SRobert Mustacchi 		return 1;
2007*d14abf15SRobert Mustacchi 	}
2008*d14abf15SRobert Mustacchi 
2009*d14abf15SRobert Mustacchi 	return 0;
2010*d14abf15SRobert Mustacchi }
2011*d14abf15SRobert Mustacchi 
2012*d14abf15SRobert Mustacchi /**
2013*d14abf15SRobert Mustacchi  * ecore_vlan_mac_get_registry_elem - prepare a registry element
2014*d14abf15SRobert Mustacchi  *
2015*d14abf15SRobert Mustacchi  * @pdev:  device handle
2016*d14abf15SRobert Mustacchi  * @o:
2017*d14abf15SRobert Mustacchi  * @elem:
2018*d14abf15SRobert Mustacchi  * @restore:
2019*d14abf15SRobert Mustacchi  * @re:
2020*d14abf15SRobert Mustacchi  *
2021*d14abf15SRobert Mustacchi  * prepare a registry element according to the current command request.
2022*d14abf15SRobert Mustacchi  */
2023*d14abf15SRobert Mustacchi static INLINE int ecore_vlan_mac_get_registry_elem(
2024*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev,
2025*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o,
2026*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem,
2027*d14abf15SRobert Mustacchi 	BOOL restore,
2028*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem **re)
2029*d14abf15SRobert Mustacchi {
2030*d14abf15SRobert Mustacchi 	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
2031*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *reg_elem;
2032*d14abf15SRobert Mustacchi 
2033*d14abf15SRobert Mustacchi 	/* Allocate a new registry element if needed. */
2034*d14abf15SRobert Mustacchi 	if (!restore &&
2035*d14abf15SRobert Mustacchi 	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
2036*d14abf15SRobert Mustacchi 		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, pdev);
2037*d14abf15SRobert Mustacchi 		if (!reg_elem)
2038*d14abf15SRobert Mustacchi 			return ECORE_NOMEM;
2039*d14abf15SRobert Mustacchi 
2040*d14abf15SRobert Mustacchi 		/* Get a new CAM offset */
2041*d14abf15SRobert Mustacchi 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
2042*d14abf15SRobert Mustacchi 			/* This shall never happen, because we have checked the
2043*d14abf15SRobert Mustacchi 			 * CAM availability in the 'validate'.
2044*d14abf15SRobert Mustacchi 			 */
2045*d14abf15SRobert Mustacchi 			DbgBreakIf(1);
2046*d14abf15SRobert Mustacchi 			ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem));
2047*d14abf15SRobert Mustacchi 			return ECORE_INVAL;
2048*d14abf15SRobert Mustacchi 		}
2049*d14abf15SRobert Mustacchi 
2050*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Got cam offset %d\n", reg_elem->cam_offset);
2051*d14abf15SRobert Mustacchi 
2052*d14abf15SRobert Mustacchi 		/* Set a VLAN-MAC data */
2053*d14abf15SRobert Mustacchi 		mm_memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
2054*d14abf15SRobert Mustacchi 			  sizeof(reg_elem->u));
2055*d14abf15SRobert Mustacchi 
2056*d14abf15SRobert Mustacchi 		/* Copy the flags (needed for DEL and RESTORE flows) */
2057*d14abf15SRobert Mustacchi 		reg_elem->vlan_mac_flags =
2058*d14abf15SRobert Mustacchi 			elem->cmd_data.vlan_mac.vlan_mac_flags;
2059*d14abf15SRobert Mustacchi 	} else /* DEL, RESTORE */
2060*d14abf15SRobert Mustacchi 		reg_elem = o->check_del(pdev, o, &elem->cmd_data.vlan_mac.u);
2061*d14abf15SRobert Mustacchi 
2062*d14abf15SRobert Mustacchi 	*re = reg_elem;
2063*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
2064*d14abf15SRobert Mustacchi }
2065*d14abf15SRobert Mustacchi 
2066*d14abf15SRobert Mustacchi /**
2067*d14abf15SRobert Mustacchi  * ecore_execute_vlan_mac - execute vlan mac command
2068*d14abf15SRobert Mustacchi  *
2069*d14abf15SRobert Mustacchi  * @pdev:		device handle
2070*d14abf15SRobert Mustacchi  * @qo:
2071*d14abf15SRobert Mustacchi  * @exe_chunk:
2072*d14abf15SRobert Mustacchi  * @ramrod_flags:
2073*d14abf15SRobert Mustacchi  *
2074*d14abf15SRobert Mustacchi  * go and send a ramrod!
2075*d14abf15SRobert Mustacchi  */
2076*d14abf15SRobert Mustacchi static int ecore_execute_vlan_mac(struct _lm_device_t *pdev,
2077*d14abf15SRobert Mustacchi 				  union ecore_qable_obj *qo,
2078*d14abf15SRobert Mustacchi 				  d_list_t *exe_chunk,
2079*d14abf15SRobert Mustacchi 				  unsigned long *ramrod_flags)
2080*d14abf15SRobert Mustacchi {
2081*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem;
2082*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
2083*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
2084*d14abf15SRobert Mustacchi 	int rc, idx = 0;
2085*d14abf15SRobert Mustacchi 	BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
2086*d14abf15SRobert Mustacchi 	BOOL drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
2087*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *reg_elem;
2088*d14abf15SRobert Mustacchi 	enum ecore_vlan_mac_cmd cmd;
2089*d14abf15SRobert Mustacchi 
2090*d14abf15SRobert Mustacchi 	/* If DRIVER_ONLY execution is requested, cleanup a registry
2091*d14abf15SRobert Mustacchi 	 * and exit. Otherwise send a ramrod to FW.
2092*d14abf15SRobert Mustacchi 	 */
2093*d14abf15SRobert Mustacchi 	if (!drv_only) {
2094*d14abf15SRobert Mustacchi 		DbgBreakIf(r->check_pending(r));
2095*d14abf15SRobert Mustacchi 
2096*d14abf15SRobert Mustacchi 		/* Set pending */
2097*d14abf15SRobert Mustacchi 		r->set_pending(r);
2098*d14abf15SRobert Mustacchi 
2099*d14abf15SRobert Mustacchi 		/* Fill the ramrod data */
2100*d14abf15SRobert Mustacchi 		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
2101*d14abf15SRobert Mustacchi 					  struct ecore_exeq_elem) {
2102*d14abf15SRobert Mustacchi 			cmd = elem->cmd_data.vlan_mac.cmd;
2103*d14abf15SRobert Mustacchi 			/* We will add to the target object in MOVE command, so
2104*d14abf15SRobert Mustacchi 			 * change the object for a CAM search.
2105*d14abf15SRobert Mustacchi 			 */
2106*d14abf15SRobert Mustacchi 			if (cmd == ECORE_VLAN_MAC_MOVE)
2107*d14abf15SRobert Mustacchi 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
2108*d14abf15SRobert Mustacchi 			else
2109*d14abf15SRobert Mustacchi 				cam_obj = o;
2110*d14abf15SRobert Mustacchi 
2111*d14abf15SRobert Mustacchi 			rc = ecore_vlan_mac_get_registry_elem(pdev, cam_obj,
2112*d14abf15SRobert Mustacchi 							      elem, restore,
2113*d14abf15SRobert Mustacchi 							      &reg_elem);
2114*d14abf15SRobert Mustacchi 			if (rc)
2115*d14abf15SRobert Mustacchi 				goto error_exit;
2116*d14abf15SRobert Mustacchi 
2117*d14abf15SRobert Mustacchi 			DbgBreakIf(!reg_elem);
2118*d14abf15SRobert Mustacchi 
2119*d14abf15SRobert Mustacchi 			/* Push a new entry into the registry */
2120*d14abf15SRobert Mustacchi 			if (!restore &&
2121*d14abf15SRobert Mustacchi 			    ((cmd == ECORE_VLAN_MAC_ADD) ||
2122*d14abf15SRobert Mustacchi 			    (cmd == ECORE_VLAN_MAC_MOVE)))
2123*d14abf15SRobert Mustacchi 				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
2124*d14abf15SRobert Mustacchi 						     &cam_obj->head);
2125*d14abf15SRobert Mustacchi 
2126*d14abf15SRobert Mustacchi 			/* Configure a single command in a ramrod data buffer */
2127*d14abf15SRobert Mustacchi 			o->set_one_rule(pdev, o, elem, idx,
2128*d14abf15SRobert Mustacchi 					reg_elem->cam_offset);
2129*d14abf15SRobert Mustacchi 
2130*d14abf15SRobert Mustacchi 			/* MOVE command consumes 2 entries in the ramrod data */
2131*d14abf15SRobert Mustacchi 			if (cmd == ECORE_VLAN_MAC_MOVE)
2132*d14abf15SRobert Mustacchi 				idx += 2;
2133*d14abf15SRobert Mustacchi 			else
2134*d14abf15SRobert Mustacchi 				idx++;
2135*d14abf15SRobert Mustacchi 		}
2136*d14abf15SRobert Mustacchi 
2137*d14abf15SRobert Mustacchi 		/* No need for an explicit memory barrier here as long as we
2138*d14abf15SRobert Mustacchi 		 * ensure the ordering of writing to the SPQ element
2139*d14abf15SRobert Mustacchi 		 * and updating of the SPQ producer which involves a memory
2140*d14abf15SRobert Mustacchi 		 * read. If the memory read is removed we will have to put a
2141*d14abf15SRobert Mustacchi 		 * full memory barrier there (inside ecore_sp_post()).
2142*d14abf15SRobert Mustacchi 		 */
2143*d14abf15SRobert Mustacchi 		rc = ecore_sp_post(pdev, o->ramrod_cmd, r->cid,
2144*d14abf15SRobert Mustacchi 				   r->rdata_mapping.as_u64,
2145*d14abf15SRobert Mustacchi 				   ETH_CONNECTION_TYPE);
2146*d14abf15SRobert Mustacchi 		if (rc)
2147*d14abf15SRobert Mustacchi 			goto error_exit;
2148*d14abf15SRobert Mustacchi 	}
2149*d14abf15SRobert Mustacchi 
2150*d14abf15SRobert Mustacchi 	/* Now, when we are done with the ramrod - clean up the registry */
2151*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
2152*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem) {
2153*d14abf15SRobert Mustacchi 		cmd = elem->cmd_data.vlan_mac.cmd;
2154*d14abf15SRobert Mustacchi 		if ((cmd == ECORE_VLAN_MAC_DEL) ||
2155*d14abf15SRobert Mustacchi 		    (cmd == ECORE_VLAN_MAC_MOVE)) {
2156*d14abf15SRobert Mustacchi 			reg_elem = o->check_del(pdev, o,
2157*d14abf15SRobert Mustacchi 						&elem->cmd_data.vlan_mac.u);
2158*d14abf15SRobert Mustacchi 
2159*d14abf15SRobert Mustacchi 			DbgBreakIf(!reg_elem);
2160*d14abf15SRobert Mustacchi 
2161*d14abf15SRobert Mustacchi 			o->put_cam_offset(o, reg_elem->cam_offset);
2162*d14abf15SRobert Mustacchi 			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
2163*d14abf15SRobert Mustacchi 			ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem));
2164*d14abf15SRobert Mustacchi 		}
2165*d14abf15SRobert Mustacchi 	}
2166*d14abf15SRobert Mustacchi 
2167*d14abf15SRobert Mustacchi 	if (!drv_only)
2168*d14abf15SRobert Mustacchi 		return ECORE_PENDING;
2169*d14abf15SRobert Mustacchi 	else
2170*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
2171*d14abf15SRobert Mustacchi 
2172*d14abf15SRobert Mustacchi error_exit:
2173*d14abf15SRobert Mustacchi 	r->clear_pending(r);
2174*d14abf15SRobert Mustacchi 
2175*d14abf15SRobert Mustacchi 	/* Cleanup a registry in case of a failure */
2176*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
2177*d14abf15SRobert Mustacchi 				  struct ecore_exeq_elem) {
2178*d14abf15SRobert Mustacchi 		cmd = elem->cmd_data.vlan_mac.cmd;
2179*d14abf15SRobert Mustacchi 
2180*d14abf15SRobert Mustacchi 		if (cmd == ECORE_VLAN_MAC_MOVE)
2181*d14abf15SRobert Mustacchi 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
2182*d14abf15SRobert Mustacchi 		else
2183*d14abf15SRobert Mustacchi 			cam_obj = o;
2184*d14abf15SRobert Mustacchi 
2185*d14abf15SRobert Mustacchi 		/* Delete all newly added above entries */
2186*d14abf15SRobert Mustacchi 		if (!restore &&
2187*d14abf15SRobert Mustacchi 		    ((cmd == ECORE_VLAN_MAC_ADD) ||
2188*d14abf15SRobert Mustacchi 		    (cmd == ECORE_VLAN_MAC_MOVE))) {
2189*d14abf15SRobert Mustacchi 			reg_elem = o->check_del(pdev, cam_obj,
2190*d14abf15SRobert Mustacchi 						&elem->cmd_data.vlan_mac.u);
2191*d14abf15SRobert Mustacchi 			if (reg_elem) {
2192*d14abf15SRobert Mustacchi 				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
2193*d14abf15SRobert Mustacchi 							&cam_obj->head);
2194*d14abf15SRobert Mustacchi 				ECORE_FREE(pdev, reg_elem, sizeof(*reg_elem));
2195*d14abf15SRobert Mustacchi 			}
2196*d14abf15SRobert Mustacchi 		}
2197*d14abf15SRobert Mustacchi 	}
2198*d14abf15SRobert Mustacchi 
2199*d14abf15SRobert Mustacchi 	return rc;
2200*d14abf15SRobert Mustacchi }
2201*d14abf15SRobert Mustacchi 
2202*d14abf15SRobert Mustacchi static INLINE int ecore_vlan_mac_push_new_cmd(
2203*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev,
2204*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_ramrod_params *p)
2205*d14abf15SRobert Mustacchi {
2206*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *elem;
2207*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2208*d14abf15SRobert Mustacchi 	BOOL restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
2209*d14abf15SRobert Mustacchi 
2210*d14abf15SRobert Mustacchi 	/* Allocate the execution queue element */
2211*d14abf15SRobert Mustacchi 	elem = ecore_exe_queue_alloc_elem(pdev);
2212*d14abf15SRobert Mustacchi 	if (!elem)
2213*d14abf15SRobert Mustacchi 		return ECORE_NOMEM;
2214*d14abf15SRobert Mustacchi 
2215*d14abf15SRobert Mustacchi 	/* Set the command 'length' */
2216*d14abf15SRobert Mustacchi 	switch (p->user_req.cmd) {
2217*d14abf15SRobert Mustacchi 	case ECORE_VLAN_MAC_MOVE:
2218*d14abf15SRobert Mustacchi 		elem->cmd_len = 2;
2219*d14abf15SRobert Mustacchi 		break;
2220*d14abf15SRobert Mustacchi 	default:
2221*d14abf15SRobert Mustacchi 		elem->cmd_len = 1;
2222*d14abf15SRobert Mustacchi 	}
2223*d14abf15SRobert Mustacchi 
2224*d14abf15SRobert Mustacchi 	/* Fill the object specific info */
2225*d14abf15SRobert Mustacchi 	mm_memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
2226*d14abf15SRobert Mustacchi 
2227*d14abf15SRobert Mustacchi 	/* Try to add a new command to the pending list */
2228*d14abf15SRobert Mustacchi 	return ecore_exe_queue_add(pdev, &o->exe_queue, elem, restore);
2229*d14abf15SRobert Mustacchi }
2230*d14abf15SRobert Mustacchi 
2231*d14abf15SRobert Mustacchi /**
2232*d14abf15SRobert Mustacchi  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
2233*d14abf15SRobert Mustacchi  *
2234*d14abf15SRobert Mustacchi  * @pdev:  device handle
2235*d14abf15SRobert Mustacchi  * @p:
2236*d14abf15SRobert Mustacchi  *
2237*d14abf15SRobert Mustacchi  */
2238*d14abf15SRobert Mustacchi int ecore_config_vlan_mac(struct _lm_device_t *pdev,
2239*d14abf15SRobert Mustacchi 			   struct ecore_vlan_mac_ramrod_params *p)
2240*d14abf15SRobert Mustacchi {
2241*d14abf15SRobert Mustacchi 	int rc = ECORE_SUCCESS;
2242*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2243*d14abf15SRobert Mustacchi 	unsigned long *ramrod_flags = &p->ramrod_flags;
2244*d14abf15SRobert Mustacchi 	BOOL cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2245*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
2246*d14abf15SRobert Mustacchi 
2247*d14abf15SRobert Mustacchi 	/*
2248*d14abf15SRobert Mustacchi 	 * Add new elements to the execution list for commands that require it.
2249*d14abf15SRobert Mustacchi 	 */
2250*d14abf15SRobert Mustacchi 	if (!cont) {
2251*d14abf15SRobert Mustacchi 		rc = ecore_vlan_mac_push_new_cmd(pdev, p);
2252*d14abf15SRobert Mustacchi 		if (rc)
2253*d14abf15SRobert Mustacchi 			return rc;
2254*d14abf15SRobert Mustacchi 	}
2255*d14abf15SRobert Mustacchi 
2256*d14abf15SRobert Mustacchi 	/* If nothing will be executed further in this iteration we want to
2257*d14abf15SRobert Mustacchi 	 * return PENDING if there are pending commands
2258*d14abf15SRobert Mustacchi 	 */
2259*d14abf15SRobert Mustacchi 	if (!ecore_exe_queue_empty(&o->exe_queue))
2260*d14abf15SRobert Mustacchi 		rc = ECORE_PENDING;
2261*d14abf15SRobert Mustacchi 
2262*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
2263*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2264*d14abf15SRobert Mustacchi 		raw->clear_pending(raw);
2265*d14abf15SRobert Mustacchi 	}
2266*d14abf15SRobert Mustacchi 
2267*d14abf15SRobert Mustacchi 	/* Execute commands if required */
2268*d14abf15SRobert Mustacchi 	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2269*d14abf15SRobert Mustacchi 	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2270*d14abf15SRobert Mustacchi 		rc = __ecore_vlan_mac_execute_step(pdev, p->vlan_mac_obj,
2271*d14abf15SRobert Mustacchi 						   &p->ramrod_flags);
2272*d14abf15SRobert Mustacchi 		if (rc < 0)
2273*d14abf15SRobert Mustacchi 			return rc;
2274*d14abf15SRobert Mustacchi 	}
2275*d14abf15SRobert Mustacchi 
2276*d14abf15SRobert Mustacchi 	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
2277*d14abf15SRobert Mustacchi 	 * then user want to wait until the last command is done.
2278*d14abf15SRobert Mustacchi 	 */
2279*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2280*d14abf15SRobert Mustacchi 		/* Wait maximum for the current exe_queue length iterations plus
2281*d14abf15SRobert Mustacchi 		 * one (for the current pending command).
2282*d14abf15SRobert Mustacchi 		 */
2283*d14abf15SRobert Mustacchi 		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2284*d14abf15SRobert Mustacchi 
2285*d14abf15SRobert Mustacchi 		while (!ecore_exe_queue_empty(&o->exe_queue) &&
2286*d14abf15SRobert Mustacchi 		       max_iterations--) {
2287*d14abf15SRobert Mustacchi 
2288*d14abf15SRobert Mustacchi 			/* Wait for the current command to complete */
2289*d14abf15SRobert Mustacchi 			rc = raw->wait_comp(pdev, raw);
2290*d14abf15SRobert Mustacchi 			if (rc)
2291*d14abf15SRobert Mustacchi 				return rc;
2292*d14abf15SRobert Mustacchi 
2293*d14abf15SRobert Mustacchi 			/* Make a next step */
2294*d14abf15SRobert Mustacchi 			rc = __ecore_vlan_mac_execute_step(pdev,
2295*d14abf15SRobert Mustacchi 							   p->vlan_mac_obj,
2296*d14abf15SRobert Mustacchi 							   &p->ramrod_flags);
2297*d14abf15SRobert Mustacchi 			if (rc < 0)
2298*d14abf15SRobert Mustacchi 				return rc;
2299*d14abf15SRobert Mustacchi 		}
2300*d14abf15SRobert Mustacchi 
2301*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
2302*d14abf15SRobert Mustacchi 	}
2303*d14abf15SRobert Mustacchi 
2304*d14abf15SRobert Mustacchi 	return rc;
2305*d14abf15SRobert Mustacchi }
2306*d14abf15SRobert Mustacchi 
2307*d14abf15SRobert Mustacchi /**
2308*d14abf15SRobert Mustacchi  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2309*d14abf15SRobert Mustacchi  *
2310*d14abf15SRobert Mustacchi  * @pdev:		device handle
2311*d14abf15SRobert Mustacchi  * @o:
2312*d14abf15SRobert Mustacchi  * @vlan_mac_flags:
2313*d14abf15SRobert Mustacchi  * @ramrod_flags:	execution flags to be used for this deletion
2314*d14abf15SRobert Mustacchi  *
2315*d14abf15SRobert Mustacchi  * if the last operation has completed successfully and there are no
2316*d14abf15SRobert Mustacchi  * more elements left, positive value if the last operation has completed
2317*d14abf15SRobert Mustacchi  * successfully and there are more previously configured elements, negative
2318*d14abf15SRobert Mustacchi  * value is current operation has failed.
2319*d14abf15SRobert Mustacchi  */
2320*d14abf15SRobert Mustacchi static int ecore_vlan_mac_del_all(struct _lm_device_t *pdev,
2321*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_obj *o,
2322*d14abf15SRobert Mustacchi 				  unsigned long *vlan_mac_flags,
2323*d14abf15SRobert Mustacchi 				  unsigned long *ramrod_flags)
2324*d14abf15SRobert Mustacchi {
2325*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_registry_elem *pos = NULL;
2326*d14abf15SRobert Mustacchi 	struct ecore_vlan_mac_ramrod_params p;
2327*d14abf15SRobert Mustacchi 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2328*d14abf15SRobert Mustacchi 	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2329*d14abf15SRobert Mustacchi 	unsigned long flags;
2330*d14abf15SRobert Mustacchi 	int read_lock;
2331*d14abf15SRobert Mustacchi 	int rc = 0;
2332*d14abf15SRobert Mustacchi 
2333*d14abf15SRobert Mustacchi 	/* Clear pending commands first */
2334*d14abf15SRobert Mustacchi 
2335*d14abf15SRobert Mustacchi 	ECORE_SPIN_LOCK_BH(&exeq->lock);
2336*d14abf15SRobert Mustacchi 
2337*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2338*d14abf15SRobert Mustacchi 				       &exeq->exe_queue, link,
2339*d14abf15SRobert Mustacchi 				       struct ecore_exeq_elem) {
2340*d14abf15SRobert Mustacchi 		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2341*d14abf15SRobert Mustacchi 		if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
2342*d14abf15SRobert Mustacchi 		    ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2343*d14abf15SRobert Mustacchi 			rc = exeq->remove(pdev, exeq->owner, exeq_pos);
2344*d14abf15SRobert Mustacchi 			if (rc) {
2345*d14abf15SRobert Mustacchi 				ECORE_ERR("Failed to remove command\n");
2346*d14abf15SRobert Mustacchi 				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2347*d14abf15SRobert Mustacchi 				return rc;
2348*d14abf15SRobert Mustacchi 			}
2349*d14abf15SRobert Mustacchi 			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2350*d14abf15SRobert Mustacchi 						&exeq->exe_queue);
2351*d14abf15SRobert Mustacchi 			ecore_exe_queue_free_elem(pdev, exeq_pos);
2352*d14abf15SRobert Mustacchi 		}
2353*d14abf15SRobert Mustacchi 	}
2354*d14abf15SRobert Mustacchi 
2355*d14abf15SRobert Mustacchi 	ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2356*d14abf15SRobert Mustacchi 
2357*d14abf15SRobert Mustacchi 	/* Prepare a command request */
2358*d14abf15SRobert Mustacchi 	mm_memset(&p, 0, sizeof(p));
2359*d14abf15SRobert Mustacchi 	p.vlan_mac_obj = o;
2360*d14abf15SRobert Mustacchi 	p.ramrod_flags = *ramrod_flags;
2361*d14abf15SRobert Mustacchi 	p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2362*d14abf15SRobert Mustacchi 
2363*d14abf15SRobert Mustacchi 	/* Add all but the last VLAN-MAC to the execution queue without actually
2364*d14abf15SRobert Mustacchi 	 * execution anything.
2365*d14abf15SRobert Mustacchi 	 */
2366*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2367*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2368*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2369*d14abf15SRobert Mustacchi 
2370*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2371*d14abf15SRobert Mustacchi 	read_lock = ecore_vlan_mac_h_read_lock(pdev, o);
2372*d14abf15SRobert Mustacchi 	if (read_lock != ECORE_SUCCESS)
2373*d14abf15SRobert Mustacchi 		return read_lock;
2374*d14abf15SRobert Mustacchi 
2375*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2376*d14abf15SRobert Mustacchi 				  struct ecore_vlan_mac_registry_elem) {
2377*d14abf15SRobert Mustacchi 		flags = pos->vlan_mac_flags;
2378*d14abf15SRobert Mustacchi 		if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
2379*d14abf15SRobert Mustacchi 		    ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2380*d14abf15SRobert Mustacchi 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2381*d14abf15SRobert Mustacchi 			mm_memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2382*d14abf15SRobert Mustacchi 			rc = ecore_config_vlan_mac(pdev, &p);
2383*d14abf15SRobert Mustacchi 			if (rc < 0) {
2384*d14abf15SRobert Mustacchi 				ECORE_ERR("Failed to add a new DEL command\n");
2385*d14abf15SRobert Mustacchi 				ecore_vlan_mac_h_read_unlock(pdev, o);
2386*d14abf15SRobert Mustacchi 				return rc;
2387*d14abf15SRobert Mustacchi 			}
2388*d14abf15SRobert Mustacchi 		}
2389*d14abf15SRobert Mustacchi 	}
2390*d14abf15SRobert Mustacchi 
2391*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2392*d14abf15SRobert Mustacchi 	ecore_vlan_mac_h_read_unlock(pdev, o);
2393*d14abf15SRobert Mustacchi 
2394*d14abf15SRobert Mustacchi 	p.ramrod_flags = *ramrod_flags;
2395*d14abf15SRobert Mustacchi 	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2396*d14abf15SRobert Mustacchi 
2397*d14abf15SRobert Mustacchi 	return ecore_config_vlan_mac(pdev, &p);
2398*d14abf15SRobert Mustacchi }
2399*d14abf15SRobert Mustacchi 
2400*d14abf15SRobert Mustacchi static INLINE void ecore_init_raw_obj(struct ecore_raw_obj *raw, u8 cl_id,
2401*d14abf15SRobert Mustacchi 	u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping, int state,
2402*d14abf15SRobert Mustacchi 	unsigned long *pstate, ecore_obj_type type)
2403*d14abf15SRobert Mustacchi {
2404*d14abf15SRobert Mustacchi 	raw->func_id = func_id;
2405*d14abf15SRobert Mustacchi 	raw->cid = cid;
2406*d14abf15SRobert Mustacchi 	raw->cl_id = cl_id;
2407*d14abf15SRobert Mustacchi 	raw->rdata = rdata;
2408*d14abf15SRobert Mustacchi 	raw->rdata_mapping = rdata_mapping;
2409*d14abf15SRobert Mustacchi 	raw->state = state;
2410*d14abf15SRobert Mustacchi 	raw->pstate = pstate;
2411*d14abf15SRobert Mustacchi 	raw->obj_type = type;
2412*d14abf15SRobert Mustacchi 	raw->check_pending = ecore_raw_check_pending;
2413*d14abf15SRobert Mustacchi 	raw->clear_pending = ecore_raw_clear_pending;
2414*d14abf15SRobert Mustacchi 	raw->set_pending = ecore_raw_set_pending;
2415*d14abf15SRobert Mustacchi 	raw->wait_comp = ecore_raw_wait;
2416*d14abf15SRobert Mustacchi }
2417*d14abf15SRobert Mustacchi 
2418*d14abf15SRobert Mustacchi static INLINE void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2419*d14abf15SRobert Mustacchi 	u8 cl_id, u32 cid, u8 func_id, void *rdata, lm_address_t rdata_mapping,
2420*d14abf15SRobert Mustacchi 	int state, unsigned long *pstate, ecore_obj_type type,
2421*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *macs_pool,
2422*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *vlans_pool)
2423*d14abf15SRobert Mustacchi {
2424*d14abf15SRobert Mustacchi 	ECORE_LIST_INIT(&o->head);
2425*d14abf15SRobert Mustacchi 	o->head_reader = 0;
2426*d14abf15SRobert Mustacchi 	o->head_exe_request = FALSE;
2427*d14abf15SRobert Mustacchi 	o->saved_ramrod_flags = 0;
2428*d14abf15SRobert Mustacchi 
2429*d14abf15SRobert Mustacchi 	o->macs_pool = macs_pool;
2430*d14abf15SRobert Mustacchi 	o->vlans_pool = vlans_pool;
2431*d14abf15SRobert Mustacchi 
2432*d14abf15SRobert Mustacchi 	o->delete_all = ecore_vlan_mac_del_all;
2433*d14abf15SRobert Mustacchi 	o->restore = ecore_vlan_mac_restore;
2434*d14abf15SRobert Mustacchi 	o->complete = ecore_complete_vlan_mac;
2435*d14abf15SRobert Mustacchi 	o->wait = ecore_wait_vlan_mac;
2436*d14abf15SRobert Mustacchi 
2437*d14abf15SRobert Mustacchi 	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2438*d14abf15SRobert Mustacchi 			   state, pstate, type);
2439*d14abf15SRobert Mustacchi }
2440*d14abf15SRobert Mustacchi 
2441*d14abf15SRobert Mustacchi void ecore_init_mac_obj(struct _lm_device_t *pdev,
2442*d14abf15SRobert Mustacchi 			struct ecore_vlan_mac_obj *mac_obj,
2443*d14abf15SRobert Mustacchi 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
2444*d14abf15SRobert Mustacchi 			lm_address_t rdata_mapping, int state,
2445*d14abf15SRobert Mustacchi 			unsigned long *pstate, ecore_obj_type type,
2446*d14abf15SRobert Mustacchi 			struct ecore_credit_pool_obj *macs_pool)
2447*d14abf15SRobert Mustacchi {
2448*d14abf15SRobert Mustacchi 	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2449*d14abf15SRobert Mustacchi 
2450*d14abf15SRobert Mustacchi 	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2451*d14abf15SRobert Mustacchi 				   rdata_mapping, state, pstate, type,
2452*d14abf15SRobert Mustacchi 				   macs_pool, NULL);
2453*d14abf15SRobert Mustacchi 
2454*d14abf15SRobert Mustacchi 	/* CAM credit pool handling */
2455*d14abf15SRobert Mustacchi 	mac_obj->get_credit = ecore_get_credit_mac;
2456*d14abf15SRobert Mustacchi 	mac_obj->put_credit = ecore_put_credit_mac;
2457*d14abf15SRobert Mustacchi 	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2458*d14abf15SRobert Mustacchi 	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2459*d14abf15SRobert Mustacchi 
2460*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1x(pdev)) {
2461*d14abf15SRobert Mustacchi 		mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2462*d14abf15SRobert Mustacchi 		mac_obj->check_del         = ecore_check_mac_del;
2463*d14abf15SRobert Mustacchi 		mac_obj->check_add         = ecore_check_mac_add;
2464*d14abf15SRobert Mustacchi 		mac_obj->check_move        = ecore_check_move_always_err;
2465*d14abf15SRobert Mustacchi 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2466*d14abf15SRobert Mustacchi 
2467*d14abf15SRobert Mustacchi 		/* Exe Queue */
2468*d14abf15SRobert Mustacchi 		ecore_exe_queue_init(pdev,
2469*d14abf15SRobert Mustacchi 				     &mac_obj->exe_queue, 1, qable_obj,
2470*d14abf15SRobert Mustacchi 				     ecore_validate_vlan_mac,
2471*d14abf15SRobert Mustacchi 				     ecore_remove_vlan_mac,
2472*d14abf15SRobert Mustacchi 				     ecore_optimize_vlan_mac,
2473*d14abf15SRobert Mustacchi 				     ecore_execute_vlan_mac,
2474*d14abf15SRobert Mustacchi 				     ecore_exeq_get_mac);
2475*d14abf15SRobert Mustacchi 	} else {
2476*d14abf15SRobert Mustacchi 		mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2477*d14abf15SRobert Mustacchi 		mac_obj->check_del         = ecore_check_mac_del;
2478*d14abf15SRobert Mustacchi 		mac_obj->check_add         = ecore_check_mac_add;
2479*d14abf15SRobert Mustacchi 		mac_obj->check_move        = ecore_check_move;
2480*d14abf15SRobert Mustacchi 		mac_obj->ramrod_cmd        =
2481*d14abf15SRobert Mustacchi 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2482*d14abf15SRobert Mustacchi 		mac_obj->get_n_elements    = ecore_get_n_elements;
2483*d14abf15SRobert Mustacchi 
2484*d14abf15SRobert Mustacchi 		/* Exe Queue */
2485*d14abf15SRobert Mustacchi 		ecore_exe_queue_init(pdev,
2486*d14abf15SRobert Mustacchi 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2487*d14abf15SRobert Mustacchi 				     qable_obj, ecore_validate_vlan_mac,
2488*d14abf15SRobert Mustacchi 				     ecore_remove_vlan_mac,
2489*d14abf15SRobert Mustacchi 				     ecore_optimize_vlan_mac,
2490*d14abf15SRobert Mustacchi 				     ecore_execute_vlan_mac,
2491*d14abf15SRobert Mustacchi 				     ecore_exeq_get_mac);
2492*d14abf15SRobert Mustacchi 	}
2493*d14abf15SRobert Mustacchi }
2494*d14abf15SRobert Mustacchi 
2495*d14abf15SRobert Mustacchi void ecore_init_vlan_obj(struct _lm_device_t *pdev,
2496*d14abf15SRobert Mustacchi 			 struct ecore_vlan_mac_obj *vlan_obj,
2497*d14abf15SRobert Mustacchi 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2498*d14abf15SRobert Mustacchi 			 lm_address_t rdata_mapping, int state,
2499*d14abf15SRobert Mustacchi 			 unsigned long *pstate, ecore_obj_type type,
2500*d14abf15SRobert Mustacchi 			 struct ecore_credit_pool_obj *vlans_pool)
2501*d14abf15SRobert Mustacchi {
2502*d14abf15SRobert Mustacchi 	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2503*d14abf15SRobert Mustacchi 
2504*d14abf15SRobert Mustacchi 	ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2505*d14abf15SRobert Mustacchi 				   rdata_mapping, state, pstate, type, NULL,
2506*d14abf15SRobert Mustacchi 				   vlans_pool);
2507*d14abf15SRobert Mustacchi 
2508*d14abf15SRobert Mustacchi 	vlan_obj->get_credit = ecore_get_credit_vlan;
2509*d14abf15SRobert Mustacchi 	vlan_obj->put_credit = ecore_put_credit_vlan;
2510*d14abf15SRobert Mustacchi 	vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2511*d14abf15SRobert Mustacchi 	vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2512*d14abf15SRobert Mustacchi 
2513*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1x(pdev)) {
2514*d14abf15SRobert Mustacchi 		ECORE_ERR("Do not support chips others than E2 and newer\n");
2515*d14abf15SRobert Mustacchi 		BUG();
2516*d14abf15SRobert Mustacchi 	} else {
2517*d14abf15SRobert Mustacchi 		vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2518*d14abf15SRobert Mustacchi 		vlan_obj->check_del         = ecore_check_vlan_del;
2519*d14abf15SRobert Mustacchi 		vlan_obj->check_add         = ecore_check_vlan_add;
2520*d14abf15SRobert Mustacchi 		vlan_obj->check_move        = ecore_check_move;
2521*d14abf15SRobert Mustacchi 		vlan_obj->ramrod_cmd        =
2522*d14abf15SRobert Mustacchi 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2523*d14abf15SRobert Mustacchi 		vlan_obj->get_n_elements    = ecore_get_n_elements;
2524*d14abf15SRobert Mustacchi 
2525*d14abf15SRobert Mustacchi 		/* Exe Queue */
2526*d14abf15SRobert Mustacchi 		ecore_exe_queue_init(pdev,
2527*d14abf15SRobert Mustacchi 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2528*d14abf15SRobert Mustacchi 				     qable_obj, ecore_validate_vlan_mac,
2529*d14abf15SRobert Mustacchi 				     ecore_remove_vlan_mac,
2530*d14abf15SRobert Mustacchi 				     ecore_optimize_vlan_mac,
2531*d14abf15SRobert Mustacchi 				     ecore_execute_vlan_mac,
2532*d14abf15SRobert Mustacchi 				     ecore_exeq_get_vlan);
2533*d14abf15SRobert Mustacchi 	}
2534*d14abf15SRobert Mustacchi }
2535*d14abf15SRobert Mustacchi 
2536*d14abf15SRobert Mustacchi void ecore_init_vlan_mac_obj(struct _lm_device_t *pdev,
2537*d14abf15SRobert Mustacchi 			     struct ecore_vlan_mac_obj *vlan_mac_obj,
2538*d14abf15SRobert Mustacchi 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
2539*d14abf15SRobert Mustacchi 			     lm_address_t rdata_mapping, int state,
2540*d14abf15SRobert Mustacchi 			     unsigned long *pstate, ecore_obj_type type,
2541*d14abf15SRobert Mustacchi 			     struct ecore_credit_pool_obj *macs_pool,
2542*d14abf15SRobert Mustacchi 			     struct ecore_credit_pool_obj *vlans_pool)
2543*d14abf15SRobert Mustacchi {
2544*d14abf15SRobert Mustacchi 	union ecore_qable_obj *qable_obj =
2545*d14abf15SRobert Mustacchi 		(union ecore_qable_obj *)vlan_mac_obj;
2546*d14abf15SRobert Mustacchi 
2547*d14abf15SRobert Mustacchi 	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2548*d14abf15SRobert Mustacchi 				   rdata_mapping, state, pstate, type,
2549*d14abf15SRobert Mustacchi 				   macs_pool, vlans_pool);
2550*d14abf15SRobert Mustacchi 
2551*d14abf15SRobert Mustacchi 	/* CAM pool handling */
2552*d14abf15SRobert Mustacchi 	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2553*d14abf15SRobert Mustacchi 	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2554*d14abf15SRobert Mustacchi 	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2555*d14abf15SRobert Mustacchi 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2556*d14abf15SRobert Mustacchi 	 * will be taken from MACs' pool object only.
2557*d14abf15SRobert Mustacchi 	 */
2558*d14abf15SRobert Mustacchi 	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2559*d14abf15SRobert Mustacchi 	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2560*d14abf15SRobert Mustacchi 
2561*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1(pdev)) {
2562*d14abf15SRobert Mustacchi 		ECORE_ERR("Do not support chips others than E2\n");
2563*d14abf15SRobert Mustacchi 		BUG();
2564*d14abf15SRobert Mustacchi 	} else if (CHIP_IS_E1H(pdev)) {
2565*d14abf15SRobert Mustacchi 		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2566*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2567*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2568*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_move        = ecore_check_move_always_err;
2569*d14abf15SRobert Mustacchi 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2570*d14abf15SRobert Mustacchi 
2571*d14abf15SRobert Mustacchi 		/* Exe Queue */
2572*d14abf15SRobert Mustacchi 		ecore_exe_queue_init(pdev,
2573*d14abf15SRobert Mustacchi 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2574*d14abf15SRobert Mustacchi 				     ecore_validate_vlan_mac,
2575*d14abf15SRobert Mustacchi 				     ecore_remove_vlan_mac,
2576*d14abf15SRobert Mustacchi 				     ecore_optimize_vlan_mac,
2577*d14abf15SRobert Mustacchi 				     ecore_execute_vlan_mac,
2578*d14abf15SRobert Mustacchi 				     ecore_exeq_get_vlan_mac);
2579*d14abf15SRobert Mustacchi 	} else {
2580*d14abf15SRobert Mustacchi 		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2581*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2582*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2583*d14abf15SRobert Mustacchi 		vlan_mac_obj->check_move        = ecore_check_move;
2584*d14abf15SRobert Mustacchi 		vlan_mac_obj->ramrod_cmd        =
2585*d14abf15SRobert Mustacchi 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2586*d14abf15SRobert Mustacchi 
2587*d14abf15SRobert Mustacchi 		/* Exe Queue */
2588*d14abf15SRobert Mustacchi 		ecore_exe_queue_init(pdev,
2589*d14abf15SRobert Mustacchi 				     &vlan_mac_obj->exe_queue,
2590*d14abf15SRobert Mustacchi 				     CLASSIFY_RULES_COUNT,
2591*d14abf15SRobert Mustacchi 				     qable_obj, ecore_validate_vlan_mac,
2592*d14abf15SRobert Mustacchi 				     ecore_remove_vlan_mac,
2593*d14abf15SRobert Mustacchi 				     ecore_optimize_vlan_mac,
2594*d14abf15SRobert Mustacchi 				     ecore_execute_vlan_mac,
2595*d14abf15SRobert Mustacchi 				     ecore_exeq_get_vlan_mac);
2596*d14abf15SRobert Mustacchi 	}
2597*d14abf15SRobert Mustacchi }
2598*d14abf15SRobert Mustacchi 
2599*d14abf15SRobert Mustacchi /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2600*d14abf15SRobert Mustacchi static INLINE void __storm_memset_mac_filters(struct _lm_device_t *pdev,
2601*d14abf15SRobert Mustacchi 			struct tstorm_eth_mac_filter_config *mac_filters,
2602*d14abf15SRobert Mustacchi 			u16 pf_id)
2603*d14abf15SRobert Mustacchi {
2604*d14abf15SRobert Mustacchi 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2605*d14abf15SRobert Mustacchi 
2606*d14abf15SRobert Mustacchi 	u32 addr = BAR_TSTRORM_INTMEM +
2607*d14abf15SRobert Mustacchi 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2608*d14abf15SRobert Mustacchi 
2609*d14abf15SRobert Mustacchi 	__storm_memset_struct(pdev, addr, size, (u32 *)mac_filters);
2610*d14abf15SRobert Mustacchi }
2611*d14abf15SRobert Mustacchi 
2612*d14abf15SRobert Mustacchi static int ecore_set_rx_mode_e1x(struct _lm_device_t *pdev,
2613*d14abf15SRobert Mustacchi 				 struct ecore_rx_mode_ramrod_params *p)
2614*d14abf15SRobert Mustacchi {
2615*d14abf15SRobert Mustacchi 	/* update the pdev MAC filter structure */
2616*d14abf15SRobert Mustacchi 	u32 mask = (1 << p->cl_id);
2617*d14abf15SRobert Mustacchi 
2618*d14abf15SRobert Mustacchi 	struct tstorm_eth_mac_filter_config *mac_filters =
2619*d14abf15SRobert Mustacchi 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2620*d14abf15SRobert Mustacchi 
2621*d14abf15SRobert Mustacchi 	/* initial setting is drop-all */
2622*d14abf15SRobert Mustacchi 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2623*d14abf15SRobert Mustacchi 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2624*d14abf15SRobert Mustacchi 	u8 unmatched_unicast = 0;
2625*d14abf15SRobert Mustacchi 
2626*d14abf15SRobert Mustacchi     /* In e1x there we only take into account rx accept flag since tx switching
2627*d14abf15SRobert Mustacchi      * isn't enabled. */
2628*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2629*d14abf15SRobert Mustacchi 		/* accept matched ucast */
2630*d14abf15SRobert Mustacchi 		drop_all_ucast = 0;
2631*d14abf15SRobert Mustacchi 
2632*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2633*d14abf15SRobert Mustacchi 		/* accept matched mcast */
2634*d14abf15SRobert Mustacchi 		drop_all_mcast = 0;
2635*d14abf15SRobert Mustacchi 
2636*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2637*d14abf15SRobert Mustacchi 		/* accept all mcast */
2638*d14abf15SRobert Mustacchi 		drop_all_ucast = 0;
2639*d14abf15SRobert Mustacchi 		accp_all_ucast = 1;
2640*d14abf15SRobert Mustacchi 	}
2641*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2642*d14abf15SRobert Mustacchi 		/* accept all mcast */
2643*d14abf15SRobert Mustacchi 		drop_all_mcast = 0;
2644*d14abf15SRobert Mustacchi 		accp_all_mcast = 1;
2645*d14abf15SRobert Mustacchi 	}
2646*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2647*d14abf15SRobert Mustacchi 		/* accept (all) bcast */
2648*d14abf15SRobert Mustacchi 		accp_all_bcast = 1;
2649*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2650*d14abf15SRobert Mustacchi 		/* accept unmatched unicasts */
2651*d14abf15SRobert Mustacchi 		unmatched_unicast = 1;
2652*d14abf15SRobert Mustacchi 
2653*d14abf15SRobert Mustacchi 	mac_filters->ucast_drop_all = drop_all_ucast ?
2654*d14abf15SRobert Mustacchi 		mac_filters->ucast_drop_all | mask :
2655*d14abf15SRobert Mustacchi 		mac_filters->ucast_drop_all & ~mask;
2656*d14abf15SRobert Mustacchi 
2657*d14abf15SRobert Mustacchi 	mac_filters->mcast_drop_all = drop_all_mcast ?
2658*d14abf15SRobert Mustacchi 		mac_filters->mcast_drop_all | mask :
2659*d14abf15SRobert Mustacchi 		mac_filters->mcast_drop_all & ~mask;
2660*d14abf15SRobert Mustacchi 
2661*d14abf15SRobert Mustacchi 	mac_filters->ucast_accept_all = accp_all_ucast ?
2662*d14abf15SRobert Mustacchi 		mac_filters->ucast_accept_all | mask :
2663*d14abf15SRobert Mustacchi 		mac_filters->ucast_accept_all & ~mask;
2664*d14abf15SRobert Mustacchi 
2665*d14abf15SRobert Mustacchi 	mac_filters->mcast_accept_all = accp_all_mcast ?
2666*d14abf15SRobert Mustacchi 		mac_filters->mcast_accept_all | mask :
2667*d14abf15SRobert Mustacchi 		mac_filters->mcast_accept_all & ~mask;
2668*d14abf15SRobert Mustacchi 
2669*d14abf15SRobert Mustacchi 	mac_filters->bcast_accept_all = accp_all_bcast ?
2670*d14abf15SRobert Mustacchi 		mac_filters->bcast_accept_all | mask :
2671*d14abf15SRobert Mustacchi 		mac_filters->bcast_accept_all & ~mask;
2672*d14abf15SRobert Mustacchi 
2673*d14abf15SRobert Mustacchi 	mac_filters->unmatched_unicast = unmatched_unicast ?
2674*d14abf15SRobert Mustacchi 		mac_filters->unmatched_unicast | mask :
2675*d14abf15SRobert Mustacchi 		mac_filters->unmatched_unicast & ~mask;
2676*d14abf15SRobert Mustacchi 
2677*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2678*d14abf15SRobert Mustacchi 			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2679*d14abf15SRobert Mustacchi 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2680*d14abf15SRobert Mustacchi 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2681*d14abf15SRobert Mustacchi 	   mac_filters->bcast_accept_all);
2682*d14abf15SRobert Mustacchi 
2683*d14abf15SRobert Mustacchi 	/* write the MAC filter structure*/
2684*d14abf15SRobert Mustacchi 	__storm_memset_mac_filters(pdev, mac_filters, p->func_id);
2685*d14abf15SRobert Mustacchi 
2686*d14abf15SRobert Mustacchi 	/* The operation is completed */
2687*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT(p->state, p->pstate);
2688*d14abf15SRobert Mustacchi 	smp_mb__after_atomic();
2689*d14abf15SRobert Mustacchi 
2690*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
2691*d14abf15SRobert Mustacchi }
2692*d14abf15SRobert Mustacchi 
2693*d14abf15SRobert Mustacchi /* Setup ramrod data */
2694*d14abf15SRobert Mustacchi static INLINE void ecore_rx_mode_set_rdata_hdr_e2(u32 cid,
2695*d14abf15SRobert Mustacchi 				struct eth_classify_header *hdr,
2696*d14abf15SRobert Mustacchi 				u8 rule_cnt)
2697*d14abf15SRobert Mustacchi {
2698*d14abf15SRobert Mustacchi 	hdr->echo = mm_cpu_to_le32(cid);
2699*d14abf15SRobert Mustacchi 	hdr->rule_cnt = rule_cnt;
2700*d14abf15SRobert Mustacchi }
2701*d14abf15SRobert Mustacchi 
2702*d14abf15SRobert Mustacchi static INLINE void ecore_rx_mode_set_cmd_state_e2(struct _lm_device_t *pdev,
2703*d14abf15SRobert Mustacchi 				unsigned long *accept_flags,
2704*d14abf15SRobert Mustacchi 				struct eth_filter_rules_cmd *cmd,
2705*d14abf15SRobert Mustacchi 				BOOL clear_accept_all)
2706*d14abf15SRobert Mustacchi {
2707*d14abf15SRobert Mustacchi 	u16 state;
2708*d14abf15SRobert Mustacchi 
2709*d14abf15SRobert Mustacchi 	/* start with 'drop-all' */
2710*d14abf15SRobert Mustacchi 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2711*d14abf15SRobert Mustacchi 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2712*d14abf15SRobert Mustacchi 
2713*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2714*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2715*d14abf15SRobert Mustacchi 
2716*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2717*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2718*d14abf15SRobert Mustacchi 
2719*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2720*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2721*d14abf15SRobert Mustacchi 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2722*d14abf15SRobert Mustacchi 	}
2723*d14abf15SRobert Mustacchi 
2724*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2725*d14abf15SRobert Mustacchi 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2726*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2727*d14abf15SRobert Mustacchi 	}
2728*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2729*d14abf15SRobert Mustacchi 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2730*d14abf15SRobert Mustacchi 
2731*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2732*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2733*d14abf15SRobert Mustacchi 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2734*d14abf15SRobert Mustacchi 	}
2735*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2736*d14abf15SRobert Mustacchi 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2737*d14abf15SRobert Mustacchi 
2738*d14abf15SRobert Mustacchi 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2739*d14abf15SRobert Mustacchi 	if (clear_accept_all) {
2740*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2741*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2742*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2743*d14abf15SRobert Mustacchi 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2744*d14abf15SRobert Mustacchi 	}
2745*d14abf15SRobert Mustacchi 
2746*d14abf15SRobert Mustacchi 	cmd->state = mm_cpu_to_le16(state);
2747*d14abf15SRobert Mustacchi }
2748*d14abf15SRobert Mustacchi 
2749*d14abf15SRobert Mustacchi static int ecore_set_rx_mode_e2(struct _lm_device_t *pdev,
2750*d14abf15SRobert Mustacchi 				struct ecore_rx_mode_ramrod_params *p)
2751*d14abf15SRobert Mustacchi {
2752*d14abf15SRobert Mustacchi 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2753*d14abf15SRobert Mustacchi 	int rc;
2754*d14abf15SRobert Mustacchi 	u8 rule_idx = 0;
2755*d14abf15SRobert Mustacchi 
2756*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer */
2757*d14abf15SRobert Mustacchi 	mm_memset(data, 0, sizeof(*data));
2758*d14abf15SRobert Mustacchi 
2759*d14abf15SRobert Mustacchi 	/* Setup ramrod data */
2760*d14abf15SRobert Mustacchi 
2761*d14abf15SRobert Mustacchi 	/* Tx (internal switching) */
2762*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2763*d14abf15SRobert Mustacchi 		data->rules[rule_idx].client_id = p->cl_id;
2764*d14abf15SRobert Mustacchi 		data->rules[rule_idx].func_id = p->func_id;
2765*d14abf15SRobert Mustacchi 
2766*d14abf15SRobert Mustacchi 		data->rules[rule_idx].cmd_general_data =
2767*d14abf15SRobert Mustacchi 			ETH_FILTER_RULES_CMD_TX_CMD;
2768*d14abf15SRobert Mustacchi 
2769*d14abf15SRobert Mustacchi 		ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags,
2770*d14abf15SRobert Mustacchi 					       &(data->rules[rule_idx++]),
2771*d14abf15SRobert Mustacchi 					       FALSE);
2772*d14abf15SRobert Mustacchi 	}
2773*d14abf15SRobert Mustacchi 
2774*d14abf15SRobert Mustacchi 	/* Rx */
2775*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2776*d14abf15SRobert Mustacchi 		data->rules[rule_idx].client_id = p->cl_id;
2777*d14abf15SRobert Mustacchi 		data->rules[rule_idx].func_id = p->func_id;
2778*d14abf15SRobert Mustacchi 
2779*d14abf15SRobert Mustacchi 		data->rules[rule_idx].cmd_general_data =
2780*d14abf15SRobert Mustacchi 			ETH_FILTER_RULES_CMD_RX_CMD;
2781*d14abf15SRobert Mustacchi 
2782*d14abf15SRobert Mustacchi 		ecore_rx_mode_set_cmd_state_e2(pdev, &p->rx_accept_flags,
2783*d14abf15SRobert Mustacchi 					       &(data->rules[rule_idx++]),
2784*d14abf15SRobert Mustacchi 					       FALSE);
2785*d14abf15SRobert Mustacchi 	}
2786*d14abf15SRobert Mustacchi 
2787*d14abf15SRobert Mustacchi 	/* If FCoE Queue configuration has been requested configure the Rx and
2788*d14abf15SRobert Mustacchi 	 * internal switching modes for this queue in separate rules.
2789*d14abf15SRobert Mustacchi 	 *
2790*d14abf15SRobert Mustacchi 	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2791*d14abf15SRobert Mustacchi 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2792*d14abf15SRobert Mustacchi 	 */
2793*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2794*d14abf15SRobert Mustacchi 		/*  Tx (internal switching) */
2795*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2796*d14abf15SRobert Mustacchi 			data->rules[rule_idx].client_id = FCOE_CID(pdev);
2797*d14abf15SRobert Mustacchi 			data->rules[rule_idx].func_id = p->func_id;
2798*d14abf15SRobert Mustacchi 
2799*d14abf15SRobert Mustacchi 			data->rules[rule_idx].cmd_general_data =
2800*d14abf15SRobert Mustacchi 						ETH_FILTER_RULES_CMD_TX_CMD;
2801*d14abf15SRobert Mustacchi 
2802*d14abf15SRobert Mustacchi 			ecore_rx_mode_set_cmd_state_e2(pdev, &p->tx_accept_flags,
2803*d14abf15SRobert Mustacchi 						       &(data->rules[rule_idx]),
2804*d14abf15SRobert Mustacchi 						       TRUE);
2805*d14abf15SRobert Mustacchi 			rule_idx++;
2806*d14abf15SRobert Mustacchi 		}
2807*d14abf15SRobert Mustacchi 
2808*d14abf15SRobert Mustacchi 		/* Rx */
2809*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2810*d14abf15SRobert Mustacchi 			data->rules[rule_idx].client_id = FCOE_CID(pdev);
2811*d14abf15SRobert Mustacchi 			data->rules[rule_idx].func_id = p->func_id;
2812*d14abf15SRobert Mustacchi 
2813*d14abf15SRobert Mustacchi 			data->rules[rule_idx].cmd_general_data =
2814*d14abf15SRobert Mustacchi 						ETH_FILTER_RULES_CMD_RX_CMD;
2815*d14abf15SRobert Mustacchi 
2816*d14abf15SRobert Mustacchi 			ecore_rx_mode_set_cmd_state_e2(pdev, &p->rx_accept_flags,
2817*d14abf15SRobert Mustacchi 						       &(data->rules[rule_idx]),
2818*d14abf15SRobert Mustacchi 						       TRUE);
2819*d14abf15SRobert Mustacchi 			rule_idx++;
2820*d14abf15SRobert Mustacchi 		}
2821*d14abf15SRobert Mustacchi 	}
2822*d14abf15SRobert Mustacchi 
2823*d14abf15SRobert Mustacchi 	/* Set the ramrod header (most importantly - number of rules to
2824*d14abf15SRobert Mustacchi 	 * configure).
2825*d14abf15SRobert Mustacchi 	 */
2826*d14abf15SRobert Mustacchi 	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2827*d14abf15SRobert Mustacchi 
2828*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2829*d14abf15SRobert Mustacchi 		  data->header.rule_cnt, p->rx_accept_flags,
2830*d14abf15SRobert Mustacchi 		  p->tx_accept_flags);
2831*d14abf15SRobert Mustacchi 
2832*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
2833*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
2834*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
2835*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
2836*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
2837*d14abf15SRobert Mustacchi 	 */
2838*d14abf15SRobert Mustacchi 
2839*d14abf15SRobert Mustacchi 	/* Send a ramrod */
2840*d14abf15SRobert Mustacchi 	rc = ecore_sp_post(pdev,
2841*d14abf15SRobert Mustacchi 			   RAMROD_CMD_ID_ETH_FILTER_RULES,
2842*d14abf15SRobert Mustacchi 			   p->cid,
2843*d14abf15SRobert Mustacchi 			   p->rdata_mapping.as_u64,
2844*d14abf15SRobert Mustacchi 			   ETH_CONNECTION_TYPE);
2845*d14abf15SRobert Mustacchi 	if (rc)
2846*d14abf15SRobert Mustacchi 		return rc;
2847*d14abf15SRobert Mustacchi 
2848*d14abf15SRobert Mustacchi 	/* Ramrod completion is pending */
2849*d14abf15SRobert Mustacchi 	return ECORE_PENDING;
2850*d14abf15SRobert Mustacchi }
2851*d14abf15SRobert Mustacchi 
2852*d14abf15SRobert Mustacchi static int ecore_wait_rx_mode_comp_e2(struct _lm_device_t *pdev,
2853*d14abf15SRobert Mustacchi 				      struct ecore_rx_mode_ramrod_params *p)
2854*d14abf15SRobert Mustacchi {
2855*d14abf15SRobert Mustacchi 	return ecore_state_wait(pdev, p->state, p->pstate);
2856*d14abf15SRobert Mustacchi }
2857*d14abf15SRobert Mustacchi 
2858*d14abf15SRobert Mustacchi static int ecore_empty_rx_mode_wait(struct _lm_device_t *pdev,
2859*d14abf15SRobert Mustacchi 				    struct ecore_rx_mode_ramrod_params *p)
2860*d14abf15SRobert Mustacchi {
2861*d14abf15SRobert Mustacchi 	/* Do nothing */
2862*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
2863*d14abf15SRobert Mustacchi }
2864*d14abf15SRobert Mustacchi 
2865*d14abf15SRobert Mustacchi int ecore_config_rx_mode(struct _lm_device_t *pdev,
2866*d14abf15SRobert Mustacchi 			 struct ecore_rx_mode_ramrod_params *p)
2867*d14abf15SRobert Mustacchi {
2868*d14abf15SRobert Mustacchi 	int rc;
2869*d14abf15SRobert Mustacchi 
2870*d14abf15SRobert Mustacchi 	/* Configure the new classification in the chip */
2871*d14abf15SRobert Mustacchi 	rc = p->rx_mode_obj->config_rx_mode(pdev, p);
2872*d14abf15SRobert Mustacchi 	if (rc < 0)
2873*d14abf15SRobert Mustacchi 		return rc;
2874*d14abf15SRobert Mustacchi 
2875*d14abf15SRobert Mustacchi 	/* Wait for a ramrod completion if was requested */
2876*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2877*d14abf15SRobert Mustacchi 		rc = p->rx_mode_obj->wait_comp(pdev, p);
2878*d14abf15SRobert Mustacchi 		if (rc)
2879*d14abf15SRobert Mustacchi 			return rc;
2880*d14abf15SRobert Mustacchi 	}
2881*d14abf15SRobert Mustacchi 
2882*d14abf15SRobert Mustacchi 	return rc;
2883*d14abf15SRobert Mustacchi }
2884*d14abf15SRobert Mustacchi 
2885*d14abf15SRobert Mustacchi void ecore_init_rx_mode_obj(struct _lm_device_t *pdev,
2886*d14abf15SRobert Mustacchi 			    struct ecore_rx_mode_obj *o)
2887*d14abf15SRobert Mustacchi {
2888*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1x(pdev)) {
2889*d14abf15SRobert Mustacchi 		o->wait_comp      = ecore_empty_rx_mode_wait;
2890*d14abf15SRobert Mustacchi 		o->config_rx_mode = ecore_set_rx_mode_e1x;
2891*d14abf15SRobert Mustacchi 	} else {
2892*d14abf15SRobert Mustacchi 		o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2893*d14abf15SRobert Mustacchi 		o->config_rx_mode = ecore_set_rx_mode_e2;
2894*d14abf15SRobert Mustacchi 	}
2895*d14abf15SRobert Mustacchi }
2896*d14abf15SRobert Mustacchi 
2897*d14abf15SRobert Mustacchi /********************* Multicast verbs: SET, CLEAR ****************************/
2898*d14abf15SRobert Mustacchi static INLINE u8 ecore_mcast_bin_from_mac(u8 *mac)
2899*d14abf15SRobert Mustacchi {
2900*d14abf15SRobert Mustacchi 	return (ecore_crc32_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2901*d14abf15SRobert Mustacchi }
2902*d14abf15SRobert Mustacchi 
2903*d14abf15SRobert Mustacchi struct ecore_mcast_mac_elem {
2904*d14abf15SRobert Mustacchi 	d_list_entry_t link;
2905*d14abf15SRobert Mustacchi 	u8 mac[ETH_ALEN];
2906*d14abf15SRobert Mustacchi 	u8 pad[2]; /* For a natural alignment of the following buffer */
2907*d14abf15SRobert Mustacchi };
2908*d14abf15SRobert Mustacchi 
2909*d14abf15SRobert Mustacchi struct ecore_pending_mcast_cmd {
2910*d14abf15SRobert Mustacchi 	d_list_entry_t link;
2911*d14abf15SRobert Mustacchi 	int type; /* ECORE_MCAST_CMD_X */
2912*d14abf15SRobert Mustacchi 	union {
2913*d14abf15SRobert Mustacchi 		d_list_t macs_head;
2914*d14abf15SRobert Mustacchi 		u32 macs_num; /* Needed for DEL command */
2915*d14abf15SRobert Mustacchi 		int next_bin; /* Needed for RESTORE flow with aprox match */
2916*d14abf15SRobert Mustacchi 	} data;
2917*d14abf15SRobert Mustacchi 
2918*d14abf15SRobert Mustacchi 	BOOL done; /* set to TRUE, when the command has been handled,
2919*d14abf15SRobert Mustacchi 		    * practically used in 57712 handling only, where one pending
2920*d14abf15SRobert Mustacchi 		    * command may be handled in a few operations. As long as for
2921*d14abf15SRobert Mustacchi 		    * other chips every operation handling is completed in a
2922*d14abf15SRobert Mustacchi 		    * single ramrod, there is no need to utilize this field.
2923*d14abf15SRobert Mustacchi 		    */
2924*d14abf15SRobert Mustacchi #ifndef ECORE_ERASE
2925*d14abf15SRobert Mustacchi 	u32 alloc_len; /* passed to ECORE_FREE */
2926*d14abf15SRobert Mustacchi #endif
2927*d14abf15SRobert Mustacchi };
2928*d14abf15SRobert Mustacchi 
2929*d14abf15SRobert Mustacchi static int ecore_mcast_wait(struct _lm_device_t *pdev,
2930*d14abf15SRobert Mustacchi 			    struct ecore_mcast_obj *o)
2931*d14abf15SRobert Mustacchi {
2932*d14abf15SRobert Mustacchi 	if (ecore_state_wait(pdev, o->sched_state, o->raw.pstate) ||
2933*d14abf15SRobert Mustacchi 			o->raw.wait_comp(pdev, &o->raw))
2934*d14abf15SRobert Mustacchi 		return ECORE_TIMEOUT;
2935*d14abf15SRobert Mustacchi 
2936*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
2937*d14abf15SRobert Mustacchi }
2938*d14abf15SRobert Mustacchi 
2939*d14abf15SRobert Mustacchi static int ecore_mcast_enqueue_cmd(struct _lm_device_t *pdev,
2940*d14abf15SRobert Mustacchi 				   struct ecore_mcast_obj *o,
2941*d14abf15SRobert Mustacchi 				   struct ecore_mcast_ramrod_params *p,
2942*d14abf15SRobert Mustacchi 				   enum ecore_mcast_cmd cmd)
2943*d14abf15SRobert Mustacchi {
2944*d14abf15SRobert Mustacchi 	int total_sz;
2945*d14abf15SRobert Mustacchi 	struct ecore_pending_mcast_cmd *new_cmd;
2946*d14abf15SRobert Mustacchi 	struct ecore_mcast_mac_elem *cur_mac = NULL;
2947*d14abf15SRobert Mustacchi 	struct ecore_mcast_list_elem *pos;
2948*d14abf15SRobert Mustacchi 	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2949*d14abf15SRobert Mustacchi 			     p->mcast_list_len : 0);
2950*d14abf15SRobert Mustacchi 
2951*d14abf15SRobert Mustacchi 	/* If the command is empty ("handle pending commands only"), break */
2952*d14abf15SRobert Mustacchi 	if (!p->mcast_list_len)
2953*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
2954*d14abf15SRobert Mustacchi 
2955*d14abf15SRobert Mustacchi 	total_sz = sizeof(*new_cmd) +
2956*d14abf15SRobert Mustacchi 		macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2957*d14abf15SRobert Mustacchi 
2958*d14abf15SRobert Mustacchi 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2959*d14abf15SRobert Mustacchi 	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, pdev);
2960*d14abf15SRobert Mustacchi 
2961*d14abf15SRobert Mustacchi 	if (!new_cmd)
2962*d14abf15SRobert Mustacchi 		return ECORE_NOMEM;
2963*d14abf15SRobert Mustacchi 
2964*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "About to enqueue a new %d command. macs_list_len=%d\n",
2965*d14abf15SRobert Mustacchi 		  cmd, macs_list_len);
2966*d14abf15SRobert Mustacchi 
2967*d14abf15SRobert Mustacchi 	ECORE_LIST_INIT(&new_cmd->data.macs_head);
2968*d14abf15SRobert Mustacchi 
2969*d14abf15SRobert Mustacchi 	new_cmd->type = cmd;
2970*d14abf15SRobert Mustacchi 	new_cmd->done = FALSE;
2971*d14abf15SRobert Mustacchi #ifndef ECORE_ERASE
2972*d14abf15SRobert Mustacchi 	new_cmd->alloc_len = total_sz;
2973*d14abf15SRobert Mustacchi #endif
2974*d14abf15SRobert Mustacchi 
2975*d14abf15SRobert Mustacchi 	switch (cmd) {
2976*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_ADD:
2977*d14abf15SRobert Mustacchi 		cur_mac = (struct ecore_mcast_mac_elem *)
2978*d14abf15SRobert Mustacchi 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2979*d14abf15SRobert Mustacchi 
2980*d14abf15SRobert Mustacchi 		/* Push the MACs of the current command into the pending command
2981*d14abf15SRobert Mustacchi 		 * MACs list: FIFO
2982*d14abf15SRobert Mustacchi 		 */
2983*d14abf15SRobert Mustacchi 		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2984*d14abf15SRobert Mustacchi 					  struct ecore_mcast_list_elem) {
2985*d14abf15SRobert Mustacchi 			mm_memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2986*d14abf15SRobert Mustacchi 			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2987*d14abf15SRobert Mustacchi 					     &new_cmd->data.macs_head);
2988*d14abf15SRobert Mustacchi 			cur_mac++;
2989*d14abf15SRobert Mustacchi 		}
2990*d14abf15SRobert Mustacchi 
2991*d14abf15SRobert Mustacchi 		break;
2992*d14abf15SRobert Mustacchi 
2993*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_DEL:
2994*d14abf15SRobert Mustacchi 		new_cmd->data.macs_num = p->mcast_list_len;
2995*d14abf15SRobert Mustacchi 		break;
2996*d14abf15SRobert Mustacchi 
2997*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_RESTORE:
2998*d14abf15SRobert Mustacchi 		new_cmd->data.next_bin = 0;
2999*d14abf15SRobert Mustacchi 		break;
3000*d14abf15SRobert Mustacchi 
3001*d14abf15SRobert Mustacchi 	default:
3002*d14abf15SRobert Mustacchi 		ECORE_FREE(pdev, new_cmd, total_sz);
3003*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", cmd);
3004*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
3005*d14abf15SRobert Mustacchi 	}
3006*d14abf15SRobert Mustacchi 
3007*d14abf15SRobert Mustacchi 	/* Push the new pending command to the tail of the pending list: FIFO */
3008*d14abf15SRobert Mustacchi 	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
3009*d14abf15SRobert Mustacchi 
3010*d14abf15SRobert Mustacchi 	o->set_sched(o);
3011*d14abf15SRobert Mustacchi 
3012*d14abf15SRobert Mustacchi 	return ECORE_PENDING;
3013*d14abf15SRobert Mustacchi }
3014*d14abf15SRobert Mustacchi 
3015*d14abf15SRobert Mustacchi /**
3016*d14abf15SRobert Mustacchi  * ecore_mcast_get_next_bin - get the next set bin (index)
3017*d14abf15SRobert Mustacchi  *
3018*d14abf15SRobert Mustacchi  * @o:
3019*d14abf15SRobert Mustacchi  * @last:	index to start looking from (including)
3020*d14abf15SRobert Mustacchi  *
3021*d14abf15SRobert Mustacchi  * returns the next found (set) bin or a negative value if none is found.
3022*d14abf15SRobert Mustacchi  */
3023*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
3024*d14abf15SRobert Mustacchi {
3025*d14abf15SRobert Mustacchi 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
3026*d14abf15SRobert Mustacchi 
3027*d14abf15SRobert Mustacchi 	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
3028*d14abf15SRobert Mustacchi 		if (o->registry.aprox_match.vec[i])
3029*d14abf15SRobert Mustacchi 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
3030*d14abf15SRobert Mustacchi 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
3031*d14abf15SRobert Mustacchi 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
3032*d14abf15SRobert Mustacchi 						       vec, cur_bit)) {
3033*d14abf15SRobert Mustacchi 					return cur_bit;
3034*d14abf15SRobert Mustacchi 				}
3035*d14abf15SRobert Mustacchi 			}
3036*d14abf15SRobert Mustacchi 		inner_start = 0;
3037*d14abf15SRobert Mustacchi 	}
3038*d14abf15SRobert Mustacchi 
3039*d14abf15SRobert Mustacchi 	/* None found */
3040*d14abf15SRobert Mustacchi 	return -1;
3041*d14abf15SRobert Mustacchi }
3042*d14abf15SRobert Mustacchi 
3043*d14abf15SRobert Mustacchi /**
3044*d14abf15SRobert Mustacchi  * ecore_mcast_clear_first_bin - find the first set bin and clear it
3045*d14abf15SRobert Mustacchi  *
3046*d14abf15SRobert Mustacchi  * @o:
3047*d14abf15SRobert Mustacchi  *
3048*d14abf15SRobert Mustacchi  * returns the index of the found bin or -1 if none is found
3049*d14abf15SRobert Mustacchi  */
3050*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
3051*d14abf15SRobert Mustacchi {
3052*d14abf15SRobert Mustacchi 	int cur_bit = ecore_mcast_get_next_bin(o, 0);
3053*d14abf15SRobert Mustacchi 
3054*d14abf15SRobert Mustacchi 	if (cur_bit >= 0)
3055*d14abf15SRobert Mustacchi 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
3056*d14abf15SRobert Mustacchi 
3057*d14abf15SRobert Mustacchi 	return cur_bit;
3058*d14abf15SRobert Mustacchi }
3059*d14abf15SRobert Mustacchi 
3060*d14abf15SRobert Mustacchi static INLINE u8 ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
3061*d14abf15SRobert Mustacchi {
3062*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
3063*d14abf15SRobert Mustacchi 	u8 rx_tx_flag = 0;
3064*d14abf15SRobert Mustacchi 
3065*d14abf15SRobert Mustacchi 	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
3066*d14abf15SRobert Mustacchi 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
3067*d14abf15SRobert Mustacchi 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
3068*d14abf15SRobert Mustacchi 
3069*d14abf15SRobert Mustacchi 	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
3070*d14abf15SRobert Mustacchi 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
3071*d14abf15SRobert Mustacchi 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
3072*d14abf15SRobert Mustacchi 
3073*d14abf15SRobert Mustacchi 	return rx_tx_flag;
3074*d14abf15SRobert Mustacchi }
3075*d14abf15SRobert Mustacchi 
3076*d14abf15SRobert Mustacchi static void ecore_mcast_set_one_rule_e2(struct _lm_device_t *pdev,
3077*d14abf15SRobert Mustacchi 					struct ecore_mcast_obj *o, int idx,
3078*d14abf15SRobert Mustacchi 					union ecore_mcast_config_data *cfg_data,
3079*d14abf15SRobert Mustacchi 					enum ecore_mcast_cmd cmd)
3080*d14abf15SRobert Mustacchi {
3081*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
3082*d14abf15SRobert Mustacchi 	struct eth_multicast_rules_ramrod_data *data =
3083*d14abf15SRobert Mustacchi 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3084*d14abf15SRobert Mustacchi 	u8 func_id = r->func_id;
3085*d14abf15SRobert Mustacchi 	u8 rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
3086*d14abf15SRobert Mustacchi 	int bin;
3087*d14abf15SRobert Mustacchi 
3088*d14abf15SRobert Mustacchi 	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
3089*d14abf15SRobert Mustacchi 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
3090*d14abf15SRobert Mustacchi 
3091*d14abf15SRobert Mustacchi 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
3092*d14abf15SRobert Mustacchi 
3093*d14abf15SRobert Mustacchi 	/* Get a bin and update a bins' vector */
3094*d14abf15SRobert Mustacchi 	switch (cmd) {
3095*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_ADD:
3096*d14abf15SRobert Mustacchi 		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
3097*d14abf15SRobert Mustacchi 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
3098*d14abf15SRobert Mustacchi 		break;
3099*d14abf15SRobert Mustacchi 
3100*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_DEL:
3101*d14abf15SRobert Mustacchi 		/* If there were no more bins to clear
3102*d14abf15SRobert Mustacchi 		 * (ecore_mcast_clear_first_bin() returns -1) then we would
3103*d14abf15SRobert Mustacchi 		 * clear any (0xff) bin.
3104*d14abf15SRobert Mustacchi 		 * See ecore_mcast_validate_e2() for explanation when it may
3105*d14abf15SRobert Mustacchi 		 * happen.
3106*d14abf15SRobert Mustacchi 		 */
3107*d14abf15SRobert Mustacchi 		bin = ecore_mcast_clear_first_bin(o);
3108*d14abf15SRobert Mustacchi 		break;
3109*d14abf15SRobert Mustacchi 
3110*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_RESTORE:
3111*d14abf15SRobert Mustacchi 		bin = cfg_data->bin;
3112*d14abf15SRobert Mustacchi 		break;
3113*d14abf15SRobert Mustacchi 
3114*d14abf15SRobert Mustacchi 	default:
3115*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", cmd);
3116*d14abf15SRobert Mustacchi 		return;
3117*d14abf15SRobert Mustacchi 	}
3118*d14abf15SRobert Mustacchi 
3119*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "%s bin %d\n",
3120*d14abf15SRobert Mustacchi 		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
3121*d14abf15SRobert Mustacchi 		   "Setting"  : "Clearing"), bin);
3122*d14abf15SRobert Mustacchi 
3123*d14abf15SRobert Mustacchi 	data->rules[idx].bin_id    = (u8)bin;
3124*d14abf15SRobert Mustacchi 	data->rules[idx].func_id   = func_id;
3125*d14abf15SRobert Mustacchi 	data->rules[idx].engine_id = o->engine_id;
3126*d14abf15SRobert Mustacchi }
3127*d14abf15SRobert Mustacchi 
3128*d14abf15SRobert Mustacchi /**
3129*d14abf15SRobert Mustacchi  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
3130*d14abf15SRobert Mustacchi  *
3131*d14abf15SRobert Mustacchi  * @pdev:	device handle
3132*d14abf15SRobert Mustacchi  * @o:
3133*d14abf15SRobert Mustacchi  * @start_bin:	index in the registry to start from (including)
3134*d14abf15SRobert Mustacchi  * @rdata_idx:	index in the ramrod data to start from
3135*d14abf15SRobert Mustacchi  *
3136*d14abf15SRobert Mustacchi  * returns last handled bin index or -1 if all bins have been handled
3137*d14abf15SRobert Mustacchi  */
3138*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_handle_restore_cmd_e2(
3139*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev, struct ecore_mcast_obj *o , int start_bin,
3140*d14abf15SRobert Mustacchi 	int *rdata_idx)
3141*d14abf15SRobert Mustacchi {
3142*d14abf15SRobert Mustacchi 	int cur_bin, cnt = *rdata_idx;
3143*d14abf15SRobert Mustacchi 	union ecore_mcast_config_data cfg_data = {NULL};
3144*d14abf15SRobert Mustacchi 
3145*d14abf15SRobert Mustacchi 	/* go through the registry and configure the bins from it */
3146*d14abf15SRobert Mustacchi 	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
3147*d14abf15SRobert Mustacchi 	    cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
3148*d14abf15SRobert Mustacchi 
3149*d14abf15SRobert Mustacchi 		cfg_data.bin = (u8)cur_bin;
3150*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, cnt, &cfg_data,
3151*d14abf15SRobert Mustacchi 				ECORE_MCAST_CMD_RESTORE);
3152*d14abf15SRobert Mustacchi 
3153*d14abf15SRobert Mustacchi 		cnt++;
3154*d14abf15SRobert Mustacchi 
3155*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to configure a bin %d\n", cur_bin);
3156*d14abf15SRobert Mustacchi 
3157*d14abf15SRobert Mustacchi 		/* Break if we reached the maximum number
3158*d14abf15SRobert Mustacchi 		 * of rules.
3159*d14abf15SRobert Mustacchi 		 */
3160*d14abf15SRobert Mustacchi 		if (cnt >= o->max_cmd_len)
3161*d14abf15SRobert Mustacchi 			break;
3162*d14abf15SRobert Mustacchi 	}
3163*d14abf15SRobert Mustacchi 
3164*d14abf15SRobert Mustacchi 	*rdata_idx = cnt;
3165*d14abf15SRobert Mustacchi 
3166*d14abf15SRobert Mustacchi 	return cur_bin;
3167*d14abf15SRobert Mustacchi }
3168*d14abf15SRobert Mustacchi 
3169*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_pending_add_e2(struct _lm_device_t *pdev,
3170*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3171*d14abf15SRobert Mustacchi 	int *line_idx)
3172*d14abf15SRobert Mustacchi {
3173*d14abf15SRobert Mustacchi 	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
3174*d14abf15SRobert Mustacchi 	int cnt = *line_idx;
3175*d14abf15SRobert Mustacchi 	union ecore_mcast_config_data cfg_data = {NULL};
3176*d14abf15SRobert Mustacchi 
3177*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
3178*d14abf15SRobert Mustacchi 		&cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
3179*d14abf15SRobert Mustacchi 
3180*d14abf15SRobert Mustacchi 		cfg_data.mac = &pmac_pos->mac[0];
3181*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, cnt, &cfg_data, cmd_pos->type);
3182*d14abf15SRobert Mustacchi 
3183*d14abf15SRobert Mustacchi 		cnt++;
3184*d14abf15SRobert Mustacchi 
3185*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3186*d14abf15SRobert Mustacchi 			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3187*d14abf15SRobert Mustacchi 
3188*d14abf15SRobert Mustacchi 		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
3189*d14abf15SRobert Mustacchi 					&cmd_pos->data.macs_head);
3190*d14abf15SRobert Mustacchi 
3191*d14abf15SRobert Mustacchi 		/* Break if we reached the maximum number
3192*d14abf15SRobert Mustacchi 		 * of rules.
3193*d14abf15SRobert Mustacchi 		 */
3194*d14abf15SRobert Mustacchi 		if (cnt >= o->max_cmd_len)
3195*d14abf15SRobert Mustacchi 			break;
3196*d14abf15SRobert Mustacchi 	}
3197*d14abf15SRobert Mustacchi 
3198*d14abf15SRobert Mustacchi 	*line_idx = cnt;
3199*d14abf15SRobert Mustacchi 
3200*d14abf15SRobert Mustacchi 	/* if no more MACs to configure - we are done */
3201*d14abf15SRobert Mustacchi 	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
3202*d14abf15SRobert Mustacchi 		cmd_pos->done = TRUE;
3203*d14abf15SRobert Mustacchi }
3204*d14abf15SRobert Mustacchi 
3205*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_pending_del_e2(struct _lm_device_t *pdev,
3206*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3207*d14abf15SRobert Mustacchi 	int *line_idx)
3208*d14abf15SRobert Mustacchi {
3209*d14abf15SRobert Mustacchi 	int cnt = *line_idx;
3210*d14abf15SRobert Mustacchi 
3211*d14abf15SRobert Mustacchi 	while (cmd_pos->data.macs_num) {
3212*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, cnt, NULL, cmd_pos->type);
3213*d14abf15SRobert Mustacchi 
3214*d14abf15SRobert Mustacchi 		cnt++;
3215*d14abf15SRobert Mustacchi 
3216*d14abf15SRobert Mustacchi 		cmd_pos->data.macs_num--;
3217*d14abf15SRobert Mustacchi 
3218*d14abf15SRobert Mustacchi 		  ECORE_MSG(pdev, "Deleting MAC. %d left,cnt is %d\n",
3219*d14abf15SRobert Mustacchi 				  cmd_pos->data.macs_num, cnt);
3220*d14abf15SRobert Mustacchi 
3221*d14abf15SRobert Mustacchi 		/* Break if we reached the maximum
3222*d14abf15SRobert Mustacchi 		 * number of rules.
3223*d14abf15SRobert Mustacchi 		 */
3224*d14abf15SRobert Mustacchi 		if (cnt >= o->max_cmd_len)
3225*d14abf15SRobert Mustacchi 			break;
3226*d14abf15SRobert Mustacchi 	}
3227*d14abf15SRobert Mustacchi 
3228*d14abf15SRobert Mustacchi 	*line_idx = cnt;
3229*d14abf15SRobert Mustacchi 
3230*d14abf15SRobert Mustacchi 	/* If we cleared all bins - we are done */
3231*d14abf15SRobert Mustacchi 	if (!cmd_pos->data.macs_num)
3232*d14abf15SRobert Mustacchi 		cmd_pos->done = TRUE;
3233*d14abf15SRobert Mustacchi }
3234*d14abf15SRobert Mustacchi 
3235*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_pending_restore_e2(struct _lm_device_t *pdev,
3236*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3237*d14abf15SRobert Mustacchi 	int *line_idx)
3238*d14abf15SRobert Mustacchi {
3239*d14abf15SRobert Mustacchi 	cmd_pos->data.next_bin = o->hdl_restore(pdev, o, cmd_pos->data.next_bin,
3240*d14abf15SRobert Mustacchi 						line_idx);
3241*d14abf15SRobert Mustacchi 
3242*d14abf15SRobert Mustacchi 	if (cmd_pos->data.next_bin < 0)
3243*d14abf15SRobert Mustacchi 		/* If o->set_restore returned -1 we are done */
3244*d14abf15SRobert Mustacchi 		cmd_pos->done = TRUE;
3245*d14abf15SRobert Mustacchi 	else
3246*d14abf15SRobert Mustacchi 		/* Start from the next bin next time */
3247*d14abf15SRobert Mustacchi 		cmd_pos->data.next_bin++;
3248*d14abf15SRobert Mustacchi }
3249*d14abf15SRobert Mustacchi 
3250*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_handle_pending_cmds_e2(struct _lm_device_t *pdev,
3251*d14abf15SRobert Mustacchi 				struct ecore_mcast_ramrod_params *p)
3252*d14abf15SRobert Mustacchi {
3253*d14abf15SRobert Mustacchi 	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3254*d14abf15SRobert Mustacchi 	int cnt = 0;
3255*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3256*d14abf15SRobert Mustacchi 
3257*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3258*d14abf15SRobert Mustacchi 		&o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3259*d14abf15SRobert Mustacchi 		switch (cmd_pos->type) {
3260*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_ADD:
3261*d14abf15SRobert Mustacchi 			ecore_mcast_hdl_pending_add_e2(pdev, o, cmd_pos, &cnt);
3262*d14abf15SRobert Mustacchi 			break;
3263*d14abf15SRobert Mustacchi 
3264*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_DEL:
3265*d14abf15SRobert Mustacchi 			ecore_mcast_hdl_pending_del_e2(pdev, o, cmd_pos, &cnt);
3266*d14abf15SRobert Mustacchi 			break;
3267*d14abf15SRobert Mustacchi 
3268*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_RESTORE:
3269*d14abf15SRobert Mustacchi 			ecore_mcast_hdl_pending_restore_e2(pdev, o, cmd_pos,
3270*d14abf15SRobert Mustacchi 							   &cnt);
3271*d14abf15SRobert Mustacchi 			break;
3272*d14abf15SRobert Mustacchi 
3273*d14abf15SRobert Mustacchi 		default:
3274*d14abf15SRobert Mustacchi 			ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3275*d14abf15SRobert Mustacchi 			return ECORE_INVAL;
3276*d14abf15SRobert Mustacchi 		}
3277*d14abf15SRobert Mustacchi 
3278*d14abf15SRobert Mustacchi 		/* If the command has been completed - remove it from the list
3279*d14abf15SRobert Mustacchi 		 * and free the memory
3280*d14abf15SRobert Mustacchi 		 */
3281*d14abf15SRobert Mustacchi 		if (cmd_pos->done) {
3282*d14abf15SRobert Mustacchi 			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3283*d14abf15SRobert Mustacchi 						&o->pending_cmds_head);
3284*d14abf15SRobert Mustacchi 			ECORE_FREE(pdev, cmd_pos, cmd_pos->alloc_len);
3285*d14abf15SRobert Mustacchi 		}
3286*d14abf15SRobert Mustacchi 
3287*d14abf15SRobert Mustacchi 		/* Break if we reached the maximum number of rules */
3288*d14abf15SRobert Mustacchi 		if (cnt >= o->max_cmd_len)
3289*d14abf15SRobert Mustacchi 			break;
3290*d14abf15SRobert Mustacchi 	}
3291*d14abf15SRobert Mustacchi 
3292*d14abf15SRobert Mustacchi 	return cnt;
3293*d14abf15SRobert Mustacchi }
3294*d14abf15SRobert Mustacchi 
3295*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_add(struct _lm_device_t *pdev,
3296*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3297*d14abf15SRobert Mustacchi 	int *line_idx)
3298*d14abf15SRobert Mustacchi {
3299*d14abf15SRobert Mustacchi 	struct ecore_mcast_list_elem *mlist_pos;
3300*d14abf15SRobert Mustacchi 	union ecore_mcast_config_data cfg_data = {NULL};
3301*d14abf15SRobert Mustacchi 	int cnt = *line_idx;
3302*d14abf15SRobert Mustacchi 
3303*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3304*d14abf15SRobert Mustacchi 				  struct ecore_mcast_list_elem) {
3305*d14abf15SRobert Mustacchi 		cfg_data.mac = mlist_pos->mac;
3306*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3307*d14abf15SRobert Mustacchi 
3308*d14abf15SRobert Mustacchi 		cnt++;
3309*d14abf15SRobert Mustacchi 
3310*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3311*d14abf15SRobert Mustacchi 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3312*d14abf15SRobert Mustacchi 	}
3313*d14abf15SRobert Mustacchi 
3314*d14abf15SRobert Mustacchi 	*line_idx = cnt;
3315*d14abf15SRobert Mustacchi }
3316*d14abf15SRobert Mustacchi 
3317*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_del(struct _lm_device_t *pdev,
3318*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3319*d14abf15SRobert Mustacchi 	int *line_idx)
3320*d14abf15SRobert Mustacchi {
3321*d14abf15SRobert Mustacchi 	int cnt = *line_idx, i;
3322*d14abf15SRobert Mustacchi 
3323*d14abf15SRobert Mustacchi 	for (i = 0; i < p->mcast_list_len; i++) {
3324*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3325*d14abf15SRobert Mustacchi 
3326*d14abf15SRobert Mustacchi 		cnt++;
3327*d14abf15SRobert Mustacchi 
3328*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Deleting MAC. %d left\n",
3329*d14abf15SRobert Mustacchi 			  p->mcast_list_len - i - 1);
3330*d14abf15SRobert Mustacchi 	}
3331*d14abf15SRobert Mustacchi 
3332*d14abf15SRobert Mustacchi 	*line_idx = cnt;
3333*d14abf15SRobert Mustacchi }
3334*d14abf15SRobert Mustacchi 
/**
 * ecore_mcast_handle_current_cmd -
 *
 * @pdev:	device handle
 * @p:		ramrod parameters; p->mcast_list_len is zeroed once the
 *		command has been handled
 * @cmd:	command to handle (ADD/DEL/RESTORE)
 * @start_cnt:	first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
3347*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_handle_current_cmd(struct _lm_device_t *pdev,
3348*d14abf15SRobert Mustacchi 			struct ecore_mcast_ramrod_params *p,
3349*d14abf15SRobert Mustacchi 			enum ecore_mcast_cmd cmd,
3350*d14abf15SRobert Mustacchi 			int start_cnt)
3351*d14abf15SRobert Mustacchi {
3352*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3353*d14abf15SRobert Mustacchi 	int cnt = start_cnt;
3354*d14abf15SRobert Mustacchi 
3355*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "p->mcast_list_len=%d\n", p->mcast_list_len);
3356*d14abf15SRobert Mustacchi 
3357*d14abf15SRobert Mustacchi 	switch (cmd) {
3358*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_ADD:
3359*d14abf15SRobert Mustacchi 		ecore_mcast_hdl_add(pdev, o, p, &cnt);
3360*d14abf15SRobert Mustacchi 		break;
3361*d14abf15SRobert Mustacchi 
3362*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_DEL:
3363*d14abf15SRobert Mustacchi 		ecore_mcast_hdl_del(pdev, o, p, &cnt);
3364*d14abf15SRobert Mustacchi 		break;
3365*d14abf15SRobert Mustacchi 
3366*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_RESTORE:
3367*d14abf15SRobert Mustacchi 		o->hdl_restore(pdev, o, 0, &cnt);
3368*d14abf15SRobert Mustacchi 		break;
3369*d14abf15SRobert Mustacchi 
3370*d14abf15SRobert Mustacchi 	default:
3371*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", cmd);
3372*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
3373*d14abf15SRobert Mustacchi 	}
3374*d14abf15SRobert Mustacchi 
3375*d14abf15SRobert Mustacchi 	/* The current command has been handled */
3376*d14abf15SRobert Mustacchi 	p->mcast_list_len = 0;
3377*d14abf15SRobert Mustacchi 
3378*d14abf15SRobert Mustacchi 	return cnt;
3379*d14abf15SRobert Mustacchi }
3380*d14abf15SRobert Mustacchi 
/**
 * ecore_mcast_validate_e2 - validate a command and account for its size
 *
 * @pdev:	device handle
 * @p:		ramrod parameters; p->mcast_list_len may be rewritten here
 * @cmd:	command to validate
 *
 * Estimates how many ramrod lines the command will need, updates the
 * registry-size estimate accordingly and adds the estimate to
 * o->total_pending_num.  Returns ECORE_SUCCESS, or ECORE_INVAL for an
 * unknown command.
 */
static int ecore_mcast_validate_e2(struct _lm_device_t *pdev,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case ECORE_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through: DEL is sized like RESTORE */

	/* RESTORE command will restore the entire multicast configuration */
	case ECORE_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * ecore_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case ECORE_MCAST_CMD_ADD:
	case ECORE_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return ECORE_SUCCESS;
}
3425*d14abf15SRobert Mustacchi 
3426*d14abf15SRobert Mustacchi static void ecore_mcast_revert_e2(struct _lm_device_t *pdev,
3427*d14abf15SRobert Mustacchi 				      struct ecore_mcast_ramrod_params *p,
3428*d14abf15SRobert Mustacchi 				      int old_num_bins)
3429*d14abf15SRobert Mustacchi {
3430*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3431*d14abf15SRobert Mustacchi 
3432*d14abf15SRobert Mustacchi 	o->set_registry_size(o, old_num_bins);
3433*d14abf15SRobert Mustacchi 	o->total_pending_num -= p->mcast_list_len;
3434*d14abf15SRobert Mustacchi }
3435*d14abf15SRobert Mustacchi 
3436*d14abf15SRobert Mustacchi /**
3437*d14abf15SRobert Mustacchi  * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3438*d14abf15SRobert Mustacchi  *
3439*d14abf15SRobert Mustacchi  * @pdev:	device handle
3440*d14abf15SRobert Mustacchi  * @p:
3441*d14abf15SRobert Mustacchi  * @len:	number of rules to handle
3442*d14abf15SRobert Mustacchi  */
3443*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_set_rdata_hdr_e2(struct _lm_device_t *pdev,
3444*d14abf15SRobert Mustacchi 					struct ecore_mcast_ramrod_params *p,
3445*d14abf15SRobert Mustacchi 					u8 len)
3446*d14abf15SRobert Mustacchi {
3447*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3448*d14abf15SRobert Mustacchi 	struct eth_multicast_rules_ramrod_data *data =
3449*d14abf15SRobert Mustacchi 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3450*d14abf15SRobert Mustacchi 
3451*d14abf15SRobert Mustacchi 	data->header.echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) |
3452*d14abf15SRobert Mustacchi 					(ECORE_FILTER_MCAST_PENDING <<
3453*d14abf15SRobert Mustacchi 					 ECORE_SWCID_SHIFT));
3454*d14abf15SRobert Mustacchi 	data->header.rule_cnt = len;
3455*d14abf15SRobert Mustacchi }
3456*d14abf15SRobert Mustacchi 
3457*d14abf15SRobert Mustacchi /**
3458*d14abf15SRobert Mustacchi  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3459*d14abf15SRobert Mustacchi  *
3460*d14abf15SRobert Mustacchi  * @pdev:	device handle
3461*d14abf15SRobert Mustacchi  * @o:
3462*d14abf15SRobert Mustacchi  *
3463*d14abf15SRobert Mustacchi  * Recalculate the actual number of set bins in the registry using Brian
3464*d14abf15SRobert Mustacchi  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3465*d14abf15SRobert Mustacchi  *
3466*d14abf15SRobert Mustacchi  * returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
3467*d14abf15SRobert Mustacchi  */
3468*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_refresh_registry_e2(struct _lm_device_t *pdev,
3469*d14abf15SRobert Mustacchi 						  struct ecore_mcast_obj *o)
3470*d14abf15SRobert Mustacchi {
3471*d14abf15SRobert Mustacchi 	int i, cnt = 0;
3472*d14abf15SRobert Mustacchi 	u64 elem;
3473*d14abf15SRobert Mustacchi 
3474*d14abf15SRobert Mustacchi 	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3475*d14abf15SRobert Mustacchi 		elem = o->registry.aprox_match.vec[i];
3476*d14abf15SRobert Mustacchi 		for (; elem; cnt++)
3477*d14abf15SRobert Mustacchi 			elem &= elem - 1;
3478*d14abf15SRobert Mustacchi 	}
3479*d14abf15SRobert Mustacchi 
3480*d14abf15SRobert Mustacchi 	o->set_registry_size(o, cnt);
3481*d14abf15SRobert Mustacchi 
3482*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
3483*d14abf15SRobert Mustacchi }
3484*d14abf15SRobert Mustacchi 
/**
 * ecore_mcast_setup_e2 - fill the multicast rules ramrod data and post it
 *
 * @pdev:	device handle
 * @p:		ramrod parameters
 * @cmd:	the current command (handled only if room remains after the
 *		pending commands have been written)
 *
 * Returns ECORE_PENDING when a ramrod was posted, ECORE_SUCCESS when only
 * the driver state was updated (RAMROD_DRV_CLR_ONLY), or an error code
 * propagated from ecore_sp_post().
 */
static int ecore_mcast_setup_e2(struct _lm_device_t *pdev,
				struct ecore_mcast_ramrod_params *p,
				enum ecore_mcast_cmd cmd)
{
	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	mm_memset(data, 0, sizeof(*data));

	/* Previously postponed commands are written into the buffer first */
	cnt = ecore_mcast_handle_pending_cmds_e2(pdev, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be TRUE iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = ecore_mcast_handle_current_cmd(pdev, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	DbgBreakIf(o->total_pending_num < 0);
	DbgBreakIf(cnt > o->max_cmd_len);

	ecore_mcast_set_rdata_hdr_e2(pdev, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		ecore_mcast_refresh_registry_e2(pdev, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return ECORE_SUCCESS;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside ecore_sp_post()).
		 */

		/* Send a ramrod */
		rc = ecore_sp_post( pdev,
				    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				    raw->cid,
				    raw->rdata_mapping.as_u64,
				    ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return ECORE_PENDING;
	}
}
3569*d14abf15SRobert Mustacchi 
3570*d14abf15SRobert Mustacchi static int ecore_mcast_validate_e1h(struct _lm_device_t *pdev,
3571*d14abf15SRobert Mustacchi 				    struct ecore_mcast_ramrod_params *p,
3572*d14abf15SRobert Mustacchi 				    enum ecore_mcast_cmd cmd)
3573*d14abf15SRobert Mustacchi {
3574*d14abf15SRobert Mustacchi 	/* Mark, that there is a work to do */
3575*d14abf15SRobert Mustacchi 	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3576*d14abf15SRobert Mustacchi 		p->mcast_list_len = 1;
3577*d14abf15SRobert Mustacchi 
3578*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
3579*d14abf15SRobert Mustacchi }
3580*d14abf15SRobert Mustacchi 
/* Nothing to revert for 57711: ecore_mcast_validate_e1h() changes no
 * object state (it only marks p->mcast_list_len).
 */
static void ecore_mcast_revert_e1h(struct _lm_device_t *pdev,
				       struct ecore_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3587*d14abf15SRobert Mustacchi 
/* Set bit 'bit' in the 57711 approximate-match filter, which is laid out
 * as an array of 32-bit words (word index = bit >> 5, bit-in-word =
 * bit & 0x1f).
 */
#define ECORE_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3592*d14abf15SRobert Mustacchi 
3593*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_add_e1h(struct _lm_device_t *pdev,
3594*d14abf15SRobert Mustacchi 					   struct ecore_mcast_obj *o,
3595*d14abf15SRobert Mustacchi 					   struct ecore_mcast_ramrod_params *p,
3596*d14abf15SRobert Mustacchi 					   u32 *mc_filter)
3597*d14abf15SRobert Mustacchi {
3598*d14abf15SRobert Mustacchi 	struct ecore_mcast_list_elem *mlist_pos;
3599*d14abf15SRobert Mustacchi 	int bit;
3600*d14abf15SRobert Mustacchi 
3601*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3602*d14abf15SRobert Mustacchi 				  struct ecore_mcast_list_elem) {
3603*d14abf15SRobert Mustacchi 		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3604*d14abf15SRobert Mustacchi 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3605*d14abf15SRobert Mustacchi 
3606*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3607*d14abf15SRobert Mustacchi 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3608*d14abf15SRobert Mustacchi 
3609*d14abf15SRobert Mustacchi 		/* bookkeeping... */
3610*d14abf15SRobert Mustacchi 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3611*d14abf15SRobert Mustacchi 				  bit);
3612*d14abf15SRobert Mustacchi 	}
3613*d14abf15SRobert Mustacchi }
3614*d14abf15SRobert Mustacchi 
3615*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_hdl_restore_e1h(struct _lm_device_t *pdev,
3616*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3617*d14abf15SRobert Mustacchi 	u32 *mc_filter)
3618*d14abf15SRobert Mustacchi {
3619*d14abf15SRobert Mustacchi 	int bit;
3620*d14abf15SRobert Mustacchi 
3621*d14abf15SRobert Mustacchi 	for (bit = ecore_mcast_get_next_bin(o, 0);
3622*d14abf15SRobert Mustacchi 	     bit >= 0;
3623*d14abf15SRobert Mustacchi 	     bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3624*d14abf15SRobert Mustacchi 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3625*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to set bin %d\n", bit);
3626*d14abf15SRobert Mustacchi 	}
3627*d14abf15SRobert Mustacchi }
3628*d14abf15SRobert Mustacchi 
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM, so we don't
 * really need to handle any tricks to make it work.
 */
3633*d14abf15SRobert Mustacchi static int ecore_mcast_setup_e1h(struct _lm_device_t *pdev,
3634*d14abf15SRobert Mustacchi 				 struct ecore_mcast_ramrod_params *p,
3635*d14abf15SRobert Mustacchi 				 enum ecore_mcast_cmd cmd)
3636*d14abf15SRobert Mustacchi {
3637*d14abf15SRobert Mustacchi 	int i;
3638*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3639*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
3640*d14abf15SRobert Mustacchi 
3641*d14abf15SRobert Mustacchi 	/* If CLEAR_ONLY has been requested - clear the registry
3642*d14abf15SRobert Mustacchi 	 * and clear a pending bit.
3643*d14abf15SRobert Mustacchi 	 */
3644*d14abf15SRobert Mustacchi 	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3645*d14abf15SRobert Mustacchi 		u32 mc_filter[MC_HASH_SIZE] = {0};
3646*d14abf15SRobert Mustacchi 
3647*d14abf15SRobert Mustacchi 		/* Set the multicast filter bits before writing it into
3648*d14abf15SRobert Mustacchi 		 * the internal memory.
3649*d14abf15SRobert Mustacchi 		 */
3650*d14abf15SRobert Mustacchi 		switch (cmd) {
3651*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_ADD:
3652*d14abf15SRobert Mustacchi 			ecore_mcast_hdl_add_e1h(pdev, o, p, mc_filter);
3653*d14abf15SRobert Mustacchi 			break;
3654*d14abf15SRobert Mustacchi 
3655*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_DEL:
3656*d14abf15SRobert Mustacchi 			ECORE_MSG(pdev,
3657*d14abf15SRobert Mustacchi 				  "Invalidating multicast MACs configuration\n");
3658*d14abf15SRobert Mustacchi 
3659*d14abf15SRobert Mustacchi 			/* clear the registry */
3660*d14abf15SRobert Mustacchi 			mm_memset(o->registry.aprox_match.vec, 0,
3661*d14abf15SRobert Mustacchi 			       sizeof(o->registry.aprox_match.vec));
3662*d14abf15SRobert Mustacchi 			break;
3663*d14abf15SRobert Mustacchi 
3664*d14abf15SRobert Mustacchi 		case ECORE_MCAST_CMD_RESTORE:
3665*d14abf15SRobert Mustacchi 			ecore_mcast_hdl_restore_e1h(pdev, o, p, mc_filter);
3666*d14abf15SRobert Mustacchi 			break;
3667*d14abf15SRobert Mustacchi 
3668*d14abf15SRobert Mustacchi 		default:
3669*d14abf15SRobert Mustacchi 			ECORE_ERR("Unknown command: %d\n", cmd);
3670*d14abf15SRobert Mustacchi 			return ECORE_INVAL;
3671*d14abf15SRobert Mustacchi 		}
3672*d14abf15SRobert Mustacchi 
3673*d14abf15SRobert Mustacchi 		/* Set the mcast filter in the internal memory */
3674*d14abf15SRobert Mustacchi 		for (i = 0; i < MC_HASH_SIZE; i++)
3675*d14abf15SRobert Mustacchi 			REG_WR(pdev, MC_HASH_OFFSET(pdev, i), mc_filter[i]);
3676*d14abf15SRobert Mustacchi 	} else
3677*d14abf15SRobert Mustacchi 		/* clear the registry */
3678*d14abf15SRobert Mustacchi 		mm_memset(o->registry.aprox_match.vec, 0,
3679*d14abf15SRobert Mustacchi 		       sizeof(o->registry.aprox_match.vec));
3680*d14abf15SRobert Mustacchi 
3681*d14abf15SRobert Mustacchi 	/* We are done */
3682*d14abf15SRobert Mustacchi 	r->clear_pending(r);
3683*d14abf15SRobert Mustacchi 
3684*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
3685*d14abf15SRobert Mustacchi }
3686*d14abf15SRobert Mustacchi 
/**
 * ecore_mcast_validate_e1 - validate a command and account for its size (57710)
 *
 * @pdev:	device handle
 * @p:		ramrod parameters; p->mcast_list_len may be rewritten here
 * @cmd:	command to validate
 *
 * On 57710 each non-empty command reserves a full o->max_cmd_len worth of
 * pending lines so commands execute one by one.  Returns ECORE_SUCCESS,
 * ECORE_INVAL for an unknown command or an over-long ADD list.
 */
static int ecore_mcast_validate_e1(struct _lm_device_t *pdev,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case ECORE_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - fall through: DEL is sized like RESTORE */

	/* RESTORE command will restore the entire multicast configuration */
	case ECORE_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		  ECORE_MSG(pdev, "Command %d, p->mcast_list_len=%d\n",
				  cmd, p->mcast_list_len);
		break;

	case ECORE_MCAST_CMD_ADD:
	case ECORE_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return ECORE_INVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD commands overrides the previous configuration.
		 */
		ECORE_MSG(pdev, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each none-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return ECORE_SUCCESS;
}
3741*d14abf15SRobert Mustacchi 
3742*d14abf15SRobert Mustacchi static void ecore_mcast_revert_e1(struct _lm_device_t *pdev,
3743*d14abf15SRobert Mustacchi 				      struct ecore_mcast_ramrod_params *p,
3744*d14abf15SRobert Mustacchi 				      int old_num_macs)
3745*d14abf15SRobert Mustacchi {
3746*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3747*d14abf15SRobert Mustacchi 
3748*d14abf15SRobert Mustacchi 	o->set_registry_size(o, old_num_macs);
3749*d14abf15SRobert Mustacchi 
3750*d14abf15SRobert Mustacchi 	/* If current command hasn't been handled yet and we are
3751*d14abf15SRobert Mustacchi 	 * here means that it's meant to be dropped and we have to
3752*d14abf15SRobert Mustacchi 	 * update the number of outstanding MACs accordingly.
3753*d14abf15SRobert Mustacchi 	 */
3754*d14abf15SRobert Mustacchi 	if (p->mcast_list_len)
3755*d14abf15SRobert Mustacchi 		o->total_pending_num -= o->max_cmd_len;
3756*d14abf15SRobert Mustacchi }
3757*d14abf15SRobert Mustacchi 
3758*d14abf15SRobert Mustacchi static void ecore_mcast_set_one_rule_e1(struct _lm_device_t *pdev,
3759*d14abf15SRobert Mustacchi 					struct ecore_mcast_obj *o, int idx,
3760*d14abf15SRobert Mustacchi 					union ecore_mcast_config_data *cfg_data,
3761*d14abf15SRobert Mustacchi 					enum ecore_mcast_cmd cmd)
3762*d14abf15SRobert Mustacchi {
3763*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
3764*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *data =
3765*d14abf15SRobert Mustacchi 		(struct mac_configuration_cmd *)(r->rdata);
3766*d14abf15SRobert Mustacchi 
3767*d14abf15SRobert Mustacchi 	/* copy mac */
3768*d14abf15SRobert Mustacchi 	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3769*d14abf15SRobert Mustacchi 		ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3770*d14abf15SRobert Mustacchi 				      &data->config_table[idx].middle_mac_addr,
3771*d14abf15SRobert Mustacchi 				      &data->config_table[idx].lsb_mac_addr,
3772*d14abf15SRobert Mustacchi 				      cfg_data->mac);
3773*d14abf15SRobert Mustacchi 
3774*d14abf15SRobert Mustacchi 		data->config_table[idx].vlan_id = 0;
3775*d14abf15SRobert Mustacchi 		data->config_table[idx].pf_id = r->func_id;
3776*d14abf15SRobert Mustacchi 		data->config_table[idx].clients_bit_vector =
3777*d14abf15SRobert Mustacchi 			mm_cpu_to_le32(1 << r->cl_id);
3778*d14abf15SRobert Mustacchi 
3779*d14abf15SRobert Mustacchi 		ECORE_SET_FLAG(data->config_table[idx].flags,
3780*d14abf15SRobert Mustacchi 			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3781*d14abf15SRobert Mustacchi 			       T_ETH_MAC_COMMAND_SET);
3782*d14abf15SRobert Mustacchi 	}
3783*d14abf15SRobert Mustacchi }
3784*d14abf15SRobert Mustacchi 
3785*d14abf15SRobert Mustacchi /**
3786*d14abf15SRobert Mustacchi  * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3787*d14abf15SRobert Mustacchi  *
3788*d14abf15SRobert Mustacchi  * @pdev:	device handle
3789*d14abf15SRobert Mustacchi  * @p:
3790*d14abf15SRobert Mustacchi  * @len:	number of rules to handle
3791*d14abf15SRobert Mustacchi  */
3792*d14abf15SRobert Mustacchi static INLINE void ecore_mcast_set_rdata_hdr_e1(struct _lm_device_t *pdev,
3793*d14abf15SRobert Mustacchi 					struct ecore_mcast_ramrod_params *p,
3794*d14abf15SRobert Mustacchi 					u8 len)
3795*d14abf15SRobert Mustacchi {
3796*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3797*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *data =
3798*d14abf15SRobert Mustacchi 		(struct mac_configuration_cmd *)(r->rdata);
3799*d14abf15SRobert Mustacchi 
3800*d14abf15SRobert Mustacchi 	u8 offset = (CHIP_REV_IS_SLOW(pdev) ?
3801*d14abf15SRobert Mustacchi 		     ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3802*d14abf15SRobert Mustacchi 		     ECORE_MAX_MULTICAST*(1 + r->func_id));
3803*d14abf15SRobert Mustacchi 
3804*d14abf15SRobert Mustacchi 	data->hdr.offset = offset;
3805*d14abf15SRobert Mustacchi 	data->hdr.client_id = mm_cpu_to_le16(0xff);
3806*d14abf15SRobert Mustacchi 	data->hdr.echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) |
3807*d14abf15SRobert Mustacchi 				     (ECORE_FILTER_MCAST_PENDING <<
3808*d14abf15SRobert Mustacchi 				      ECORE_SWCID_SHIFT));
3809*d14abf15SRobert Mustacchi 	data->hdr.length = len;
3810*d14abf15SRobert Mustacchi }
3811*d14abf15SRobert Mustacchi 
3812*d14abf15SRobert Mustacchi /**
3813*d14abf15SRobert Mustacchi  * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3814*d14abf15SRobert Mustacchi  *
3815*d14abf15SRobert Mustacchi  * @pdev:	device handle
3816*d14abf15SRobert Mustacchi  * @o:
3817*d14abf15SRobert Mustacchi  * @start_idx:	index in the registry to start from
3818*d14abf15SRobert Mustacchi  * @rdata_idx:	index in the ramrod data to start from
3819*d14abf15SRobert Mustacchi  *
3820*d14abf15SRobert Mustacchi  * restore command for 57710 is like all other commands - always a stand alone
3821*d14abf15SRobert Mustacchi  * command - start_idx and rdata_idx will always be 0. This function will always
3822*d14abf15SRobert Mustacchi  * succeed.
3823*d14abf15SRobert Mustacchi  * returns -1 to comply with 57712 variant.
3824*d14abf15SRobert Mustacchi  */
3825*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_handle_restore_cmd_e1(
3826*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev, struct ecore_mcast_obj *o , int start_idx,
3827*d14abf15SRobert Mustacchi 	int *rdata_idx)
3828*d14abf15SRobert Mustacchi {
3829*d14abf15SRobert Mustacchi 	struct ecore_mcast_mac_elem *elem;
3830*d14abf15SRobert Mustacchi 	int i = 0;
3831*d14abf15SRobert Mustacchi 	union ecore_mcast_config_data cfg_data = {NULL};
3832*d14abf15SRobert Mustacchi 
3833*d14abf15SRobert Mustacchi 	/* go through the registry and configure the MACs from it. */
3834*d14abf15SRobert Mustacchi 	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3835*d14abf15SRobert Mustacchi 				  struct ecore_mcast_mac_elem) {
3836*d14abf15SRobert Mustacchi 		cfg_data.mac = &elem->mac[0];
3837*d14abf15SRobert Mustacchi 		o->set_one_rule(pdev, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3838*d14abf15SRobert Mustacchi 
3839*d14abf15SRobert Mustacchi 		i++;
3840*d14abf15SRobert Mustacchi 
3841*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3842*d14abf15SRobert Mustacchi 			  cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3843*d14abf15SRobert Mustacchi 	}
3844*d14abf15SRobert Mustacchi 
3845*d14abf15SRobert Mustacchi 	*rdata_idx = i;
3846*d14abf15SRobert Mustacchi 
3847*d14abf15SRobert Mustacchi 	return -1;
3848*d14abf15SRobert Mustacchi }
3849*d14abf15SRobert Mustacchi 
3850*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_handle_pending_cmds_e1(
3851*d14abf15SRobert Mustacchi 	struct _lm_device_t *pdev, struct ecore_mcast_ramrod_params *p)
3852*d14abf15SRobert Mustacchi {
3853*d14abf15SRobert Mustacchi 	struct ecore_pending_mcast_cmd *cmd_pos;
3854*d14abf15SRobert Mustacchi 	struct ecore_mcast_mac_elem *pmac_pos;
3855*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3856*d14abf15SRobert Mustacchi 	union ecore_mcast_config_data cfg_data = {NULL};
3857*d14abf15SRobert Mustacchi 	int cnt = 0;
3858*d14abf15SRobert Mustacchi 
3859*d14abf15SRobert Mustacchi 	/* If nothing to be done - return */
3860*d14abf15SRobert Mustacchi 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3861*d14abf15SRobert Mustacchi 		return 0;
3862*d14abf15SRobert Mustacchi 
3863*d14abf15SRobert Mustacchi 	/* Handle the first command */
3864*d14abf15SRobert Mustacchi 	cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3865*d14abf15SRobert Mustacchi 					 struct ecore_pending_mcast_cmd, link);
3866*d14abf15SRobert Mustacchi 
3867*d14abf15SRobert Mustacchi 	switch (cmd_pos->type) {
3868*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_ADD:
3869*d14abf15SRobert Mustacchi 		ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3870*d14abf15SRobert Mustacchi 					  link, struct ecore_mcast_mac_elem) {
3871*d14abf15SRobert Mustacchi 			cfg_data.mac = &pmac_pos->mac[0];
3872*d14abf15SRobert Mustacchi 			o->set_one_rule(pdev, o, cnt, &cfg_data, cmd_pos->type);
3873*d14abf15SRobert Mustacchi 
3874*d14abf15SRobert Mustacchi 			cnt++;
3875*d14abf15SRobert Mustacchi 
3876*d14abf15SRobert Mustacchi 			ECORE_MSG(pdev, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3877*d14abf15SRobert Mustacchi 				  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3878*d14abf15SRobert Mustacchi 		}
3879*d14abf15SRobert Mustacchi 		break;
3880*d14abf15SRobert Mustacchi 
3881*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_DEL:
3882*d14abf15SRobert Mustacchi 		cnt = cmd_pos->data.macs_num;
3883*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "About to delete %d multicast MACs\n", cnt);
3884*d14abf15SRobert Mustacchi 		break;
3885*d14abf15SRobert Mustacchi 
3886*d14abf15SRobert Mustacchi 	case ECORE_MCAST_CMD_RESTORE:
3887*d14abf15SRobert Mustacchi 		o->hdl_restore(pdev, o, 0, &cnt);
3888*d14abf15SRobert Mustacchi 		break;
3889*d14abf15SRobert Mustacchi 
3890*d14abf15SRobert Mustacchi 	default:
3891*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3892*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
3893*d14abf15SRobert Mustacchi 	}
3894*d14abf15SRobert Mustacchi 
3895*d14abf15SRobert Mustacchi 	ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3896*d14abf15SRobert Mustacchi 	ECORE_FREE(pdev, cmd_pos, cmd_pos->alloc_len);
3897*d14abf15SRobert Mustacchi 
3898*d14abf15SRobert Mustacchi 	return cnt;
3899*d14abf15SRobert Mustacchi }
3900*d14abf15SRobert Mustacchi 
3901*d14abf15SRobert Mustacchi /**
3902*d14abf15SRobert Mustacchi  * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3903*d14abf15SRobert Mustacchi  *
3904*d14abf15SRobert Mustacchi  * @fw_hi:
3905*d14abf15SRobert Mustacchi  * @fw_mid:
3906*d14abf15SRobert Mustacchi  * @fw_lo:
3907*d14abf15SRobert Mustacchi  * @mac:
3908*d14abf15SRobert Mustacchi  */
3909*d14abf15SRobert Mustacchi static INLINE void ecore_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3910*d14abf15SRobert Mustacchi 					 __le16 *fw_lo, u8 *mac)
3911*d14abf15SRobert Mustacchi {
3912*d14abf15SRobert Mustacchi 	mac[1] = ((u8 *)fw_hi)[0];
3913*d14abf15SRobert Mustacchi 	mac[0] = ((u8 *)fw_hi)[1];
3914*d14abf15SRobert Mustacchi 	mac[3] = ((u8 *)fw_mid)[0];
3915*d14abf15SRobert Mustacchi 	mac[2] = ((u8 *)fw_mid)[1];
3916*d14abf15SRobert Mustacchi 	mac[5] = ((u8 *)fw_lo)[0];
3917*d14abf15SRobert Mustacchi 	mac[4] = ((u8 *)fw_lo)[1];
3918*d14abf15SRobert Mustacchi }
3919*d14abf15SRobert Mustacchi 
3920*d14abf15SRobert Mustacchi /**
3921*d14abf15SRobert Mustacchi  * ecore_mcast_refresh_registry_e1 -
3922*d14abf15SRobert Mustacchi  *
3923*d14abf15SRobert Mustacchi  * @pdev:	device handle
3924*d14abf15SRobert Mustacchi  * @cnt:
3925*d14abf15SRobert Mustacchi  *
3926*d14abf15SRobert Mustacchi  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3927*d14abf15SRobert Mustacchi  * and update the registry correspondingly: if ADD - allocate a memory and add
3928*d14abf15SRobert Mustacchi  * the entries to the registry (list), if DELETE - clear the registry and free
3929*d14abf15SRobert Mustacchi  * the memory.
3930*d14abf15SRobert Mustacchi  */
3931*d14abf15SRobert Mustacchi static INLINE int ecore_mcast_refresh_registry_e1(struct _lm_device_t *pdev,
3932*d14abf15SRobert Mustacchi 						  struct ecore_mcast_obj *o)
3933*d14abf15SRobert Mustacchi {
3934*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
3935*d14abf15SRobert Mustacchi 	struct ecore_mcast_mac_elem *elem;
3936*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *data =
3937*d14abf15SRobert Mustacchi 			(struct mac_configuration_cmd *)(raw->rdata);
3938*d14abf15SRobert Mustacchi 
3939*d14abf15SRobert Mustacchi 	/* If first entry contains a SET bit - the command was ADD,
3940*d14abf15SRobert Mustacchi 	 * otherwise - DEL_ALL
3941*d14abf15SRobert Mustacchi 	 */
3942*d14abf15SRobert Mustacchi 	if (ECORE_GET_FLAG(data->config_table[0].flags,
3943*d14abf15SRobert Mustacchi 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3944*d14abf15SRobert Mustacchi 		int i, len = data->hdr.length;
3945*d14abf15SRobert Mustacchi 
3946*d14abf15SRobert Mustacchi 		/* Break if it was a RESTORE command */
3947*d14abf15SRobert Mustacchi 		if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3948*d14abf15SRobert Mustacchi 			return ECORE_SUCCESS;
3949*d14abf15SRobert Mustacchi 
3950*d14abf15SRobert Mustacchi 		elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, pdev);
3951*d14abf15SRobert Mustacchi 		if (!elem) {
3952*d14abf15SRobert Mustacchi 			ECORE_ERR("Failed to allocate registry memory\n");
3953*d14abf15SRobert Mustacchi 			return ECORE_NOMEM;
3954*d14abf15SRobert Mustacchi 		}
3955*d14abf15SRobert Mustacchi 
3956*d14abf15SRobert Mustacchi 		for (i = 0; i < len; i++, elem++) {
3957*d14abf15SRobert Mustacchi 			ecore_get_fw_mac_addr(
3958*d14abf15SRobert Mustacchi 				&data->config_table[i].msb_mac_addr,
3959*d14abf15SRobert Mustacchi 				&data->config_table[i].middle_mac_addr,
3960*d14abf15SRobert Mustacchi 				&data->config_table[i].lsb_mac_addr,
3961*d14abf15SRobert Mustacchi 				elem->mac);
3962*d14abf15SRobert Mustacchi 			ECORE_MSG(pdev, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3963*d14abf15SRobert Mustacchi 				  elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3964*d14abf15SRobert Mustacchi 			ECORE_LIST_PUSH_TAIL(&elem->link,
3965*d14abf15SRobert Mustacchi 					     &o->registry.exact_match.macs);
3966*d14abf15SRobert Mustacchi 		}
3967*d14abf15SRobert Mustacchi 	} else {
3968*d14abf15SRobert Mustacchi 		elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3969*d14abf15SRobert Mustacchi 					      struct ecore_mcast_mac_elem,
3970*d14abf15SRobert Mustacchi 					      link);
3971*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Deleting a registry\n");
3972*d14abf15SRobert Mustacchi 		ECORE_FREE(pdev, elem, sizeof(*elem));
3973*d14abf15SRobert Mustacchi 		ECORE_LIST_INIT(&o->registry.exact_match.macs);
3974*d14abf15SRobert Mustacchi 	}
3975*d14abf15SRobert Mustacchi 
3976*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
3977*d14abf15SRobert Mustacchi }
3978*d14abf15SRobert Mustacchi 
3979*d14abf15SRobert Mustacchi static int ecore_mcast_setup_e1(struct _lm_device_t *pdev,
3980*d14abf15SRobert Mustacchi 				struct ecore_mcast_ramrod_params *p,
3981*d14abf15SRobert Mustacchi 				enum ecore_mcast_cmd cmd)
3982*d14abf15SRobert Mustacchi {
3983*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
3984*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *raw = &o->raw;
3985*d14abf15SRobert Mustacchi 	struct mac_configuration_cmd *data =
3986*d14abf15SRobert Mustacchi 		(struct mac_configuration_cmd *)(raw->rdata);
3987*d14abf15SRobert Mustacchi 	int cnt = 0, i, rc;
3988*d14abf15SRobert Mustacchi 
3989*d14abf15SRobert Mustacchi 	/* Reset the ramrod data buffer */
3990*d14abf15SRobert Mustacchi 	mm_memset(data, 0, sizeof(*data));
3991*d14abf15SRobert Mustacchi 
3992*d14abf15SRobert Mustacchi 	/* First set all entries as invalid */
3993*d14abf15SRobert Mustacchi 	for (i = 0; i < o->max_cmd_len ; i++)
3994*d14abf15SRobert Mustacchi 		ECORE_SET_FLAG(data->config_table[i].flags,
3995*d14abf15SRobert Mustacchi 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3996*d14abf15SRobert Mustacchi 			T_ETH_MAC_COMMAND_INVALIDATE);
3997*d14abf15SRobert Mustacchi 
3998*d14abf15SRobert Mustacchi 	/* Handle pending commands first */
3999*d14abf15SRobert Mustacchi 	cnt = ecore_mcast_handle_pending_cmds_e1(pdev, p);
4000*d14abf15SRobert Mustacchi 
4001*d14abf15SRobert Mustacchi 	/* If there are no more pending commands - clear SCHEDULED state */
4002*d14abf15SRobert Mustacchi 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
4003*d14abf15SRobert Mustacchi 		o->clear_sched(o);
4004*d14abf15SRobert Mustacchi 
4005*d14abf15SRobert Mustacchi 	/* The below may be TRUE iff there were no pending commands */
4006*d14abf15SRobert Mustacchi 	if (!cnt)
4007*d14abf15SRobert Mustacchi 		cnt = ecore_mcast_handle_current_cmd(pdev, p, cmd, 0);
4008*d14abf15SRobert Mustacchi 
4009*d14abf15SRobert Mustacchi 	/* For 57710 every command has o->max_cmd_len length to ensure that
4010*d14abf15SRobert Mustacchi 	 * commands are done one at a time.
4011*d14abf15SRobert Mustacchi 	 */
4012*d14abf15SRobert Mustacchi 	o->total_pending_num -= o->max_cmd_len;
4013*d14abf15SRobert Mustacchi 
4014*d14abf15SRobert Mustacchi 	/* send a ramrod */
4015*d14abf15SRobert Mustacchi 
4016*d14abf15SRobert Mustacchi 	DbgBreakIf(cnt > o->max_cmd_len);
4017*d14abf15SRobert Mustacchi 
4018*d14abf15SRobert Mustacchi 	/* Set ramrod header (in particular, a number of entries to update) */
4019*d14abf15SRobert Mustacchi 	ecore_mcast_set_rdata_hdr_e1(pdev, p, (u8)cnt);
4020*d14abf15SRobert Mustacchi 
4021*d14abf15SRobert Mustacchi 	/* update a registry: we need the registry contents to be always up
4022*d14abf15SRobert Mustacchi 	 * to date in order to be able to execute a RESTORE opcode. Here
4023*d14abf15SRobert Mustacchi 	 * we use the fact that for 57710 we sent one command at a time
4024*d14abf15SRobert Mustacchi 	 * hence we may take the registry update out of the command handling
4025*d14abf15SRobert Mustacchi 	 * and do it in a simpler way here.
4026*d14abf15SRobert Mustacchi 	 */
4027*d14abf15SRobert Mustacchi 	rc = ecore_mcast_refresh_registry_e1(pdev, o);
4028*d14abf15SRobert Mustacchi 	if (rc)
4029*d14abf15SRobert Mustacchi 		return rc;
4030*d14abf15SRobert Mustacchi 
4031*d14abf15SRobert Mustacchi 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
4032*d14abf15SRobert Mustacchi 	 * RAMROD_PENDING status immediately.
4033*d14abf15SRobert Mustacchi 	 */
4034*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4035*d14abf15SRobert Mustacchi 		raw->clear_pending(raw);
4036*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
4037*d14abf15SRobert Mustacchi 	} else {
4038*d14abf15SRobert Mustacchi 		/* No need for an explicit memory barrier here as long as we
4039*d14abf15SRobert Mustacchi 		 * ensure the ordering of writing to the SPQ element
4040*d14abf15SRobert Mustacchi 		 * and updating of the SPQ producer which involves a memory
4041*d14abf15SRobert Mustacchi 		 * read. If the memory read is removed we will have to put a
4042*d14abf15SRobert Mustacchi 		 * full memory barrier there (inside ecore_sp_post()).
4043*d14abf15SRobert Mustacchi 		 */
4044*d14abf15SRobert Mustacchi 
4045*d14abf15SRobert Mustacchi 		/* Send a ramrod */
4046*d14abf15SRobert Mustacchi 		rc = ecore_sp_post( pdev,
4047*d14abf15SRobert Mustacchi 				    RAMROD_CMD_ID_ETH_SET_MAC,
4048*d14abf15SRobert Mustacchi 				    raw->cid,
4049*d14abf15SRobert Mustacchi 				    raw->rdata_mapping.as_u64,
4050*d14abf15SRobert Mustacchi 				    ETH_CONNECTION_TYPE);
4051*d14abf15SRobert Mustacchi 		if (rc)
4052*d14abf15SRobert Mustacchi 			return rc;
4053*d14abf15SRobert Mustacchi 
4054*d14abf15SRobert Mustacchi 		/* Ramrod completion is pending */
4055*d14abf15SRobert Mustacchi 		return ECORE_PENDING;
4056*d14abf15SRobert Mustacchi 	}
4057*d14abf15SRobert Mustacchi }
4058*d14abf15SRobert Mustacchi 
4059*d14abf15SRobert Mustacchi static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
4060*d14abf15SRobert Mustacchi {
4061*d14abf15SRobert Mustacchi 	return o->registry.exact_match.num_macs_set;
4062*d14abf15SRobert Mustacchi }
4063*d14abf15SRobert Mustacchi 
4064*d14abf15SRobert Mustacchi static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
4065*d14abf15SRobert Mustacchi {
4066*d14abf15SRobert Mustacchi 	return o->registry.aprox_match.num_bins_set;
4067*d14abf15SRobert Mustacchi }
4068*d14abf15SRobert Mustacchi 
4069*d14abf15SRobert Mustacchi static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
4070*d14abf15SRobert Mustacchi 						int n)
4071*d14abf15SRobert Mustacchi {
4072*d14abf15SRobert Mustacchi 	o->registry.exact_match.num_macs_set = n;
4073*d14abf15SRobert Mustacchi }
4074*d14abf15SRobert Mustacchi 
4075*d14abf15SRobert Mustacchi static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
4076*d14abf15SRobert Mustacchi 						int n)
4077*d14abf15SRobert Mustacchi {
4078*d14abf15SRobert Mustacchi 	o->registry.aprox_match.num_bins_set = n;
4079*d14abf15SRobert Mustacchi }
4080*d14abf15SRobert Mustacchi 
4081*d14abf15SRobert Mustacchi int ecore_config_mcast(struct _lm_device_t *pdev,
4082*d14abf15SRobert Mustacchi 		       struct ecore_mcast_ramrod_params *p,
4083*d14abf15SRobert Mustacchi 		       enum ecore_mcast_cmd cmd)
4084*d14abf15SRobert Mustacchi {
4085*d14abf15SRobert Mustacchi 	struct ecore_mcast_obj *o = p->mcast_obj;
4086*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
4087*d14abf15SRobert Mustacchi 	int rc = 0, old_reg_size;
4088*d14abf15SRobert Mustacchi 
4089*d14abf15SRobert Mustacchi 	/* This is needed to recover number of currently configured mcast macs
4090*d14abf15SRobert Mustacchi 	 * in case of failure.
4091*d14abf15SRobert Mustacchi 	 */
4092*d14abf15SRobert Mustacchi 	old_reg_size = o->get_registry_size(o);
4093*d14abf15SRobert Mustacchi 
4094*d14abf15SRobert Mustacchi 	/* Do some calculations and checks */
4095*d14abf15SRobert Mustacchi 	rc = o->validate(pdev, p, cmd);
4096*d14abf15SRobert Mustacchi 	if (rc)
4097*d14abf15SRobert Mustacchi 		return rc;
4098*d14abf15SRobert Mustacchi 
4099*d14abf15SRobert Mustacchi 	/* Return if there is no work to do */
4100*d14abf15SRobert Mustacchi 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
4101*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
4102*d14abf15SRobert Mustacchi 
4103*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
4104*d14abf15SRobert Mustacchi 		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
4105*d14abf15SRobert Mustacchi 
4106*d14abf15SRobert Mustacchi 	/* Enqueue the current command to the pending list if we can't complete
4107*d14abf15SRobert Mustacchi 	 * it in the current iteration
4108*d14abf15SRobert Mustacchi 	 */
4109*d14abf15SRobert Mustacchi 	if (r->check_pending(r) ||
4110*d14abf15SRobert Mustacchi 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
4111*d14abf15SRobert Mustacchi 		rc = o->enqueue_cmd(pdev, p->mcast_obj, p, cmd);
4112*d14abf15SRobert Mustacchi 		if (rc < 0)
4113*d14abf15SRobert Mustacchi 			goto error_exit1;
4114*d14abf15SRobert Mustacchi 
4115*d14abf15SRobert Mustacchi 		/* As long as the current command is in a command list we
4116*d14abf15SRobert Mustacchi 		 * don't need to handle it separately.
4117*d14abf15SRobert Mustacchi 		 */
4118*d14abf15SRobert Mustacchi 		p->mcast_list_len = 0;
4119*d14abf15SRobert Mustacchi 	}
4120*d14abf15SRobert Mustacchi 
4121*d14abf15SRobert Mustacchi 	if (!r->check_pending(r)) {
4122*d14abf15SRobert Mustacchi 
4123*d14abf15SRobert Mustacchi 		/* Set 'pending' state */
4124*d14abf15SRobert Mustacchi 		r->set_pending(r);
4125*d14abf15SRobert Mustacchi 
4126*d14abf15SRobert Mustacchi 		/* Configure the new classification in the chip */
4127*d14abf15SRobert Mustacchi 		rc = o->config_mcast(pdev, p, cmd);
4128*d14abf15SRobert Mustacchi 		if (rc < 0)
4129*d14abf15SRobert Mustacchi 			goto error_exit2;
4130*d14abf15SRobert Mustacchi 
4131*d14abf15SRobert Mustacchi 		/* Wait for a ramrod completion if was requested */
4132*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4133*d14abf15SRobert Mustacchi 			rc = o->wait_comp(pdev, o);
4134*d14abf15SRobert Mustacchi 	}
4135*d14abf15SRobert Mustacchi 
4136*d14abf15SRobert Mustacchi 	return rc;
4137*d14abf15SRobert Mustacchi 
4138*d14abf15SRobert Mustacchi error_exit2:
4139*d14abf15SRobert Mustacchi 	r->clear_pending(r);
4140*d14abf15SRobert Mustacchi 
4141*d14abf15SRobert Mustacchi error_exit1:
4142*d14abf15SRobert Mustacchi 	o->revert(pdev, p, old_reg_size);
4143*d14abf15SRobert Mustacchi 
4144*d14abf15SRobert Mustacchi 	return rc;
4145*d14abf15SRobert Mustacchi }
4146*d14abf15SRobert Mustacchi 
4147*d14abf15SRobert Mustacchi static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
4148*d14abf15SRobert Mustacchi {
4149*d14abf15SRobert Mustacchi 	smp_mb__before_atomic();
4150*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
4151*d14abf15SRobert Mustacchi 	smp_mb__after_atomic();
4152*d14abf15SRobert Mustacchi }
4153*d14abf15SRobert Mustacchi 
4154*d14abf15SRobert Mustacchi static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
4155*d14abf15SRobert Mustacchi {
4156*d14abf15SRobert Mustacchi 	smp_mb__before_atomic();
4157*d14abf15SRobert Mustacchi 	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
4158*d14abf15SRobert Mustacchi 	smp_mb__after_atomic();
4159*d14abf15SRobert Mustacchi }
4160*d14abf15SRobert Mustacchi 
4161*d14abf15SRobert Mustacchi static BOOL ecore_mcast_check_sched(struct ecore_mcast_obj *o)
4162*d14abf15SRobert Mustacchi {
4163*d14abf15SRobert Mustacchi 	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
4164*d14abf15SRobert Mustacchi }
4165*d14abf15SRobert Mustacchi 
4166*d14abf15SRobert Mustacchi static BOOL ecore_mcast_check_pending(struct ecore_mcast_obj *o)
4167*d14abf15SRobert Mustacchi {
4168*d14abf15SRobert Mustacchi 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
4169*d14abf15SRobert Mustacchi }
4170*d14abf15SRobert Mustacchi #ifndef ECORE_ERASE
4171*d14abf15SRobert Mustacchi typedef int (*enqueue_cmd_func)(struct _lm_device_t *pdev,
4172*d14abf15SRobert Mustacchi 				struct ecore_mcast_obj *o,
4173*d14abf15SRobert Mustacchi 				struct ecore_mcast_ramrod_params *p,
4174*d14abf15SRobert Mustacchi 				enum ecore_mcast_cmd cmd);
4175*d14abf15SRobert Mustacchi 
4176*d14abf15SRobert Mustacchi typedef int (*hdl_restore_func)(struct _lm_device_t *pdev,
4177*d14abf15SRobert Mustacchi 				struct ecore_mcast_obj *o,
4178*d14abf15SRobert Mustacchi 				int start_bin, int *rdata_idx);
4179*d14abf15SRobert Mustacchi 
4180*d14abf15SRobert Mustacchi typedef void (*set_one_rule_func)(struct _lm_device_t *pdev,
4181*d14abf15SRobert Mustacchi 				  struct ecore_mcast_obj *o, int idx,
4182*d14abf15SRobert Mustacchi 				  union ecore_mcast_config_data *cfg_data,
4183*d14abf15SRobert Mustacchi 				  enum ecore_mcast_cmd cmd);
4184*d14abf15SRobert Mustacchi #endif
4185*d14abf15SRobert Mustacchi 
4186*d14abf15SRobert Mustacchi void ecore_init_mcast_obj(struct _lm_device_t *pdev,
4187*d14abf15SRobert Mustacchi 			  struct ecore_mcast_obj *mcast_obj,
4188*d14abf15SRobert Mustacchi 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
4189*d14abf15SRobert Mustacchi 			  u8 engine_id, void *rdata, lm_address_t rdata_mapping,
4190*d14abf15SRobert Mustacchi 			  int state, unsigned long *pstate, ecore_obj_type type)
4191*d14abf15SRobert Mustacchi {
4192*d14abf15SRobert Mustacchi 	mm_memset(mcast_obj, 0, sizeof(*mcast_obj));
4193*d14abf15SRobert Mustacchi 
4194*d14abf15SRobert Mustacchi 	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
4195*d14abf15SRobert Mustacchi 			   rdata, rdata_mapping, state, pstate, type);
4196*d14abf15SRobert Mustacchi 
4197*d14abf15SRobert Mustacchi 	mcast_obj->engine_id = engine_id;
4198*d14abf15SRobert Mustacchi 
4199*d14abf15SRobert Mustacchi 	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
4200*d14abf15SRobert Mustacchi 
4201*d14abf15SRobert Mustacchi 	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
4202*d14abf15SRobert Mustacchi 	mcast_obj->check_sched = ecore_mcast_check_sched;
4203*d14abf15SRobert Mustacchi 	mcast_obj->set_sched = ecore_mcast_set_sched;
4204*d14abf15SRobert Mustacchi 	mcast_obj->clear_sched = ecore_mcast_clear_sched;
4205*d14abf15SRobert Mustacchi 
4206*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1(pdev)) {
4207*d14abf15SRobert Mustacchi 		mcast_obj->config_mcast      = ecore_mcast_setup_e1;
4208*d14abf15SRobert Mustacchi 		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
4209*d14abf15SRobert Mustacchi 		mcast_obj->hdl_restore       =
4210*d14abf15SRobert Mustacchi 			ecore_mcast_handle_restore_cmd_e1;
4211*d14abf15SRobert Mustacchi 		mcast_obj->check_pending     = ecore_mcast_check_pending;
4212*d14abf15SRobert Mustacchi 
4213*d14abf15SRobert Mustacchi 		if (CHIP_REV_IS_SLOW(pdev))
4214*d14abf15SRobert Mustacchi 			mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
4215*d14abf15SRobert Mustacchi 		else
4216*d14abf15SRobert Mustacchi 			mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
4217*d14abf15SRobert Mustacchi 
4218*d14abf15SRobert Mustacchi 		mcast_obj->wait_comp         = ecore_mcast_wait;
4219*d14abf15SRobert Mustacchi 		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
4220*d14abf15SRobert Mustacchi 		mcast_obj->validate          = ecore_mcast_validate_e1;
4221*d14abf15SRobert Mustacchi 		mcast_obj->revert            = ecore_mcast_revert_e1;
4222*d14abf15SRobert Mustacchi 		mcast_obj->get_registry_size =
4223*d14abf15SRobert Mustacchi 			ecore_mcast_get_registry_size_exact;
4224*d14abf15SRobert Mustacchi 		mcast_obj->set_registry_size =
4225*d14abf15SRobert Mustacchi 			ecore_mcast_set_registry_size_exact;
4226*d14abf15SRobert Mustacchi 
4227*d14abf15SRobert Mustacchi 		/* 57710 is the only chip that uses the exact match for mcast
4228*d14abf15SRobert Mustacchi 		 * at the moment.
4229*d14abf15SRobert Mustacchi 		 */
4230*d14abf15SRobert Mustacchi 		ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
4231*d14abf15SRobert Mustacchi 
4232*d14abf15SRobert Mustacchi 	} else if (CHIP_IS_E1H(pdev)) {
4233*d14abf15SRobert Mustacchi 		mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
4234*d14abf15SRobert Mustacchi 		mcast_obj->enqueue_cmd   = (enqueue_cmd_func)NULL;
4235*d14abf15SRobert Mustacchi 		mcast_obj->hdl_restore   = (hdl_restore_func)NULL;
4236*d14abf15SRobert Mustacchi 		mcast_obj->check_pending = ecore_mcast_check_pending;
4237*d14abf15SRobert Mustacchi 
4238*d14abf15SRobert Mustacchi 		/* 57711 doesn't send a ramrod, so it has unlimited credit
4239*d14abf15SRobert Mustacchi 		 * for one command.
4240*d14abf15SRobert Mustacchi 		 */
4241*d14abf15SRobert Mustacchi 		mcast_obj->max_cmd_len       = -1;
4242*d14abf15SRobert Mustacchi 		mcast_obj->wait_comp         = ecore_mcast_wait;
4243*d14abf15SRobert Mustacchi 		mcast_obj->set_one_rule      = (set_one_rule_func)NULL;
4244*d14abf15SRobert Mustacchi 		mcast_obj->validate          = ecore_mcast_validate_e1h;
4245*d14abf15SRobert Mustacchi 		mcast_obj->revert            = ecore_mcast_revert_e1h;
4246*d14abf15SRobert Mustacchi 		mcast_obj->get_registry_size =
4247*d14abf15SRobert Mustacchi 			ecore_mcast_get_registry_size_aprox;
4248*d14abf15SRobert Mustacchi 		mcast_obj->set_registry_size =
4249*d14abf15SRobert Mustacchi 			ecore_mcast_set_registry_size_aprox;
4250*d14abf15SRobert Mustacchi 	} else {
4251*d14abf15SRobert Mustacchi 		mcast_obj->config_mcast      = ecore_mcast_setup_e2;
4252*d14abf15SRobert Mustacchi 		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
4253*d14abf15SRobert Mustacchi 		mcast_obj->hdl_restore       =
4254*d14abf15SRobert Mustacchi 			ecore_mcast_handle_restore_cmd_e2;
4255*d14abf15SRobert Mustacchi 		mcast_obj->check_pending     = ecore_mcast_check_pending;
4256*d14abf15SRobert Mustacchi 		/* TODO: There should be a proper HSI define for this number!!!
4257*d14abf15SRobert Mustacchi 		 */
4258*d14abf15SRobert Mustacchi 		mcast_obj->max_cmd_len       = 16;
4259*d14abf15SRobert Mustacchi 		mcast_obj->wait_comp         = ecore_mcast_wait;
4260*d14abf15SRobert Mustacchi 		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
4261*d14abf15SRobert Mustacchi 		mcast_obj->validate          = ecore_mcast_validate_e2;
4262*d14abf15SRobert Mustacchi 		mcast_obj->revert            = ecore_mcast_revert_e2;
4263*d14abf15SRobert Mustacchi 		mcast_obj->get_registry_size =
4264*d14abf15SRobert Mustacchi 			ecore_mcast_get_registry_size_aprox;
4265*d14abf15SRobert Mustacchi 		mcast_obj->set_registry_size =
4266*d14abf15SRobert Mustacchi 			ecore_mcast_set_registry_size_aprox;
4267*d14abf15SRobert Mustacchi 	}
4268*d14abf15SRobert Mustacchi }
4269*d14abf15SRobert Mustacchi 
4270*d14abf15SRobert Mustacchi /*************************** Credit handling **********************************/
4271*d14abf15SRobert Mustacchi 
4272*d14abf15SRobert Mustacchi /**
4273*d14abf15SRobert Mustacchi  * atomic_add_ifless - add if the result is less than a given value.
4274*d14abf15SRobert Mustacchi  *
4275*d14abf15SRobert Mustacchi  * @v:	pointer of type atomic_t
4276*d14abf15SRobert Mustacchi  * @a:	the amount to add to v...
4277*d14abf15SRobert Mustacchi  * @u:	...if (v + a) is less than u.
4278*d14abf15SRobert Mustacchi  *
4279*d14abf15SRobert Mustacchi  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4280*d14abf15SRobert Mustacchi  *
4281*d14abf15SRobert Mustacchi  */
4282*d14abf15SRobert Mustacchi static INLINE BOOL __atomic_add_ifless(atomic_t *v, int a, int u)
4283*d14abf15SRobert Mustacchi {
4284*d14abf15SRobert Mustacchi 	int c, old;
4285*d14abf15SRobert Mustacchi 
4286*d14abf15SRobert Mustacchi 	c = ecore_atomic_read(v);
4287*d14abf15SRobert Mustacchi 	for (;;) {
4288*d14abf15SRobert Mustacchi 		if (ECORE_UNLIKELY(c + a >= u))
4289*d14abf15SRobert Mustacchi 			return FALSE;
4290*d14abf15SRobert Mustacchi 
4291*d14abf15SRobert Mustacchi 		old = ecore_atomic_cmpxchg((v), c, c + a);
4292*d14abf15SRobert Mustacchi 		if (ECORE_LIKELY(old == c))
4293*d14abf15SRobert Mustacchi 			break;
4294*d14abf15SRobert Mustacchi 		c = old;
4295*d14abf15SRobert Mustacchi 	}
4296*d14abf15SRobert Mustacchi 
4297*d14abf15SRobert Mustacchi 	return TRUE;
4298*d14abf15SRobert Mustacchi }
4299*d14abf15SRobert Mustacchi 
4300*d14abf15SRobert Mustacchi /**
4301*d14abf15SRobert Mustacchi  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
4302*d14abf15SRobert Mustacchi  *
4303*d14abf15SRobert Mustacchi  * @v:	pointer of type atomic_t
4304*d14abf15SRobert Mustacchi  * @a:	the amount to dec from v...
4305*d14abf15SRobert Mustacchi  * @u:	...if (v - a) is more or equal than u.
4306*d14abf15SRobert Mustacchi  *
4307*d14abf15SRobert Mustacchi  * returns TRUE if (v - a) was more or equal than u, and FALSE
4308*d14abf15SRobert Mustacchi  * otherwise.
4309*d14abf15SRobert Mustacchi  */
4310*d14abf15SRobert Mustacchi static INLINE BOOL __atomic_dec_ifmoe(atomic_t *v, int a, int u)
4311*d14abf15SRobert Mustacchi {
4312*d14abf15SRobert Mustacchi 	int c, old;
4313*d14abf15SRobert Mustacchi 
4314*d14abf15SRobert Mustacchi 	c = ecore_atomic_read(v);
4315*d14abf15SRobert Mustacchi 	for (;;) {
4316*d14abf15SRobert Mustacchi 		if (ECORE_UNLIKELY(c - a < u))
4317*d14abf15SRobert Mustacchi 			return FALSE;
4318*d14abf15SRobert Mustacchi 
4319*d14abf15SRobert Mustacchi 		old = ecore_atomic_cmpxchg((v), c, c - a);
4320*d14abf15SRobert Mustacchi 		if (ECORE_LIKELY(old == c))
4321*d14abf15SRobert Mustacchi 			break;
4322*d14abf15SRobert Mustacchi 		c = old;
4323*d14abf15SRobert Mustacchi 	}
4324*d14abf15SRobert Mustacchi 
4325*d14abf15SRobert Mustacchi 	return TRUE;
4326*d14abf15SRobert Mustacchi }
4327*d14abf15SRobert Mustacchi 
4328*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4329*d14abf15SRobert Mustacchi {
4330*d14abf15SRobert Mustacchi 	BOOL rc;
4331*d14abf15SRobert Mustacchi 
4332*d14abf15SRobert Mustacchi 	smp_mb();
4333*d14abf15SRobert Mustacchi 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4334*d14abf15SRobert Mustacchi 	smp_mb();
4335*d14abf15SRobert Mustacchi 
4336*d14abf15SRobert Mustacchi 	return rc;
4337*d14abf15SRobert Mustacchi }
4338*d14abf15SRobert Mustacchi 
4339*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4340*d14abf15SRobert Mustacchi {
4341*d14abf15SRobert Mustacchi 	BOOL rc;
4342*d14abf15SRobert Mustacchi 
4343*d14abf15SRobert Mustacchi 	smp_mb();
4344*d14abf15SRobert Mustacchi 
4345*d14abf15SRobert Mustacchi 	/* Don't let to refill if credit + cnt > pool_sz */
4346*d14abf15SRobert Mustacchi 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4347*d14abf15SRobert Mustacchi 
4348*d14abf15SRobert Mustacchi 	smp_mb();
4349*d14abf15SRobert Mustacchi 
4350*d14abf15SRobert Mustacchi 	return rc;
4351*d14abf15SRobert Mustacchi }
4352*d14abf15SRobert Mustacchi 
4353*d14abf15SRobert Mustacchi static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4354*d14abf15SRobert Mustacchi {
4355*d14abf15SRobert Mustacchi 	int cur_credit;
4356*d14abf15SRobert Mustacchi 
4357*d14abf15SRobert Mustacchi 	smp_mb();
4358*d14abf15SRobert Mustacchi 	cur_credit = ecore_atomic_read(&o->credit);
4359*d14abf15SRobert Mustacchi 
4360*d14abf15SRobert Mustacchi 	return cur_credit;
4361*d14abf15SRobert Mustacchi }
4362*d14abf15SRobert Mustacchi 
4363*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4364*d14abf15SRobert Mustacchi 					  int cnt)
4365*d14abf15SRobert Mustacchi {
4366*d14abf15SRobert Mustacchi 	return TRUE;
4367*d14abf15SRobert Mustacchi }
4368*d14abf15SRobert Mustacchi 
4369*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_get_entry(
4370*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *o,
4371*d14abf15SRobert Mustacchi 	int *offset)
4372*d14abf15SRobert Mustacchi {
4373*d14abf15SRobert Mustacchi 	int idx, vec, i;
4374*d14abf15SRobert Mustacchi 
4375*d14abf15SRobert Mustacchi 	*offset = -1;
4376*d14abf15SRobert Mustacchi 
4377*d14abf15SRobert Mustacchi 	/* Find "internal cam-offset" then add to base for this object... */
4378*d14abf15SRobert Mustacchi 	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4379*d14abf15SRobert Mustacchi 
4380*d14abf15SRobert Mustacchi 		/* Skip the current vector if there are no free entries in it */
4381*d14abf15SRobert Mustacchi 		if (!o->pool_mirror[vec])
4382*d14abf15SRobert Mustacchi 			continue;
4383*d14abf15SRobert Mustacchi 
4384*d14abf15SRobert Mustacchi 		/* If we've got here we are going to find a free entry */
4385*d14abf15SRobert Mustacchi 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4386*d14abf15SRobert Mustacchi 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4387*d14abf15SRobert Mustacchi 
4388*d14abf15SRobert Mustacchi 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4389*d14abf15SRobert Mustacchi 				/* Got one!! */
4390*d14abf15SRobert Mustacchi 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4391*d14abf15SRobert Mustacchi 				*offset = o->base_pool_offset + idx;
4392*d14abf15SRobert Mustacchi 				return TRUE;
4393*d14abf15SRobert Mustacchi 			}
4394*d14abf15SRobert Mustacchi 	}
4395*d14abf15SRobert Mustacchi 
4396*d14abf15SRobert Mustacchi 	return FALSE;
4397*d14abf15SRobert Mustacchi }
4398*d14abf15SRobert Mustacchi 
4399*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_put_entry(
4400*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *o,
4401*d14abf15SRobert Mustacchi 	int offset)
4402*d14abf15SRobert Mustacchi {
4403*d14abf15SRobert Mustacchi 	if (offset < o->base_pool_offset)
4404*d14abf15SRobert Mustacchi 		return FALSE;
4405*d14abf15SRobert Mustacchi 
4406*d14abf15SRobert Mustacchi 	offset -= o->base_pool_offset;
4407*d14abf15SRobert Mustacchi 
4408*d14abf15SRobert Mustacchi 	if (offset >= o->pool_sz)
4409*d14abf15SRobert Mustacchi 		return FALSE;
4410*d14abf15SRobert Mustacchi 
4411*d14abf15SRobert Mustacchi 	/* Return the entry to the pool */
4412*d14abf15SRobert Mustacchi 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4413*d14abf15SRobert Mustacchi 
4414*d14abf15SRobert Mustacchi 	return TRUE;
4415*d14abf15SRobert Mustacchi }
4416*d14abf15SRobert Mustacchi 
4417*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_put_entry_always_TRUE(
4418*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *o,
4419*d14abf15SRobert Mustacchi 	int offset)
4420*d14abf15SRobert Mustacchi {
4421*d14abf15SRobert Mustacchi 	return TRUE;
4422*d14abf15SRobert Mustacchi }
4423*d14abf15SRobert Mustacchi 
4424*d14abf15SRobert Mustacchi static BOOL ecore_credit_pool_get_entry_always_TRUE(
4425*d14abf15SRobert Mustacchi 	struct ecore_credit_pool_obj *o,
4426*d14abf15SRobert Mustacchi 	int *offset)
4427*d14abf15SRobert Mustacchi {
4428*d14abf15SRobert Mustacchi 	*offset = -1;
4429*d14abf15SRobert Mustacchi 	return TRUE;
4430*d14abf15SRobert Mustacchi }
4431*d14abf15SRobert Mustacchi /**
4432*d14abf15SRobert Mustacchi  * ecore_init_credit_pool - initialize credit pool internals.
4433*d14abf15SRobert Mustacchi  *
4434*d14abf15SRobert Mustacchi  * @p:
4435*d14abf15SRobert Mustacchi  * @base:	Base entry in the CAM to use.
4436*d14abf15SRobert Mustacchi  * @credit:	pool size.
4437*d14abf15SRobert Mustacchi  *
4438*d14abf15SRobert Mustacchi  * If base is negative no CAM entries handling will be performed.
4439*d14abf15SRobert Mustacchi  * If credit is negative pool operations will always succeed (unlimited pool).
4440*d14abf15SRobert Mustacchi  *
4441*d14abf15SRobert Mustacchi  */
4442*d14abf15SRobert Mustacchi static INLINE void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4443*d14abf15SRobert Mustacchi 					  int base, int credit)
4444*d14abf15SRobert Mustacchi {
4445*d14abf15SRobert Mustacchi 	/* Zero the object first */
4446*d14abf15SRobert Mustacchi 	mm_memset(p, 0, sizeof(*p));
4447*d14abf15SRobert Mustacchi 
4448*d14abf15SRobert Mustacchi 	/* Set the table to all 1s */
4449*d14abf15SRobert Mustacchi 	mm_memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4450*d14abf15SRobert Mustacchi 
4451*d14abf15SRobert Mustacchi 	/* Init a pool as full */
4452*d14abf15SRobert Mustacchi 	ecore_atomic_set(&p->credit, credit);
4453*d14abf15SRobert Mustacchi 
4454*d14abf15SRobert Mustacchi 	/* The total poll size */
4455*d14abf15SRobert Mustacchi 	p->pool_sz = credit;
4456*d14abf15SRobert Mustacchi 
4457*d14abf15SRobert Mustacchi 	p->base_pool_offset = base;
4458*d14abf15SRobert Mustacchi 
4459*d14abf15SRobert Mustacchi 	/* Commit the change */
4460*d14abf15SRobert Mustacchi 	smp_mb();
4461*d14abf15SRobert Mustacchi 
4462*d14abf15SRobert Mustacchi 	p->check = ecore_credit_pool_check;
4463*d14abf15SRobert Mustacchi 
4464*d14abf15SRobert Mustacchi 	/* if pool credit is negative - disable the checks */
4465*d14abf15SRobert Mustacchi 	if (credit >= 0) {
4466*d14abf15SRobert Mustacchi 		p->put      = ecore_credit_pool_put;
4467*d14abf15SRobert Mustacchi 		p->get      = ecore_credit_pool_get;
4468*d14abf15SRobert Mustacchi 		p->put_entry = ecore_credit_pool_put_entry;
4469*d14abf15SRobert Mustacchi 		p->get_entry = ecore_credit_pool_get_entry;
4470*d14abf15SRobert Mustacchi 	} else {
4471*d14abf15SRobert Mustacchi 		p->put      = ecore_credit_pool_always_TRUE;
4472*d14abf15SRobert Mustacchi 		p->get      = ecore_credit_pool_always_TRUE;
4473*d14abf15SRobert Mustacchi 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4474*d14abf15SRobert Mustacchi 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4475*d14abf15SRobert Mustacchi 	}
4476*d14abf15SRobert Mustacchi 
4477*d14abf15SRobert Mustacchi 	/* If base is negative - disable entries handling */
4478*d14abf15SRobert Mustacchi 	if (base < 0) {
4479*d14abf15SRobert Mustacchi 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4480*d14abf15SRobert Mustacchi 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4481*d14abf15SRobert Mustacchi 	}
4482*d14abf15SRobert Mustacchi }
4483*d14abf15SRobert Mustacchi 
4484*d14abf15SRobert Mustacchi void ecore_init_mac_credit_pool(struct _lm_device_t *pdev,
4485*d14abf15SRobert Mustacchi 				struct ecore_credit_pool_obj *p, u8 func_id,
4486*d14abf15SRobert Mustacchi 				u8 func_num)
4487*d14abf15SRobert Mustacchi {
4488*d14abf15SRobert Mustacchi /* TODO: this will be defined in consts as well... */
4489*d14abf15SRobert Mustacchi #define ECORE_CAM_SIZE_EMUL 5
4490*d14abf15SRobert Mustacchi 
4491*d14abf15SRobert Mustacchi 	int cam_sz;
4492*d14abf15SRobert Mustacchi 
4493*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1(pdev)) {
4494*d14abf15SRobert Mustacchi 		/* In E1, Multicast is saved in cam... */
4495*d14abf15SRobert Mustacchi 		if (!CHIP_REV_IS_SLOW(pdev))
4496*d14abf15SRobert Mustacchi 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4497*d14abf15SRobert Mustacchi 		else
4498*d14abf15SRobert Mustacchi 			cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4499*d14abf15SRobert Mustacchi 
4500*d14abf15SRobert Mustacchi 		ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4501*d14abf15SRobert Mustacchi 
4502*d14abf15SRobert Mustacchi 	} else if (CHIP_IS_E1H(pdev)) {
4503*d14abf15SRobert Mustacchi 		/* CAM credit is equally divided between all active functions
4504*d14abf15SRobert Mustacchi 		 * on the PORT!.
4505*d14abf15SRobert Mustacchi 		 */
4506*d14abf15SRobert Mustacchi 		if ((func_num > 0)) {
4507*d14abf15SRobert Mustacchi 			if (!CHIP_REV_IS_SLOW(pdev))
4508*d14abf15SRobert Mustacchi 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4509*d14abf15SRobert Mustacchi 			else
4510*d14abf15SRobert Mustacchi 				cam_sz = ECORE_CAM_SIZE_EMUL;
4511*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4512*d14abf15SRobert Mustacchi 		} else {
4513*d14abf15SRobert Mustacchi 			/* this should never happen! Block MAC operations. */
4514*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, 0, 0);
4515*d14abf15SRobert Mustacchi 		}
4516*d14abf15SRobert Mustacchi 
4517*d14abf15SRobert Mustacchi 	} else {
4518*d14abf15SRobert Mustacchi 
4519*d14abf15SRobert Mustacchi 		/*
4520*d14abf15SRobert Mustacchi 		 * CAM credit is equaly divided between all active functions
4521*d14abf15SRobert Mustacchi 		 * on the PATH.
4522*d14abf15SRobert Mustacchi 		 */
4523*d14abf15SRobert Mustacchi 		if ((func_num > 1)) {
4524*d14abf15SRobert Mustacchi 			if (!CHIP_REV_IS_SLOW(pdev))
4525*d14abf15SRobert Mustacchi 				cam_sz = (MAX_MAC_CREDIT_E2
4526*d14abf15SRobert Mustacchi 				- GET_NUM_VFS_PER_PATH(pdev))
4527*d14abf15SRobert Mustacchi 				/ func_num
4528*d14abf15SRobert Mustacchi 				+ GET_NUM_VFS_PER_PF(pdev);
4529*d14abf15SRobert Mustacchi 			else
4530*d14abf15SRobert Mustacchi 				cam_sz = ECORE_CAM_SIZE_EMUL;
4531*d14abf15SRobert Mustacchi 
4532*d14abf15SRobert Mustacchi 			/* No need for CAM entries handling for 57712 and
4533*d14abf15SRobert Mustacchi 			 * newer.
4534*d14abf15SRobert Mustacchi 			 */
4535*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, -1, cam_sz);
4536*d14abf15SRobert Mustacchi 		} else if (func_num == 1) {
4537*d14abf15SRobert Mustacchi 			if (!CHIP_REV_IS_SLOW(pdev))
4538*d14abf15SRobert Mustacchi 				cam_sz = MAX_MAC_CREDIT_E2;
4539*d14abf15SRobert Mustacchi 			else
4540*d14abf15SRobert Mustacchi 				cam_sz = ECORE_CAM_SIZE_EMUL;
4541*d14abf15SRobert Mustacchi 
4542*d14abf15SRobert Mustacchi 			/* No need for CAM entries handling for 57712 and
4543*d14abf15SRobert Mustacchi 			 * newer.
4544*d14abf15SRobert Mustacchi 			 */
4545*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, -1, cam_sz);
4546*d14abf15SRobert Mustacchi 		} else {
4547*d14abf15SRobert Mustacchi 			/* this should never happen! Block MAC operations. */
4548*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, 0, 0);
4549*d14abf15SRobert Mustacchi 		}
4550*d14abf15SRobert Mustacchi 	}
4551*d14abf15SRobert Mustacchi }
4552*d14abf15SRobert Mustacchi 
4553*d14abf15SRobert Mustacchi void ecore_init_vlan_credit_pool(struct _lm_device_t *pdev,
4554*d14abf15SRobert Mustacchi 				 struct ecore_credit_pool_obj *p,
4555*d14abf15SRobert Mustacchi 				 u8 func_id,
4556*d14abf15SRobert Mustacchi 				 u8 func_num)
4557*d14abf15SRobert Mustacchi {
4558*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1x(pdev)) {
4559*d14abf15SRobert Mustacchi 		/* There is no VLAN credit in HW on 57710 and 57711 only
4560*d14abf15SRobert Mustacchi 		 * MAC / MAC-VLAN can be set
4561*d14abf15SRobert Mustacchi 		 */
4562*d14abf15SRobert Mustacchi 		ecore_init_credit_pool(p, 0, -1);
4563*d14abf15SRobert Mustacchi 	} else {
4564*d14abf15SRobert Mustacchi 		/* CAM credit is equally divided between all active functions
4565*d14abf15SRobert Mustacchi 		 * on the PATH.
4566*d14abf15SRobert Mustacchi 		 */
4567*d14abf15SRobert Mustacchi 		if (func_num > 0) {
4568*d14abf15SRobert Mustacchi 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4569*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, func_id * credit, credit);
4570*d14abf15SRobert Mustacchi 		} else
4571*d14abf15SRobert Mustacchi 			/* this should never happen! Block VLAN operations. */
4572*d14abf15SRobert Mustacchi 			ecore_init_credit_pool(p, 0, 0);
4573*d14abf15SRobert Mustacchi 	}
4574*d14abf15SRobert Mustacchi }
4575*d14abf15SRobert Mustacchi 
4576*d14abf15SRobert Mustacchi /****************** RSS Configuration ******************/
4577*d14abf15SRobert Mustacchi #if defined(ECORE_ERASE) && !defined(__FreeBSD__)
4578*d14abf15SRobert Mustacchi /**
4579*d14abf15SRobert Mustacchi  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4580*d14abf15SRobert Mustacchi  *
4581*d14abf15SRobert Mustacchi  * @bp:		driver handle
4582*d14abf15SRobert Mustacchi  * @p:		pointer to rss configuration
4583*d14abf15SRobert Mustacchi  *
4584*d14abf15SRobert Mustacchi  * Prints it when NETIF_MSG_IFUP debug level is configured.
4585*d14abf15SRobert Mustacchi  */
4586*d14abf15SRobert Mustacchi static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4587*d14abf15SRobert Mustacchi 					struct bnx2x_config_rss_params *p)
4588*d14abf15SRobert Mustacchi {
4589*d14abf15SRobert Mustacchi 	int i;
4590*d14abf15SRobert Mustacchi 
4591*d14abf15SRobert Mustacchi 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4592*d14abf15SRobert Mustacchi 	DP(BNX2X_MSG_SP, "0x0000: ");
4593*d14abf15SRobert Mustacchi 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4594*d14abf15SRobert Mustacchi 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4595*d14abf15SRobert Mustacchi 
4596*d14abf15SRobert Mustacchi 		/* Print 4 bytes in a line */
4597*d14abf15SRobert Mustacchi 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4598*d14abf15SRobert Mustacchi 		    (((i + 1) & 0x3) == 0)) {
4599*d14abf15SRobert Mustacchi 			DP_CONT(BNX2X_MSG_SP, "\n");
4600*d14abf15SRobert Mustacchi 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4601*d14abf15SRobert Mustacchi 		}
4602*d14abf15SRobert Mustacchi 	}
4603*d14abf15SRobert Mustacchi 
4604*d14abf15SRobert Mustacchi 	DP_CONT(BNX2X_MSG_SP, "\n");
4605*d14abf15SRobert Mustacchi }
4606*d14abf15SRobert Mustacchi #endif /* ECORE_ERASE && !__FreeBSD__ */
4607*d14abf15SRobert Mustacchi 
4608*d14abf15SRobert Mustacchi /**
4609*d14abf15SRobert Mustacchi  * ecore_setup_rss - configure RSS
4610*d14abf15SRobert Mustacchi  *
4611*d14abf15SRobert Mustacchi  * @pdev:	device handle
4612*d14abf15SRobert Mustacchi  * @p:		rss configuration
4613*d14abf15SRobert Mustacchi  *
4614*d14abf15SRobert Mustacchi  * sends on UPDATE ramrod for that matter.
4615*d14abf15SRobert Mustacchi  */
4616*d14abf15SRobert Mustacchi static int ecore_setup_rss(struct _lm_device_t *pdev,
4617*d14abf15SRobert Mustacchi 			   struct ecore_config_rss_params *p)
4618*d14abf15SRobert Mustacchi {
4619*d14abf15SRobert Mustacchi 	struct ecore_rss_config_obj *o = p->rss_obj;
4620*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
4621*d14abf15SRobert Mustacchi 	struct eth_rss_update_ramrod_data *data =
4622*d14abf15SRobert Mustacchi 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4623*d14abf15SRobert Mustacchi 	u16 caps = 0;
4624*d14abf15SRobert Mustacchi 	u8 rss_mode = 0;
4625*d14abf15SRobert Mustacchi 	int rc;
4626*d14abf15SRobert Mustacchi 
4627*d14abf15SRobert Mustacchi 	mm_memset(data, 0, sizeof(*data));
4628*d14abf15SRobert Mustacchi 
4629*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Configuring RSS\n");
4630*d14abf15SRobert Mustacchi 
4631*d14abf15SRobert Mustacchi 	/* Set an echo field */
4632*d14abf15SRobert Mustacchi 	data->echo = mm_cpu_to_le32((r->cid & ECORE_SWCID_MASK) |
4633*d14abf15SRobert Mustacchi 				 (r->state << ECORE_SWCID_SHIFT));
4634*d14abf15SRobert Mustacchi 
4635*d14abf15SRobert Mustacchi 	/* RSS mode */
4636*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4637*d14abf15SRobert Mustacchi 		rss_mode = ETH_RSS_MODE_DISABLED;
4638*d14abf15SRobert Mustacchi 	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4639*d14abf15SRobert Mustacchi 		rss_mode = ETH_RSS_MODE_REGULAR;
4640*d14abf15SRobert Mustacchi #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
4641*d14abf15SRobert Mustacchi 	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
4642*d14abf15SRobert Mustacchi 		rss_mode = ETH_RSS_MODE_ESX51;
4643*d14abf15SRobert Mustacchi #endif
4644*d14abf15SRobert Mustacchi 
4645*d14abf15SRobert Mustacchi 	data->rss_mode = rss_mode;
4646*d14abf15SRobert Mustacchi 
4647*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "rss_mode=%d\n", rss_mode);
4648*d14abf15SRobert Mustacchi 
4649*d14abf15SRobert Mustacchi 	/* RSS capabilities */
4650*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4651*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4652*d14abf15SRobert Mustacchi 
4653*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4654*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4655*d14abf15SRobert Mustacchi 
4656*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4657*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4658*d14abf15SRobert Mustacchi 
4659*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4660*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4661*d14abf15SRobert Mustacchi 
4662*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4663*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4664*d14abf15SRobert Mustacchi 
4665*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4666*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4667*d14abf15SRobert Mustacchi 
4668*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags))
4669*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
4670*d14abf15SRobert Mustacchi 
4671*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags))
4672*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
4673*d14abf15SRobert Mustacchi 
4674*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_NVGRE_KEY_ENTROPY, &p->rss_flags))
4675*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY;
4676*d14abf15SRobert Mustacchi 
4677*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_GRE_INNER_HDRS, &p->rss_flags))
4678*d14abf15SRobert Mustacchi 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
4679*d14abf15SRobert Mustacchi 
4680*d14abf15SRobert Mustacchi 	data->capabilities = mm_cpu_to_le16(caps);
4681*d14abf15SRobert Mustacchi 
4682*d14abf15SRobert Mustacchi 	/* Hashing mask */
4683*d14abf15SRobert Mustacchi 	data->rss_result_mask = p->rss_result_mask;
4684*d14abf15SRobert Mustacchi 
4685*d14abf15SRobert Mustacchi 	/* RSS engine ID */
4686*d14abf15SRobert Mustacchi 	data->rss_engine_id = o->engine_id;
4687*d14abf15SRobert Mustacchi 
4688*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "rss_engine_id=%d\n", data->rss_engine_id);
4689*d14abf15SRobert Mustacchi 
4690*d14abf15SRobert Mustacchi 	/* Indirection table */
4691*d14abf15SRobert Mustacchi 	mm_memcpy(data->indirection_table, p->ind_table,
4692*d14abf15SRobert Mustacchi 		  T_ETH_INDIRECTION_TABLE_SIZE);
4693*d14abf15SRobert Mustacchi 
4694*d14abf15SRobert Mustacchi 	/* Remember the last configuration */
4695*d14abf15SRobert Mustacchi 	mm_memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4696*d14abf15SRobert Mustacchi 
4697*d14abf15SRobert Mustacchi #if defined(ECORE_ERASE) && !defined(__FreeBSD__)
4698*d14abf15SRobert Mustacchi 	/* Print the indirection table */
4699*d14abf15SRobert Mustacchi 	if (netif_msg_ifup(bp))
4700*d14abf15SRobert Mustacchi 		bnx2x_debug_print_ind_table(bp, p);
4701*d14abf15SRobert Mustacchi #endif
4702*d14abf15SRobert Mustacchi 
4703*d14abf15SRobert Mustacchi 	/* RSS keys */
4704*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4705*d14abf15SRobert Mustacchi 		mm_memcpy(&data->rss_key[0], &p->rss_key[0],
4706*d14abf15SRobert Mustacchi 		       sizeof(data->rss_key));
4707*d14abf15SRobert Mustacchi 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4708*d14abf15SRobert Mustacchi 	}
4709*d14abf15SRobert Mustacchi 
4710*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
4711*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
4712*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
4713*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
4714*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
4715*d14abf15SRobert Mustacchi 	 */
4716*d14abf15SRobert Mustacchi 
4717*d14abf15SRobert Mustacchi 	/* Send a ramrod */
4718*d14abf15SRobert Mustacchi 	rc = ecore_sp_post(pdev,
4719*d14abf15SRobert Mustacchi 			     RAMROD_CMD_ID_ETH_RSS_UPDATE,
4720*d14abf15SRobert Mustacchi 			     r->cid,
4721*d14abf15SRobert Mustacchi 			     r->rdata_mapping.as_u64,
4722*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
4723*d14abf15SRobert Mustacchi 
4724*d14abf15SRobert Mustacchi 	if (rc < 0)
4725*d14abf15SRobert Mustacchi 		return rc;
4726*d14abf15SRobert Mustacchi 
4727*d14abf15SRobert Mustacchi 	return ECORE_PENDING;
4728*d14abf15SRobert Mustacchi }
4729*d14abf15SRobert Mustacchi 
4730*d14abf15SRobert Mustacchi void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4731*d14abf15SRobert Mustacchi 			     u8 *ind_table)
4732*d14abf15SRobert Mustacchi {
4733*d14abf15SRobert Mustacchi 	mm_memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4734*d14abf15SRobert Mustacchi }
4735*d14abf15SRobert Mustacchi 
4736*d14abf15SRobert Mustacchi int ecore_config_rss(struct _lm_device_t *pdev,
4737*d14abf15SRobert Mustacchi 		     struct ecore_config_rss_params *p)
4738*d14abf15SRobert Mustacchi {
4739*d14abf15SRobert Mustacchi 	int rc;
4740*d14abf15SRobert Mustacchi 	struct ecore_rss_config_obj *o = p->rss_obj;
4741*d14abf15SRobert Mustacchi 	struct ecore_raw_obj *r = &o->raw;
4742*d14abf15SRobert Mustacchi 
4743*d14abf15SRobert Mustacchi 	/* Do nothing if only driver cleanup was requested */
4744*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4745*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Not configuring RSS ramrod_flags=%lx\n",
4746*d14abf15SRobert Mustacchi 			  p->ramrod_flags);
4747*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
4748*d14abf15SRobert Mustacchi 	}
4749*d14abf15SRobert Mustacchi 
4750*d14abf15SRobert Mustacchi 	r->set_pending(r);
4751*d14abf15SRobert Mustacchi 
4752*d14abf15SRobert Mustacchi 	rc = o->config_rss(pdev, p);
4753*d14abf15SRobert Mustacchi 	if (rc < 0) {
4754*d14abf15SRobert Mustacchi 		r->clear_pending(r);
4755*d14abf15SRobert Mustacchi 		return rc;
4756*d14abf15SRobert Mustacchi 	}
4757*d14abf15SRobert Mustacchi 
4758*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4759*d14abf15SRobert Mustacchi 		rc = r->wait_comp(pdev, r);
4760*d14abf15SRobert Mustacchi 
4761*d14abf15SRobert Mustacchi 	return rc;
4762*d14abf15SRobert Mustacchi }
4763*d14abf15SRobert Mustacchi 
4764*d14abf15SRobert Mustacchi void ecore_init_rss_config_obj(struct _lm_device_t *pdev,
4765*d14abf15SRobert Mustacchi 			       struct ecore_rss_config_obj *rss_obj,
4766*d14abf15SRobert Mustacchi 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4767*d14abf15SRobert Mustacchi 			       void *rdata, lm_address_t rdata_mapping,
4768*d14abf15SRobert Mustacchi 			       int state, unsigned long *pstate,
4769*d14abf15SRobert Mustacchi 			       ecore_obj_type type)
4770*d14abf15SRobert Mustacchi {
4771*d14abf15SRobert Mustacchi 	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4772*d14abf15SRobert Mustacchi 			   rdata_mapping, state, pstate, type);
4773*d14abf15SRobert Mustacchi 
4774*d14abf15SRobert Mustacchi 	rss_obj->engine_id  = engine_id;
4775*d14abf15SRobert Mustacchi 	rss_obj->config_rss = ecore_setup_rss;
4776*d14abf15SRobert Mustacchi }
4777*d14abf15SRobert Mustacchi 
4778*d14abf15SRobert Mustacchi #ifdef ECORE_ERASE
4779*d14abf15SRobert Mustacchi /********************** Queue state object ***********************************/
4780*d14abf15SRobert Mustacchi 
4781*d14abf15SRobert Mustacchi /**
4782*d14abf15SRobert Mustacchi  * ecore_queue_state_change - perform Queue state change transition
4783*d14abf15SRobert Mustacchi  *
4784*d14abf15SRobert Mustacchi  * @pdev:	device handle
4785*d14abf15SRobert Mustacchi  * @params:	parameters to perform the transition
4786*d14abf15SRobert Mustacchi  *
4787*d14abf15SRobert Mustacchi  * returns 0 in case of successfully completed transition, negative error
4788*d14abf15SRobert Mustacchi  * code in case of failure, positive (EBUSY) value if there is a completion
4789*d14abf15SRobert Mustacchi  * to that is still pending (possible only if RAMROD_COMP_WAIT is
4790*d14abf15SRobert Mustacchi  * not set in params->ramrod_flags for asynchronous commands).
4791*d14abf15SRobert Mustacchi  *
4792*d14abf15SRobert Mustacchi  */
4793*d14abf15SRobert Mustacchi int ecore_queue_state_change(struct _lm_device_t *pdev,
4794*d14abf15SRobert Mustacchi 			     struct ecore_queue_state_params *params)
4795*d14abf15SRobert Mustacchi {
4796*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
4797*d14abf15SRobert Mustacchi 	int rc, pending_bit;
4798*d14abf15SRobert Mustacchi 	unsigned long *pending = &o->pending;
4799*d14abf15SRobert Mustacchi 
4800*d14abf15SRobert Mustacchi 	/* Check that the requested transition is legal */
4801*d14abf15SRobert Mustacchi 	rc = o->check_transition(pdev, o, params);
4802*d14abf15SRobert Mustacchi 	if (rc) {
4803*d14abf15SRobert Mustacchi 		ECORE_ERR("check transition returned an error. rc %d\n", rc);
4804*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
4805*d14abf15SRobert Mustacchi 	}
4806*d14abf15SRobert Mustacchi 
4807*d14abf15SRobert Mustacchi 	/* Set "pending" bit */
4808*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "pending bit was=%lx\n", o->pending);
4809*d14abf15SRobert Mustacchi 	pending_bit = o->set_pending(o, params);
4810*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "pending bit now=%lx\n", o->pending);
4811*d14abf15SRobert Mustacchi 
4812*d14abf15SRobert Mustacchi 	/* Don't send a command if only driver cleanup was requested */
4813*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4814*d14abf15SRobert Mustacchi 		o->complete_cmd(pdev, o, pending_bit);
4815*d14abf15SRobert Mustacchi 	else {
4816*d14abf15SRobert Mustacchi 		/* Send a ramrod */
4817*d14abf15SRobert Mustacchi 		rc = o->send_cmd(pdev, params);
4818*d14abf15SRobert Mustacchi 		if (rc) {
4819*d14abf15SRobert Mustacchi 			o->next_state = ECORE_Q_STATE_MAX;
4820*d14abf15SRobert Mustacchi 			ECORE_CLEAR_BIT(pending_bit, pending);
4821*d14abf15SRobert Mustacchi 			smp_mb__after_atomic();
4822*d14abf15SRobert Mustacchi 			return rc;
4823*d14abf15SRobert Mustacchi 		}
4824*d14abf15SRobert Mustacchi 
4825*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4826*d14abf15SRobert Mustacchi 			rc = o->wait_comp(pdev, o, pending_bit);
4827*d14abf15SRobert Mustacchi 			if (rc)
4828*d14abf15SRobert Mustacchi 				return rc;
4829*d14abf15SRobert Mustacchi 
4830*d14abf15SRobert Mustacchi 			return ECORE_SUCCESS;
4831*d14abf15SRobert Mustacchi 		}
4832*d14abf15SRobert Mustacchi 	}
4833*d14abf15SRobert Mustacchi 
4834*d14abf15SRobert Mustacchi 	return ECORE_RET_PENDING(pending_bit, pending);
4835*d14abf15SRobert Mustacchi }
4836*d14abf15SRobert Mustacchi 
4837*d14abf15SRobert Mustacchi static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4838*d14abf15SRobert Mustacchi 				   struct ecore_queue_state_params *params)
4839*d14abf15SRobert Mustacchi {
4840*d14abf15SRobert Mustacchi 	enum ecore_queue_cmd cmd = params->cmd, bit;
4841*d14abf15SRobert Mustacchi 
4842*d14abf15SRobert Mustacchi 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4843*d14abf15SRobert Mustacchi 	 * UPDATE command.
4844*d14abf15SRobert Mustacchi 	 */
4845*d14abf15SRobert Mustacchi 	if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4846*d14abf15SRobert Mustacchi 	    (cmd == ECORE_Q_CMD_DEACTIVATE))
4847*d14abf15SRobert Mustacchi 		bit = ECORE_Q_CMD_UPDATE;
4848*d14abf15SRobert Mustacchi 	else
4849*d14abf15SRobert Mustacchi 		bit = cmd;
4850*d14abf15SRobert Mustacchi 
4851*d14abf15SRobert Mustacchi 	ECORE_SET_BIT(bit, &obj->pending);
4852*d14abf15SRobert Mustacchi 	return bit;
4853*d14abf15SRobert Mustacchi }
4854*d14abf15SRobert Mustacchi 
4855*d14abf15SRobert Mustacchi static int ecore_queue_wait_comp(struct _lm_device_t *pdev,
4856*d14abf15SRobert Mustacchi 				 struct ecore_queue_sp_obj *o,
4857*d14abf15SRobert Mustacchi 				 enum ecore_queue_cmd cmd)
4858*d14abf15SRobert Mustacchi {
4859*d14abf15SRobert Mustacchi 	return ecore_state_wait(pdev, cmd, &o->pending);
4860*d14abf15SRobert Mustacchi }
4861*d14abf15SRobert Mustacchi 
4862*d14abf15SRobert Mustacchi /**
4863*d14abf15SRobert Mustacchi  * ecore_queue_comp_cmd - complete the state change command.
4864*d14abf15SRobert Mustacchi  *
4865*d14abf15SRobert Mustacchi  * @pdev:	device handle
4866*d14abf15SRobert Mustacchi  * @o:
4867*d14abf15SRobert Mustacchi  * @cmd:
4868*d14abf15SRobert Mustacchi  *
4869*d14abf15SRobert Mustacchi  * Checks that the arrived completion is expected.
4870*d14abf15SRobert Mustacchi  */
4871*d14abf15SRobert Mustacchi static int ecore_queue_comp_cmd(struct _lm_device_t *pdev,
4872*d14abf15SRobert Mustacchi 				struct ecore_queue_sp_obj *o,
4873*d14abf15SRobert Mustacchi 				enum ecore_queue_cmd cmd)
4874*d14abf15SRobert Mustacchi {
4875*d14abf15SRobert Mustacchi 	unsigned long cur_pending = o->pending;
4876*d14abf15SRobert Mustacchi 
4877*d14abf15SRobert Mustacchi 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4878*d14abf15SRobert Mustacchi 		ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4879*d14abf15SRobert Mustacchi 			  cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4880*d14abf15SRobert Mustacchi 			  o->state, cur_pending, o->next_state);
4881*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
4882*d14abf15SRobert Mustacchi 	}
4883*d14abf15SRobert Mustacchi 
4884*d14abf15SRobert Mustacchi 	if (o->next_tx_only >= o->max_cos)
4885*d14abf15SRobert Mustacchi 		/* >= because tx only must always be smaller than cos since the
4886*d14abf15SRobert Mustacchi 		 * primary connection supports COS 0
4887*d14abf15SRobert Mustacchi 		 */
4888*d14abf15SRobert Mustacchi 		ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
4889*d14abf15SRobert Mustacchi 			  o->next_tx_only, o->max_cos);
4890*d14abf15SRobert Mustacchi 
4891*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev,
4892*d14abf15SRobert Mustacchi 		  "Completing command %d for queue %d, setting state to %d\n",
4893*d14abf15SRobert Mustacchi 		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4894*d14abf15SRobert Mustacchi 
4895*d14abf15SRobert Mustacchi 	if (o->next_tx_only)  /* print num tx-only if any exist */
4896*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "primary cid %d: num tx-only cons %d\n",
4897*d14abf15SRobert Mustacchi 			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4898*d14abf15SRobert Mustacchi 
4899*d14abf15SRobert Mustacchi 	o->state = o->next_state;
4900*d14abf15SRobert Mustacchi 	o->num_tx_only = o->next_tx_only;
4901*d14abf15SRobert Mustacchi 	o->next_state = ECORE_Q_STATE_MAX;
4902*d14abf15SRobert Mustacchi 
4903*d14abf15SRobert Mustacchi 	/* It's important that o->state and o->next_state are
4904*d14abf15SRobert Mustacchi 	 * updated before o->pending.
4905*d14abf15SRobert Mustacchi 	 */
4906*d14abf15SRobert Mustacchi 	wmb();
4907*d14abf15SRobert Mustacchi 
4908*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT(cmd, &o->pending);
4909*d14abf15SRobert Mustacchi 	smp_mb__after_atomic();
4910*d14abf15SRobert Mustacchi 
4911*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
4912*d14abf15SRobert Mustacchi }
4913*d14abf15SRobert Mustacchi 
4914*d14abf15SRobert Mustacchi static void ecore_q_fill_setup_data_e2(struct _lm_device_t *pdev,
4915*d14abf15SRobert Mustacchi 				struct ecore_queue_state_params *cmd_params,
4916*d14abf15SRobert Mustacchi 				struct client_init_ramrod_data *data)
4917*d14abf15SRobert Mustacchi {
4918*d14abf15SRobert Mustacchi 	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4919*d14abf15SRobert Mustacchi 
4920*d14abf15SRobert Mustacchi 	/* Rx data */
4921*d14abf15SRobert Mustacchi 
4922*d14abf15SRobert Mustacchi 	/* IPv6 TPA supported for E2 and above only */
4923*d14abf15SRobert Mustacchi 	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4924*d14abf15SRobert Mustacchi 					  &params->flags) *
4925*d14abf15SRobert Mustacchi 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4926*d14abf15SRobert Mustacchi }
4927*d14abf15SRobert Mustacchi 
4928*d14abf15SRobert Mustacchi static void ecore_q_fill_init_general_data(struct _lm_device_t *pdev,
4929*d14abf15SRobert Mustacchi 				struct ecore_queue_sp_obj *o,
4930*d14abf15SRobert Mustacchi 				struct ecore_general_setup_params *params,
4931*d14abf15SRobert Mustacchi 				struct client_init_general_data *gen_data,
4932*d14abf15SRobert Mustacchi 				unsigned long *flags)
4933*d14abf15SRobert Mustacchi {
4934*d14abf15SRobert Mustacchi 	gen_data->client_id = o->cl_id;
4935*d14abf15SRobert Mustacchi 
4936*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4937*d14abf15SRobert Mustacchi 		gen_data->statistics_counter_id =
4938*d14abf15SRobert Mustacchi 					params->stat_id;
4939*d14abf15SRobert Mustacchi 		gen_data->statistics_en_flg = 1;
4940*d14abf15SRobert Mustacchi 		gen_data->statistics_zero_flg =
4941*d14abf15SRobert Mustacchi 			ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4942*d14abf15SRobert Mustacchi 	} else
4943*d14abf15SRobert Mustacchi 		gen_data->statistics_counter_id =
4944*d14abf15SRobert Mustacchi 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4945*d14abf15SRobert Mustacchi 
4946*d14abf15SRobert Mustacchi 	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4947*d14abf15SRobert Mustacchi 						   flags);
4948*d14abf15SRobert Mustacchi 	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4949*d14abf15SRobert Mustacchi 						    flags);
4950*d14abf15SRobert Mustacchi 	gen_data->sp_client_id = params->spcl_id;
4951*d14abf15SRobert Mustacchi 	gen_data->mtu = mm_cpu_to_le16(params->mtu);
4952*d14abf15SRobert Mustacchi 	gen_data->func_id = o->func_id;
4953*d14abf15SRobert Mustacchi 
4954*d14abf15SRobert Mustacchi 	gen_data->cos = params->cos;
4955*d14abf15SRobert Mustacchi 
4956*d14abf15SRobert Mustacchi 	gen_data->traffic_type =
4957*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4958*d14abf15SRobert Mustacchi 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4959*d14abf15SRobert Mustacchi 
4960*d14abf15SRobert Mustacchi 	gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
4961*d14abf15SRobert Mustacchi 
4962*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "flags: active %d, cos %d, stats en %d\n",
4963*d14abf15SRobert Mustacchi 		  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4964*d14abf15SRobert Mustacchi }
4965*d14abf15SRobert Mustacchi 
4966*d14abf15SRobert Mustacchi static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4967*d14abf15SRobert Mustacchi 				struct ecore_txq_setup_params *params,
4968*d14abf15SRobert Mustacchi 				struct client_init_tx_data *tx_data,
4969*d14abf15SRobert Mustacchi 				unsigned long *flags)
4970*d14abf15SRobert Mustacchi {
4971*d14abf15SRobert Mustacchi 	tx_data->enforce_security_flg =
4972*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4973*d14abf15SRobert Mustacchi 	tx_data->default_vlan =
4974*d14abf15SRobert Mustacchi 		mm_cpu_to_le16(params->default_vlan);
4975*d14abf15SRobert Mustacchi 	tx_data->default_vlan_flg =
4976*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4977*d14abf15SRobert Mustacchi 	tx_data->tx_switching_flg =
4978*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4979*d14abf15SRobert Mustacchi 	tx_data->anti_spoofing_flg =
4980*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4981*d14abf15SRobert Mustacchi 	tx_data->force_default_pri_flg =
4982*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4983*d14abf15SRobert Mustacchi 	tx_data->refuse_outband_vlan_flg =
4984*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4985*d14abf15SRobert Mustacchi 	tx_data->tunnel_lso_inc_ip_id =
4986*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4987*d14abf15SRobert Mustacchi 	tx_data->tunnel_non_lso_pcsum_location =
4988*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4989*d14abf15SRobert Mustacchi 							    CSUM_ON_BD;
4990*d14abf15SRobert Mustacchi 
4991*d14abf15SRobert Mustacchi 	tx_data->tx_status_block_id = params->fw_sb_id;
4992*d14abf15SRobert Mustacchi 	tx_data->tx_sb_index_number = params->sb_cq_index;
4993*d14abf15SRobert Mustacchi 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4994*d14abf15SRobert Mustacchi 
4995*d14abf15SRobert Mustacchi 	tx_data->tx_bd_page_base.lo =
4996*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_LO(params->dscr_map.as_u64));
4997*d14abf15SRobert Mustacchi 	tx_data->tx_bd_page_base.hi =
4998*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_HI(params->dscr_map.as_u64));
4999*d14abf15SRobert Mustacchi 
5000*d14abf15SRobert Mustacchi 	/* Don't configure any Tx switching mode during queue SETUP */
5001*d14abf15SRobert Mustacchi 	tx_data->state = 0;
5002*d14abf15SRobert Mustacchi }
5003*d14abf15SRobert Mustacchi 
5004*d14abf15SRobert Mustacchi static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
5005*d14abf15SRobert Mustacchi 				struct rxq_pause_params *params,
5006*d14abf15SRobert Mustacchi 				struct client_init_rx_data *rx_data)
5007*d14abf15SRobert Mustacchi {
5008*d14abf15SRobert Mustacchi 	/* flow control data */
5009*d14abf15SRobert Mustacchi 	rx_data->cqe_pause_thr_low = mm_cpu_to_le16(params->rcq_th_lo);
5010*d14abf15SRobert Mustacchi 	rx_data->cqe_pause_thr_high = mm_cpu_to_le16(params->rcq_th_hi);
5011*d14abf15SRobert Mustacchi 	rx_data->bd_pause_thr_low = mm_cpu_to_le16(params->bd_th_lo);
5012*d14abf15SRobert Mustacchi 	rx_data->bd_pause_thr_high = mm_cpu_to_le16(params->bd_th_hi);
5013*d14abf15SRobert Mustacchi 	rx_data->sge_pause_thr_low = mm_cpu_to_le16(params->sge_th_lo);
5014*d14abf15SRobert Mustacchi 	rx_data->sge_pause_thr_high = mm_cpu_to_le16(params->sge_th_hi);
5015*d14abf15SRobert Mustacchi 	rx_data->rx_cos_mask = mm_cpu_to_le16(params->pri_map);
5016*d14abf15SRobert Mustacchi }
5017*d14abf15SRobert Mustacchi 
5018*d14abf15SRobert Mustacchi static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
5019*d14abf15SRobert Mustacchi 				struct ecore_rxq_setup_params *params,
5020*d14abf15SRobert Mustacchi 				struct client_init_rx_data *rx_data,
5021*d14abf15SRobert Mustacchi 				unsigned long *flags)
5022*d14abf15SRobert Mustacchi {
5023*d14abf15SRobert Mustacchi 	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
5024*d14abf15SRobert Mustacchi 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
5025*d14abf15SRobert Mustacchi 	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
5026*d14abf15SRobert Mustacchi 				CLIENT_INIT_RX_DATA_TPA_MODE;
5027*d14abf15SRobert Mustacchi #ifdef ECORE_UPSTREAM /* ECORE_UPSTREAM */
5028*d14abf15SRobert Mustacchi 	rx_data->vmqueue_mode_en_flg = 0;
5029*d14abf15SRobert Mustacchi #else
5030*d14abf15SRobert Mustacchi 	rx_data->vmqueue_mode_en_flg =
5031*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_VMQUEUE_MODE, flags);
5032*d14abf15SRobert Mustacchi #endif
5033*d14abf15SRobert Mustacchi 
5034*d14abf15SRobert Mustacchi #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
5035*d14abf15SRobert Mustacchi 	rx_data->extra_data_over_sgl_en_flg =
5036*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
5037*d14abf15SRobert Mustacchi #endif
5038*d14abf15SRobert Mustacchi 	rx_data->cache_line_alignment_log_size =
5039*d14abf15SRobert Mustacchi 		params->cache_line_log;
5040*d14abf15SRobert Mustacchi 	rx_data->enable_dynamic_hc =
5041*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
5042*d14abf15SRobert Mustacchi 	rx_data->max_sges_for_packet = params->max_sges_pkt;
5043*d14abf15SRobert Mustacchi 	rx_data->client_qzone_id = params->cl_qzone_id;
5044*d14abf15SRobert Mustacchi 	rx_data->max_agg_size = mm_cpu_to_le16(params->tpa_agg_sz);
5045*d14abf15SRobert Mustacchi 
5046*d14abf15SRobert Mustacchi 	/* Always start in DROP_ALL mode */
5047*d14abf15SRobert Mustacchi 	rx_data->state = mm_cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
5048*d14abf15SRobert Mustacchi 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
5049*d14abf15SRobert Mustacchi 
5050*d14abf15SRobert Mustacchi 	/* We don't set drop flags */
5051*d14abf15SRobert Mustacchi 	rx_data->drop_ip_cs_err_flg = 0;
5052*d14abf15SRobert Mustacchi 	rx_data->drop_tcp_cs_err_flg = 0;
5053*d14abf15SRobert Mustacchi 	rx_data->drop_ttl0_flg = 0;
5054*d14abf15SRobert Mustacchi 	rx_data->drop_udp_cs_err_flg = 0;
5055*d14abf15SRobert Mustacchi 	rx_data->inner_vlan_removal_enable_flg =
5056*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
5057*d14abf15SRobert Mustacchi 	rx_data->outer_vlan_removal_enable_flg =
5058*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
5059*d14abf15SRobert Mustacchi 	rx_data->status_block_id = params->fw_sb_id;
5060*d14abf15SRobert Mustacchi 	rx_data->rx_sb_index_number = params->sb_cq_index;
5061*d14abf15SRobert Mustacchi 	rx_data->max_tpa_queues = params->max_tpa_queues;
5062*d14abf15SRobert Mustacchi 	rx_data->max_bytes_on_bd = mm_cpu_to_le16(params->buf_sz);
5063*d14abf15SRobert Mustacchi 	rx_data->sge_buff_size = mm_cpu_to_le16(params->sge_buf_sz);
5064*d14abf15SRobert Mustacchi 	rx_data->bd_page_base.lo =
5065*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_LO(params->dscr_map.as_u64));
5066*d14abf15SRobert Mustacchi 	rx_data->bd_page_base.hi =
5067*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_HI(params->dscr_map.as_u64));
5068*d14abf15SRobert Mustacchi 	rx_data->sge_page_base.lo =
5069*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_LO(params->sge_map.as_u64));
5070*d14abf15SRobert Mustacchi 	rx_data->sge_page_base.hi =
5071*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_HI(params->sge_map.as_u64));
5072*d14abf15SRobert Mustacchi 	rx_data->cqe_page_base.lo =
5073*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_LO(params->rcq_map.as_u64));
5074*d14abf15SRobert Mustacchi 	rx_data->cqe_page_base.hi =
5075*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_HI(params->rcq_map.as_u64));
5076*d14abf15SRobert Mustacchi 	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
5077*d14abf15SRobert Mustacchi 						 flags);
5078*d14abf15SRobert Mustacchi 
5079*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
5080*d14abf15SRobert Mustacchi 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
5081*d14abf15SRobert Mustacchi 		rx_data->is_approx_mcast = 1;
5082*d14abf15SRobert Mustacchi 	}
5083*d14abf15SRobert Mustacchi 
5084*d14abf15SRobert Mustacchi 	rx_data->rss_engine_id = params->rss_engine_id;
5085*d14abf15SRobert Mustacchi 
5086*d14abf15SRobert Mustacchi 	/* silent vlan removal */
5087*d14abf15SRobert Mustacchi 	rx_data->silent_vlan_removal_flg =
5088*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
5089*d14abf15SRobert Mustacchi 	rx_data->silent_vlan_value =
5090*d14abf15SRobert Mustacchi 		mm_cpu_to_le16(params->silent_removal_value);
5091*d14abf15SRobert Mustacchi 	rx_data->silent_vlan_mask =
5092*d14abf15SRobert Mustacchi 		mm_cpu_to_le16(params->silent_removal_mask);
5093*d14abf15SRobert Mustacchi }
5094*d14abf15SRobert Mustacchi 
5095*d14abf15SRobert Mustacchi /* initialize the general, tx and rx parts of a queue object */
5096*d14abf15SRobert Mustacchi static void ecore_q_fill_setup_data_cmn(struct _lm_device_t *pdev,
5097*d14abf15SRobert Mustacchi 				struct ecore_queue_state_params *cmd_params,
5098*d14abf15SRobert Mustacchi 				struct client_init_ramrod_data *data)
5099*d14abf15SRobert Mustacchi {
5100*d14abf15SRobert Mustacchi 	ecore_q_fill_init_general_data(pdev, cmd_params->q_obj,
5101*d14abf15SRobert Mustacchi 				       &cmd_params->params.setup.gen_params,
5102*d14abf15SRobert Mustacchi 				       &data->general,
5103*d14abf15SRobert Mustacchi 				       &cmd_params->params.setup.flags);
5104*d14abf15SRobert Mustacchi 
5105*d14abf15SRobert Mustacchi 	ecore_q_fill_init_tx_data(cmd_params->q_obj,
5106*d14abf15SRobert Mustacchi 				  &cmd_params->params.setup.txq_params,
5107*d14abf15SRobert Mustacchi 				  &data->tx,
5108*d14abf15SRobert Mustacchi 				  &cmd_params->params.setup.flags);
5109*d14abf15SRobert Mustacchi 
5110*d14abf15SRobert Mustacchi 	ecore_q_fill_init_rx_data(cmd_params->q_obj,
5111*d14abf15SRobert Mustacchi 				  &cmd_params->params.setup.rxq_params,
5112*d14abf15SRobert Mustacchi 				  &data->rx,
5113*d14abf15SRobert Mustacchi 				  &cmd_params->params.setup.flags);
5114*d14abf15SRobert Mustacchi 
5115*d14abf15SRobert Mustacchi 	ecore_q_fill_init_pause_data(cmd_params->q_obj,
5116*d14abf15SRobert Mustacchi 				     &cmd_params->params.setup.pause_params,
5117*d14abf15SRobert Mustacchi 				     &data->rx);
5118*d14abf15SRobert Mustacchi }
5119*d14abf15SRobert Mustacchi 
5120*d14abf15SRobert Mustacchi /* initialize the general and tx parts of a tx-only queue object */
5121*d14abf15SRobert Mustacchi static void ecore_q_fill_setup_tx_only(struct _lm_device_t *pdev,
5122*d14abf15SRobert Mustacchi 				struct ecore_queue_state_params *cmd_params,
5123*d14abf15SRobert Mustacchi 				struct tx_queue_init_ramrod_data *data)
5124*d14abf15SRobert Mustacchi {
5125*d14abf15SRobert Mustacchi 	ecore_q_fill_init_general_data(pdev, cmd_params->q_obj,
5126*d14abf15SRobert Mustacchi 				       &cmd_params->params.tx_only.gen_params,
5127*d14abf15SRobert Mustacchi 				       &data->general,
5128*d14abf15SRobert Mustacchi 				       &cmd_params->params.tx_only.flags);
5129*d14abf15SRobert Mustacchi 
5130*d14abf15SRobert Mustacchi 	ecore_q_fill_init_tx_data(cmd_params->q_obj,
5131*d14abf15SRobert Mustacchi 				  &cmd_params->params.tx_only.txq_params,
5132*d14abf15SRobert Mustacchi 				  &data->tx,
5133*d14abf15SRobert Mustacchi 				  &cmd_params->params.tx_only.flags);
5134*d14abf15SRobert Mustacchi 
5135*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "cid %d, tx bd page lo %x hi %x",
5136*d14abf15SRobert Mustacchi 		  cmd_params->q_obj->cids[0],
5137*d14abf15SRobert Mustacchi 		  data->tx.tx_bd_page_base.lo,
5138*d14abf15SRobert Mustacchi 		  data->tx.tx_bd_page_base.hi);
5139*d14abf15SRobert Mustacchi }
5140*d14abf15SRobert Mustacchi 
5141*d14abf15SRobert Mustacchi /**
5142*d14abf15SRobert Mustacchi  * ecore_q_init - init HW/FW queue
5143*d14abf15SRobert Mustacchi  *
5144*d14abf15SRobert Mustacchi  * @pdev:	device handle
5145*d14abf15SRobert Mustacchi  * @params:
5146*d14abf15SRobert Mustacchi  *
5147*d14abf15SRobert Mustacchi  * HW/FW initial Queue configuration:
5148*d14abf15SRobert Mustacchi  *      - HC: Rx and Tx
5149*d14abf15SRobert Mustacchi  *      - CDU context validation
5150*d14abf15SRobert Mustacchi  *
5151*d14abf15SRobert Mustacchi  */
5152*d14abf15SRobert Mustacchi static INLINE int ecore_q_init(struct _lm_device_t *pdev,
5153*d14abf15SRobert Mustacchi 			       struct ecore_queue_state_params *params)
5154*d14abf15SRobert Mustacchi {
5155*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5156*d14abf15SRobert Mustacchi 	struct ecore_queue_init_params *init = &params->params.init;
5157*d14abf15SRobert Mustacchi 	u16 hc_usec;
5158*d14abf15SRobert Mustacchi 	u8 cos;
5159*d14abf15SRobert Mustacchi 
5160*d14abf15SRobert Mustacchi 	/* Tx HC configuration */
5161*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
5162*d14abf15SRobert Mustacchi 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
5163*d14abf15SRobert Mustacchi 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
5164*d14abf15SRobert Mustacchi 
5165*d14abf15SRobert Mustacchi 		ECORE_TODO_UPDATE_COALESCE_SB_INDEX(pdev, init->tx.fw_sb_id,
5166*d14abf15SRobert Mustacchi 			init->tx.sb_cq_index,
5167*d14abf15SRobert Mustacchi 			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
5168*d14abf15SRobert Mustacchi 			hc_usec);
5169*d14abf15SRobert Mustacchi 	}
5170*d14abf15SRobert Mustacchi 
5171*d14abf15SRobert Mustacchi 	/* Rx HC configuration */
5172*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
5173*d14abf15SRobert Mustacchi 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
5174*d14abf15SRobert Mustacchi 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
5175*d14abf15SRobert Mustacchi 
5176*d14abf15SRobert Mustacchi 		ECORE_TODO_UPDATE_COALESCE_SB_INDEX(pdev, init->rx.fw_sb_id,
5177*d14abf15SRobert Mustacchi 			init->rx.sb_cq_index,
5178*d14abf15SRobert Mustacchi 			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
5179*d14abf15SRobert Mustacchi 			hc_usec);
5180*d14abf15SRobert Mustacchi 	}
5181*d14abf15SRobert Mustacchi 
5182*d14abf15SRobert Mustacchi 	/* Set CDU context validation values */
5183*d14abf15SRobert Mustacchi 	for (cos = 0; cos < o->max_cos; cos++) {
5184*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "setting context validation. cid %d, cos %d\n",
5185*d14abf15SRobert Mustacchi 			  o->cids[cos], cos);
5186*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "context pointer %p\n", init->cxts[cos]);
5187*d14abf15SRobert Mustacchi 		ECORE_SET_CTX_VALIDATION(pdev, init->cxts[cos], o->cids[cos]);
5188*d14abf15SRobert Mustacchi 	}
5189*d14abf15SRobert Mustacchi 
5190*d14abf15SRobert Mustacchi 	/* As no ramrod is sent, complete the command immediately  */
5191*d14abf15SRobert Mustacchi 	o->complete_cmd(pdev, o, ECORE_Q_CMD_INIT);
5192*d14abf15SRobert Mustacchi 
5193*d14abf15SRobert Mustacchi 	mmiowb();
5194*d14abf15SRobert Mustacchi 	smp_mb();
5195*d14abf15SRobert Mustacchi 
5196*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
5197*d14abf15SRobert Mustacchi }
5198*d14abf15SRobert Mustacchi 
5199*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_setup_e1x(struct _lm_device_t *pdev,
5200*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5201*d14abf15SRobert Mustacchi {
5202*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5203*d14abf15SRobert Mustacchi 	struct client_init_ramrod_data *rdata =
5204*d14abf15SRobert Mustacchi 		(struct client_init_ramrod_data *)o->rdata;
5205*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
5206*d14abf15SRobert Mustacchi 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5207*d14abf15SRobert Mustacchi 
5208*d14abf15SRobert Mustacchi 	/* Clear the ramrod data */
5209*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
5210*d14abf15SRobert Mustacchi 
5211*d14abf15SRobert Mustacchi 	/* Fill the ramrod data */
5212*d14abf15SRobert Mustacchi 	ecore_q_fill_setup_data_cmn(pdev, params, rdata);
5213*d14abf15SRobert Mustacchi 
5214*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5215*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5216*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5217*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5218*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5219*d14abf15SRobert Mustacchi 	 */
5220*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev,
5221*d14abf15SRobert Mustacchi 			     ramrod,
5222*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX],
5223*d14abf15SRobert Mustacchi 			     data_mapping.as_u64,
5224*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5225*d14abf15SRobert Mustacchi }
5226*d14abf15SRobert Mustacchi 
5227*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_setup_e2(struct _lm_device_t *pdev,
5228*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5229*d14abf15SRobert Mustacchi {
5230*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5231*d14abf15SRobert Mustacchi 	struct client_init_ramrod_data *rdata =
5232*d14abf15SRobert Mustacchi 		(struct client_init_ramrod_data *)o->rdata;
5233*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
5234*d14abf15SRobert Mustacchi 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5235*d14abf15SRobert Mustacchi 
5236*d14abf15SRobert Mustacchi 	/* Clear the ramrod data */
5237*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
5238*d14abf15SRobert Mustacchi 
5239*d14abf15SRobert Mustacchi 	/* Fill the ramrod data */
5240*d14abf15SRobert Mustacchi 	ecore_q_fill_setup_data_cmn(pdev, params, rdata);
5241*d14abf15SRobert Mustacchi 	ecore_q_fill_setup_data_e2(pdev, params, rdata);
5242*d14abf15SRobert Mustacchi 
5243*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5244*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5245*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5246*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5247*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5248*d14abf15SRobert Mustacchi 	 */
5249*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev,
5250*d14abf15SRobert Mustacchi 			     ramrod,
5251*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX],
5252*d14abf15SRobert Mustacchi 			     data_mapping.as_u64,
5253*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5254*d14abf15SRobert Mustacchi }
5255*d14abf15SRobert Mustacchi 
5256*d14abf15SRobert Mustacchi static inline int ecore_q_send_setup_tx_only(struct _lm_device_t *pdev,
5257*d14abf15SRobert Mustacchi 				  struct ecore_queue_state_params *params)
5258*d14abf15SRobert Mustacchi {
5259*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5260*d14abf15SRobert Mustacchi 	struct tx_queue_init_ramrod_data *rdata =
5261*d14abf15SRobert Mustacchi 		(struct tx_queue_init_ramrod_data *)o->rdata;
5262*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
5263*d14abf15SRobert Mustacchi 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
5264*d14abf15SRobert Mustacchi 	struct ecore_queue_setup_tx_only_params *tx_only_params =
5265*d14abf15SRobert Mustacchi 		&params->params.tx_only;
5266*d14abf15SRobert Mustacchi 	u8 cid_index = tx_only_params->cid_index;
5267*d14abf15SRobert Mustacchi 
5268*d14abf15SRobert Mustacchi #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
5269*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
5270*d14abf15SRobert Mustacchi 		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
5271*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "sending forward tx-only ramrod");
5272*d14abf15SRobert Mustacchi #endif
5273*d14abf15SRobert Mustacchi 
5274*d14abf15SRobert Mustacchi 	if (cid_index >= o->max_cos) {
5275*d14abf15SRobert Mustacchi 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5276*d14abf15SRobert Mustacchi 			  o->cl_id, cid_index);
5277*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5278*d14abf15SRobert Mustacchi 	}
5279*d14abf15SRobert Mustacchi 
5280*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "parameters received: cos: %d sp-id: %d\n",
5281*d14abf15SRobert Mustacchi 		  tx_only_params->gen_params.cos,
5282*d14abf15SRobert Mustacchi 		  tx_only_params->gen_params.spcl_id);
5283*d14abf15SRobert Mustacchi 
5284*d14abf15SRobert Mustacchi 	/* Clear the ramrod data */
5285*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
5286*d14abf15SRobert Mustacchi 
5287*d14abf15SRobert Mustacchi 	/* Fill the ramrod data */
5288*d14abf15SRobert Mustacchi 	ecore_q_fill_setup_tx_only(pdev, params, rdata);
5289*d14abf15SRobert Mustacchi 
5290*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
5291*d14abf15SRobert Mustacchi 		  o->cids[cid_index], rdata->general.client_id,
5292*d14abf15SRobert Mustacchi 		  rdata->general.sp_client_id, rdata->general.cos);
5293*d14abf15SRobert Mustacchi 
5294*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5295*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5296*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5297*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5298*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5299*d14abf15SRobert Mustacchi 	 */
5300*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, ramrod, o->cids[cid_index],
5301*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, ETH_CONNECTION_TYPE);
5302*d14abf15SRobert Mustacchi }
5303*d14abf15SRobert Mustacchi 
5304*d14abf15SRobert Mustacchi static void ecore_q_fill_update_data(struct _lm_device_t *pdev,
5305*d14abf15SRobert Mustacchi 				     struct ecore_queue_sp_obj *obj,
5306*d14abf15SRobert Mustacchi 				     struct ecore_queue_update_params *params,
5307*d14abf15SRobert Mustacchi 				     struct client_update_ramrod_data *data)
5308*d14abf15SRobert Mustacchi {
5309*d14abf15SRobert Mustacchi 	/* Client ID of the client to update */
5310*d14abf15SRobert Mustacchi 	data->client_id = obj->cl_id;
5311*d14abf15SRobert Mustacchi 
5312*d14abf15SRobert Mustacchi 	/* Function ID of the client to update */
5313*d14abf15SRobert Mustacchi 	data->func_id = obj->func_id;
5314*d14abf15SRobert Mustacchi 
5315*d14abf15SRobert Mustacchi 	/* Default VLAN value */
5316*d14abf15SRobert Mustacchi 	data->default_vlan = mm_cpu_to_le16(params->def_vlan);
5317*d14abf15SRobert Mustacchi 
5318*d14abf15SRobert Mustacchi 	/* Inner VLAN stripping */
5319*d14abf15SRobert Mustacchi 	data->inner_vlan_removal_enable_flg =
5320*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5321*d14abf15SRobert Mustacchi 			       &params->update_flags);
5322*d14abf15SRobert Mustacchi 	data->inner_vlan_removal_change_flg =
5323*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5324*d14abf15SRobert Mustacchi 		       &params->update_flags);
5325*d14abf15SRobert Mustacchi 
5326*d14abf15SRobert Mustacchi 	/* Outer VLAN stripping */
5327*d14abf15SRobert Mustacchi 	data->outer_vlan_removal_enable_flg =
5328*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5329*d14abf15SRobert Mustacchi 			       &params->update_flags);
5330*d14abf15SRobert Mustacchi 	data->outer_vlan_removal_change_flg =
5331*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5332*d14abf15SRobert Mustacchi 		       &params->update_flags);
5333*d14abf15SRobert Mustacchi 
5334*d14abf15SRobert Mustacchi 	/* Drop packets that have source MAC that doesn't belong to this
5335*d14abf15SRobert Mustacchi 	 * Queue.
5336*d14abf15SRobert Mustacchi 	 */
5337*d14abf15SRobert Mustacchi 	data->anti_spoofing_enable_flg =
5338*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5339*d14abf15SRobert Mustacchi 			       &params->update_flags);
5340*d14abf15SRobert Mustacchi 	data->anti_spoofing_change_flg =
5341*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5342*d14abf15SRobert Mustacchi 		       &params->update_flags);
5343*d14abf15SRobert Mustacchi 
5344*d14abf15SRobert Mustacchi 	/* Activate/Deactivate */
5345*d14abf15SRobert Mustacchi 	data->activate_flg =
5346*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5347*d14abf15SRobert Mustacchi 	data->activate_change_flg =
5348*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5349*d14abf15SRobert Mustacchi 			       &params->update_flags);
5350*d14abf15SRobert Mustacchi 
5351*d14abf15SRobert Mustacchi 	/* Enable default VLAN */
5352*d14abf15SRobert Mustacchi 	data->default_vlan_enable_flg =
5353*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5354*d14abf15SRobert Mustacchi 			       &params->update_flags);
5355*d14abf15SRobert Mustacchi 	data->default_vlan_change_flg =
5356*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5357*d14abf15SRobert Mustacchi 		       &params->update_flags);
5358*d14abf15SRobert Mustacchi 
5359*d14abf15SRobert Mustacchi 	/* silent vlan removal */
5360*d14abf15SRobert Mustacchi 	data->silent_vlan_change_flg =
5361*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5362*d14abf15SRobert Mustacchi 			       &params->update_flags);
5363*d14abf15SRobert Mustacchi 	data->silent_vlan_removal_flg =
5364*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5365*d14abf15SRobert Mustacchi 			       &params->update_flags);
5366*d14abf15SRobert Mustacchi 	data->silent_vlan_value = mm_cpu_to_le16(params->silent_removal_value);
5367*d14abf15SRobert Mustacchi 	data->silent_vlan_mask = mm_cpu_to_le16(params->silent_removal_mask);
5368*d14abf15SRobert Mustacchi 
5369*d14abf15SRobert Mustacchi 	/* tx switching */
5370*d14abf15SRobert Mustacchi 	data->tx_switching_flg =
5371*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5372*d14abf15SRobert Mustacchi 			       &params->update_flags);
5373*d14abf15SRobert Mustacchi 	data->tx_switching_change_flg =
5374*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5375*d14abf15SRobert Mustacchi 			       &params->update_flags);
5376*d14abf15SRobert Mustacchi 
5377*d14abf15SRobert Mustacchi 	/* PTP */
5378*d14abf15SRobert Mustacchi 	data->handle_ptp_pkts_flg =
5379*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS,
5380*d14abf15SRobert Mustacchi 			       &params->update_flags);
5381*d14abf15SRobert Mustacchi 	data->handle_ptp_pkts_change_flg =
5382*d14abf15SRobert Mustacchi 		ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
5383*d14abf15SRobert Mustacchi 			       &params->update_flags);
5384*d14abf15SRobert Mustacchi }
5385*d14abf15SRobert Mustacchi 
5386*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_update(struct _lm_device_t *pdev,
5387*d14abf15SRobert Mustacchi 				      struct ecore_queue_state_params *params)
5388*d14abf15SRobert Mustacchi {
5389*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5390*d14abf15SRobert Mustacchi 	struct client_update_ramrod_data *rdata =
5391*d14abf15SRobert Mustacchi 		(struct client_update_ramrod_data *)o->rdata;
5392*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
5393*d14abf15SRobert Mustacchi 	struct ecore_queue_update_params *update_params =
5394*d14abf15SRobert Mustacchi 		&params->params.update;
5395*d14abf15SRobert Mustacchi 	u8 cid_index = update_params->cid_index;
5396*d14abf15SRobert Mustacchi 
5397*d14abf15SRobert Mustacchi 	if (cid_index >= o->max_cos) {
5398*d14abf15SRobert Mustacchi 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5399*d14abf15SRobert Mustacchi 			  o->cl_id, cid_index);
5400*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5401*d14abf15SRobert Mustacchi 	}
5402*d14abf15SRobert Mustacchi 
5403*d14abf15SRobert Mustacchi 	/* Clear the ramrod data */
5404*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
5405*d14abf15SRobert Mustacchi 
5406*d14abf15SRobert Mustacchi 	/* Fill the ramrod data */
5407*d14abf15SRobert Mustacchi 	ecore_q_fill_update_data(pdev, o, update_params, rdata);
5408*d14abf15SRobert Mustacchi 
5409*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5410*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5411*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5412*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5413*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5414*d14abf15SRobert Mustacchi 	 */
5415*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5416*d14abf15SRobert Mustacchi 			     o->cids[cid_index], data_mapping.as_u64,
5417*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5418*d14abf15SRobert Mustacchi }
5419*d14abf15SRobert Mustacchi 
5420*d14abf15SRobert Mustacchi /**
5421*d14abf15SRobert Mustacchi  * ecore_q_send_deactivate - send DEACTIVATE command
5422*d14abf15SRobert Mustacchi  *
5423*d14abf15SRobert Mustacchi  * @pdev:	device handle
5424*d14abf15SRobert Mustacchi  * @params:
5425*d14abf15SRobert Mustacchi  *
5426*d14abf15SRobert Mustacchi  * implemented using the UPDATE command.
5427*d14abf15SRobert Mustacchi  */
5428*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_deactivate(struct _lm_device_t *pdev,
5429*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5430*d14abf15SRobert Mustacchi {
5431*d14abf15SRobert Mustacchi 	struct ecore_queue_update_params *update = &params->params.update;
5432*d14abf15SRobert Mustacchi 
5433*d14abf15SRobert Mustacchi 	mm_memset(update, 0, sizeof(*update));
5434*d14abf15SRobert Mustacchi 
5435*d14abf15SRobert Mustacchi 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5436*d14abf15SRobert Mustacchi 
5437*d14abf15SRobert Mustacchi 	return ecore_q_send_update(pdev, params);
5438*d14abf15SRobert Mustacchi }
5439*d14abf15SRobert Mustacchi 
5440*d14abf15SRobert Mustacchi /**
5441*d14abf15SRobert Mustacchi  * ecore_q_send_activate - send ACTIVATE command
5442*d14abf15SRobert Mustacchi  *
5443*d14abf15SRobert Mustacchi  * @pdev:	device handle
5444*d14abf15SRobert Mustacchi  * @params:
5445*d14abf15SRobert Mustacchi  *
5446*d14abf15SRobert Mustacchi  * implemented using the UPDATE command.
5447*d14abf15SRobert Mustacchi  */
5448*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_activate(struct _lm_device_t *pdev,
5449*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5450*d14abf15SRobert Mustacchi {
5451*d14abf15SRobert Mustacchi 	struct ecore_queue_update_params *update = &params->params.update;
5452*d14abf15SRobert Mustacchi 
5453*d14abf15SRobert Mustacchi 	mm_memset(update, 0, sizeof(*update));
5454*d14abf15SRobert Mustacchi 
5455*d14abf15SRobert Mustacchi 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5456*d14abf15SRobert Mustacchi 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5457*d14abf15SRobert Mustacchi 
5458*d14abf15SRobert Mustacchi 	return ecore_q_send_update(pdev, params);
5459*d14abf15SRobert Mustacchi }
5460*d14abf15SRobert Mustacchi 
5461*d14abf15SRobert Mustacchi static void ecore_q_fill_update_tpa_data(struct _lm_device_t *pdev,
5462*d14abf15SRobert Mustacchi 				struct ecore_queue_sp_obj *obj,
5463*d14abf15SRobert Mustacchi 				struct ecore_queue_update_tpa_params *params,
5464*d14abf15SRobert Mustacchi 				struct tpa_update_ramrod_data *data)
5465*d14abf15SRobert Mustacchi {
5466*d14abf15SRobert Mustacchi 	data->client_id = obj->cl_id;
5467*d14abf15SRobert Mustacchi 	data->complete_on_both_clients = params->complete_on_both_clients;
5468*d14abf15SRobert Mustacchi 	data->dont_verify_rings_pause_thr_flg =
5469*d14abf15SRobert Mustacchi 		params->dont_verify_thr;
5470*d14abf15SRobert Mustacchi 	data->max_agg_size = mm_cpu_to_le16(params->max_agg_sz);
5471*d14abf15SRobert Mustacchi 	data->max_sges_for_packet = params->max_sges_pkt;
5472*d14abf15SRobert Mustacchi 	data->max_tpa_queues = params->max_tpa_queues;
5473*d14abf15SRobert Mustacchi 	data->sge_buff_size = mm_cpu_to_le16(params->sge_buff_sz);
5474*d14abf15SRobert Mustacchi 	data->sge_page_base_hi = mm_cpu_to_le32(U64_HI(params->sge_map.as_u64));
5475*d14abf15SRobert Mustacchi 	data->sge_page_base_lo = mm_cpu_to_le32(U64_LO(params->sge_map.as_u64));
5476*d14abf15SRobert Mustacchi 	data->sge_pause_thr_high = mm_cpu_to_le16(params->sge_pause_thr_high);
5477*d14abf15SRobert Mustacchi 	data->sge_pause_thr_low = mm_cpu_to_le16(params->sge_pause_thr_low);
5478*d14abf15SRobert Mustacchi 	data->tpa_mode = params->tpa_mode;
5479*d14abf15SRobert Mustacchi 	data->update_ipv4 = params->update_ipv4;
5480*d14abf15SRobert Mustacchi 	data->update_ipv6 = params->update_ipv6;
5481*d14abf15SRobert Mustacchi }
5482*d14abf15SRobert Mustacchi 
5483*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_update_tpa(struct _lm_device_t *pdev,
5484*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5485*d14abf15SRobert Mustacchi {
5486*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5487*d14abf15SRobert Mustacchi 	struct tpa_update_ramrod_data *rdata =
5488*d14abf15SRobert Mustacchi 		(struct tpa_update_ramrod_data *)o->rdata;
5489*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
5490*d14abf15SRobert Mustacchi 	struct ecore_queue_update_tpa_params *update_tpa_params =
5491*d14abf15SRobert Mustacchi 		&params->params.update_tpa;
5492*d14abf15SRobert Mustacchi 	u16 type;
5493*d14abf15SRobert Mustacchi 
5494*d14abf15SRobert Mustacchi 	/* Clear the ramrod data */
5495*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
5496*d14abf15SRobert Mustacchi 
5497*d14abf15SRobert Mustacchi 	/* Fill the ramrod data */
5498*d14abf15SRobert Mustacchi 	ecore_q_fill_update_tpa_data(pdev, o, update_tpa_params, rdata);
5499*d14abf15SRobert Mustacchi 
5500*d14abf15SRobert Mustacchi 	/* Add the function id inside the type, so that sp post function
5501*d14abf15SRobert Mustacchi 	 * doesn't automatically add the PF func-id, this is required
5502*d14abf15SRobert Mustacchi 	 * for operations done by PFs on behalf of their VFs
5503*d14abf15SRobert Mustacchi 	 */
5504*d14abf15SRobert Mustacchi 	type = ETH_CONNECTION_TYPE |
5505*d14abf15SRobert Mustacchi 		((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
5506*d14abf15SRobert Mustacchi 
5507*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5508*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5509*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5510*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5511*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5512*d14abf15SRobert Mustacchi 	 */
5513*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_TPA_UPDATE,
5514*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX],
5515*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, type);
5516*d14abf15SRobert Mustacchi }
5517*d14abf15SRobert Mustacchi 
5518*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_halt(struct _lm_device_t *pdev,
5519*d14abf15SRobert Mustacchi 				    struct ecore_queue_state_params *params)
5520*d14abf15SRobert Mustacchi {
5521*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5522*d14abf15SRobert Mustacchi 
5523*d14abf15SRobert Mustacchi #if !defined(ECORE_ERASE) || defined(__FreeBSD__)
5524*d14abf15SRobert Mustacchi 	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5525*d14abf15SRobert Mustacchi 	lm_address_t    data_mapping = { {0} };
5526*d14abf15SRobert Mustacchi 	data_mapping.as_u32.low = o->cl_id;
5527*d14abf15SRobert Mustacchi 
5528*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
5529*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
5530*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
5531*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
5532*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
5533*d14abf15SRobert Mustacchi 	 */
5534*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev,
5535*d14abf15SRobert Mustacchi 			     RAMROD_CMD_ID_ETH_HALT,
5536*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX],
5537*d14abf15SRobert Mustacchi 			     data_mapping.as_u64,
5538*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5539*d14abf15SRobert Mustacchi #else
5540*d14abf15SRobert Mustacchi 	return bnx2x_sp_post(pdev, RAMROD_CMD_ID_ETH_HALT,
5541*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX], 0, o->cl_id,
5542*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5543*d14abf15SRobert Mustacchi #endif
5544*d14abf15SRobert Mustacchi }
5545*d14abf15SRobert Mustacchi 
5546*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_cfc_del(struct _lm_device_t *pdev,
5547*d14abf15SRobert Mustacchi 				       struct ecore_queue_state_params *params)
5548*d14abf15SRobert Mustacchi {
5549*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5550*d14abf15SRobert Mustacchi 	u8 cid_idx = params->params.cfc_del.cid_index;
5551*d14abf15SRobert Mustacchi 
5552*d14abf15SRobert Mustacchi 	if (cid_idx >= o->max_cos) {
5553*d14abf15SRobert Mustacchi 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5554*d14abf15SRobert Mustacchi 			  o->cl_id, cid_idx);
5555*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5556*d14abf15SRobert Mustacchi 	}
5557*d14abf15SRobert Mustacchi 
5558*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5559*d14abf15SRobert Mustacchi 			     o->cids[cid_idx], 0,
5560*d14abf15SRobert Mustacchi 			     NONE_CONNECTION_TYPE);
5561*d14abf15SRobert Mustacchi }
5562*d14abf15SRobert Mustacchi 
5563*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_terminate(struct _lm_device_t *pdev,
5564*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5565*d14abf15SRobert Mustacchi {
5566*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5567*d14abf15SRobert Mustacchi 	u8 cid_index = params->params.terminate.cid_index;
5568*d14abf15SRobert Mustacchi 
5569*d14abf15SRobert Mustacchi 	if (cid_index >= o->max_cos) {
5570*d14abf15SRobert Mustacchi 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5571*d14abf15SRobert Mustacchi 			  o->cl_id, cid_index);
5572*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5573*d14abf15SRobert Mustacchi 	}
5574*d14abf15SRobert Mustacchi 
5575*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_TERMINATE,
5576*d14abf15SRobert Mustacchi 			     o->cids[cid_index], 0,
5577*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5578*d14abf15SRobert Mustacchi }
5579*d14abf15SRobert Mustacchi 
5580*d14abf15SRobert Mustacchi static INLINE int ecore_q_send_empty(struct _lm_device_t *pdev,
5581*d14abf15SRobert Mustacchi 				     struct ecore_queue_state_params *params)
5582*d14abf15SRobert Mustacchi {
5583*d14abf15SRobert Mustacchi 	struct ecore_queue_sp_obj *o = params->q_obj;
5584*d14abf15SRobert Mustacchi 
5585*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_ETH_EMPTY,
5586*d14abf15SRobert Mustacchi 			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5587*d14abf15SRobert Mustacchi 			     ETH_CONNECTION_TYPE);
5588*d14abf15SRobert Mustacchi }
5589*d14abf15SRobert Mustacchi 
5590*d14abf15SRobert Mustacchi static INLINE int ecore_queue_send_cmd_cmn(struct _lm_device_t *pdev,
5591*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5592*d14abf15SRobert Mustacchi {
5593*d14abf15SRobert Mustacchi 	switch (params->cmd) {
5594*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_INIT:
5595*d14abf15SRobert Mustacchi 		return ecore_q_init(pdev, params);
5596*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5597*d14abf15SRobert Mustacchi 		return ecore_q_send_setup_tx_only(pdev, params);
5598*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_DEACTIVATE:
5599*d14abf15SRobert Mustacchi 		return ecore_q_send_deactivate(pdev, params);
5600*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_ACTIVATE:
5601*d14abf15SRobert Mustacchi 		return ecore_q_send_activate(pdev, params);
5602*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE:
5603*d14abf15SRobert Mustacchi 		return ecore_q_send_update(pdev, params);
5604*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE_TPA:
5605*d14abf15SRobert Mustacchi 		return ecore_q_send_update_tpa(pdev, params);
5606*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_HALT:
5607*d14abf15SRobert Mustacchi 		return ecore_q_send_halt(pdev, params);
5608*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_CFC_DEL:
5609*d14abf15SRobert Mustacchi 		return ecore_q_send_cfc_del(pdev, params);
5610*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_TERMINATE:
5611*d14abf15SRobert Mustacchi 		return ecore_q_send_terminate(pdev, params);
5612*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_EMPTY:
5613*d14abf15SRobert Mustacchi 		return ecore_q_send_empty(pdev, params);
5614*d14abf15SRobert Mustacchi 	default:
5615*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5616*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5617*d14abf15SRobert Mustacchi 	}
5618*d14abf15SRobert Mustacchi }
5619*d14abf15SRobert Mustacchi 
5620*d14abf15SRobert Mustacchi static int ecore_queue_send_cmd_e1x(struct _lm_device_t *pdev,
5621*d14abf15SRobert Mustacchi 				    struct ecore_queue_state_params *params)
5622*d14abf15SRobert Mustacchi {
5623*d14abf15SRobert Mustacchi 	switch (params->cmd) {
5624*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_SETUP:
5625*d14abf15SRobert Mustacchi 		return ecore_q_send_setup_e1x(pdev, params);
5626*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_INIT:
5627*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5628*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_DEACTIVATE:
5629*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_ACTIVATE:
5630*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE:
5631*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE_TPA:
5632*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_HALT:
5633*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_CFC_DEL:
5634*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_TERMINATE:
5635*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_EMPTY:
5636*d14abf15SRobert Mustacchi 		return ecore_queue_send_cmd_cmn(pdev, params);
5637*d14abf15SRobert Mustacchi 	default:
5638*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5639*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5640*d14abf15SRobert Mustacchi 	}
5641*d14abf15SRobert Mustacchi }
5642*d14abf15SRobert Mustacchi 
5643*d14abf15SRobert Mustacchi static int ecore_queue_send_cmd_e2(struct _lm_device_t *pdev,
5644*d14abf15SRobert Mustacchi 				   struct ecore_queue_state_params *params)
5645*d14abf15SRobert Mustacchi {
5646*d14abf15SRobert Mustacchi 	switch (params->cmd) {
5647*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_SETUP:
5648*d14abf15SRobert Mustacchi 		return ecore_q_send_setup_e2(pdev, params);
5649*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_INIT:
5650*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5651*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_DEACTIVATE:
5652*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_ACTIVATE:
5653*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE:
5654*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_UPDATE_TPA:
5655*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_HALT:
5656*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_CFC_DEL:
5657*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_TERMINATE:
5658*d14abf15SRobert Mustacchi 	case ECORE_Q_CMD_EMPTY:
5659*d14abf15SRobert Mustacchi 		return ecore_queue_send_cmd_cmn(pdev, params);
5660*d14abf15SRobert Mustacchi 	default:
5661*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5662*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5663*d14abf15SRobert Mustacchi 	}
5664*d14abf15SRobert Mustacchi }
5665*d14abf15SRobert Mustacchi 
5666*d14abf15SRobert Mustacchi /**
5667*d14abf15SRobert Mustacchi  * ecore_queue_chk_transition - check state machine of a regular Queue
5668*d14abf15SRobert Mustacchi  *
5669*d14abf15SRobert Mustacchi  * @pdev:	device handle
5670*d14abf15SRobert Mustacchi  * @o:
5671*d14abf15SRobert Mustacchi  * @params:
5672*d14abf15SRobert Mustacchi  *
5673*d14abf15SRobert Mustacchi  * (not Forwarding)
5674*d14abf15SRobert Mustacchi  * It both checks if the requested command is legal in a current
5675*d14abf15SRobert Mustacchi  * state and, if it's legal, sets a `next_state' in the object
5676*d14abf15SRobert Mustacchi  * that will be used in the completion flow to set the `state'
5677*d14abf15SRobert Mustacchi  * of the object.
5678*d14abf15SRobert Mustacchi  *
5679*d14abf15SRobert Mustacchi  * returns 0 if a requested command is a legal transition,
5680*d14abf15SRobert Mustacchi  *         ECORE_INVAL otherwise.
5681*d14abf15SRobert Mustacchi  */
5682*d14abf15SRobert Mustacchi static int ecore_queue_chk_transition(struct _lm_device_t *pdev,
5683*d14abf15SRobert Mustacchi 				      struct ecore_queue_sp_obj *o,
5684*d14abf15SRobert Mustacchi 				      struct ecore_queue_state_params *params)
5685*d14abf15SRobert Mustacchi {
5686*d14abf15SRobert Mustacchi 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5687*d14abf15SRobert Mustacchi 	enum ecore_queue_cmd cmd = params->cmd;
5688*d14abf15SRobert Mustacchi 	struct ecore_queue_update_params *update_params =
5689*d14abf15SRobert Mustacchi 		 &params->params.update;
5690*d14abf15SRobert Mustacchi 	u8 next_tx_only = o->num_tx_only;
5691*d14abf15SRobert Mustacchi 
5692*d14abf15SRobert Mustacchi 	/* Forget all pending for completion commands if a driver only state
5693*d14abf15SRobert Mustacchi 	 * transition has been requested.
5694*d14abf15SRobert Mustacchi 	 */
5695*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5696*d14abf15SRobert Mustacchi 		o->pending = 0;
5697*d14abf15SRobert Mustacchi 		o->next_state = ECORE_Q_STATE_MAX;
5698*d14abf15SRobert Mustacchi 	}
5699*d14abf15SRobert Mustacchi 
5700*d14abf15SRobert Mustacchi 	/* Don't allow a next state transition if we are in the middle of
5701*d14abf15SRobert Mustacchi 	 * the previous one.
5702*d14abf15SRobert Mustacchi 	 */
5703*d14abf15SRobert Mustacchi 	if (o->pending) {
5704*d14abf15SRobert Mustacchi 		ECORE_ERR("Blocking transition since pending was %lx\n",
5705*d14abf15SRobert Mustacchi 			  o->pending);
5706*d14abf15SRobert Mustacchi 		return ECORE_BUSY;
5707*d14abf15SRobert Mustacchi 	}
5708*d14abf15SRobert Mustacchi 
5709*d14abf15SRobert Mustacchi 	switch (state) {
5710*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_RESET:
5711*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_INIT)
5712*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_INITIALIZED;
5713*d14abf15SRobert Mustacchi 
5714*d14abf15SRobert Mustacchi 		break;
5715*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INITIALIZED:
5716*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_SETUP) {
5717*d14abf15SRobert Mustacchi 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5718*d14abf15SRobert Mustacchi 					   &params->params.setup.flags))
5719*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_ACTIVE;
5720*d14abf15SRobert Mustacchi 			else
5721*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_INACTIVE;
5722*d14abf15SRobert Mustacchi 		}
5723*d14abf15SRobert Mustacchi 
5724*d14abf15SRobert Mustacchi 		break;
5725*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_ACTIVE:
5726*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_DEACTIVATE)
5727*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_INACTIVE;
5728*d14abf15SRobert Mustacchi 
5729*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5730*d14abf15SRobert Mustacchi 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5731*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_ACTIVE;
5732*d14abf15SRobert Mustacchi 
5733*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5734*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_MULTI_COS;
5735*d14abf15SRobert Mustacchi 			next_tx_only = 1;
5736*d14abf15SRobert Mustacchi 		}
5737*d14abf15SRobert Mustacchi 
5738*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_HALT)
5739*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_STOPPED;
5740*d14abf15SRobert Mustacchi 
5741*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5742*d14abf15SRobert Mustacchi 			/* If "active" state change is requested, update the
5743*d14abf15SRobert Mustacchi 			 *  state accordingly.
5744*d14abf15SRobert Mustacchi 			 */
5745*d14abf15SRobert Mustacchi 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5746*d14abf15SRobert Mustacchi 					   &update_params->update_flags) &&
5747*d14abf15SRobert Mustacchi 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5748*d14abf15SRobert Mustacchi 					    &update_params->update_flags))
5749*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_INACTIVE;
5750*d14abf15SRobert Mustacchi 			else
5751*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_ACTIVE;
5752*d14abf15SRobert Mustacchi 		}
5753*d14abf15SRobert Mustacchi 
5754*d14abf15SRobert Mustacchi 		break;
5755*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_MULTI_COS:
5756*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_TERMINATE)
5757*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5758*d14abf15SRobert Mustacchi 
5759*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5760*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_MULTI_COS;
5761*d14abf15SRobert Mustacchi 			next_tx_only = o->num_tx_only + 1;
5762*d14abf15SRobert Mustacchi 		}
5763*d14abf15SRobert Mustacchi 
5764*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5765*d14abf15SRobert Mustacchi 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5766*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_MULTI_COS;
5767*d14abf15SRobert Mustacchi 
5768*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5769*d14abf15SRobert Mustacchi 			/* If "active" state change is requested, update the
5770*d14abf15SRobert Mustacchi 			 *  state accordingly.
5771*d14abf15SRobert Mustacchi 			 */
5772*d14abf15SRobert Mustacchi 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5773*d14abf15SRobert Mustacchi 					   &update_params->update_flags) &&
5774*d14abf15SRobert Mustacchi 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5775*d14abf15SRobert Mustacchi 					    &update_params->update_flags))
5776*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_INACTIVE;
5777*d14abf15SRobert Mustacchi 			else
5778*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_MULTI_COS;
5779*d14abf15SRobert Mustacchi 		}
5780*d14abf15SRobert Mustacchi 
5781*d14abf15SRobert Mustacchi 		break;
5782*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_MCOS_TERMINATED:
5783*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_CFC_DEL) {
5784*d14abf15SRobert Mustacchi 			next_tx_only = o->num_tx_only - 1;
5785*d14abf15SRobert Mustacchi 			if (next_tx_only == 0)
5786*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_ACTIVE;
5787*d14abf15SRobert Mustacchi 			else
5788*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_MULTI_COS;
5789*d14abf15SRobert Mustacchi 		}
5790*d14abf15SRobert Mustacchi 
5791*d14abf15SRobert Mustacchi 		break;
5792*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INACTIVE:
5793*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_ACTIVATE)
5794*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_ACTIVE;
5795*d14abf15SRobert Mustacchi 
5796*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5797*d14abf15SRobert Mustacchi 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5798*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_INACTIVE;
5799*d14abf15SRobert Mustacchi 
5800*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_HALT)
5801*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_STOPPED;
5802*d14abf15SRobert Mustacchi 
5803*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5804*d14abf15SRobert Mustacchi 			/* If "active" state change is requested, update the
5805*d14abf15SRobert Mustacchi 			 * state accordingly.
5806*d14abf15SRobert Mustacchi 			 */
5807*d14abf15SRobert Mustacchi 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5808*d14abf15SRobert Mustacchi 					   &update_params->update_flags) &&
5809*d14abf15SRobert Mustacchi 			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5810*d14abf15SRobert Mustacchi 					   &update_params->update_flags)){
5811*d14abf15SRobert Mustacchi 				if (o->num_tx_only == 0)
5812*d14abf15SRobert Mustacchi 					next_state = ECORE_Q_STATE_ACTIVE;
5813*d14abf15SRobert Mustacchi 				else /* tx only queues exist for this queue */
5814*d14abf15SRobert Mustacchi 					next_state = ECORE_Q_STATE_MULTI_COS;
5815*d14abf15SRobert Mustacchi 			} else
5816*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_INACTIVE;
5817*d14abf15SRobert Mustacchi 		}
5818*d14abf15SRobert Mustacchi 
5819*d14abf15SRobert Mustacchi 		break;
5820*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_STOPPED:
5821*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_TERMINATE)
5822*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_TERMINATED;
5823*d14abf15SRobert Mustacchi 
5824*d14abf15SRobert Mustacchi 		break;
5825*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_TERMINATED:
5826*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_CFC_DEL)
5827*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_RESET;
5828*d14abf15SRobert Mustacchi 
5829*d14abf15SRobert Mustacchi 		break;
5830*d14abf15SRobert Mustacchi 	default:
5831*d14abf15SRobert Mustacchi 		ECORE_ERR("Illegal state: %d\n", state);
5832*d14abf15SRobert Mustacchi 	}
5833*d14abf15SRobert Mustacchi 
5834*d14abf15SRobert Mustacchi 	/* Transition is assured */
5835*d14abf15SRobert Mustacchi 	if (next_state != ECORE_Q_STATE_MAX) {
5836*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Good state transition: %d(%d)->%d\n",
5837*d14abf15SRobert Mustacchi 			  state, cmd, next_state);
5838*d14abf15SRobert Mustacchi 		o->next_state = next_state;
5839*d14abf15SRobert Mustacchi 		o->next_tx_only = next_tx_only;
5840*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
5841*d14abf15SRobert Mustacchi 	}
5842*d14abf15SRobert Mustacchi 
5843*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Bad state transition request: %d %d\n", state, cmd);
5844*d14abf15SRobert Mustacchi 
5845*d14abf15SRobert Mustacchi 	return ECORE_INVAL;
5846*d14abf15SRobert Mustacchi }
5847*d14abf15SRobert Mustacchi #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
5848*d14abf15SRobert Mustacchi 
5849*d14abf15SRobert Mustacchi /**
5850*d14abf15SRobert Mustacchi  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5851*d14abf15SRobert Mustacchi  *
5852*d14abf15SRobert Mustacchi  * @pdev:	device handle
5853*d14abf15SRobert Mustacchi  * @o:
5854*d14abf15SRobert Mustacchi  * @params:
5855*d14abf15SRobert Mustacchi  *
5856*d14abf15SRobert Mustacchi  * It both checks if the requested command is legal in a current
5857*d14abf15SRobert Mustacchi  * state and, if it's legal, sets a `next_state' in the object
5858*d14abf15SRobert Mustacchi  * that will be used in the completion flow to set the `state'
5859*d14abf15SRobert Mustacchi  * of the object.
5860*d14abf15SRobert Mustacchi  *
5861*d14abf15SRobert Mustacchi  * returns 0 if a requested command is a legal transition,
5862*d14abf15SRobert Mustacchi  *         ECORE_INVAL otherwise.
5863*d14abf15SRobert Mustacchi  */
5864*d14abf15SRobert Mustacchi static int ecore_queue_chk_fwd_transition(struct _lm_device_t *pdev,
5865*d14abf15SRobert Mustacchi 					  struct ecore_queue_sp_obj *o,
5866*d14abf15SRobert Mustacchi 					struct ecore_queue_state_params *params)
5867*d14abf15SRobert Mustacchi {
5868*d14abf15SRobert Mustacchi 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5869*d14abf15SRobert Mustacchi 	enum ecore_queue_cmd cmd = params->cmd;
5870*d14abf15SRobert Mustacchi 
5871*d14abf15SRobert Mustacchi 	switch (state) {
5872*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_RESET:
5873*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_INIT)
5874*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_INITIALIZED;
5875*d14abf15SRobert Mustacchi 
5876*d14abf15SRobert Mustacchi 		break;
5877*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INITIALIZED:
5878*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5879*d14abf15SRobert Mustacchi 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5880*d14abf15SRobert Mustacchi 					   &params->params.tx_only.flags))
5881*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_ACTIVE;
5882*d14abf15SRobert Mustacchi 			else
5883*d14abf15SRobert Mustacchi 				next_state = ECORE_Q_STATE_INACTIVE;
5884*d14abf15SRobert Mustacchi 		}
5885*d14abf15SRobert Mustacchi 
5886*d14abf15SRobert Mustacchi 		break;
5887*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_ACTIVE:
5888*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INACTIVE:
5889*d14abf15SRobert Mustacchi 		if (cmd == ECORE_Q_CMD_CFC_DEL)
5890*d14abf15SRobert Mustacchi 			next_state = ECORE_Q_STATE_RESET;
5891*d14abf15SRobert Mustacchi 
5892*d14abf15SRobert Mustacchi 		break;
5893*d14abf15SRobert Mustacchi 	default:
5894*d14abf15SRobert Mustacchi 		ECORE_ERR("Illegal state: %d\n", state);
5895*d14abf15SRobert Mustacchi 	}
5896*d14abf15SRobert Mustacchi 
5897*d14abf15SRobert Mustacchi 	/* Transition is assured */
5898*d14abf15SRobert Mustacchi 	if (next_state != ECORE_Q_STATE_MAX) {
5899*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Good state transition: %d(%d)->%d\n",
5900*d14abf15SRobert Mustacchi 			  state, cmd, next_state);
5901*d14abf15SRobert Mustacchi 		o->next_state = next_state;
5902*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
5903*d14abf15SRobert Mustacchi 	}
5904*d14abf15SRobert Mustacchi 
5905*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Bad state transition request: %d %d\n", state, cmd);
5906*d14abf15SRobert Mustacchi 	return ECORE_INVAL;
5907*d14abf15SRobert Mustacchi }
5908*d14abf15SRobert Mustacchi #endif
5909*d14abf15SRobert Mustacchi 
5910*d14abf15SRobert Mustacchi void ecore_init_queue_obj(struct _lm_device_t *pdev,
5911*d14abf15SRobert Mustacchi 			  struct ecore_queue_sp_obj *obj,
5912*d14abf15SRobert Mustacchi 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5913*d14abf15SRobert Mustacchi 			  void *rdata,
5914*d14abf15SRobert Mustacchi 			  lm_address_t rdata_mapping, unsigned long type)
5915*d14abf15SRobert Mustacchi {
5916*d14abf15SRobert Mustacchi 	mm_memset(obj, 0, sizeof(*obj));
5917*d14abf15SRobert Mustacchi 
5918*d14abf15SRobert Mustacchi 	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5919*d14abf15SRobert Mustacchi 	BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5920*d14abf15SRobert Mustacchi 
5921*d14abf15SRobert Mustacchi 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5922*d14abf15SRobert Mustacchi 	obj->max_cos = cid_cnt;
5923*d14abf15SRobert Mustacchi 	obj->cl_id = cl_id;
5924*d14abf15SRobert Mustacchi 	obj->func_id = func_id;
5925*d14abf15SRobert Mustacchi 	obj->rdata = rdata;
5926*d14abf15SRobert Mustacchi 	obj->rdata_mapping = rdata_mapping;
5927*d14abf15SRobert Mustacchi 	obj->type = type;
5928*d14abf15SRobert Mustacchi 	obj->next_state = ECORE_Q_STATE_MAX;
5929*d14abf15SRobert Mustacchi 
5930*d14abf15SRobert Mustacchi 	if (CHIP_IS_E1x(pdev))
5931*d14abf15SRobert Mustacchi 		obj->send_cmd = ecore_queue_send_cmd_e1x;
5932*d14abf15SRobert Mustacchi 	else
5933*d14abf15SRobert Mustacchi 		obj->send_cmd = ecore_queue_send_cmd_e2;
5934*d14abf15SRobert Mustacchi 
5935*d14abf15SRobert Mustacchi #ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
5936*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5937*d14abf15SRobert Mustacchi 		obj->check_transition = ecore_queue_chk_fwd_transition;
5938*d14abf15SRobert Mustacchi 	else
5939*d14abf15SRobert Mustacchi #endif
5940*d14abf15SRobert Mustacchi 	obj->check_transition = ecore_queue_chk_transition;
5941*d14abf15SRobert Mustacchi 
5942*d14abf15SRobert Mustacchi 	obj->complete_cmd = ecore_queue_comp_cmd;
5943*d14abf15SRobert Mustacchi 	obj->wait_comp = ecore_queue_wait_comp;
5944*d14abf15SRobert Mustacchi 	obj->set_pending = ecore_queue_set_pending;
5945*d14abf15SRobert Mustacchi }
5946*d14abf15SRobert Mustacchi 
5947*d14abf15SRobert Mustacchi /* return a queue object's logical state*/
5948*d14abf15SRobert Mustacchi int ecore_get_q_logical_state(struct _lm_device_t *pdev,
5949*d14abf15SRobert Mustacchi 			       struct ecore_queue_sp_obj *obj)
5950*d14abf15SRobert Mustacchi {
5951*d14abf15SRobert Mustacchi 	switch (obj->state) {
5952*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_ACTIVE:
5953*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_MULTI_COS:
5954*d14abf15SRobert Mustacchi 		return ECORE_Q_LOGICAL_STATE_ACTIVE;
5955*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_RESET:
5956*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INITIALIZED:
5957*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_MCOS_TERMINATED:
5958*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_INACTIVE:
5959*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_STOPPED:
5960*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_TERMINATED:
5961*d14abf15SRobert Mustacchi 	case ECORE_Q_STATE_FLRED:
5962*d14abf15SRobert Mustacchi 		return ECORE_Q_LOGICAL_STATE_STOPPED;
5963*d14abf15SRobert Mustacchi 	default:
5964*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
5965*d14abf15SRobert Mustacchi 	}
5966*d14abf15SRobert Mustacchi }
5967*d14abf15SRobert Mustacchi 
5968*d14abf15SRobert Mustacchi /********************** Function state object *********************************/
5969*d14abf15SRobert Mustacchi enum ecore_func_state ecore_func_get_state(struct _lm_device_t *pdev,
5970*d14abf15SRobert Mustacchi 					   struct ecore_func_sp_obj *o)
5971*d14abf15SRobert Mustacchi {
5972*d14abf15SRobert Mustacchi 	/* in the middle of transaction - return INVALID state */
5973*d14abf15SRobert Mustacchi 	if (o->pending)
5974*d14abf15SRobert Mustacchi 		return ECORE_F_STATE_MAX;
5975*d14abf15SRobert Mustacchi 
5976*d14abf15SRobert Mustacchi 	/* unsure the order of reading of o->pending and o->state
5977*d14abf15SRobert Mustacchi 	 * o->pending should be read first
5978*d14abf15SRobert Mustacchi 	 */
5979*d14abf15SRobert Mustacchi 	rmb();
5980*d14abf15SRobert Mustacchi 
5981*d14abf15SRobert Mustacchi 	return o->state;
5982*d14abf15SRobert Mustacchi }
5983*d14abf15SRobert Mustacchi 
5984*d14abf15SRobert Mustacchi static int ecore_func_wait_comp(struct _lm_device_t *pdev,
5985*d14abf15SRobert Mustacchi 				struct ecore_func_sp_obj *o,
5986*d14abf15SRobert Mustacchi 				enum ecore_func_cmd cmd)
5987*d14abf15SRobert Mustacchi {
5988*d14abf15SRobert Mustacchi 	return ecore_state_wait(pdev, cmd, &o->pending);
5989*d14abf15SRobert Mustacchi }
5990*d14abf15SRobert Mustacchi 
5991*d14abf15SRobert Mustacchi /**
5992*d14abf15SRobert Mustacchi  * ecore_func_state_change_comp - complete the state machine transition
5993*d14abf15SRobert Mustacchi  *
5994*d14abf15SRobert Mustacchi  * @pdev:	device handle
5995*d14abf15SRobert Mustacchi  * @o:
5996*d14abf15SRobert Mustacchi  * @cmd:
5997*d14abf15SRobert Mustacchi  *
5998*d14abf15SRobert Mustacchi  * Called on state change transition. Completes the state
5999*d14abf15SRobert Mustacchi  * machine transition only - no HW interaction.
6000*d14abf15SRobert Mustacchi  */
6001*d14abf15SRobert Mustacchi static INLINE int ecore_func_state_change_comp(struct _lm_device_t *pdev,
6002*d14abf15SRobert Mustacchi 					       struct ecore_func_sp_obj *o,
6003*d14abf15SRobert Mustacchi 					       enum ecore_func_cmd cmd)
6004*d14abf15SRobert Mustacchi {
6005*d14abf15SRobert Mustacchi 	unsigned long cur_pending = o->pending;
6006*d14abf15SRobert Mustacchi 
6007*d14abf15SRobert Mustacchi 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
6008*d14abf15SRobert Mustacchi 		ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
6009*d14abf15SRobert Mustacchi 			  cmd, FUNC_ID(pdev), o->state,
6010*d14abf15SRobert Mustacchi 			  cur_pending, o->next_state);
6011*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
6012*d14abf15SRobert Mustacchi 	}
6013*d14abf15SRobert Mustacchi 
6014*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev,
6015*d14abf15SRobert Mustacchi 		  "Completing command %d for func %d, setting state to %d\n",
6016*d14abf15SRobert Mustacchi 		  cmd, FUNC_ID(pdev), o->next_state);
6017*d14abf15SRobert Mustacchi 
6018*d14abf15SRobert Mustacchi 	o->state = o->next_state;
6019*d14abf15SRobert Mustacchi 	o->next_state = ECORE_F_STATE_MAX;
6020*d14abf15SRobert Mustacchi 
6021*d14abf15SRobert Mustacchi 	/* It's important that o->state and o->next_state are
6022*d14abf15SRobert Mustacchi 	 * updated before o->pending.
6023*d14abf15SRobert Mustacchi 	 */
6024*d14abf15SRobert Mustacchi 	wmb();
6025*d14abf15SRobert Mustacchi 
6026*d14abf15SRobert Mustacchi 	ECORE_CLEAR_BIT(cmd, &o->pending);
6027*d14abf15SRobert Mustacchi 	smp_mb__after_atomic();
6028*d14abf15SRobert Mustacchi 
6029*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
6030*d14abf15SRobert Mustacchi }
6031*d14abf15SRobert Mustacchi 
6032*d14abf15SRobert Mustacchi /**
6033*d14abf15SRobert Mustacchi  * ecore_func_comp_cmd - complete the state change command
6034*d14abf15SRobert Mustacchi  *
6035*d14abf15SRobert Mustacchi  * @pdev:	device handle
6036*d14abf15SRobert Mustacchi  * @o:
6037*d14abf15SRobert Mustacchi  * @cmd:
6038*d14abf15SRobert Mustacchi  *
6039*d14abf15SRobert Mustacchi  * Checks that the arrived completion is expected.
6040*d14abf15SRobert Mustacchi  */
6041*d14abf15SRobert Mustacchi static int ecore_func_comp_cmd(struct _lm_device_t *pdev,
6042*d14abf15SRobert Mustacchi 			       struct ecore_func_sp_obj *o,
6043*d14abf15SRobert Mustacchi 			       enum ecore_func_cmd cmd)
6044*d14abf15SRobert Mustacchi {
6045*d14abf15SRobert Mustacchi 	/* Complete the state machine part first, check if it's a
6046*d14abf15SRobert Mustacchi 	 * legal completion.
6047*d14abf15SRobert Mustacchi 	 */
6048*d14abf15SRobert Mustacchi 	int rc = ecore_func_state_change_comp(pdev, o, cmd);
6049*d14abf15SRobert Mustacchi 	return rc;
6050*d14abf15SRobert Mustacchi }
6051*d14abf15SRobert Mustacchi 
6052*d14abf15SRobert Mustacchi /**
6053*d14abf15SRobert Mustacchi  * ecore_func_chk_transition - perform function state machine transition
6054*d14abf15SRobert Mustacchi  *
6055*d14abf15SRobert Mustacchi  * @pdev:	device handle
6056*d14abf15SRobert Mustacchi  * @o:
6057*d14abf15SRobert Mustacchi  * @params:
6058*d14abf15SRobert Mustacchi  *
6059*d14abf15SRobert Mustacchi  * It both checks if the requested command is legal in a current
6060*d14abf15SRobert Mustacchi  * state and, if it's legal, sets a `next_state' in the object
6061*d14abf15SRobert Mustacchi  * that will be used in the completion flow to set the `state'
6062*d14abf15SRobert Mustacchi  * of the object.
6063*d14abf15SRobert Mustacchi  *
6064*d14abf15SRobert Mustacchi  * returns 0 if a requested command is a legal transition,
6065*d14abf15SRobert Mustacchi  *         ECORE_INVAL otherwise.
6066*d14abf15SRobert Mustacchi  */
6067*d14abf15SRobert Mustacchi static int ecore_func_chk_transition(struct _lm_device_t *pdev,
6068*d14abf15SRobert Mustacchi 				     struct ecore_func_sp_obj *o,
6069*d14abf15SRobert Mustacchi 				     struct ecore_func_state_params *params)
6070*d14abf15SRobert Mustacchi {
6071*d14abf15SRobert Mustacchi 	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
6072*d14abf15SRobert Mustacchi 	enum ecore_func_cmd cmd = params->cmd;
6073*d14abf15SRobert Mustacchi 
6074*d14abf15SRobert Mustacchi 	/* Forget all pending for completion commands if a driver only state
6075*d14abf15SRobert Mustacchi 	 * transition has been requested.
6076*d14abf15SRobert Mustacchi 	 */
6077*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6078*d14abf15SRobert Mustacchi 		o->pending = 0;
6079*d14abf15SRobert Mustacchi 		o->next_state = ECORE_F_STATE_MAX;
6080*d14abf15SRobert Mustacchi 	}
6081*d14abf15SRobert Mustacchi 
6082*d14abf15SRobert Mustacchi 	/* Don't allow a next state transition if we are in the middle of
6083*d14abf15SRobert Mustacchi 	 * the previous one.
6084*d14abf15SRobert Mustacchi 	 */
6085*d14abf15SRobert Mustacchi 	if (o->pending)
6086*d14abf15SRobert Mustacchi 		return ECORE_BUSY;
6087*d14abf15SRobert Mustacchi 
6088*d14abf15SRobert Mustacchi 	switch (state) {
6089*d14abf15SRobert Mustacchi 	case ECORE_F_STATE_RESET:
6090*d14abf15SRobert Mustacchi 		if (cmd == ECORE_F_CMD_HW_INIT)
6091*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_INITIALIZED;
6092*d14abf15SRobert Mustacchi 
6093*d14abf15SRobert Mustacchi 		break;
6094*d14abf15SRobert Mustacchi 	case ECORE_F_STATE_INITIALIZED:
6095*d14abf15SRobert Mustacchi 		if (cmd == ECORE_F_CMD_START)
6096*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6097*d14abf15SRobert Mustacchi 
6098*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_F_CMD_HW_RESET)
6099*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_RESET;
6100*d14abf15SRobert Mustacchi 
6101*d14abf15SRobert Mustacchi 		break;
6102*d14abf15SRobert Mustacchi 	case ECORE_F_STATE_STARTED:
6103*d14abf15SRobert Mustacchi 		if (cmd == ECORE_F_CMD_STOP)
6104*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_INITIALIZED;
6105*d14abf15SRobert Mustacchi 		/* afex ramrods can be sent only in started mode, and only
6106*d14abf15SRobert Mustacchi 		 * if not pending for function_stop ramrod completion
6107*d14abf15SRobert Mustacchi 		 * for these events - next state remained STARTED.
6108*d14abf15SRobert Mustacchi 		 */
6109*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
6110*d14abf15SRobert Mustacchi 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6111*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6112*d14abf15SRobert Mustacchi 
6113*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
6114*d14abf15SRobert Mustacchi 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6115*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6116*d14abf15SRobert Mustacchi 
6117*d14abf15SRobert Mustacchi 		/* Switch_update ramrod can be sent in either started or
6118*d14abf15SRobert Mustacchi 		 * tx_stopped state, and it doesn't change the state.
6119*d14abf15SRobert Mustacchi 		 */
6120*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
6121*d14abf15SRobert Mustacchi 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6122*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6123*d14abf15SRobert Mustacchi 
6124*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
6125*d14abf15SRobert Mustacchi 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6126*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6127*d14abf15SRobert Mustacchi 
6128*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_F_CMD_TX_STOP)
6129*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_TX_STOPPED;
6130*d14abf15SRobert Mustacchi 
6131*d14abf15SRobert Mustacchi 		break;
6132*d14abf15SRobert Mustacchi 	case ECORE_F_STATE_TX_STOPPED:
6133*d14abf15SRobert Mustacchi 		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
6134*d14abf15SRobert Mustacchi 		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6135*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_TX_STOPPED;
6136*d14abf15SRobert Mustacchi 
6137*d14abf15SRobert Mustacchi 		else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
6138*d14abf15SRobert Mustacchi 		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
6139*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_TX_STOPPED;
6140*d14abf15SRobert Mustacchi 
6141*d14abf15SRobert Mustacchi 		else if (cmd == ECORE_F_CMD_TX_START)
6142*d14abf15SRobert Mustacchi 			next_state = ECORE_F_STATE_STARTED;
6143*d14abf15SRobert Mustacchi 
6144*d14abf15SRobert Mustacchi 		break;
6145*d14abf15SRobert Mustacchi 	default:
6146*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown state: %d\n", state);
6147*d14abf15SRobert Mustacchi 	}
6148*d14abf15SRobert Mustacchi 
6149*d14abf15SRobert Mustacchi 	/* Transition is assured */
6150*d14abf15SRobert Mustacchi 	if (next_state != ECORE_F_STATE_MAX) {
6151*d14abf15SRobert Mustacchi 		ECORE_MSG(pdev, "Good function state transition: %d(%d)->%d\n",
6152*d14abf15SRobert Mustacchi 			  state, cmd, next_state);
6153*d14abf15SRobert Mustacchi 		o->next_state = next_state;
6154*d14abf15SRobert Mustacchi 		return ECORE_SUCCESS;
6155*d14abf15SRobert Mustacchi 	}
6156*d14abf15SRobert Mustacchi 
6157*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "Bad function state transition request: %d %d\n",
6158*d14abf15SRobert Mustacchi 		  state, cmd);
6159*d14abf15SRobert Mustacchi 
6160*d14abf15SRobert Mustacchi 	return ECORE_INVAL;
6161*d14abf15SRobert Mustacchi }
6162*d14abf15SRobert Mustacchi 
6163*d14abf15SRobert Mustacchi /**
6164*d14abf15SRobert Mustacchi  * ecore_func_init_func - performs HW init at function stage
6165*d14abf15SRobert Mustacchi  *
6166*d14abf15SRobert Mustacchi  * @pdev:	device handle
6167*d14abf15SRobert Mustacchi  * @drv:
6168*d14abf15SRobert Mustacchi  *
6169*d14abf15SRobert Mustacchi  * Init HW when the current phase is
6170*d14abf15SRobert Mustacchi  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
6171*d14abf15SRobert Mustacchi  * HW blocks.
6172*d14abf15SRobert Mustacchi  */
6173*d14abf15SRobert Mustacchi static INLINE int ecore_func_init_func(struct _lm_device_t *pdev,
6174*d14abf15SRobert Mustacchi 				       const struct ecore_func_sp_drv_ops *drv)
6175*d14abf15SRobert Mustacchi {
6176*d14abf15SRobert Mustacchi 	return drv->init_hw_func(pdev);
6177*d14abf15SRobert Mustacchi }
6178*d14abf15SRobert Mustacchi 
6179*d14abf15SRobert Mustacchi /**
6180*d14abf15SRobert Mustacchi  * ecore_func_init_port - performs HW init at port stage
6181*d14abf15SRobert Mustacchi  *
6182*d14abf15SRobert Mustacchi  * @pdev:	device handle
6183*d14abf15SRobert Mustacchi  * @drv:
6184*d14abf15SRobert Mustacchi  *
6185*d14abf15SRobert Mustacchi  * Init HW when the current phase is
6186*d14abf15SRobert Mustacchi  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
6187*d14abf15SRobert Mustacchi  * FUNCTION-only HW blocks.
6188*d14abf15SRobert Mustacchi  *
6189*d14abf15SRobert Mustacchi  */
6190*d14abf15SRobert Mustacchi static INLINE int ecore_func_init_port(struct _lm_device_t *pdev,
6191*d14abf15SRobert Mustacchi 				       const struct ecore_func_sp_drv_ops *drv)
6192*d14abf15SRobert Mustacchi {
6193*d14abf15SRobert Mustacchi 	int rc = drv->init_hw_port(pdev);
6194*d14abf15SRobert Mustacchi 	if (rc)
6195*d14abf15SRobert Mustacchi 		return rc;
6196*d14abf15SRobert Mustacchi 
6197*d14abf15SRobert Mustacchi 	return ecore_func_init_func(pdev, drv);
6198*d14abf15SRobert Mustacchi }
6199*d14abf15SRobert Mustacchi 
6200*d14abf15SRobert Mustacchi /**
6201*d14abf15SRobert Mustacchi  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
6202*d14abf15SRobert Mustacchi  *
6203*d14abf15SRobert Mustacchi  * @pdev:	device handle
6204*d14abf15SRobert Mustacchi  * @drv:
6205*d14abf15SRobert Mustacchi  *
6206*d14abf15SRobert Mustacchi  * Init HW when the current phase is
6207*d14abf15SRobert Mustacchi  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
6208*d14abf15SRobert Mustacchi  * PORT-only and FUNCTION-only HW blocks.
6209*d14abf15SRobert Mustacchi  */
6210*d14abf15SRobert Mustacchi static INLINE int ecore_func_init_cmn_chip(struct _lm_device_t *pdev,
6211*d14abf15SRobert Mustacchi 					const struct ecore_func_sp_drv_ops *drv)
6212*d14abf15SRobert Mustacchi {
6213*d14abf15SRobert Mustacchi 	int rc = drv->init_hw_cmn_chip(pdev);
6214*d14abf15SRobert Mustacchi 	if (rc)
6215*d14abf15SRobert Mustacchi 		return rc;
6216*d14abf15SRobert Mustacchi 
6217*d14abf15SRobert Mustacchi 	return ecore_func_init_port(pdev, drv);
6218*d14abf15SRobert Mustacchi }
6219*d14abf15SRobert Mustacchi 
6220*d14abf15SRobert Mustacchi /**
6221*d14abf15SRobert Mustacchi  * ecore_func_init_cmn - performs HW init at common stage
6222*d14abf15SRobert Mustacchi  *
6223*d14abf15SRobert Mustacchi  * @pdev:	device handle
6224*d14abf15SRobert Mustacchi  * @drv:
6225*d14abf15SRobert Mustacchi  *
6226*d14abf15SRobert Mustacchi  * Init HW when the current phase is
6227*d14abf15SRobert Mustacchi  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
6228*d14abf15SRobert Mustacchi  * PORT-only and FUNCTION-only HW blocks.
6229*d14abf15SRobert Mustacchi  */
6230*d14abf15SRobert Mustacchi static INLINE int ecore_func_init_cmn(struct _lm_device_t *pdev,
6231*d14abf15SRobert Mustacchi 				      const struct ecore_func_sp_drv_ops *drv)
6232*d14abf15SRobert Mustacchi {
6233*d14abf15SRobert Mustacchi 	int rc = drv->init_hw_cmn(pdev);
6234*d14abf15SRobert Mustacchi 	if (rc)
6235*d14abf15SRobert Mustacchi 		return rc;
6236*d14abf15SRobert Mustacchi 
6237*d14abf15SRobert Mustacchi 	return ecore_func_init_port(pdev, drv);
6238*d14abf15SRobert Mustacchi }
6239*d14abf15SRobert Mustacchi 
6240*d14abf15SRobert Mustacchi static int ecore_func_hw_init(struct _lm_device_t *pdev,
6241*d14abf15SRobert Mustacchi 			      struct ecore_func_state_params *params)
6242*d14abf15SRobert Mustacchi {
6243*d14abf15SRobert Mustacchi 	u32 load_code = params->params.hw_init.load_phase;
6244*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6245*d14abf15SRobert Mustacchi 	const struct ecore_func_sp_drv_ops *drv = o->drv;
6246*d14abf15SRobert Mustacchi 	int rc = 0;
6247*d14abf15SRobert Mustacchi 
6248*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "function %d  load_code %x\n",
6249*d14abf15SRobert Mustacchi 		  ABS_FUNC_ID(pdev), load_code);
6250*d14abf15SRobert Mustacchi 
6251*d14abf15SRobert Mustacchi 	/* Prepare buffers for unzipping the FW */
6252*d14abf15SRobert Mustacchi 	rc = drv->gunzip_init(pdev);
6253*d14abf15SRobert Mustacchi 	if (rc)
6254*d14abf15SRobert Mustacchi 		return rc;
6255*d14abf15SRobert Mustacchi 
6256*d14abf15SRobert Mustacchi 	/* Prepare FW */
6257*d14abf15SRobert Mustacchi 	rc = drv->init_fw(pdev);
6258*d14abf15SRobert Mustacchi 	if (rc) {
6259*d14abf15SRobert Mustacchi 		ECORE_ERR("Error loading firmware\n");
6260*d14abf15SRobert Mustacchi 		goto init_err;
6261*d14abf15SRobert Mustacchi 	}
6262*d14abf15SRobert Mustacchi 
6263*d14abf15SRobert Mustacchi 	/* Handle the beginning of COMMON_XXX pases separately... */
6264*d14abf15SRobert Mustacchi 	switch (load_code) {
6265*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6266*d14abf15SRobert Mustacchi 		rc = ecore_func_init_cmn_chip(pdev, drv);
6267*d14abf15SRobert Mustacchi 		if (rc)
6268*d14abf15SRobert Mustacchi 			goto init_err;
6269*d14abf15SRobert Mustacchi 
6270*d14abf15SRobert Mustacchi 		break;
6271*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_LOAD_COMMON:
6272*d14abf15SRobert Mustacchi 		rc = ecore_func_init_cmn(pdev, drv);
6273*d14abf15SRobert Mustacchi 		if (rc)
6274*d14abf15SRobert Mustacchi 			goto init_err;
6275*d14abf15SRobert Mustacchi 
6276*d14abf15SRobert Mustacchi 		break;
6277*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_LOAD_PORT:
6278*d14abf15SRobert Mustacchi 		rc = ecore_func_init_port(pdev, drv);
6279*d14abf15SRobert Mustacchi 		if (rc)
6280*d14abf15SRobert Mustacchi 			goto init_err;
6281*d14abf15SRobert Mustacchi 
6282*d14abf15SRobert Mustacchi 		break;
6283*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6284*d14abf15SRobert Mustacchi 		rc = ecore_func_init_func(pdev, drv);
6285*d14abf15SRobert Mustacchi 		if (rc)
6286*d14abf15SRobert Mustacchi 			goto init_err;
6287*d14abf15SRobert Mustacchi 
6288*d14abf15SRobert Mustacchi 		break;
6289*d14abf15SRobert Mustacchi 	default:
6290*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6291*d14abf15SRobert Mustacchi 		rc = ECORE_INVAL;
6292*d14abf15SRobert Mustacchi 	}
6293*d14abf15SRobert Mustacchi 
6294*d14abf15SRobert Mustacchi init_err:
6295*d14abf15SRobert Mustacchi 	drv->gunzip_end(pdev);
6296*d14abf15SRobert Mustacchi 
6297*d14abf15SRobert Mustacchi 	/* In case of success, complete the command immediately: no ramrods
6298*d14abf15SRobert Mustacchi 	 * have been sent.
6299*d14abf15SRobert Mustacchi 	 */
6300*d14abf15SRobert Mustacchi 	if (!rc)
6301*d14abf15SRobert Mustacchi 		o->complete_cmd(pdev, o, ECORE_F_CMD_HW_INIT);
6302*d14abf15SRobert Mustacchi 
6303*d14abf15SRobert Mustacchi 	return rc;
6304*d14abf15SRobert Mustacchi }
6305*d14abf15SRobert Mustacchi 
6306*d14abf15SRobert Mustacchi /**
6307*d14abf15SRobert Mustacchi  * ecore_func_reset_func - reset HW at function stage
6308*d14abf15SRobert Mustacchi  *
6309*d14abf15SRobert Mustacchi  * @pdev:	device handle
6310*d14abf15SRobert Mustacchi  * @drv:
6311*d14abf15SRobert Mustacchi  *
6312*d14abf15SRobert Mustacchi  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
6313*d14abf15SRobert Mustacchi  * FUNCTION-only HW blocks.
6314*d14abf15SRobert Mustacchi  */
6315*d14abf15SRobert Mustacchi static INLINE void ecore_func_reset_func(struct _lm_device_t *pdev,
6316*d14abf15SRobert Mustacchi 					const struct ecore_func_sp_drv_ops *drv)
6317*d14abf15SRobert Mustacchi {
6318*d14abf15SRobert Mustacchi 	drv->reset_hw_func(pdev);
6319*d14abf15SRobert Mustacchi }
6320*d14abf15SRobert Mustacchi 
6321*d14abf15SRobert Mustacchi /**
6322*d14abf15SRobert Mustacchi  * ecore_func_reset_port - reser HW at port stage
6323*d14abf15SRobert Mustacchi  *
6324*d14abf15SRobert Mustacchi  * @pdev:	device handle
6325*d14abf15SRobert Mustacchi  * @drv:
6326*d14abf15SRobert Mustacchi  *
6327*d14abf15SRobert Mustacchi  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
6328*d14abf15SRobert Mustacchi  * FUNCTION-only and PORT-only HW blocks.
6329*d14abf15SRobert Mustacchi  *
6330*d14abf15SRobert Mustacchi  *                 !!!IMPORTANT!!!
6331*d14abf15SRobert Mustacchi  *
6332*d14abf15SRobert Mustacchi  * It's important to call reset_port before reset_func() as the last thing
6333*d14abf15SRobert Mustacchi  * reset_func does is pf_disable() thus disabling PGLUE_B, which
6334*d14abf15SRobert Mustacchi  * makes impossible any DMAE transactions.
6335*d14abf15SRobert Mustacchi  */
6336*d14abf15SRobert Mustacchi static INLINE void ecore_func_reset_port(struct _lm_device_t *pdev,
6337*d14abf15SRobert Mustacchi 					const struct ecore_func_sp_drv_ops *drv)
6338*d14abf15SRobert Mustacchi {
6339*d14abf15SRobert Mustacchi 	drv->reset_hw_port(pdev);
6340*d14abf15SRobert Mustacchi 	ecore_func_reset_func(pdev, drv);
6341*d14abf15SRobert Mustacchi }
6342*d14abf15SRobert Mustacchi 
6343*d14abf15SRobert Mustacchi /**
6344*d14abf15SRobert Mustacchi  * ecore_func_reset_cmn - reser HW at common stage
6345*d14abf15SRobert Mustacchi  *
6346*d14abf15SRobert Mustacchi  * @pdev:	device handle
6347*d14abf15SRobert Mustacchi  * @drv:
6348*d14abf15SRobert Mustacchi  *
6349*d14abf15SRobert Mustacchi  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
6350*d14abf15SRobert Mustacchi  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
6351*d14abf15SRobert Mustacchi  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
6352*d14abf15SRobert Mustacchi  */
6353*d14abf15SRobert Mustacchi static INLINE void ecore_func_reset_cmn(struct _lm_device_t *pdev,
6354*d14abf15SRobert Mustacchi 					const struct ecore_func_sp_drv_ops *drv)
6355*d14abf15SRobert Mustacchi {
6356*d14abf15SRobert Mustacchi 	ecore_func_reset_port(pdev, drv);
6357*d14abf15SRobert Mustacchi 	drv->reset_hw_cmn(pdev);
6358*d14abf15SRobert Mustacchi }
6359*d14abf15SRobert Mustacchi 
6360*d14abf15SRobert Mustacchi static INLINE int ecore_func_hw_reset(struct _lm_device_t *pdev,
6361*d14abf15SRobert Mustacchi 				      struct ecore_func_state_params *params)
6362*d14abf15SRobert Mustacchi {
6363*d14abf15SRobert Mustacchi 	u32 reset_phase = params->params.hw_reset.reset_phase;
6364*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6365*d14abf15SRobert Mustacchi 	const struct ecore_func_sp_drv_ops *drv = o->drv;
6366*d14abf15SRobert Mustacchi 
6367*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "function %d  reset_phase %x\n", ABS_FUNC_ID(pdev),
6368*d14abf15SRobert Mustacchi 		  reset_phase);
6369*d14abf15SRobert Mustacchi 
6370*d14abf15SRobert Mustacchi 	switch (reset_phase) {
6371*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6372*d14abf15SRobert Mustacchi 		ecore_func_reset_cmn(pdev, drv);
6373*d14abf15SRobert Mustacchi 		break;
6374*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
6375*d14abf15SRobert Mustacchi 		ecore_func_reset_port(pdev, drv);
6376*d14abf15SRobert Mustacchi 		break;
6377*d14abf15SRobert Mustacchi 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6378*d14abf15SRobert Mustacchi 		ecore_func_reset_func(pdev, drv);
6379*d14abf15SRobert Mustacchi 		break;
6380*d14abf15SRobert Mustacchi 	default:
6381*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
6382*d14abf15SRobert Mustacchi 			  reset_phase);
6383*d14abf15SRobert Mustacchi 		break;
6384*d14abf15SRobert Mustacchi 	}
6385*d14abf15SRobert Mustacchi 
6386*d14abf15SRobert Mustacchi 	/* Complete the command immediately: no ramrods have been sent. */
6387*d14abf15SRobert Mustacchi 	o->complete_cmd(pdev, o, ECORE_F_CMD_HW_RESET);
6388*d14abf15SRobert Mustacchi 
6389*d14abf15SRobert Mustacchi 	return ECORE_SUCCESS;
6390*d14abf15SRobert Mustacchi }
6391*d14abf15SRobert Mustacchi 
6392*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_start(struct _lm_device_t *pdev,
6393*d14abf15SRobert Mustacchi 					struct ecore_func_state_params *params)
6394*d14abf15SRobert Mustacchi {
6395*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6396*d14abf15SRobert Mustacchi 	struct function_start_data *rdata =
6397*d14abf15SRobert Mustacchi 		(struct function_start_data *)o->rdata;
6398*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
6399*d14abf15SRobert Mustacchi 	struct ecore_func_start_params *start_params = &params->params.start;
6400*d14abf15SRobert Mustacchi 
6401*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6402*d14abf15SRobert Mustacchi 
6403*d14abf15SRobert Mustacchi 	/* Fill the ramrod data with provided parameters */
6404*d14abf15SRobert Mustacchi 	rdata->function_mode	= (u8)start_params->mf_mode;
6405*d14abf15SRobert Mustacchi 	rdata->sd_vlan_tag	= mm_cpu_to_le16(start_params->sd_vlan_tag);
6406*d14abf15SRobert Mustacchi 	rdata->path_id		= PATH_ID(pdev);
6407*d14abf15SRobert Mustacchi 	rdata->network_cos_mode	= start_params->network_cos_mode;
6408*d14abf15SRobert Mustacchi 	rdata->tunnel_mode	= start_params->tunnel_mode;
6409*d14abf15SRobert Mustacchi 	rdata->gre_tunnel_type	= start_params->gre_tunnel_type;
6410*d14abf15SRobert Mustacchi 	rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
6411*d14abf15SRobert Mustacchi 	rdata->vxlan_dst_port	= start_params->vxlan_dst_port;
6412*d14abf15SRobert Mustacchi 	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
6413*d14abf15SRobert Mustacchi 	if (start_params->class_fail_ethtype) {
6414*d14abf15SRobert Mustacchi 		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
6415*d14abf15SRobert Mustacchi 		rdata->sd_accept_mf_clss_fail_ethtype =
6416*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(start_params->class_fail_ethtype);
6417*d14abf15SRobert Mustacchi 	}
6418*d14abf15SRobert Mustacchi 	rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
6419*d14abf15SRobert Mustacchi 	rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
6420*d14abf15SRobert Mustacchi 
6421*d14abf15SRobert Mustacchi 	/** @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
6422*d14abf15SRobert Mustacchi 	 * `sd_vlan_eth_type' will replace ethertype in SD mode even if
6423*d14abf15SRobert Mustacchi 	 * it's set to 0; This will probably break SD, so we're setting it
6424*d14abf15SRobert Mustacchi 	 * to ethertype 0x8100 for now.
6425*d14abf15SRobert Mustacchi 	 */
6426*d14abf15SRobert Mustacchi 	if (start_params->sd_vlan_eth_type)
6427*d14abf15SRobert Mustacchi 		rdata->sd_vlan_eth_type =
6428*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(start_params->sd_vlan_eth_type);
6429*d14abf15SRobert Mustacchi 	else
6430*d14abf15SRobert Mustacchi 		rdata->sd_vlan_eth_type =
6431*d14abf15SRobert Mustacchi 			mm_cpu_to_le16((u16) 0x8100);
6432*d14abf15SRobert Mustacchi 
6433*d14abf15SRobert Mustacchi 	rdata->no_added_tags = start_params->no_added_tags;
6434*d14abf15SRobert Mustacchi 
6435*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
6436*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
6437*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
6438*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
6439*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
6440*d14abf15SRobert Mustacchi 	 */
6441*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6442*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
6443*d14abf15SRobert Mustacchi }
6444*d14abf15SRobert Mustacchi 
6445*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_switch_update(struct _lm_device_t *pdev,
6446*d14abf15SRobert Mustacchi 					struct ecore_func_state_params *params)
6447*d14abf15SRobert Mustacchi {
6448*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6449*d14abf15SRobert Mustacchi 	struct function_update_data *rdata =
6450*d14abf15SRobert Mustacchi 		(struct function_update_data *)o->rdata;
6451*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
6452*d14abf15SRobert Mustacchi 	struct ecore_func_switch_update_params *switch_update_params =
6453*d14abf15SRobert Mustacchi 		&params->params.switch_update;
6454*d14abf15SRobert Mustacchi 
6455*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6456*d14abf15SRobert Mustacchi 
6457*d14abf15SRobert Mustacchi 	/* Fill the ramrod data with provided parameters */
6458*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
6459*d14abf15SRobert Mustacchi 			   &switch_update_params->changes)) {
6460*d14abf15SRobert Mustacchi 		rdata->tx_switch_suspend_change_flg = 1;
6461*d14abf15SRobert Mustacchi 		rdata->tx_switch_suspend =
6462*d14abf15SRobert Mustacchi 			ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
6463*d14abf15SRobert Mustacchi 				       &switch_update_params->changes);
6464*d14abf15SRobert Mustacchi 	}
6465*d14abf15SRobert Mustacchi 
6466*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
6467*d14abf15SRobert Mustacchi 			   &switch_update_params->changes)) {
6468*d14abf15SRobert Mustacchi 		rdata->sd_vlan_tag_change_flg = 1;
6469*d14abf15SRobert Mustacchi 		rdata->sd_vlan_tag =
6470*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(switch_update_params->vlan);
6471*d14abf15SRobert Mustacchi 	}
6472*d14abf15SRobert Mustacchi 
6473*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
6474*d14abf15SRobert Mustacchi 			   &switch_update_params->changes)) {
6475*d14abf15SRobert Mustacchi 		rdata->sd_vlan_eth_type_change_flg = 1;
6476*d14abf15SRobert Mustacchi 		rdata->sd_vlan_eth_type =
6477*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(switch_update_params->vlan_eth_type);
6478*d14abf15SRobert Mustacchi 	}
6479*d14abf15SRobert Mustacchi 
6480*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
6481*d14abf15SRobert Mustacchi 			   &switch_update_params->changes)) {
6482*d14abf15SRobert Mustacchi 		rdata->sd_vlan_force_pri_change_flg = 1;
6483*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
6484*d14abf15SRobert Mustacchi 				   &switch_update_params->changes))
6485*d14abf15SRobert Mustacchi 			rdata->sd_vlan_force_pri_flg = 1;
6486*d14abf15SRobert Mustacchi 		rdata->sd_vlan_force_pri_flg =
6487*d14abf15SRobert Mustacchi 			switch_update_params->vlan_force_prio;
6488*d14abf15SRobert Mustacchi 	}
6489*d14abf15SRobert Mustacchi 
6490*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
6491*d14abf15SRobert Mustacchi 			   &switch_update_params->changes)) {
6492*d14abf15SRobert Mustacchi 		rdata->update_tunn_cfg_flg = 1;
6493*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CLSS_EN,
6494*d14abf15SRobert Mustacchi 				   &switch_update_params->changes))
6495*d14abf15SRobert Mustacchi 			rdata->tunn_clss_en = 1;
6496*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
6497*d14abf15SRobert Mustacchi 				   &switch_update_params->changes))
6498*d14abf15SRobert Mustacchi 			rdata->inner_gre_rss_en = 1;
6499*d14abf15SRobert Mustacchi 		rdata->tunnel_mode = switch_update_params->tunnel_mode;
6500*d14abf15SRobert Mustacchi 		rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
6501*d14abf15SRobert Mustacchi 		rdata->vxlan_dst_port =
6502*d14abf15SRobert Mustacchi 			mm_cpu_to_le16(switch_update_params->vxlan_dst_port);
6503*d14abf15SRobert Mustacchi 	}
6504*d14abf15SRobert Mustacchi 
6505*d14abf15SRobert Mustacchi 	rdata->echo = SWITCH_UPDATE;
6506*d14abf15SRobert Mustacchi 
6507*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
6508*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
6509*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
6510*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
6511*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
6512*d14abf15SRobert Mustacchi 	 */
6513*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6514*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
6515*d14abf15SRobert Mustacchi }
6516*d14abf15SRobert Mustacchi 
6517*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_afex_update(struct _lm_device_t *pdev,
6518*d14abf15SRobert Mustacchi 					 struct ecore_func_state_params *params)
6519*d14abf15SRobert Mustacchi {
6520*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6521*d14abf15SRobert Mustacchi 	struct function_update_data *rdata =
6522*d14abf15SRobert Mustacchi 		(struct function_update_data *)o->afex_rdata;
6523*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->afex_rdata_mapping;
6524*d14abf15SRobert Mustacchi 	struct ecore_func_afex_update_params *afex_update_params =
6525*d14abf15SRobert Mustacchi 		&params->params.afex_update;
6526*d14abf15SRobert Mustacchi 
6527*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6528*d14abf15SRobert Mustacchi 
6529*d14abf15SRobert Mustacchi 	/* Fill the ramrod data with provided parameters */
6530*d14abf15SRobert Mustacchi 	rdata->vif_id_change_flg = 1;
6531*d14abf15SRobert Mustacchi 	rdata->vif_id = mm_cpu_to_le16(afex_update_params->vif_id);
6532*d14abf15SRobert Mustacchi 	rdata->afex_default_vlan_change_flg = 1;
6533*d14abf15SRobert Mustacchi 	rdata->afex_default_vlan =
6534*d14abf15SRobert Mustacchi 		mm_cpu_to_le16(afex_update_params->afex_default_vlan);
6535*d14abf15SRobert Mustacchi 	rdata->allowed_priorities_change_flg = 1;
6536*d14abf15SRobert Mustacchi 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6537*d14abf15SRobert Mustacchi 	rdata->echo = AFEX_UPDATE;
6538*d14abf15SRobert Mustacchi 
6539*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
6540*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
6541*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
6542*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
6543*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
6544*d14abf15SRobert Mustacchi 	 */
6545*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev,
6546*d14abf15SRobert Mustacchi 		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6547*d14abf15SRobert Mustacchi 		  rdata->vif_id,
6548*d14abf15SRobert Mustacchi 		  rdata->afex_default_vlan, rdata->allowed_priorities);
6549*d14abf15SRobert Mustacchi 
6550*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6551*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
6552*d14abf15SRobert Mustacchi }
6553*d14abf15SRobert Mustacchi 
6554*d14abf15SRobert Mustacchi static
6555*d14abf15SRobert Mustacchi INLINE int ecore_func_send_afex_viflists(struct _lm_device_t *pdev,
6556*d14abf15SRobert Mustacchi 					 struct ecore_func_state_params *params)
6557*d14abf15SRobert Mustacchi {
6558*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6559*d14abf15SRobert Mustacchi 	struct afex_vif_list_ramrod_data *rdata =
6560*d14abf15SRobert Mustacchi 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
6561*d14abf15SRobert Mustacchi 	struct ecore_func_afex_viflists_params *afex_vif_params =
6562*d14abf15SRobert Mustacchi 		&params->params.afex_viflists;
6563*d14abf15SRobert Mustacchi 	u64 *p_rdata = (u64 *)rdata;
6564*d14abf15SRobert Mustacchi 
6565*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6566*d14abf15SRobert Mustacchi 
6567*d14abf15SRobert Mustacchi 	/* Fill the ramrod data with provided parameters */
6568*d14abf15SRobert Mustacchi 	rdata->vif_list_index = mm_cpu_to_le16(afex_vif_params->vif_list_index);
6569*d14abf15SRobert Mustacchi 	rdata->func_bit_map          = afex_vif_params->func_bit_map;
6570*d14abf15SRobert Mustacchi 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6571*d14abf15SRobert Mustacchi 	rdata->func_to_clear         = afex_vif_params->func_to_clear;
6572*d14abf15SRobert Mustacchi 
6573*d14abf15SRobert Mustacchi 	/* send in echo type of sub command */
6574*d14abf15SRobert Mustacchi 	rdata->echo = afex_vif_params->afex_vif_list_command;
6575*d14abf15SRobert Mustacchi 
6576*d14abf15SRobert Mustacchi 	ECORE_MSG(pdev, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6577*d14abf15SRobert Mustacchi 		  rdata->afex_vif_list_command, rdata->vif_list_index,
6578*d14abf15SRobert Mustacchi 		  rdata->func_bit_map, rdata->func_to_clear);
6579*d14abf15SRobert Mustacchi 
6580*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
6581*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
6582*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
6583*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
6584*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
6585*d14abf15SRobert Mustacchi 	 */
6586*d14abf15SRobert Mustacchi 
6587*d14abf15SRobert Mustacchi 	/* this ramrod sends data directly and not through DMA mapping */
6588*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6589*d14abf15SRobert Mustacchi 			     *p_rdata, NONE_CONNECTION_TYPE);
6590*d14abf15SRobert Mustacchi }
6591*d14abf15SRobert Mustacchi 
6592*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_stop(struct _lm_device_t *pdev,
6593*d14abf15SRobert Mustacchi 				       struct ecore_func_state_params *params)
6594*d14abf15SRobert Mustacchi {
6595*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6596*d14abf15SRobert Mustacchi 			     NONE_CONNECTION_TYPE);
6597*d14abf15SRobert Mustacchi }
6598*d14abf15SRobert Mustacchi 
6599*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_tx_stop(struct _lm_device_t *pdev,
6600*d14abf15SRobert Mustacchi 				       struct ecore_func_state_params *params)
6601*d14abf15SRobert Mustacchi {
6602*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6603*d14abf15SRobert Mustacchi 			     NONE_CONNECTION_TYPE);
6604*d14abf15SRobert Mustacchi }
6605*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_tx_start(struct _lm_device_t *pdev,
6606*d14abf15SRobert Mustacchi 				       struct ecore_func_state_params *params)
6607*d14abf15SRobert Mustacchi {
6608*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6609*d14abf15SRobert Mustacchi 	struct flow_control_configuration *rdata =
6610*d14abf15SRobert Mustacchi 		(struct flow_control_configuration *)o->rdata;
6611*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
6612*d14abf15SRobert Mustacchi 	struct ecore_func_tx_start_params *tx_start_params =
6613*d14abf15SRobert Mustacchi 		&params->params.tx_start;
6614*d14abf15SRobert Mustacchi 	int i;
6615*d14abf15SRobert Mustacchi 
6616*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6617*d14abf15SRobert Mustacchi 
6618*d14abf15SRobert Mustacchi 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6619*d14abf15SRobert Mustacchi 	rdata->dcb_version = tx_start_params->dcb_version;
6620*d14abf15SRobert Mustacchi 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
6621*d14abf15SRobert Mustacchi 
6622*d14abf15SRobert Mustacchi 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6623*d14abf15SRobert Mustacchi 		rdata->traffic_type_to_priority_cos[i] =
6624*d14abf15SRobert Mustacchi 			tx_start_params->traffic_type_to_priority_cos[i];
6625*d14abf15SRobert Mustacchi 
6626*d14abf15SRobert Mustacchi 	/* No need for an explicit memory barrier here as long as we
6627*d14abf15SRobert Mustacchi 	 * ensure the ordering of writing to the SPQ element
6628*d14abf15SRobert Mustacchi 	 * and updating of the SPQ producer which involves a memory
6629*d14abf15SRobert Mustacchi 	 * read. If the memory read is removed we will have to put a
6630*d14abf15SRobert Mustacchi 	 * full memory barrier there (inside ecore_sp_post()).
6631*d14abf15SRobert Mustacchi 	 */
6632*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6633*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
6634*d14abf15SRobert Mustacchi }
6635*d14abf15SRobert Mustacchi 
6636*d14abf15SRobert Mustacchi static INLINE int ecore_func_send_set_timesync(struct _lm_device_t *pdev,
6637*d14abf15SRobert Mustacchi 					struct ecore_func_state_params *params)
6638*d14abf15SRobert Mustacchi {
6639*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6640*d14abf15SRobert Mustacchi 	struct set_timesync_ramrod_data *rdata =
6641*d14abf15SRobert Mustacchi 		(struct set_timesync_ramrod_data *)o->rdata;
6642*d14abf15SRobert Mustacchi 	lm_address_t data_mapping = o->rdata_mapping;
6643*d14abf15SRobert Mustacchi 	struct ecore_func_set_timesync_params *set_timesync_params =
6644*d14abf15SRobert Mustacchi 		&params->params.set_timesync;
6645*d14abf15SRobert Mustacchi 
6646*d14abf15SRobert Mustacchi 	mm_memset(rdata, 0, sizeof(*rdata));
6647*d14abf15SRobert Mustacchi 
6648*d14abf15SRobert Mustacchi 	/* Fill the ramrod data with provided parameters */
6649*d14abf15SRobert Mustacchi 	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
6650*d14abf15SRobert Mustacchi 	rdata->offset_cmd = set_timesync_params->offset_cmd;
6651*d14abf15SRobert Mustacchi 	rdata->add_sub_drift_adjust_value =
6652*d14abf15SRobert Mustacchi 		set_timesync_params->add_sub_drift_adjust_value;
6653*d14abf15SRobert Mustacchi 	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
6654*d14abf15SRobert Mustacchi 	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
6655*d14abf15SRobert Mustacchi 	rdata->offset_delta.lo =
6656*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
6657*d14abf15SRobert Mustacchi 	rdata->offset_delta.hi =
6658*d14abf15SRobert Mustacchi 		mm_cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
6659*d14abf15SRobert Mustacchi 
6660*d14abf15SRobert Mustacchi 	DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
6661*d14abf15SRobert Mustacchi 	   rdata->drift_adjust_cmd, rdata->offset_cmd,
6662*d14abf15SRobert Mustacchi 	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
6663*d14abf15SRobert Mustacchi 	   rdata->drift_adjust_period, rdata->offset_delta.lo,
6664*d14abf15SRobert Mustacchi 	   rdata->offset_delta.hi);
6665*d14abf15SRobert Mustacchi 
6666*d14abf15SRobert Mustacchi 	return ecore_sp_post(pdev, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
6667*d14abf15SRobert Mustacchi 			     data_mapping.as_u64, NONE_CONNECTION_TYPE);
6668*d14abf15SRobert Mustacchi }
6669*d14abf15SRobert Mustacchi 
6670*d14abf15SRobert Mustacchi static int ecore_func_send_cmd(struct _lm_device_t *pdev,
6671*d14abf15SRobert Mustacchi 			       struct ecore_func_state_params *params)
6672*d14abf15SRobert Mustacchi {
6673*d14abf15SRobert Mustacchi 	switch (params->cmd) {
6674*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_HW_INIT:
6675*d14abf15SRobert Mustacchi 		return ecore_func_hw_init(pdev, params);
6676*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_START:
6677*d14abf15SRobert Mustacchi 		return ecore_func_send_start(pdev, params);
6678*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_STOP:
6679*d14abf15SRobert Mustacchi 		return ecore_func_send_stop(pdev, params);
6680*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_HW_RESET:
6681*d14abf15SRobert Mustacchi 		return ecore_func_hw_reset(pdev, params);
6682*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_AFEX_UPDATE:
6683*d14abf15SRobert Mustacchi 		return ecore_func_send_afex_update(pdev, params);
6684*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_AFEX_VIFLISTS:
6685*d14abf15SRobert Mustacchi 		return ecore_func_send_afex_viflists(pdev, params);
6686*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_TX_STOP:
6687*d14abf15SRobert Mustacchi 		return ecore_func_send_tx_stop(pdev, params);
6688*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_TX_START:
6689*d14abf15SRobert Mustacchi 		return ecore_func_send_tx_start(pdev, params);
6690*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_SWITCH_UPDATE:
6691*d14abf15SRobert Mustacchi 		return ecore_func_send_switch_update(pdev, params);
6692*d14abf15SRobert Mustacchi 	case ECORE_F_CMD_SET_TIMESYNC:
6693*d14abf15SRobert Mustacchi 		return ecore_func_send_set_timesync(pdev, params);
6694*d14abf15SRobert Mustacchi 	default:
6695*d14abf15SRobert Mustacchi 		ECORE_ERR("Unknown command: %d\n", params->cmd);
6696*d14abf15SRobert Mustacchi 		return ECORE_INVAL;
6697*d14abf15SRobert Mustacchi 	}
6698*d14abf15SRobert Mustacchi }
6699*d14abf15SRobert Mustacchi 
6700*d14abf15SRobert Mustacchi void ecore_init_func_obj(struct _lm_device_t *pdev,
6701*d14abf15SRobert Mustacchi 			 struct ecore_func_sp_obj *obj,
6702*d14abf15SRobert Mustacchi 			 void *rdata, lm_address_t rdata_mapping,
6703*d14abf15SRobert Mustacchi 			 void *afex_rdata, lm_address_t afex_rdata_mapping,
6704*d14abf15SRobert Mustacchi 			 struct ecore_func_sp_drv_ops *drv_iface)
6705*d14abf15SRobert Mustacchi {
6706*d14abf15SRobert Mustacchi 	mm_memset(obj, 0, sizeof(*obj));
6707*d14abf15SRobert Mustacchi 
6708*d14abf15SRobert Mustacchi 	ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6709*d14abf15SRobert Mustacchi 
6710*d14abf15SRobert Mustacchi 	obj->rdata = rdata;
6711*d14abf15SRobert Mustacchi 	obj->rdata_mapping = rdata_mapping;
6712*d14abf15SRobert Mustacchi 	obj->afex_rdata = afex_rdata;
6713*d14abf15SRobert Mustacchi 	obj->afex_rdata_mapping = afex_rdata_mapping;
6714*d14abf15SRobert Mustacchi 	obj->send_cmd = ecore_func_send_cmd;
6715*d14abf15SRobert Mustacchi 	obj->check_transition = ecore_func_chk_transition;
6716*d14abf15SRobert Mustacchi 	obj->complete_cmd = ecore_func_comp_cmd;
6717*d14abf15SRobert Mustacchi 	obj->wait_comp = ecore_func_wait_comp;
6718*d14abf15SRobert Mustacchi 	obj->drv = drv_iface;
6719*d14abf15SRobert Mustacchi }
6720*d14abf15SRobert Mustacchi 
6721*d14abf15SRobert Mustacchi /**
6722*d14abf15SRobert Mustacchi  * ecore_func_state_change - perform Function state change transition
6723*d14abf15SRobert Mustacchi  *
6724*d14abf15SRobert Mustacchi  * @pdev:	device handle
6725*d14abf15SRobert Mustacchi  * @params:	parameters to perform the transaction
6726*d14abf15SRobert Mustacchi  *
6727*d14abf15SRobert Mustacchi  * returns 0 in case of successfully completed transition,
6728*d14abf15SRobert Mustacchi  *         negative error code in case of failure, positive
6729*d14abf15SRobert Mustacchi  *         (EBUSY) value if there is a completion to that is
6730*d14abf15SRobert Mustacchi  *         still pending (possible only if RAMROD_COMP_WAIT is
6731*d14abf15SRobert Mustacchi  *         not set in params->ramrod_flags for asynchronous
6732*d14abf15SRobert Mustacchi  *         commands).
6733*d14abf15SRobert Mustacchi  */
6734*d14abf15SRobert Mustacchi int ecore_func_state_change(struct _lm_device_t *pdev,
6735*d14abf15SRobert Mustacchi 			    struct ecore_func_state_params *params)
6736*d14abf15SRobert Mustacchi {
6737*d14abf15SRobert Mustacchi 	struct ecore_func_sp_obj *o = params->f_obj;
6738*d14abf15SRobert Mustacchi 	int rc, cnt = 300;
6739*d14abf15SRobert Mustacchi 	enum ecore_func_cmd cmd = params->cmd;
6740*d14abf15SRobert Mustacchi 	unsigned long *pending = &o->pending;
6741*d14abf15SRobert Mustacchi 
6742*d14abf15SRobert Mustacchi 	ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6743*d14abf15SRobert Mustacchi 
6744*d14abf15SRobert Mustacchi 	/* Check that the requested transition is legal */
6745*d14abf15SRobert Mustacchi 	rc = o->check_transition(pdev, o, params);
6746*d14abf15SRobert Mustacchi 	if ((rc == ECORE_BUSY) &&
6747*d14abf15SRobert Mustacchi 	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6748*d14abf15SRobert Mustacchi 		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6749*d14abf15SRobert Mustacchi 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6750*d14abf15SRobert Mustacchi 			msleep(10);
6751*d14abf15SRobert Mustacchi 			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6752*d14abf15SRobert Mustacchi 			rc = o->check_transition(pdev, o, params);
6753*d14abf15SRobert Mustacchi 		}
6754*d14abf15SRobert Mustacchi 		if (rc == ECORE_BUSY) {
6755*d14abf15SRobert Mustacchi 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6756*d14abf15SRobert Mustacchi 			ECORE_ERR("timeout waiting for previous ramrod completion\n");
6757*d14abf15SRobert Mustacchi 			return rc;
6758*d14abf15SRobert Mustacchi 		}
6759*d14abf15SRobert Mustacchi 	} else if (rc) {
6760*d14abf15SRobert Mustacchi 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6761*d14abf15SRobert Mustacchi 		return rc;
6762*d14abf15SRobert Mustacchi 	}
6763*d14abf15SRobert Mustacchi 
6764*d14abf15SRobert Mustacchi 	/* Set "pending" bit */
6765*d14abf15SRobert Mustacchi 	ECORE_SET_BIT(cmd, pending);
6766*d14abf15SRobert Mustacchi 
6767*d14abf15SRobert Mustacchi 	/* Don't send a command if only driver cleanup was requested */
6768*d14abf15SRobert Mustacchi 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6769*d14abf15SRobert Mustacchi 		ecore_func_state_change_comp(pdev, o, cmd);
6770*d14abf15SRobert Mustacchi 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6771*d14abf15SRobert Mustacchi 	} else {
6772*d14abf15SRobert Mustacchi 		/* Send a ramrod */
6773*d14abf15SRobert Mustacchi 		rc = o->send_cmd(pdev, params);
6774*d14abf15SRobert Mustacchi 
6775*d14abf15SRobert Mustacchi 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6776*d14abf15SRobert Mustacchi 
6777*d14abf15SRobert Mustacchi 		if (rc) {
6778*d14abf15SRobert Mustacchi 			o->next_state = ECORE_F_STATE_MAX;
6779*d14abf15SRobert Mustacchi 			ECORE_CLEAR_BIT(cmd, pending);
6780*d14abf15SRobert Mustacchi 			smp_mb__after_atomic();
6781*d14abf15SRobert Mustacchi 			return rc;
6782*d14abf15SRobert Mustacchi 		}
6783*d14abf15SRobert Mustacchi 
6784*d14abf15SRobert Mustacchi 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6785*d14abf15SRobert Mustacchi 			rc = o->wait_comp(pdev, o, cmd);
6786*d14abf15SRobert Mustacchi 			if (rc)
6787*d14abf15SRobert Mustacchi 				return rc;
6788*d14abf15SRobert Mustacchi 
6789*d14abf15SRobert Mustacchi 			return ECORE_SUCCESS;
6790*d14abf15SRobert Mustacchi 		}
6791*d14abf15SRobert Mustacchi 	}
6792*d14abf15SRobert Mustacchi 
6793*d14abf15SRobert Mustacchi 	return ECORE_RET_PENDING(cmd, pending);
6794*d14abf15SRobert Mustacchi }
6795*d14abf15SRobert Mustacchi #endif
6796