xref: /freebsd/sys/dev/qlnx/qlnxe/ecore_l2_api.h (revision 38069501)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  *
27  * $FreeBSD$
28  *
29  */
30 
31 #ifndef __ECORE_L2_API_H__
32 #define __ECORE_L2_API_H__
33 
34 #include "ecore_status.h"
35 #include "ecore_sp_api.h"
36 #include "ecore_int_api.h"
37 
/* RSS hash-type capability flags. These are bit flags and may be OR-ed
 * together (e.g. in ecore_rss_params.rss_caps) to select which packet
 * types participate in RSS hashing.
 */
enum ecore_rss_caps {
	ECORE_RSS_IPV4		= 0x1,	/* hash on IPv4 header */
	ECORE_RSS_IPV6		= 0x2,	/* hash on IPv6 header */
	ECORE_RSS_IPV4_TCP	= 0x4,	/* hash on IPv4 + TCP ports */
	ECORE_RSS_IPV6_TCP	= 0x8,	/* hash on IPv6 + TCP ports */
	ECORE_RSS_IPV4_UDP	= 0x10,	/* hash on IPv4 + UDP ports */
	ECORE_RSS_IPV6_UDP	= 0x20,	/* hash on IPv6 + UDP ports */
};
46 
/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */

/* Maximum PTP clock drift adjustment; per the _PPB suffix the unit is
 * parts-per-billion -- TODO(review): confirm against the PTP code that
 * consumes this value.
 */
#define ECORE_MAX_PHC_DRIFT_PPB	291666666
52 
/* Rx PTP timestamping filter selection. Names follow the usual PTP
 * classification: V1/V2 is the PTP protocol version, L2/L4 the transport
 * encapsulation, and EVENT/GEN presumably distinguish event messages
 * from general messages -- verify against the consumer of these values.
 */
enum ecore_ptp_filter_type {
	ECORE_PTP_FILTER_NONE,		/* timestamp no Rx packets */
	ECORE_PTP_FILTER_ALL,		/* timestamp all Rx packets */
	ECORE_PTP_FILTER_V1_L4_EVENT,
	ECORE_PTP_FILTER_V1_L4_GEN,
	ECORE_PTP_FILTER_V2_L4_EVENT,
	ECORE_PTP_FILTER_V2_L4_GEN,
	ECORE_PTP_FILTER_V2_L2_EVENT,
	ECORE_PTP_FILTER_V2_L2_GEN,
	ECORE_PTP_FILTER_V2_EVENT,
	ECORE_PTP_FILTER_V2_GEN
};
65 
/* Tx hardware timestamping mode: disabled or enabled. */
enum ecore_ptp_hwtstamp_tx_type {
	ECORE_PTP_HWTSTAMP_TX_OFF,
	ECORE_PTP_HWTSTAMP_TX_ON,
};
70 
/* Parameters common to starting both Rx and Tx queues
 * (see ecore_eth_rx_queue_start() / ecore_eth_tx_queue_start()).
 */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* Status block for the queue and the index within it to use */
	struct ecore_sb_info *p_sb;
	u8 sb_idx;
};
82 
/* Outputs of ecore_eth_rx_queue_start(). */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod;	/* mapped producer-update address */
	void *p_handle;			/* opaque queue handle for later calls */
};
87 
/* Outputs of ecore_eth_tx_queue_start(). */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell;	/* mapped doorbell address */
	void *p_handle;			/* opaque queue handle for later calls */
};
92 
/* RSS configuration, passed via ecore_sp_vport_update_params.
 * The update_* flags select which of the corresponding fields are to be
 * applied by the update; fields whose flag is clear are ignored.
 */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;		/* bitmask of enum ecore_rss_caps values */
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consist of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};
107 
/* TPA (transparent packet aggregation) configuration, passed via
 * ecore_sp_vport_update_params. The two update_*_flg fields gate the
 * enable-flag group and the parameter group respectively.
 */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	/* Applied only when update_tpa_en_flg is set */
	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	/* Applied only when update_tpa_param_flg is set */
	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};
126 
/* Operation to perform on a unicast/multicast filter
 * (see ecore_filter_ucast / ecore_filter_mcast).
 */
enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH, /* Removes all filters */
};
134 
/* What a unicast filter matches on: MAC, VLAN, inner (tunneled) header
 * fields, VNI, or combinations thereof.
 */
enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
};
146 
/* A single unicast filter command; the fields that are actually
 * consulted depend on 'opcode' and 'type' (e.g. 'mac' for MAC-based
 * types, 'vlan' for VLAN-based types, 'vni' for VNI-based types).
 */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;		/* apply on the Rx side */
	u8 is_tx_filter;		/* apply on the Tx side */
	u8 vport_to_add_to;
	u8 vport_to_remove_from;	/* relevant for REMOVE/MOVE */
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};
159 
/* A multicast filter command carrying up to ECORE_MAX_MC_ADDRS MACs. */
struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8	num_mc_addrs;		/* number of valid entries in mac[] */
#define ECORE_MAX_MC_ADDRS	64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};
169 
/* Rx/Tx "accept" (promiscuity) configuration. The update_* flags say
 * whether the corresponding accept-filter field should be applied; the
 * accept-filter fields are bitmasks of the ECORE_ACCEPT_* values below.
 */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define	ECORE_ACCEPT_NONE		0x01
#define ECORE_ACCEPT_UCAST_MATCHED	0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED	0x04
#define ECORE_ACCEPT_MCAST_MATCHED	0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED	0x10
#define ECORE_ACCEPT_BCAST		0x20
};
182 
/* aRFS (accelerated receive flow steering) mode configuration; the
 * protocol booleans select which flow types participate
 * (see ecore_arfs_mode_configure()).
 */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	bool arfs_enable;	/* Enable or disable arfs mode */
};
190 
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 *
 * @param p_dev
 * @param p_filter_cmd	filter command (opcode, type and match fields).
 * @param comp_mode	ramrod completion mode.
 * @param p_comp_data	completion cookie/callback; relevant for
 *			ECORE_SPQ_MODE_CB.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
203 
/* Add / remove / move multicast MAC filters.
 *
 * @param p_dev
 * @param p_filter_cmd	filter command (opcode and MAC list).
 * @param comp_mode	ramrod completion mode.
 * @param p_comp_data	completion cookie/callback; relevant for
 *			ECORE_SPQ_MODE_CB.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);
210 
/* Set "accept" filters.
 *
 * @param p_dev
 * @param vport			vport to configure.
 * @param accept_flags		Rx/Tx accept configuration to apply.
 * @param update_accept_any_vlan whether accept_any_vlan should be applied.
 * @param accept_any_vlan	accept traffic regardless of VLAN tag.
 * @param comp_mode		ramrod completion mode.
 * @param p_comp_data		completion cookie/callback; relevant for
 *				ECORE_SPQ_MODE_CB.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev		 *p_dev,
	u8				 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8				 update_accept_any_vlan,
	u8				 accept_any_vlan,
	enum spq_mode			 comp_mode,
	struct ecore_spq_comp_cb	 *p_comp_data);
221 
/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes 		Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr	Physical address of BDs for receive.
 * @param cqe_pbl_addr		Physical address of the CQE PBL Table.
 * @param cqe_pbl_size 		Size of the CQE PBL Table
 * @param p_ret_params		Pointed struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);
248 
/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq			Handler of queue to close
 * @param eq_completion_only	If True completion will be on
 *				EQe, if False completion will be
 *				on EQe if p_hwfn opaque
 *				different from the RXQ opaque
 *				otherwise on CQe.
 * @param cqe_completion	If True completion will be
 *				received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);
268 
/**
 * @brief - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params		Inputs; Relative for PF [SB being an exception]
 * @param tc			traffic class to use with this L2 txq
 * @param pbl_addr		address of the pbl array
 * @param pbl_size 		number of entries in pbl
 * @param p_ret_params		Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);
293 
/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to Tx queue needed to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);
304 
/* Aggregation mode for a vport. RSC/GRO presumably refer to receive
 * segment coalescing and GRO-compatible aggregation respectively --
 * confirm against the firmware interface definitions.
 */
enum ecore_tpa_mode	{
	ECORE_TPA_MODE_NONE,	/* aggregation disabled */
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};
311 
/* Inputs to ecore_sp_vport_start(). */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};
337 
/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params		VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);
352 
/* Inputs to ecore_sp_vport_update(). Each update_*_flg field indicates
 * whether its corresponding value field(s) should be applied; clear
 * flags leave the current setting untouched. The optional sub-structure
 * pointers (rss_params, sge_tpa_params) may be OSAL_NULL when that
 * configuration is not being updated -- TODO(review): confirm against
 * the implementation.
 */
struct ecore_sp_vport_update_params {
	u16			opaque_fid;
	u8			vport_id;
	u8			update_vport_active_rx_flg;
	u8			vport_active_rx_flg;
	u8			update_vport_active_tx_flg;
	u8			vport_active_tx_flg;
	u8			update_inner_vlan_removal_flg;
	u8			inner_vlan_removal_flg;
	u8			silent_vlan_removal_flg;
	u8			update_default_vlan_enable_flg;
	u8			default_vlan_enable_flg;
	u8			update_default_vlan_flg;
	u16			default_vlan;
	u8			update_tx_switching_flg;
	u8			tx_switching_flg;
	u8			update_approx_mcast_flg;
	u8			update_anti_spoofing_en_flg;
	u8			anti_spoofing_en;
	u8			update_accept_any_vlan_flg;
	u8			accept_any_vlan;
	/* approximate-multicast bins; applied with update_approx_mcast_flg */
	unsigned long		bins[8];
	struct ecore_rss_params	*rss_params;
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params;
};
379 
/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 * @param comp_mode		ramrod completion mode.
 * @param p_comp_data		completion cookie/callback; relevant for
 *				ECORE_SPQ_MODE_CB.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);
414 
/* Per-hwfn variant of the unicast filter command; see
 * ecore_filter_ucast_cmd() for the device-wide entry point.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_filter_cmd	filter command (opcode, type and match fields).
 * @param comp_mode	ramrod completion mode.
 * @param p_comp_data	completion cookie/callback; relevant for
 *			ECORE_SPQ_MODE_CB.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);
421 
/**
 * @brief ecore_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers	An array of queue handlers to be updated.
 * @param num_rxqs              number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);
449 
/* Read statistics for a single hwfn into *stats; b_get_port_stats
 * selects whether port-level statistics are gathered as well.
 */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

/* Read device-wide vport statistics into *stats. */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

/* Reset the device's vport statistics counters. */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
459 
/**
 *@brief ecore_arfs_mode_configure -
 *
 *Enable or disable rfs mode. It must accept at least one of tcp or udp true
 *and at least one of ipv4 or ipv6 true to enable rfs mode.
 *
 *@param p_hwfn
 *@param p_ptt
 *@param p_cfg_params		arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);
474 
/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb		Used for ECORE_SPQ_MODE_CB, where client would
 *			initialize it with cookie and callback function
 *			address; if not using this mode then client must
 *			pass NULL.
 * @param p_addr	p_addr is an actual packet header that needs to be
 *			filtered. It has to be mapped with IO to read prior
 *			to calling this, [contains 4 tuples - src ip,
 *			dest ip, src port, dest port].
 * @param length	length of p_addr header up to past the transport
 *			header.
 * @param qid		receive packet will be directed to this queue.
 * @param vport_id
 * @param b_is_add	flag to add or remove filter.
 *
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  dma_addr_t p_addr, u16 length,
				  u16 qid, u8 vport_id,
				  bool b_is_add);
500 #endif
501