/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__

#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#include "ecore_l2.h"

#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
	(MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)

/* Represents a full message: both the request filled by the VF and the
 * response filled by the PF. The VF needs one copy of this message; it
 * fills the request part and sends it to the PF. The PF copies its answer
 * into the response part for the VF to read later. The PF holds one such
 * message per VF - the request copied from the VF is placed in the request
 * part, and the response is filled by the PF before being sent back to
 * the VF.
 */
struct ecore_vf_mbx_msg {
	union vfpf_tlvs req;
	union pfvf_tlvs resp;
};
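
/* Hedged usage sketch (helper names are hypothetical, not part of this
 * header): the VF fills the request half and hands it to the PF over the
 * HW channel; the PF answers through the response half.
 *
 *	struct ecore_vf_mbx_msg *msg = vf_get_mbx_msg();   // hypothetical
 *	vf_prepare_acquire(&msg->req);   // hypothetical: build request TLVs
 *	vf_send_to_pf(msg);              // hypothetical: pass req to the PF
 *	// ... once the PF replies, the answer is read from msg->resp ...
 */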

/* This mailbox is maintained per VF in its PF and contains all
 * information required for sending / receiving a message.
 */
struct ecore_iov_vf_mbx {
	union vfpf_tlvs		*req_virt;
	dma_addr_t		req_phys;
	union pfvf_tlvs		*reply_virt;
	dma_addr_t		reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t		pending_req;

	/* Message from VF awaits handling */
	bool			b_pending_msg;

	u8 *offset;

#ifdef CONFIG_ECORE_SW_CHANNEL
	struct ecore_iov_sw_mbx sw_mbx;
#endif

	/* VF GPA address */
	u32			vf_addr_lo;
	u32			vf_addr_hi;

	struct vfpf_first_tlv	first_tlv;	/* saved VF request header */

	u8			flags;
#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent
					 * more than one pending msg
					 */
};
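
/* Hedged sketch of the PF-side flow these fields support (function and
 * variable names are hypothetical): the SRIOV EQE handler records where
 * the VF's request lives and marks it pending; the IOV processing context
 * later copies the request into req_virt, builds the reply in reply_virt
 * and DMAs it back to the guest address held in vf_addr_hi/vf_addr_lo.
 *
 *	p_mbx->pending_req = vf_msg_addr;   // hypothetical: taken from the EQE
 *	p_mbx->b_pending_msg = true;
 *	// ... later, from the IOV event-processing context ...
 *	if (p_mbx->b_pending_msg)
 *		pf_process_vf_request(p_hwfn, p_vf);   // hypothetical handler
 */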

#define ECORE_IOV_LEGACY_QID_RX (0)
#define ECORE_IOV_LEGACY_QID_TX (1)
#define ECORE_IOV_QID_INVALID (0xFE)

struct ecore_vf_queue_cid {
	bool b_is_tx;
	struct ecore_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct ecore_vf_queue {
	/* Input from upper-layer, mapping relative queue to queue-zone */
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE		= 0,	/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED	= 1,	/* VF acquired, but not initialized */
	VF_ENABLED	= 2,	/* VF enabled */
	VF_RESET	= 3,	/* VF FLR'd, pending cleanup */
	VF_STOPPED	= 4	/* VF stopped */
};

struct ecore_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct ecore_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
	struct ecore_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8			to_disable;

	struct ecore_bulletin	bulletin;
	dma_addr_t		vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32			concrete_fid;
	u16			opaque_fid;
	u16			mtu;

	u8			vport_id;
	u8			rss_eng_id;
	u8			relative_vf_id;
	u8			abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf)	(ECORE_PATH_ID(p_hwfn) ? \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)
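/* Worked example of the macro above, under the assumption that
 * ECORE_PATH_ID() returns the engine index: on engine 0 the absolute ID is
 * used as-is, while on engine 1 of a two-engine (BB) device a VF with
 * abs_vf_id 3 maps to 3 + MAX_NUM_VFS_BB.
 */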

	u8			vport_instance; /* Number of active vports */
	u8			num_rxqs;
	u8			num_txqs;

	u16			rx_coal;
	u16			tx_coal;

	u8			num_sbs;

	u8			num_mac_filters;
	u8			num_vlan_filters;

	struct ecore_vf_queue	vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
	u16			igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];

	/* TODO - Only Windows is using it - should be removed */
	u8 was_malicious;
	u8 num_active_rxqs;
	void *ctx;
	struct ecore_public_vf_info p_vf_info;
	bool spoof_chk;		/* Currently configured on HW */
	bool req_spoofchk_val;	/* Requested value */

	/* Stores the configuration requested by VF */
	struct ecore_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};
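
/* Example of how configured_features is meant to be consulted, assuming
 * MAC_ADDR_FORCED is one of the bulletin valid-map bit definitions (as the
 * mask above suggests): check whether a forced MAC was already pushed to
 * this VF's bulletin board before configuring another one.
 *
 *	if (p_vf->configured_features & (1 << MAC_ADDR_FORCED)) {
 *		// a forced MAC is already in effect for this VF
 *	}
 */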

/* This structure is part of ecore_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct ecore_pf_iov {
	struct ecore_vf_info	vfs_array[MAX_NUM_VFS_E4];
	u64			pending_flr[ECORE_VF_ARRAY_LENGTH];

#ifndef REMOVE_DBG
	/* This doesn't serve anything functionally, but it makes Windows
	 * debugging of IOV related issues easier.
	 */
	u64			active_vfs[ECORE_VF_ARRAY_LENGTH];
#endif

	/* Mailbox message/reply buffers are allocated contiguously and then
	 * split among the VFs.
	 */
	void			*mbx_msg_virt_addr;
	dma_addr_t		mbx_msg_phys_addr;
	u32			mbx_msg_size;
	void			*mbx_reply_virt_addr;
	dma_addr_t		mbx_reply_phys_addr;
	u32			mbx_reply_size;
	void			*p_bulletins;
	dma_addr_t		bulletins_phys;
	u32			bulletins_size;
};

#ifdef CONFIG_ECORE_SRIOV
/**
 * @brief Read sriov related information and allocate resources;
 *  reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
 *
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
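
/* Minimal usage sketch for ecore_add_tlv(): build a reply by chaining TLVs
 * and terminating the list. The TLV type and struct names below
 * (CHANNEL_TLV_LIST_END, struct channel_list_end_tlv, struct
 * pfvf_def_resp_tlv) are assumed to come from ecore_vfpf_if.h; treat them
 * as illustrative rather than guaranteed by this header.
 *
 *	u8 *offset = (u8 *)p_mbx->reply_virt;
 *	struct pfvf_def_resp_tlv *p_resp =
 *		ecore_add_tlv(&offset, first_tlv_type, sizeof(*p_resp));
 *	ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	// 'offset' now points past the last TLV written to the buffer
 */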

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
		       void *tlvs_list);

/**
 * @brief ecore_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_setup(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void ecore_iov_free(struct ecore_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param p_dev
 */
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);

/**
 * @brief ecore_sriov_eqe_event - handle an async sriov event that arrived
 *        on the eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn	 *p_hwfn,
					   u8			 opcode,
					   __le16		 echo,
					   union event_ring_data *data);

/**
 * @brief Mark the structs of VFs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return true if at least one of this PF's VFs was FLR-ed, false otherwise.
 */
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
			   u32 *disabled_vfs);
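
/* Hedged sketch of the intended call site (caller names are hypothetical):
 * the management FW reports a per-path bitmap of FLR-ed VFs; if any of them
 * belong to this PF, the FLR cleanup flow should be scheduled.
 *
 *	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
 *		schedule_vf_flr_cleanup(p_hwfn);   // hypothetical follow-up
 */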

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to the TLV of the requested type if found, otherwise NULL.
 */
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type);
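
/* Illustrative sketch: look for an optional extended TLV that the VF may
 * have appended to its request. The TLV type and struct names
 * (CHANNEL_TLV_VPORT_UPDATE_RSS, struct vfpf_vport_update_rss_tlv) are
 * assumed to come from ecore_vfpf_if.h and are used here only as an example.
 *
 *	struct vfpf_vport_update_rss_tlv *p_rss_tlv =
 *		ecore_iov_search_list_tlvs(p_hwfn, p_vf->vf_mbx.req_virt,
 *					   CHANNEL_TLV_VPORT_UPDATE_RSS);
 *	if (p_rss_tlv != OSAL_NULL) {
 *		// the VF attached RSS parameters to its vport-update request
 *	}
 */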

/**
 * @brief ecore_iov_get_vf_info - return the database of a
 *        specific VF
 *
 * @param p_hwfn
 * @param relative_vf_id - relative id of the VF for which info
 *			 is requested
 * @param b_enabled_only - set false to access the VF's info even if it is
 *			 not enabled
 *
 * @return struct ecore_vf_info*
 */
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only);
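
/* Minimal usage sketch: fetch the per-VF database entry and bail out if the
 * VF is not currently enabled (or the id is out of range).
 *
 *	struct ecore_vf_info *p_vf =
 *		ecore_iov_get_vf_info(p_hwfn, relative_vf_id, true);
 *	if (p_vf == OSAL_NULL)
 *		return;   // no enabled VF with this relative id
 */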
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(u8 OSAL_UNUSED **offset, u16 OSAL_UNUSED type, u16 OSAL_UNUSED length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev OSAL_UNUSED *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED opcode, __le16 OSAL_UNUSED echo, union event_ring_data OSAL_UNUSED *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 OSAL_UNUSED crc, u8 OSAL_UNUSED *ptr, u32 OSAL_UNUSED length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u32 OSAL_UNUSED *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *p_tlvs_list, u16 OSAL_UNUSED req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED relative_vf_id, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}

#endif
#endif /* __ECORE_SRIOV_H__ */