/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__

#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"

struct ecore_wake_info {
	u32 wk_info;
	u32 wk_details;
	u32 wk_pkt_len;
	u8  wk_buffer[256];
};

/**
 * @brief ecore_init_dp - initialize the debug level
 *
 * @param p_dev
 * @param dp_module
 * @param dp_level
 * @param dp_ctx
 */
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module,
		   u8 dp_level,
		   void *dp_ctx);

/**
 * @brief ecore_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param p_dev
 */
void ecore_init_struct(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_free - free the device's resources
 *
 * @param p_dev
 */
void ecore_resc_free(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_alloc - allocate the device's resources
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);

/**
 * @brief ecore_resc_setup - setup the device's allocated resources
 *
 * @param p_dev
 */
void ecore_resc_setup(struct ecore_dev *p_dev);

enum ecore_override_force_load {
	ECORE_OVERRIDE_FORCE_LOAD_NONE,
	ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
	ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};

struct ecore_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine
	 * for the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT	0
#define ECORE_LOAD_REQ_LOCK_TO_NONE	255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum ecore_override_force_load override_force_load;
};

struct ecore_hw_init_params {
	/* Tunneling parameters */
	struct ecore_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum ecore_int_mode int_mode;

	/* NPAR tx switching to be used for vports configured for
	 * tx-switching
	 */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct ecore_drv_load_params *p_drv_load_params;
};

/**
 * @brief ecore_hw_init - initialize the hardware and start the device
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params);
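
/*
 * Illustrative sketch (not part of this API): one way a driver might fill
 * the load/init parameters before calling ecore_hw_init(). The choices
 * shown (MSI-X mode, default MFW timeout) and the `fw_image' blob are
 * assumptions for the example, not requirements.
 *
 *	struct ecore_drv_load_params load_params = {0};
 *	struct ecore_hw_init_params init_params = {0};
 *
 *	load_params.is_crash_kernel = false;
 *	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
 *	load_params.avoid_eng_reset = false;
 *	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;
 *
 *	init_params.p_tunn = NULL;
 *	init_params.b_hw_start = true;
 *	init_params.int_mode = ECORE_INT_MODE_MSIX;
 *	init_params.allow_npar_tx_switch = true;
 *	init_params.bin_fw_data = fw_image;	// hypothetical firmware blob
 *	init_params.p_drv_load_params = &load_params;
 *
 *	if (ecore_hw_init(p_dev, &init_params) != ECORE_SUCCESS)
 *		// handle failure
 */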

/**
 * @brief ecore_hw_timers_stop_all - stop the HW timers on all hwfns
 *
 * @param p_dev
 *
 * @return void
 */
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop - stop the device
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param p_dev
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_prepare - should be called when
 *        the system is going into the hibernate state
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_hibernate_resume - should be called when the system is
 *        resuming from D3 power state and before calling ecore_hw_init.
 *
 * @param p_dev
 *
 */
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);

/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);

enum ecore_hw_prepare_result {
	ECORE_HW_PREPARE_SUCCESS,

	/* FAILED results indicate probe has failed & cleaned up */
	ECORE_HW_PREPARE_FAILED_ENG2,
	ECORE_HW_PREPARE_FAILED_ME,
	ECORE_HW_PREPARE_FAILED_MEM,
	ECORE_HW_PREPARE_FAILED_DEV,
	ECORE_HW_PREPARE_FAILED_NVM,

	/* BAD results indicate probe has passed even though something went
	 * wrong; trying to actually use the device [i.e., hw_init()] might
	 * have dire repercussions.
	 */
	ECORE_HW_PREPARE_BAD_IOV,
	ECORE_HW_PREPARE_BAD_MCP,
	ECORE_HW_PREPARE_BAD_IGU,
};

struct ecore_hw_prepare_params {
	/* Personality to initialize */
	int personality;

	/* Force the driver's default resource allocation */
	bool drv_resc_alloc;

	/* Check the reg_fifo after any register access */
	bool chk_reg_fifo;

	/* Request the MFW to initiate PF FLR */
	bool initiate_pf_flr;

	/* The OS Epoch time in seconds */
	u32 epoch;

	/* Allow the MFW to collect a crash dump */
	bool allow_mdump;

	/* Allow prepare to pass even if some initializations are failing.
	 * If set, the `p_relaxed_res' field will be set with the result,
	 * and might allow probe to pass even if there are certain issues.
	 */
	bool b_relaxed_probe;
	enum ecore_hw_prepare_result p_relaxed_res;
};

/**
 * @brief ecore_hw_prepare - prepare the device for initialization
 *
 * @param p_dev
 * @param p_params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params);
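
/*
 * Illustrative sketch (not part of this API): probing with a relaxed
 * prepare, where partial initialization failures are reported through
 * `p_relaxed_res' instead of failing the probe outright. The personality
 * value and epoch source are assumptions for the example.
 *
 *	struct ecore_hw_prepare_params params = {0};
 *
 *	params.personality = ECORE_PCI_DEFAULT;	// assumed personality value
 *	params.drv_resc_alloc = false;
 *	params.chk_reg_fifo = false;
 *	params.initiate_pf_flr = true;
 *	params.epoch = (u32)time(NULL);		// OS-specific epoch source
 *	params.allow_mdump = false;
 *	params.b_relaxed_probe = true;
 *
 *	if (ecore_hw_prepare(p_dev, &params) == ECORE_SUCCESS &&
 *	    params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS)
 *		// probe passed with warnings; inspect params.p_relaxed_res
 */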

/**
 * @brief ecore_hw_remove - undo ecore_hw_prepare and release its resources
 *
 * @param p_dev
 */
void ecore_hw_remove(struct ecore_dev *p_dev);

/**
 * @brief ecore_set_nwuf_reg - configure a wake-up pattern register
 *
 * @param p_dev
 * @param reg_idx - index of the pattern register
 * @param pattern_size - size of the pattern
 * @param crc - CRC value of the pattern & mask
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
					u32 reg_idx, u32 pattern_size, u32 crc);

/**
 * @brief ecore_get_wake_info - get magic packet buffer
 *
 * @param p_hwfn
 * @param p_ptt
 * @param wake_info - pointer to ecore_wake_info buffer
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_wake_info *wake_info);

/**
 * @brief ecore_wol_buffer_clear - clear magic packet buffer
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return void
 */
void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt);
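
/*
 * Illustrative sketch (not part of this API): reading the magic-packet wake
 * information after a WoL event, then clearing the buffer for the next
 * event. Error handling is elided.
 *
 *	struct ecore_wake_info wake_info;
 *
 *	if (ecore_get_wake_info(p_hwfn, p_ptt, &wake_info) == ECORE_SUCCESS) {
 *		// wake_info.wk_pkt_len bytes of the waking packet are in
 *		// wake_info.wk_buffer (up to 256 bytes)
 *	}
 *	ecore_wol_buffer_clear(p_hwfn, p_ptt);
 */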

/**
 * @brief ecore_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct ecore_ptt *
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt);
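
/*
 * Illustrative sketch (not part of this API): the acquire/use/release
 * pattern the two functions above are meant for. ecore_ptt_acquire() may
 * fail if no PTT window is free, so the result must be checked; the error
 * policy below is an assumption for the example.
 *
 *	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return ECORE_AGAIN;
 *	// ... register access through p_ptt ...
 *	ecore_ptt_release(p_hwfn, p_ptt);
 */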

#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};

struct ecore_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct ecore_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct ecore_eth_stats {
	struct ecore_eth_stats_common common;
	union {
		struct ecore_eth_stats_bb bb;
		struct ecore_eth_stats_ah ah;
	};
};
#endif

enum ecore_dmae_address_type_t {
	ECORE_DMAE_ADDRESS_HOST_VIRT,
	ECORE_DMAE_ADDRESS_HOST_PHYS,
	ECORE_DMAE_ADDRESS_GRC
};

/* Values for the flags field: if the ECORE_DMAE_FLAG_RW_REPL_SRC flag is
 * set, the source is a block of length DMAE_MAX_RW_SIZE and the destination
 * is larger, then the source block will be duplicated as many times as
 * required to fill the destination block. This is used mostly to write a
 * zeroed buffer to a destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define ECORE_DMAE_FLAG_VF_SRC		0x00000002
#define ECORE_DMAE_FLAG_VF_DST		0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST	0x00000008

struct ecore_dmae_params {
	u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief ecore_dmae_host2grc - copy data from a host source address to a
 * GRC address (dmae_data_offset) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u64 source_addr,
		    u32 grc_addr,
		    u32 size_in_dwords,
		    u32 flags);
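
/*
 * Illustrative sketch (not part of this API): zeroing a GRC region by
 * replicating a small zeroed host buffer, per the
 * ECORE_DMAE_FLAG_RW_REPL_SRC description above. `zero_buf_phys',
 * `grc_dest_addr' and `region_size_in_dwords' are hypothetical; the buffer
 * is assumed DMA-coherent, DMAE_MAX_RW_SIZE long, and pre-zeroed.
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)zero_buf_phys,
 *				 grc_dest_addr, region_size_in_dwords,
 *				 ECORE_DMAE_FLAG_RW_REPL_SRC);
 */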

/**
 * @brief ecore_dmae_grc2host - copy data from a GRC address
 * (dmae_data_offset) to a host destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt,
		    u32 grc_addr,
		    dma_addr_t dest_addr,
		    u32 size_in_dwords,
		    u32 flags);

/**
 * @brief ecore_dmae_host2host - copy data from a source address
 * to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param p_params
 */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     dma_addr_t source_addr,
		     dma_addr_t dest_addr,
		     u32 size_in_dwords,
		     struct ecore_dmae_params *p_params);

/**
 * @brief ecore_chain_alloc - Allocate and initialize a chain
 *
 * @param p_dev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 * @param ext_pbl
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
		  enum ecore_chain_use_mode intended_use,
		  enum ecore_chain_mode mode,
		  enum ecore_chain_cnt_type cnt_type,
		  u32 num_elems,
		  osal_size_t elem_size,
		  struct ecore_chain *p_chain,
		  struct ecore_chain_ext_pbl *ext_pbl);

/**
 * @brief ecore_chain_free - Free chain DMA memory
 *
 * @param p_dev
 * @param p_chain
 */
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain);
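
/*
 * Illustrative sketch (not part of this API): allocating a PBL-based
 * producer/consumer chain and freeing it again. The element type, count and
 * modes are assumptions for the example.
 *
 *	struct ecore_chain chain;
 *
 *	rc = ecore_chain_alloc(p_dev,
 *			       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			       ECORE_CHAIN_MODE_PBL,
 *			       ECORE_CHAIN_CNT_TYPE_U16,
 *			       256,			// num_elems
 *			       sizeof(union my_elem),	// hypothetical element
 *			       &chain,
 *			       OSAL_NULL);		// no external PBL
 *	if (rc == ECORE_SUCCESS)
 *		ecore_chain_free(p_dev, &chain);
 */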

/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id,
				       u16 *dst_id);

/**
 * @brief ecore_fw_vport - Get absolute vport ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id,
				    u8 *dst_id);

/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
 *
 *  @param p_hwfn
 *  @param src_id - relative to p_hwfn
 *  @param dst_id - absolute per engine
 *
 *  @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id,
				      u8 *dst_id);

/**
 * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 *p_filter);

/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u8 *p_filter);

enum ecore_llh_port_filter_type_t {
	ECORE_LLH_FILTER_ETHERTYPE,
	ECORE_LLH_FILTER_TCP_SRC_PORT,
	ECORE_LLH_FILTER_TCP_DEST_PORT,
	ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_PORT,
	ECORE_LLH_FILTER_UDP_DEST_PORT,
	ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - filter type and comparison mode
 */
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type);

/**
 * @brief ecore_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - filter type and comparison mode
 */
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type);
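
/*
 * Illustrative sketch (not part of this API): steering TCP traffic by
 * destination port (3260, the well-known iSCSI port, is just an example
 * value) and later removing the same filter with identical arguments.
 *
 *	rc = ecore_llh_add_protocol_filter(p_hwfn, p_ptt,
 *					   0, 3260,
 *					   ECORE_LLH_FILTER_TCP_DEST_PORT);
 *	// ...
 *	ecore_llh_remove_protocol_filter(p_hwfn, p_ptt,
 *					 0, 3260,
 *					 ECORE_LLH_FILTER_TCP_DEST_PORT);
 */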

/**
 * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt);

/**
 * @brief ecore_llh_set_function_as_default - set function as default per port
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt);

/**
 * @brief Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn	*p_hwfn,
					 struct ecore_ptt	*p_ptt,
					 u16			id,
					 bool			is_vf);
/**
 * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
 *    Tx queues. Coalescing can be configured up to 511 microseconds, but
 *    with varying accuracy [the bigger the value the less accurate], up to
 *    an error of 3 usec for the highest values.
 *    While the API allows setting coalescing per-qid, all queues sharing a
 *    SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *    0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param rx_coal - Rx coalesce value in microseconds.
 * @param tx_coal - Tx coalesce value in microseconds.
 * @param p_handle
 *
 * @return enum _ecore_status_t
 **/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
			 u16 tx_coal, void *p_handle);
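
/*
 * Illustrative sketch (not part of this API): setting Rx/Tx coalescing for
 * one queue. Per the note above, both example values stay within the same
 * range (0-0x7f), which matters when queues share a status block.
 * `p_queue_handle' is a hypothetical queue handle owned by the caller.
 *
 *	rc = ecore_set_queue_coalesce(p_hwfn, 64, 96, p_queue_handle);
 *	// 64 usec Rx, 96 usec Tx
 */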

/**
 * @brief - Recalculate feature distributions based on HW resources and
 * user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and VF_L2_QUE.
 * As a result, this must not be called while RDMA is active or while VFs
 * are enabled.
 *
 * @param p_hwfn
 */
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);

/**
 * @brief ecore_change_pci_hwfn - Enable or disable PCI BUS MASTER
 *
 * @param p_hwfn
 * @param p_ptt
 * @param enable - true/false
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 enable);

#ifndef __EXTRACT__LINUX__
enum ecore_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum ecore_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};
#endif

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 * recovery mechanism.
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data,
					   enum ecore_db_rec_width db_width,
					   enum ecore_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 * recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param p_dev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *                  entry to delete.
 */
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
					   void OSAL_IOMEM *db_addr,
					   void *db_data);
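
/*
 * Illustrative sketch (not part of this API): registering a doorbell with
 * the recovery mechanism when a queue is created, and deleting it with the
 * same db_data key when the queue is destroyed. `qp' is a hypothetical
 * queue object owning the doorbell.
 *
 *	rc = ecore_db_recovery_add(p_dev, qp->db_addr, &qp->db_data,
 *				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	// ... queue lifetime ...
 *	rc = ecore_db_recovery_del(p_dev, qp->db_addr, &qp->db_data);
 */
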
#endif