// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true,		  \
	.msix_irqs = 8U,		  \
	.irq_mask = ~0U,		  \
	.vecs = HW_ATL_B0_RSS_MAX,	  \
	.tcs = HW_ATL_B0_TC_MAX,	  \
	.rxd_alignment = 1U,		  \
	.rxd_size = HW_ATL_B0_RXD_SIZE,   \
	.rxds_max = HW_ATL_B0_MAX_RXD,    \
	.rxds_min = HW_ATL_B0_MIN_RXD,    \
	.txd_alignment = 1U,		  \
	.txd_size = HW_ATL_B0_TXD_SIZE,   \
	.txds_max = HW_ATL_B0_MAX_TXD,    \
	.txds_min = HW_ATL_B0_MIN_TXD,    \
	.txhwb_alignment = 4096U,	  \
	.tx_rings = HW_ATL_B0_TX_RINGS,   \
	.rx_rings = HW_ATL_B0_RX_RINGS,   \
	.hw_features = NETIF_F_HW_CSUM |  \
			NETIF_F_RXCSUM |  \
			NETIF_F_RXHASH |  \
			NETIF_F_SG |      \
			NETIF_F_TSO |     \
			NETIF_F_LRO |     \
			NETIF_F_NTUPLE |  \
			NETIF_F_HW_VLAN_CTAG_FILTER | \
			NETIF_F_HW_VLAN_CTAG_RX |     \
			NETIF_F_HW_VLAN_CTAG_TX |     \
			NETIF_F_GSO_UDP_L4      |     \
			NETIF_F_GSO_PARTIAL,          \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true,		  \
	.mtu = HW_ATL_B0_MTU_JUMBO,	  \
	.mac_regs_count = 88,		  \
	.hw_alive_check_addr = 0x10U

#define FRAC_PER_NS 0x100000000LL
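/* FRAC_PER_NS is 2^32: the PTP frequency-adjustment values handled below
 * are kept in 32.32 fixed point, i.e. a whole number of nanoseconds plus
 * a fraction in units of 1/2^32 ns. Illustrative example (not driver
 * code): 2.5 ns would be encoded as ns = 2, fns = 0x80000000, since
 * 0x80000000 == FRAC_PER_NS / 2.
 */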

const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2GS |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2GS |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2GS |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = AQ_NIC_RATE_2GS |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}

static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
{
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);

	return 0;
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
	unsigned int i_priority = 0U;
	u32 buff_size = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	tc = 0;

	/* TX Packet Scheduler Data TC0 */
	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);

	/* Tx buf size TC0 */
	buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 50U) /
						   100U, tc);
	/* Init TC2 for PTP_TX */
	tc = 2;

	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
					       tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);

	hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);

	/* Init TC2 for PTP_RX */
	tc = 2;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
					       tc);
	/* No flow control for PTP */
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}
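
/* Worked example for the watermark arithmetic in hw_atl_b0_hw_qos_set()
 * above, assuming the B0 defaults HW_ATL_B0_TXBUF_MAX = 160 and
 * HW_ATL_B0_PTP_TXBUF_SIZE = 8 (illustrative values; the authoritative
 * ones live in hw_atl_b0_internal.h): buff_size = 152 KB, and
 * (152 * (1024 / 32) * 66) / 100 = 3210 sets the high (XOFF) watermark
 * in 32-byte units, i.e. ~66% of the TC0 buffer; the low watermark is
 * computed the same way at 50%.
 */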

static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int addr = 0U;
	unsigned int i = 0U;
	int err = 0;
	u32 val;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
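
/* A note on hw_atl_b0_hw_rss_hash_set() above: the 40-byte RSS hash key
 * is programmed as ten 32-bit words through an indirect data/address/
 * write-enable register interface, each word byte-swapped, with the
 * write-enable bit polled until the hardware clears it to acknowledge
 * the write.
 */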

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	u8 *indirection_table = rss_params->indirection_table;
	u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
		   HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
	int err = 0;
	u32 i = 0U;
	u32 val;

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			((i * 3U) & 0xFU));
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get,
						self, val, val == 0,
						1000U, 10000U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
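
/* Worked example for the packing loop in hw_atl_b0_hw_rss_set() above:
 * each redirection entry is HW_ATL_B0_RSS_REDIRECTION_BITS (3) bits
 * wide, packed back to back into the u16 bitary[] words. Entry i lands
 * at bit offset i * 3, so e.g. entry 5 occupies bits 15..17 and
 * straddles bitary[0] and bitary[1]; the u32 store at
 * bitary + (i * 3) / 16 with shift (i * 3) & 0xF handles exactly that
 * straddling case.
 */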

static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	unsigned int i;

	/* TX checksum offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksum offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
						 NETIF_F_RXCSUM));
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
					      NETIF_F_RXCSUM));

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* Outer VLAN tag offload */
	hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);

	/* LRO offloads */
	{
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);

		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		/* the LRO timebase divider is 5 uS (0x61a),
		 * which is multiplied by 50 (0x32)
		 * to get a maximum coalescing interval of 250 uS,
		 * which is the default value
		 */
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		hw_atl_rpo_lro_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
		hw_atl_itr_rsc_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);

		hw_atl_itr_rsc_delay_set(self, 1U);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
	/* Tx TC/Queue number config */
	hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);

	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Always accept untagged packets */
	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00005040U,
			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err = 0;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
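
/* Example of the packing in hw_atl_b0_hw_mac_addr_set() above, with an
 * illustrative address 00:17:b6:01:02:03: the MSW register gets
 * h = 0x0017 (bytes 0..1) and the LSW register gets l = 0xb6010203
 * (bytes 2..5).
 */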

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
	};
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	int err = 0;
	u32 val;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable of
	 * handling more than (8K-MRRS) of incoming DMA data.
	 * The value 24 is in 256-byte units.
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						 1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Map error interrupt */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_B0_ERR_INT << 0x18) |
				    (1U << 0x1F)) |
				   ((HW_ATL_B0_ERR_INT << 0x10) |
				    (1U << 0x17)), 0U);

	/* Enable link interrupt */
	if (aq_nic_cfg->link_irq_vec)
		hw_atl_reg_gen_irq_map_set(self, BIT(7) |
					   aq_nic_cfg->link_irq_vec, 3U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return 0;
}

static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int frag_count = 0U;
	unsigned int pkt_len = 0U;
	bool is_vlan = false;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_B0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_gso_tcp || buff->is_gso_udp) {
			if (buff->is_gso_tcp)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP;
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl |= (buff->len_l3 << 31) |
				    (buff->len_l2 << 24);
			txd->ctl2 |= (buff->mss << 16);
			is_gso = true;

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
			txd->ctl2 |= (buff->len_l4 << 8) |
				     (buff->len_l3 >> 1);
		}
		if (buff->is_vlan) {
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl |= buff->vlan_tx_tag << 4;
			is_vlan = true;
		}
		if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
						((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;

			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso || is_vlan) {
				/* enable tx context */
				txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
			}
			if (is_gso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

			if (is_vlan)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_VLAN;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
				is_gso = false;
				is_vlan = false;
			}
		}
		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_b0_hw_tx_ring_tail_update(self, ring);

	return aq_hw_err_from_flags(self);
}
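
/* A note on the descriptor flow in hw_atl_b0_hw_ring_tx_xmit() above:
 * a GSO or VLAN-tagged packet is preceded by a context descriptor
 * (DESC_TYPE_TXC) carrying MSS, header lengths and/or the VLAN tag;
 * the following data descriptors (DESC_TYPE_TXD) set CTL2_CTX_EN so
 * the hardware applies that context, and the last fragment sets EOP
 * plus CMD_WB to request descriptor writeback.
 */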

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw, aq_ring->idx);

	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, !!vlan_rx_stripping,
					      aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);

	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);

	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
		sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_B0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
					  struct aq_ring_s *ring)
{
	unsigned int i;

	for (i = aq_ring_avail_dx(ring); i--;
			ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)
			&ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];

		rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
		rxd->hdr_addr = 0U;
	}
	/* Make sure the descriptors are updated before bumping the tail */
	wmb();

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
					     struct aq_ring_s *ring)
{
	while (ring->hw_head != ring->sw_tail) {
		struct hw_atl_rxd_hwts_wb_s *hwts_wb =
			(struct hw_atl_rxd_hwts_wb_s *)
			(ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));

		/* RxD is not done */
		if (!(hwts_wb->sec_lw0 & 0x1U))
			break;

		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	unsigned int hw_head_;
	int err = 0;

	hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	for (; ring->hw_head != ring->sw_tail;
		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;
		u8 rx_stat = 0U;

		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
			break;
		}

		buff = &ring->buff_ring[ring->hw_head];

		buff->flags = 0U;
		buff->is_hash_l4 = 0U;

		rx_stat = (0x0000003CU & rxd_wb->status) >> 2;

		is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;

		pkt_type = (rxd_wb->type & HW_ATL_B0_RXD_WB_STAT_PKTTYPE) >>
			   HW_ATL_B0_RXD_WB_STAT_PKTTYPE_SHIFT;

		if (is_rx_check_sum_enabled & BIT(0) &&
		    (0x0U == (pkt_type & 0x3U)))
			buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;

		if (is_rx_check_sum_enabled & BIT(1)) {
			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
						   !!(rx_stat & BIT(3));
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
						   !!(rx_stat & BIT(3));
		}
		buff->is_cso_err = !!(rx_stat & 0x6);
		/* Checksum offload workaround for small packets */
		if (unlikely(rxd_wb->pkt_len <= 60)) {
			buff->is_ip_cso = 0U;
			buff->is_cso_err = 0U;
		}

		if (self->aq_nic_cfg->is_vlan_rx_strip &&
		    ((pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN) ||
		     (pkt_type & HW_ATL_B0_RXD_WB_PKTTYPE_VLAN_DOUBLE))) {
			buff->is_vlan = 1;
			buff->vlan_rx_tag = le16_to_cpu(rxd_wb->vlan);
		}

		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
			/* MAC error or DMA error */
			buff->is_error = 1U;
		}
		if (self->aq_nic_cfg->is_rss) {
			/* low 4 bits of the type field carry the RSS type */
			u16 rss_type = rxd_wb->type & 0xFU;

			if (rss_type && rss_type < 0x8U) {
				buff->is_hash_l4 = (rss_type == 0x4 ||
				rss_type == 0x5);
				buff->rss_hash = rxd_wb->rss_hash;
			}
		}

		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
			buff->len = rxd_wb->pkt_len %
				AQ_CFG_RX_FRAME_MAX;
			buff->len = buff->len ?
				buff->len : AQ_CFG_RX_FRAME_MAX;
			buff->next = 0U;
			buff->is_eop = 1U;
		} else {
			buff->len =
				rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
				AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;

			if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
				rxd_wb->status) {
				/* LRO */
				buff->next = rxd_wb->next_desc_ptr;
				++ring->stats.rx.lro_packets;
			} else {
				/* jumbo */
				buff->next =
					aq_ring_next_dx(ring,
							ring->hw_head);
				++ring->stats.rx.jumbo_packets;
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
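
/* Worked example for the EOP length math in the receive loop above:
 * every buffer is AQ_CFG_RX_FRAME_MAX bytes and rxd_wb->pkt_len holds
 * the total packet length, so with an illustrative 2048-byte frame max
 * a 5000-byte packet leaves 5000 % 2048 = 904 valid bytes in its last
 * buffer; a remainder of 0 would mean the last buffer is completely
 * full, hence the fixup back to AQ_CFG_RX_FRAME_MAX.
 */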

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

	atomic_inc(&self->dpc);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);

	return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	unsigned int i = 0U;

	hw_atl_rpfl2promiscuous_mode_en_set(self,
					    IS_FILTER_ENABLED(IFF_PROMISC));

	hw_atl_rpf_vlan_prom_mode_en_set(self,
				     IS_FILTER_ENABLED(IFF_PROMISC) ||
				     cfg->is_vlan_force_promisc);

	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_ALLMULTI) &&
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);

	hw_atl_rpfl2_accept_all_mc_packets_set(self,
					      IS_FILTER_ENABLED(IFF_ALLMULTI) &&
					      IS_FILTER_ENABLED(IFF_MULTICAST));

	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled &&
					    (i <= cfg->mc_list_count)) ?
					   1U : 0U, i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_HW_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;

	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U;
			cfg->mc_list_count < count;
			++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
					(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
							HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
							HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self,
					   (cfg->is_mc_list_enabled),
					   HW_ATL_B0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);

			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},  /* 10Gbit */
				{0xCU, 0x70U},  /* 5Gbit */
				{0xCU, 0x70U},  /* 5Gbit 5GS */
				{0x18U, 0xE0U}, /* 2.5Gbit */
				{0x30U, 0x80U}, /* 1Gbit */
				{0x4U, 0x50U},  /* 100Mbit */
			};

			unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}
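
/* A note on the itr_tx/itr_rx values above (as derived from the shifts
 * in hw_atl_b0_hw_interrupt_moderation_set()): bits 15:8 hold the
 * minimum moderation timer and bits 23:16 the maximum timer, both in
 * 2 us units, while the initial value 2U in the low byte is the enable
 * setting this driver uses; 0 disables moderation entirely.
 */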

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
	int err;
	u32 val;

	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

	/* Invalidate Descriptor Cache to prevent writing to the cached
	 * descriptors and to the data pointer of those descriptors
	 */
	hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

	err = aq_hw_err_from_flags(self);

	if (err)
		goto err_exit;

	readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
				  self, val, val == 1, 1000U, 10000U);

err_exit:
	return err;
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
	*tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
	*tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
	return aq_hw_err_from_flags(self);
}

#define get_ptp_ts_val_u64(self, indx) \
	((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))

static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
{
	u64 ns;

	hw_atl_pcs_ptp_clock_read_enable(self, 1);
	hw_atl_pcs_ptp_clock_read_enable(self, 0);
	ns = (get_ptp_ts_val_u64(self, 0) +
	      (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
	     (get_ptp_ts_val_u64(self, 3) +
	      (get_ptp_ts_val_u64(self, 4) << 16));

	*stamp = ns + self->ptp_clk_offset;
}

static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
{
	/* For accuracy the period is scaled up by NSEC_PER_SEC (i.e.
	 * carries extra digits) before the fractional part is extracted.
	 */
	s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
	u64 nsi_frac = 0;
	u64 nsi;

	base_ns = div64_s64(base_ns, freq);
	nsi = div64_u64(base_ns, NSEC_PER_SEC);

	if (base_ns != nsi * NSEC_PER_SEC) {
		s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
					base_ns - nsi * NSEC_PER_SEC);
		nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
	}

	*ns = (u32)nsi;
	*fns = (u32)nsi_frac;
}
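
/* Worked example for hw_atl_b0_adj_params_get() above, with an
 * illustrative freq of 100 MHz (callers actually pass
 * AQ_HW_MAC_COUNTER_HZ / AQ_HW_PHY_COUNTER_HZ) and adj = +1000 ppb:
 *   base_ns  = (1e9 + 1000) * 1e9 / 1e8 = 10000010000
 *   nsi      = 10 (whole nanoseconds of the per-tick increment)
 *   remainder 10000 -> divisor = 1e18 / 10000 = 1e14
 *   nsi_frac = FRAC_PER_NS * 1e9 / 1e14 ~= 42949 (~1e-5 ns)
 * so each tick advances time by ~10.00001 ns, the nominal 10 ns period
 * scaled by (1 + 1000e-9).
 */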

static void
hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
			     u64 phyfreq, u64 macfreq)
{
	s64 adj_fns_val;
	s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
					FRAC_PER_NS * ptp_adj_freq->ns_phy);
	s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
					FRAC_PER_NS * ptp_adj_freq->ns_mac);
	s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
	s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
	/* MAC MCP counter freq is macfreq / 4 */
	s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
				   4 * FRAC_PER_NS;

	diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
					 AQ_HW_MAC_COUNTER_HZ);
	adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
		       ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;

	ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
	ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
				    FRAC_PER_NS;
}

static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
{
	self->ptp_clk_offset += delta;

	return 0;
}
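
/* A note on the clock model used by hw_atl_b0_adj_sys_clock() above and
 * hw_atl_b0_set_sys_clock() below: the hardware PTP counter is never
 * stepped; time is disciplined purely by moving the software
 * ptp_clk_offset that hw_atl_b0_get_ptp_ts() and the timestamp
 * extraction helpers add to the raw counter value.
 */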

static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
{
	s64 delta = time - (self->ptp_clk_offset + ts);

	return hw_atl_b0_adj_sys_clock(self, delta);
}

static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
{
	*time = self->ptp_clk_offset + ts;
	return 0;
}

static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
{
	struct hw_fw_request_iface fwreq;
	size_t size;

	memset(&fwreq, 0, sizeof(fwreq));

	fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
	hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
				 &fwreq.ptp_adj_freq.ns_mac,
				 &fwreq.ptp_adj_freq.fns_mac);
	hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
				 &fwreq.ptp_adj_freq.ns_phy,
				 &fwreq.ptp_adj_freq.fns_phy);
	hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
				     AQ_HW_PHY_COUNTER_HZ,
				     AQ_HW_MAC_COUNTER_HZ);

	size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}

static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
				u64 start, u32 period)
{
	struct hw_fw_request_iface fwreq;
	size_t size;

	memset(&fwreq, 0, sizeof(fwreq));

	fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
	fwreq.ptp_gpio_ctrl.index = index;
	fwreq.ptp_gpio_ctrl.period = period;
	/* Apply time offset */
	fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;

	size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}

static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
				       u32 enable)
{
	/* Enable/disable Sync1588 GPIO Timestamping */
	aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);

	return 0;
}

static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
{
	u64 sec_l;
	u64 sec_h;
	u64 nsec_l;
	u64 nsec_h;

	if (!ts)
		return -1;

	/* PTP external GPIO clock seconds count 15:0 */
	sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
	/* PTP external GPIO clock seconds count 31:16 */
	sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
	/* PTP external GPIO clock nanoseconds count 15:0 */
	nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
	/* PTP external GPIO clock nanoseconds count 31:16 */
	nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);

	*ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;

	return 0;
}

static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
				   unsigned int len, u64 *timestamp)
{
	unsigned int offset = 14;
	struct ethhdr *eth;
	__be64 sec;
	__be32 ns;
	u8 *ptr;

	if (len <= offset || !timestamp)
		return 0;

	/* The TIMESTAMP at the end of the packet has the following
	 * (big-endian) format:
	 *   struct {
	 *     uint64_t sec;
	 *     uint32_t ns;
	 *     uint16_t stream_id;
	 *   };
	 */
	ptr = p + (len - offset);
	memcpy(&sec, ptr, sizeof(sec));
	ptr += sizeof(sec);
	memcpy(&ns, ptr, sizeof(ns));

	*timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
		     be32_to_cpu(ns) + self->ptp_clk_offset;

	eth = (struct ethhdr *)p;

	return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
}

static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
				  u64 *timestamp)
{
	struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
	u64 tmp, sec, ns;

	sec = 0;
	tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
	sec += tmp;
	tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
	sec += tmp;
	ns = sec * NSEC_PER_SEC + hwts_wb->ns;
	if (timestamp)
		*timestamp = ns + self->ptp_clk_offset;
	return 0;
}
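
/* Field layout recovered by hw_atl_b0_extract_hwts() above: the 48-bit
 * seconds value is scattered across the writeback descriptor as
 *   sec[9:0]   = sec_lw0[11:2]
 *   sec[25:10] = sec_lw1[31:16]
 *   sec[37:26] = sec_hw[11:0]
 *   sec[47:38] = sec_hw[31:22]
 * while the nanoseconds are carried whole in hwts_wb->ns.
 */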

static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
				    struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	if (!data->is_ipv6) {
		hw_atl_rpfl3l4_cmd_clear(self, location);
		hw_atl_rpf_l4_spd_set(self, 0U, location);
		hw_atl_rpf_l4_dpd_set(self, 0U, location);
		hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
	} else {
		int i;

		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			hw_atl_rpfl3l4_cmd_clear(self, location + i);
			hw_atl_rpf_l4_spd_set(self, 0U, location + i);
			hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
		}
		hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
		hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
				  struct aq_rx_filter_l3l4 *data)
{
	u8 location = data->location;

	hw_atl_b0_hw_fl3l4_clear(self, data);

	if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
			 HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
		if (!data->is_ipv6) {
			hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
							  location,
							  data->ip_dst[0]);
			hw_atl_rpfl3l4_ipv4_src_addr_set(self,
							 location,
							 data->ip_src[0]);
		} else {
			hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
							  location,
							  data->ip_dst);
			hw_atl_rpfl3l4_ipv6_src_addr_set(self,
							 location,
							 data->ip_src);
		}
	}

	if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
			 HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
		hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
		hw_atl_rpf_l4_spd_set(self, data->p_src, location);
	}

	hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
				struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
	hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self,
					     !!data->user_priority_en,
					     data->location);
	if (data->user_priority_en)
		hw_atl_rpf_etht_user_priority_set(self,
						  data->user_priority,
						  data->location);

	if (data->queue < 0) {
		hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
	} else {
		hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
		hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
				  struct aq_rx_filter_l2 *data)
{
	hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
	hw_atl_rpf_etht_flr_set(self, 0U, data->location);
	hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);

	return aq_hw_err_from_flags(self);
}

/**
 * @brief Set VLAN filter table
 * @details Configure the VLAN filter table to accept (and assign a queue to)
 *  traffic for the given VLAN ids.
 * Note: use this function under VLAN promisc mode so as not to lose traffic
 *
 * @param aq_hw_s
 * @param aq_rx_filter_vlan VLAN filter configuration
 * @return 0 - OK, <0 - error
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
				 struct aq_rx_filter_vlan *aq_vlans)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id,
						   i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* enable promisc mode when disabling the VLAN filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
{
	switch (mode) {
	case AQ_HW_LOOPBACK_DMA_SYS:
		hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable);
		hw_atl_rpb_dma_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_PKT_SYS:
		hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable);
		hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable);
		break;
	case AQ_HW_LOOPBACK_DMA_NET:
		hw_atl_rpf_vlan_prom_mode_en_set(self, enable);
		hw_atl_rpfl2promiscuous_mode_en_set(self, enable);
		hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable);
		hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable);
		hw_atl_rpb_dma_net_lbk_set(self, enable);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl_b0_hw_init,
	.hw_reset             = hw_atl_b0_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
	.hw_filter_l2_set            = hw_atl_b0_hw_fl2_set,
	.hw_filter_l2_clear          = hw_atl_b0_hw_fl2_clear,
	.hw_filter_l3l4_set          = hw_atl_b0_hw_fl3l4_set,
	.hw_filter_vlan_set          = hw_atl_b0_hw_vlan_set,
	.hw_filter_vlan_ctrl         = hw_atl_b0_hw_vlan_ctrl,
	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,

	.hw_tx_tc_mode_get       = hw_atl_b0_tx_tc_mode_get,
	.hw_rx_tc_mode_get       = hw_atl_b0_rx_tc_mode_get,

	.hw_ring_hwts_rx_fill        = hw_atl_b0_hw_ring_hwts_rx_fill,
	.hw_ring_hwts_rx_receive     = hw_atl_b0_hw_ring_hwts_rx_receive,

	.hw_get_ptp_ts           = hw_atl_b0_get_ptp_ts,
	.hw_adj_sys_clock        = hw_atl_b0_adj_sys_clock,
	.hw_set_sys_clock        = hw_atl_b0_set_sys_clock,
	.hw_ts_to_sys_clock      = hw_atl_b0_ts_to_sys_clock,
	.hw_adj_clock_freq       = hw_atl_b0_adj_clock_freq,
	.hw_gpio_pulse           = hw_atl_b0_gpio_pulse,
	.hw_extts_gpio_enable    = hw_atl_b0_extts_gpio_enable,
	.hw_get_sync_ts          = hw_atl_b0_get_sync_ts,
	.rx_extract_ts           = hw_atl_b0_rx_extract_ts,
	.extract_hwts            = hw_atl_b0_extract_hwts,
	.hw_set_offload          = hw_atl_b0_hw_offload_set,
	.hw_set_loopback         = hw_atl_b0_set_loopback,
	.hw_set_fc               = hw_atl_b0_set_fc,
};