1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * aQuantia Corporation Network Driver
4 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
5 */
6
7 /* File hw_atl_llh.h: Declarations of bitfield and register access functions for
8 * Atlantic registers.
9 */
10
11 #ifndef HW_ATL_LLH_H
12 #define HW_ATL_LLH_H
13
14 #include <linux/types.h>
15
16 struct aq_hw_s;
17
18 /* global */
19
20 /* set global microprocessor semaphore */
21 void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
22 u32 semaphore);
23
24 /* get global microprocessor semaphore */
25 u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
26
27 /* set global register reset disable */
28 void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
29
30 /* set soft reset */
31 void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
32
33 /* get soft reset */
34 u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
35
36 /* stats */
37
38 u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
39
40 /* get rx dma good octet counter */
41 u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
42
43 /* get rx dma good packet counter */
44 u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
45
46 /* get tx dma good octet counter */
47 u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
48
49 /* get tx dma good packet counter */
50 u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
51
52 /* get msm rx errors counter register */
53 u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
54
55 /* get msm rx unicast frames counter register */
56 u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
57
58 /* get msm rx multicast frames counter register */
59 u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
60
61 /* get msm rx broadcast frames counter register */
62 u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
63
64 /* get msm rx broadcast octets counter register 1 */
65 u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
66
67 /* get msm rx unicast octets counter register 0 */
68 u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
69
70 /* get msm tx errors counter register */
71 u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
72
73 /* get msm tx unicast frames counter register */
74 u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
75
76 /* get msm tx multicast frames counter register */
77 u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
78
79 /* get msm tx broadcast frames counter register */
80 u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
81
82 /* get msm tx multicast octets counter register 1 */
83 u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
84
85 /* get msm tx broadcast octets counter register 1 */
86 u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
87
88 /* get msm tx unicast octets counter register 0 */
89 u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
90
91 /* get global mif identification */
92 u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
93
94 /* interrupt */
95
96 /* set interrupt auto mask lsw */
97 void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
98 u32 irq_auto_masklsw);
99
100 /* set interrupt mapping enable rx */
101 void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
102 u32 rx);
103
104 /* set interrupt mapping enable tx */
105 void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
106 u32 tx);
107
108 /* set interrupt mapping rx */
109 void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
110
111 /* set interrupt mapping tx */
112 void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
113
114 /* set interrupt mask clear lsw */
115 void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
116 u32 irq_msk_clearlsw);
117
118 /* set interrupt mask set lsw */
119 void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
120
121 /* set interrupt register reset disable */
122 void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
123
124 /* set interrupt status clear lsw */
125 void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
126 u32 irq_status_clearlsw);
127
128 /* get interrupt status lsw */
129 u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
130
131 /* get reset interrupt */
132 u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
133
134 /* set reset interrupt */
135 void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
136
137 /* set RSC (receive segment coalescing) enable */
138 void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);
139
140 /* set RSC delay */
141 void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);
142
143 /* rdm */
144
145 /* set cpu id */
146 void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
147
148 /* set rx dca enable */
149 void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
150
151 /* set rx dca mode */
152 void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
153
154 /* set rx descriptor data buffer size */
155 void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
156 u32 rx_desc_data_buff_size,
157 u32 descriptor);
158
159 /* set rx descriptor dca enable */
160 void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
161 u32 dca);
162
163 /* set rx descriptor enable */
164 void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
165 u32 descriptor);
166
167 /* set rx descriptor header splitting */
168 void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
169 u32 rx_desc_head_splitting,
170 u32 descriptor);
171
172 /* get rx descriptor head pointer */
173 u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
174
175 /* set rx descriptor length */
176 void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
177 u32 descriptor);
178
179 /* set rx descriptor write-back interrupt enable */
180 void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
181 u32 rx_desc_wr_wb_irq_en);
182
183 /* set rx header dca enable */
184 void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
185 u32 dca);
186
187 /* set rx payload dca enable */
188 void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
189 u32 dca);
190
191 /* set rx descriptor header buffer size */
192 void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
193 u32 rx_desc_head_buff_size,
194 u32 descriptor);
195
196 /* set rx descriptor reset */
197 void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
198 u32 descriptor);
199
200 /* Set RDM Interrupt Moderation Enable */
201 void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
202 u32 rdm_intr_moder_en);
203
204 /* reg */
205
206 /* set general interrupt mapping register */
207 void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
208 u32 regidx);
209
210 /* get general interrupt status register */
211 u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
212
213 /* set interrupt global control register */
214 void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
215
216 /* set interrupt throttle register */
217 void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
218
219 /* set rx dma descriptor base address lsw */
220 void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
221 u32 rx_dma_desc_base_addrlsw,
222 u32 descriptor);
223
224 /* set rx dma descriptor base address msw */
225 void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
226 u32 rx_dma_desc_base_addrmsw,
227 u32 descriptor);
228
229 /* get rx dma descriptor status register */
230 u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
231
232 /* set rx dma descriptor tail pointer register */
233 void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
234 u32 rx_dma_desc_tail_ptr,
235 u32 descriptor);
236
237 /* set rx filter multicast filter mask register */
238 void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
239 u32 rx_flr_mcst_flr_msk);
240
241 /* set rx filter multicast filter register */
242 void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
243 u32 filter);
244
245 /* set rx filter rss control register 1 */
246 void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
247 u32 rx_flr_rss_control1);
248
249 /* Set RX Filter Control Register 2 */
250 void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
251
252 /* Set RX Interrupt Moderation Control Register */
253 void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
254 u32 rx_intr_moderation_ctl,
255 u32 queue);
256
257 /* set tx dma debug control */
258 void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
259 u32 tx_dma_debug_ctl);
260
261 /* set tx dma descriptor base address lsw */
262 void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
263 u32 tx_dma_desc_base_addrlsw,
264 u32 descriptor);
265
266 /* set tx dma descriptor base address msw */
267 void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
268 u32 tx_dma_desc_base_addrmsw,
269 u32 descriptor);
270
271 /* set tx dma descriptor tail pointer register */
272 void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
273 u32 tx_dma_desc_tail_ptr,
274 u32 descriptor);
275
276 /* Set TX Interrupt Moderation Control Register */
277 void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
278 u32 tx_intr_moderation_ctl,
279 u32 queue);
280
281 /* set global microprocessor scratch pad */
282 void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
283 u32 glb_cpu_scratch_scp,
284 u32 scratch_scp);
285
286 /* rpb */
287
288 /* set dma system loopback */
289 void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
290
291 /* set dma network loopback */
292 void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk);
293
294 /* set rx traffic class mode */
295 void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
296 u32 rx_traf_class_mode);
297
298 /* get rx traffic class mode */
299 u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
300
301 /* set rx buffer enable */
302 void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
303
304 /* set rx buffer high threshold (per tc) */
305 void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
306 u32 rx_buff_hi_threshold_per_tc,
307 u32 buffer);
308
309 /* set rx buffer low threshold (per tc) */
310 void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
311 u32 rx_buff_lo_threshold_per_tc,
312 u32 buffer);
313
314 /* set rx flow control mode */
315 void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
316 u32 rx_flow_ctl_mode);
317
318 /* set rx packet buffer size (per tc) */
319 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
320 u32 rx_pkt_buff_size_per_tc,
321 u32 buffer);
322
323 /* toggle rdm rx dma descriptor cache init */
324 void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
325
326 /* get rdm rx dma descriptor cache init done */
327 u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
328
329 /* set rx xoff enable (per tc) */
330 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
331 u32 rx_xoff_en_per_tc,
332 u32 buffer);
333
334 /* rpf */
335
336 /* set l2 broadcast count threshold */
337 void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
338 u32 l2broadcast_count_threshold);
339
340 /* set l2 broadcast enable */
341 void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
342
343 /* set l2 broadcast filter action */
344 void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
345 u32 l2broadcast_flr_act);
346
347 /* set l2 multicast filter enable */
348 void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
349 u32 l2multicast_flr_en,
350 u32 filter);
351
352 /* set l2 promiscuous mode enable */
353 void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
354 u32 l2promiscuous_mode_en);
355
356 /* set l2 unicast filter action */
357 void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
358 u32 l2unicast_flr_act,
359 u32 filter);
360
361 /* set l2 unicast filter enable */
362 void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
363 u32 filter);
364
365 /* set l2 unicast destination address lsw */
366 void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
367 u32 l2unicast_dest_addresslsw,
368 u32 filter);
369
370 /* set l2 unicast destination address msw */
371 void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
372 u32 l2unicast_dest_addressmsw,
373 u32 filter);
374
375 /* Set L2 Accept all Multicast packets */
376 void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
377 u32 l2_accept_all_mc_packets);
378
379 /* set user-priority tc mapping */
380 void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
381 u32 user_priority_tc_map, u32 tc);
382
383 /* set rss key address */
384 void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
385
386 /* set rss key write data */
387 void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
388
389 /* get rss key write enable */
390 u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
391
392 /* set rss key write enable */
393 void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
394
395 /* set rss redirection table address */
396 void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
397 u32 rss_redir_tbl_addr);
398
399 /* set rss redirection table write data */
400 void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
401 u32 rss_redir_tbl_wr_data);
402
403 /* get rss redirection write enable */
404 u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
405
406 /* set rss redirection write enable */
407 void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
408
409 /* set tpo to rpf system loopback */
410 void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
411 u32 tpo_to_rpf_sys_lbk);
412
413 /* set vlan inner ethertype */
414 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
415
416 /* set vlan outer ethertype */
417 void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
418
419 /* set vlan promiscuous mode enable */
420 void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
421 u32 vlan_prom_mode_en);
422
423 /* Set VLAN untagged action */
424 void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
425 u32 vlan_untagged_act);
426
427 /* Set VLAN accept untagged packets */
428 void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
429 u32 vlan_acc_untagged_packets);
430
431 /* Set VLAN filter enable */
432 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
433 u32 filter);
434
435 /* Set VLAN Filter Action */
436 void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
437 u32 filter);
438
439 /* Set VLAN ID Filter */
440 void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
441 u32 filter);
442
443 /* Set VLAN RX queue assignment enable */
444 void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
445 u32 filter);
446
447 /* Set VLAN RX queue */
448 void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
449 u32 filter);
450
451 /* set ethertype filter enable */
452 void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
453 u32 filter);
454
455 /* set ethertype user-priority enable */
456 void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
457 u32 etht_user_priority_en,
458 u32 filter);
459
460 /* set ethertype rx queue enable */
461 void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
462 u32 etht_rx_queue_en,
463 u32 filter);
464
465 /* set ethertype rx queue */
466 void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
467 u32 filter);
468
469 /* set ethertype user-priority */
470 void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
471 u32 etht_user_priority,
472 u32 filter);
473
474 /* set ethertype management queue */
475 void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
476 u32 filter);
477
478 /* set ethertype filter action */
479 void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
480 u32 filter);
481
482 /* set ethertype filter */
483 void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
484
485 /* set L4 source port */
486 void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
487
488 /* set L4 destination port */
489 void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
490
491 /* rpo */
492
493 /* set ipv4 header checksum offload enable */
494 void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
495 u32 ipv4header_crc_offload_en);
496
497 /* set rx descriptor vlan stripping */
498 void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
499 u32 rx_desc_vlan_stripping,
500 u32 descriptor);
501
502 void hw_atl_rpo_outer_vlan_tag_mode_set(void *context,
503 u32 outervlantagmode);
504
505 u32 hw_atl_rpo_outer_vlan_tag_mode_get(void *context);
506
507 /* set tcp/udp checksum offload enable */
508 void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
509 u32 tcp_udp_crc_offload_en);
510
511 /* Set LRO Patch Optimization Enable. */
512 void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
513 u32 lro_patch_optimization_en);
514
515 /* Set Large Receive Offload Enable */
516 void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
517
518 /* Set LRO Q Sessions Limit */
519 void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
520 u32 lro_qsessions_lim);
521
522 /* Set LRO Total Descriptor Limit */
523 void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
524 u32 lro_total_desc_lim);
525
526 /* Set LRO Min Payload of First Packet */
527 void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
528 u32 lro_min_pld_of_first_pkt);
529
530 /* Set LRO Packet Limit */
531 void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
532
533 /* Set LRO Max Number of Descriptors */
534 void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
535 u32 lro_max_desc_num, u32 lro);
536
537 /* Set LRO Time Base Divider */
538 void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
539 u32 lro_time_base_divider);
540
541 /* Set LRO Inactive Interval */
542 void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
543 u32 lro_inactive_interval);
544
545 /* Set LRO Max Coalescing Interval */
546 void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
547 u32 lro_max_coal_interval);
548
549 /* rx */
550
551 /* set rx register reset disable */
552 void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
553
554 /* tdm */
555
556 /* set cpu id */
557 void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
558
559 /* set large send offload enable */
560 void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
561 u32 large_send_offload_en);
562
563 /* set tx descriptor enable */
564 void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
565 u32 descriptor);
566
567 /* set tx dca enable */
568 void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
569
570 /* set tx dca mode */
571 void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
572
573 /* set tx descriptor dca enable */
574 void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
575 u32 dca);
576
577 /* get tx descriptor head pointer */
578 u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
579
580 /* set tx descriptor length */
581 void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
582 u32 descriptor);
583
584 /* set tx descriptor write-back interrupt enable */
585 void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
586 u32 tx_desc_wr_wb_irq_en);
587
588 /* set tx descriptor write-back threshold */
589 void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
590 u32 tx_desc_wr_wb_threshold,
591 u32 descriptor);
592
593 /* Set TDM Interrupt Moderation Enable */
594 void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
595 u32 tdm_irq_moderation_en);
596 /* thm */
597
598 /* set lso tcp flag of first packet */
599 void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
600 u32 lso_tcp_flag_of_first_pkt);
601
602 /* set lso tcp flag of last packet */
603 void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
604 u32 lso_tcp_flag_of_last_pkt);
605
606 /* set lso tcp flag of middle packet */
607 void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
608 u32 lso_tcp_flag_of_middle_pkt);
609
610 /* tpb */
611
612 /* set TX Traffic Class Mode */
613 void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
614 u32 tx_traf_class_mode);
615
616 /* get TX Traffic Class Mode */
617 u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
618
619 /* set tx buffer enable */
620 void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
621
622 /* set tx buffer high threshold (per tc) */
623 void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
624 u32 tx_buff_hi_threshold_per_tc,
625 u32 buffer);
626
627 /* set tx buffer low threshold (per tc) */
628 void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
629 u32 tx_buff_lo_threshold_per_tc,
630 u32 buffer);
631
632 /* set tx dma system loopback enable */
633 void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
634
635 /* set tx dma network loopback enable */
636 void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw,
637 u32 tx_dma_net_lbk_en);
638
639 /* set tx clock gating enable */
640 void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw,
641 u32 tx_clk_gate_en);
642
643 /* set tx packet buffer size (per tc) */
644 void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
645 u32 tx_pkt_buff_size_per_tc,
646 u32 buffer);
647
648 /* set tx path pad insert enable */
649 void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
650
651 /* tpo */
652
653 /* set ipv4 header checksum offload enable */
654 void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
655 u32 ipv4header_crc_offload_en);
656
657 /* set tcp/udp checksum offload enable */
658 void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
659 u32 tcp_udp_crc_offload_en);
660
661 /* set tx pkt system loopback enable */
662 void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
663 u32 tx_pkt_sys_lbk_en);
664
665 /* tps */
666
667 /* set tx packet scheduler data arbitration mode */
668 void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
669 u32 tx_pkt_shed_data_arb_mode);
670
671 /* set tx packet scheduler descriptor rate current time reset */
672 void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
673 u32 curr_time_res);
674
675 /* set tx packet scheduler descriptor rate limit */
676 void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
677 u32 tx_pkt_shed_desc_rate_lim);
678
679 /* set tx packet scheduler descriptor tc arbitration mode */
680 void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
681 u32 arb_mode);
682
683 /* set tx packet scheduler descriptor tc max credit */
684 void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
685 u32 max_credit,
686 u32 tc);
687
688 /* set tx packet scheduler descriptor tc weight */
689 void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
690 u32 tx_pkt_shed_desc_tc_weight,
691 u32 tc);
692
693 /* set tx packet scheduler descriptor vm arbitration mode */
694 void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
695 u32 arb_mode);
696
697 /* set tx packet scheduler tc data max credit */
698 void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
699 u32 max_credit,
700 u32 tc);
701
702 /* set tx packet scheduler tc data weight */
703 void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
704 u32 tx_pkt_shed_tc_data_weight,
705 u32 tc);
706
707 /* tx */
708
709 /* set tx register reset disable */
710 void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
711
712 /* msm */
713
714 /* get register access status */
715 u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
716
717 /* set register address for indirect address */
718 void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
719 u32 reg_addr_for_indirect_addr);
720
721 /* set register read strobe */
722 void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
723
724 /* get register read data */
725 u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
726
727 /* set register write data */
728 void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
729
730 /* set register write strobe */
731 void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
732
733 /* pci */
734
735 /* set pci register reset disable */
736 void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
737
738 /* pcs */
739 void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
740 u32 ptp_clock_read_enable);
741
742 u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
743
744 /* set uP Force Interrupt */
745 void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
746
747 /* clear ipv4 filter destination address */
748 void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
749
750 /* clear ipv4 filter source address */
751 void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
752
753 /* clear command for filter l3-l4 */
754 void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
755
756 /* clear ipv6 filter destination address */
757 void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
758
759 /* clear ipv6 filter source address */
760 void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
761
762 /* set ipv4 filter destination address */
763 void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
764 u32 ipv4_dest);
765
766 /* set ipv4 filter source address */
767 void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
768 u32 ipv4_src);
769
770 /* set command for filter l3-l4 */
771 void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
772
773 /* set ipv6 filter source address */
774 void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
775 u32 *ipv6_src);
776
777 /* set ipv6 filter destination address */
778 void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
779 u32 *ipv6_dest);
780
781 /* set Global MDIO Interface 1 */
782 void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
783
784 /* get Global MDIO Interface 1 */
785 u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
786
787 /* set Global MDIO Interface 2 */
788 void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
789
790 /* get Global MDIO Interface 2 */
791 u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
792
793 /* set Global MDIO Interface 3 */
794 void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
795
796 /* get Global MDIO Interface 3 */
797 u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
798
799 /* set Global MDIO Interface 4 */
800 void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
801
802 /* get Global MDIO Interface 4 */
803 u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
804
805 /* set Global MDIO Interface 5 */
806 void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
807
808 /* get Global MDIO Interface 5 */
809 u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
810
811 u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
812
813 /* get global microprocessor ram semaphore */
814 u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
815
816 /* get global microprocessor mdio semaphore */
817 u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
818
819 /* get global microprocessor scratch pad register */
820 u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
821
822 /* get global microprocessor scratch pad 12 register */
823 u32 hw_atl_scrpad12_get(struct aq_hw_s *self);
824
825 /* get global microprocessor scratch pad 25 register */
826 u32 hw_atl_scrpad25_get(struct aq_hw_s *self);
827
828 #endif /* HW_ATL_LLH_H */