xref: /qemu/hw/net/igb_core.c (revision 2a7e1486)
1 /*
2  * Core code for QEMU igb emulation
3  *
4  * Datasheet:
5  * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
6  *
7  * Copyright (c) 2020-2023 Red Hat, Inc.
8  * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
9  * Developed by Daynix Computing LTD (http://www.daynix.com)
10  *
11  * Authors:
12  * Akihiko Odaki <akihiko.odaki@daynix.com>
 * Gal Hammer <gal.hammer@sap.com>
14  * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
15  * Dmitry Fleytman <dmitry@daynix.com>
16  * Leonid Bloch <leonid@daynix.com>
17  * Yan Vugenfirer <yan@daynix.com>
18  *
19  * Based on work done by:
20  * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
21  * Copyright (c) 2008 Qumranet
22  * Based on work done by:
23  * Copyright (c) 2007 Dan Aloni
24  * Copyright (c) 2004 Antony T Curtis
25  *
26  * This library is free software; you can redistribute it and/or
27  * modify it under the terms of the GNU Lesser General Public
28  * License as published by the Free Software Foundation; either
29  * version 2.1 of the License, or (at your option) any later version.
30  *
31  * This library is distributed in the hope that it will be useful,
32  * but WITHOUT ANY WARRANTY; without even the implied warranty of
33  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
34  * Lesser General Public License for more details.
35  *
36  * You should have received a copy of the GNU Lesser General Public
37  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
38  */
39 
40 #include "qemu/osdep.h"
41 #include "qemu/log.h"
42 #include "net/net.h"
43 #include "net/tap.h"
44 #include "hw/net/mii.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "sysemu/runstate.h"
48 
49 #include "net_tx_pkt.h"
50 #include "net_rx_pkt.h"
51 
52 #include "igb_common.h"
53 #include "e1000x_common.h"
54 #include "igb_core.h"
55 
56 #include "trace.h"
57 
/* Maximum number of DMA fragments accepted for a single Tx packet. */
#define E1000E_MAX_TX_FRAGS (64)

/* Rx descriptor: either the legacy layout or the 82576 advanced layout. */
union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_adv_rx_desc adv;
};

/* Context handed to the VMDq Tx-switching send callback. */
typedef struct IGBTxPktVmdqCallbackContext {
    IGBCore *core;
    NetClientState *nc;
} IGBTxPktVmdqCallbackContext;

/* Largest L2 header this device parses: Ethernet plus up to two VLAN tags. */
typedef struct L2Header {
    struct eth_header eth;
    struct vlan_header vlan[2];
} L2Header;

/* Wire layout of a PTPv2 message header. */
typedef struct PTP2 {
    uint8_t message_id_transport_specific;
    uint8_t version_ptp;
    uint16_t message_length;
    uint8_t subdomain_number;
    uint8_t reserved0;
    uint16_t flags;
    uint64_t correction;
    uint8_t reserved1[5];
    uint8_t source_communication_technology;
    uint32_t source_uuid_lo;
    uint16_t source_uuid_hi;
    uint16_t source_port_id;
    uint16_t sequence_id;
    uint8_t control;
    uint8_t log_message_period;
} PTP2;
92 
/* Forward declarations for routines defined later in this file. */
static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx);

static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes);
static void igb_reset(IGBCore *core, bool sw);
99 
/* Assert the legacy INTx interrupt line and count it in the IAC statistic. */
static inline void
igb_raise_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}
107 
/* Deassert the legacy INTx interrupt line. */
static inline void
igb_lower_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}
114 
/*
 * Deliver the MSI-X vector for interrupt cause @cause, routing it either to
 * the PF or to the VF that owns the cause, then auto-clear the EICR bit if
 * the guest enabled auto-clear for it in EIAC.
 */
static void igb_msix_notify(IGBCore *core, unsigned int cause)
{
    PCIDevice *dev = core->owner;
    uint16_t vfn;
    uint32_t effective_eiac;
    unsigned int vector;

    /*
     * VF vectors are allocated from the top: each VF owns IGBVF_MSIX_VEC_NUM
     * vectors, counted down from function 8.
     */
    vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM;
    if (vfn < pcie_sriov_num_vfs(core->owner)) {
        dev = pcie_sriov_get_vf_at_index(core->owner, vfn);
        assert(dev);
        vector = (cause + 2) % IGBVF_MSIX_VEC_NUM;
    } else if (cause >= IGB_MSIX_VEC_NUM) {
        /* Fixed: log message was missing the conventional trailing newline. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "igb: Tried to use vector unavailable for PF\n");
        return;
    } else {
        vector = cause;
    }

    msix_notify(dev, vector);

    /* Auto-clear the cause bit in EICR when enabled in EIAC. */
    trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]);
    effective_eiac = core->mac[EIAC] & BIT(cause);
    core->mac[EICR] &= ~effective_eiac;
}
141 
142 static inline void
igb_intrmgr_rearm_timer(IGBIntrDelayTimer * timer)143 igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer)
144 {
145     int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
146                                  timer->delay_resolution_ns;
147 
148     trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);
149 
150     timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
151 
152     timer->running = true;
153 }
154 
155 static void
igb_intmgr_timer_resume(IGBIntrDelayTimer * timer)156 igb_intmgr_timer_resume(IGBIntrDelayTimer *timer)
157 {
158     if (timer->running) {
159         igb_intrmgr_rearm_timer(timer);
160     }
161 }
162 
163 static void
igb_intrmgr_on_msix_throttling_timer(void * opaque)164 igb_intrmgr_on_msix_throttling_timer(void *opaque)
165 {
166     IGBIntrDelayTimer *timer = opaque;
167     int idx = timer - &timer->core->eitr[0];
168 
169     timer->running = false;
170 
171     trace_e1000e_irq_msix_notify_postponed_vec(idx);
172     igb_msix_notify(timer->core, idx);
173 }
174 
175 static void
igb_intrmgr_initialize_all_timers(IGBCore * core,bool create)176 igb_intrmgr_initialize_all_timers(IGBCore *core, bool create)
177 {
178     int i;
179 
180     for (i = 0; i < IGB_INTR_NUM; i++) {
181         core->eitr[i].core = core;
182         core->eitr[i].delay_reg = EITR0 + i;
183         core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
184     }
185 
186     if (!create) {
187         return;
188     }
189 
190     for (i = 0; i < IGB_INTR_NUM; i++) {
191         core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
192                                            igb_intrmgr_on_msix_throttling_timer,
193                                            &core->eitr[i]);
194     }
195 }
196 
197 static void
igb_intrmgr_resume(IGBCore * core)198 igb_intrmgr_resume(IGBCore *core)
199 {
200     int i;
201 
202     for (i = 0; i < IGB_INTR_NUM; i++) {
203         igb_intmgr_timer_resume(&core->eitr[i]);
204     }
205 }
206 
207 static void
igb_intrmgr_reset(IGBCore * core)208 igb_intrmgr_reset(IGBCore *core)
209 {
210     int i;
211 
212     for (i = 0; i < IGB_INTR_NUM; i++) {
213         if (core->eitr[i].running) {
214             timer_del(core->eitr[i].timer);
215             igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
216         }
217     }
218 }
219 
220 static void
igb_intrmgr_pci_unint(IGBCore * core)221 igb_intrmgr_pci_unint(IGBCore *core)
222 {
223     int i;
224 
225     for (i = 0; i < IGB_INTR_NUM; i++) {
226         timer_free(core->eitr[i].timer);
227     }
228 }
229 
/* Set up interrupt-throttling state and create the timers at realize time. */
static void
igb_intrmgr_pci_realize(IGBCore *core)
{
    igb_intrmgr_initialize_all_timers(core, true);
}
235 
236 static inline bool
igb_rx_csum_enabled(IGBCore * core)237 igb_rx_csum_enabled(IGBCore *core)
238 {
239     return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
240 }
241 
/* Whether the legacy Rx descriptor format is in use (currently never). */
static inline bool
igb_rx_use_legacy_descriptor(IGBCore *core)
{
    /*
     * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
     * descriptor.
     */
    return false;
}
251 
/*
 * Indices into core->mac for the registers describing one descriptor ring,
 * plus the ring's queue number.
 */
typedef struct E1000ERingInfo {
    int dbah;  /* descriptor base address high */
    int dbal;  /* descriptor base address low */
    int dlen;  /* descriptor ring length */
    int dh;    /* descriptor head */
    int dt;    /* descriptor tail */
    int idx;   /* queue index */
} E1000ERingInfo;
260 
/* Read the DESCTYPE field of the queue's SRRCTL register. */
static uint32_t
igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK;
}
266 
267 static bool
igb_rx_use_ps_descriptor(IGBCore * core,const E1000ERingInfo * r)268 igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r)
269 {
270     uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
271     return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT ||
272            desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
273 }
274 
275 static inline bool
igb_rss_enabled(IGBCore * core)276 igb_rss_enabled(IGBCore *core)
277 {
278     return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ &&
279            !igb_rx_csum_enabled(core) &&
280            !igb_rx_use_legacy_descriptor(core);
281 }
282 
/* Result of RSS classification for one received packet. */
typedef struct E1000E_RSSInfo_st {
    bool enabled;   /* RSS was applied to this packet */
    uint32_t hash;  /* computed RSS hash (0 when not hashed) */
    uint32_t queue; /* destination queue looked up in RETA */
    uint32_t type;  /* E1000_MRQ_RSS_TYPE_* classification */
} E1000E_RSSInfo;
289 
/*
 * Classify @pkt into one of the E1000_MRQ_RSS_TYPE_* hash types, honouring
 * the per-type enable bits in MRQC and the IPv6 extension-header controls
 * in RFCTL.  Returns E1000_MRQ_RSS_TYPE_NONE when no enabled type matches.
 */
static uint32_t
igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(igb_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        /* Most specific enabled match wins: TCP, then UDP, then plain IP. */
        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
            (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) {
            return E1000_MRQ_RSS_TYPE_IPV4UDP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        /* RFCTL bits excluding packets with (new) extension headers. */
        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * Following two traces must not be combined because resulting
         * event will have 11 arguments totally and some trace backends
         * (at least "ust") have limitation of maximum 10 arguments per
         * event. Events with more arguments fail to compile for
         * backends like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        /* Extension-header-bearing packets may be excluded from EX hashing. */
        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
                (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) {
                return E1000_MRQ_RSS_TYPE_IPV6UDP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        /* Fallback: plain IPv6 hashing when enabled. */
        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}
369 
370 static uint32_t
igb_rss_calc_hash(IGBCore * core,struct NetRxPkt * pkt,E1000E_RSSInfo * info)371 igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
372 {
373     NetRxPktRssType type;
374 
375     assert(igb_rss_enabled(core));
376 
377     switch (info->type) {
378     case E1000_MRQ_RSS_TYPE_IPV4:
379         type = NetPktRssIpV4;
380         break;
381     case E1000_MRQ_RSS_TYPE_IPV4TCP:
382         type = NetPktRssIpV4Tcp;
383         break;
384     case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
385         type = NetPktRssIpV6TcpEx;
386         break;
387     case E1000_MRQ_RSS_TYPE_IPV6:
388         type = NetPktRssIpV6;
389         break;
390     case E1000_MRQ_RSS_TYPE_IPV6EX:
391         type = NetPktRssIpV6Ex;
392         break;
393     case E1000_MRQ_RSS_TYPE_IPV4UDP:
394         type = NetPktRssIpV4Udp;
395         break;
396     case E1000_MRQ_RSS_TYPE_IPV6UDP:
397         type = NetPktRssIpV6Udp;
398         break;
399     default:
400         g_assert_not_reached();
401     }
402 
403     return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
404 }
405 
406 static void
igb_rss_parse_packet(IGBCore * core,struct NetRxPkt * pkt,bool tx,E1000E_RSSInfo * info)407 igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
408                      E1000E_RSSInfo *info)
409 {
410     trace_e1000e_rx_rss_started();
411 
412     if (tx || !igb_rss_enabled(core)) {
413         info->enabled = false;
414         info->hash = 0;
415         info->queue = 0;
416         info->type = 0;
417         trace_e1000e_rx_rss_disabled();
418         return;
419     }
420 
421     info->enabled = true;
422 
423     info->type = igb_rss_get_hash_type(core, pkt);
424 
425     trace_e1000e_rx_rss_type(info->type);
426 
427     if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
428         info->hash = 0;
429         info->queue = 0;
430         return;
431     }
432 
433     info->hash = igb_rss_calc_hash(core, pkt, info);
434     info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
435 }
436 
437 static void
igb_tx_insert_vlan(IGBCore * core,uint16_t qn,struct igb_tx * tx,uint16_t vlan,bool insert_vlan)438 igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
439     uint16_t vlan, bool insert_vlan)
440 {
441     if (core->mac[MRQC] & 1) {
442         uint16_t pool = qn % IGB_NUM_VM_POOLS;
443 
444         if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
445             /* always insert default VLAN */
446             insert_vlan = true;
447             vlan = core->mac[VMVIR0 + pool] & 0xffff;
448         } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
449             insert_vlan = false;
450         }
451     }
452 
453     if (insert_vlan) {
454         net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
455             core->mac[VET] & 0xffff);
456     }
457 }
458 
/*
 * Apply the Tx offloads requested by the packet's first data descriptor
 * (TSO, L4 checksum, IP header checksum) using the selected Tx context.
 * Returns false when offload setup fails and the packet must be dropped.
 */
static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
    /* The IDX bit of the first descriptor selects one of the two contexts. */
    uint32_t idx = (tx->first_olinfo_status >> 4) & 1;

    if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) {
        /* TCP segmentation offload, using the MSS from the context. */
        uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT;
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    /* L4 checksum offload: SCTP gets its CRC computed, others a vheader. */
    if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) &&
        !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ?
          net_tx_pkt_update_sctp_checksum(tx->tx_pkt) :
          net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) {
        return false;
    }

    /* IP header checksum offload. */
    if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}
488 
/*
 * net_tx_pkt_send_custom() callback for MAC-level loopback: feed the packet
 * (with its virtio-net header) back into our own receive path.
 */
static void igb_tx_pkt_mac_callback(void *core,
                                    const struct iovec *iov,
                                    int iovcnt,
                                    const struct iovec *virt_iov,
                                    int virt_iovcnt)
{
    igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL);
}
497 
igb_tx_pkt_vmdq_callback(void * opaque,const struct iovec * iov,int iovcnt,const struct iovec * virt_iov,int virt_iovcnt)498 static void igb_tx_pkt_vmdq_callback(void *opaque,
499                                      const struct iovec *iov,
500                                      int iovcnt,
501                                      const struct iovec *virt_iov,
502                                      int virt_iovcnt)
503 {
504     IGBTxPktVmdqCallbackContext *context = opaque;
505     bool external_tx;
506 
507     igb_receive_internal(context->core, virt_iov, virt_iovcnt, true,
508                          &external_tx);
509 
510     if (external_tx) {
511         if (context->core->has_vnet) {
512             qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt);
513         } else {
514             qemu_sendv_packet(context->nc, iov, iovcnt);
515         }
516     }
517 }
518 
519 /* TX Packets Switching (7.10.3.6) */
igb_tx_pkt_switch(IGBCore * core,struct igb_tx * tx,NetClientState * nc)520 static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx,
521                               NetClientState *nc)
522 {
523     IGBTxPktVmdqCallbackContext context;
524 
525     /* TX switching is only used to serve VM to VM traffic. */
526     if (!(core->mac[MRQC] & 1)) {
527         goto send_out;
528     }
529 
530     /* TX switching requires DTXSWC.Loopback_en bit enabled. */
531     if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) {
532         goto send_out;
533     }
534 
535     context.core = core;
536     context.nc = nc;
537 
538     return net_tx_pkt_send_custom(tx->tx_pkt, false,
539                                   igb_tx_pkt_vmdq_callback, &context);
540 
541 send_out:
542     return net_tx_pkt_send(tx->tx_pkt, nc);
543 }
544 
545 static bool
igb_tx_pkt_send(IGBCore * core,struct igb_tx * tx,int queue_index)546 igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index)
547 {
548     int target_queue = MIN(core->max_queue_num, queue_index);
549     NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
550 
551     if (!igb_setup_tx_offloads(core, tx)) {
552         return false;
553     }
554 
555     net_tx_pkt_dump(tx->tx_pkt);
556 
557     if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) ||
558         ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
559         return net_tx_pkt_send_custom(tx->tx_pkt, false,
560                                       igb_tx_pkt_mac_callback, core);
561     } else {
562         return igb_tx_pkt_switch(core, tx, queue);
563     }
564 }
565 
/*
 * Update the transmit statistics registers after a successful send:
 * size-bucketed packet counters, total/good octet and packet counters and,
 * in VMDq mode, the per-pool VF counters.
 */
static void
igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    /* Statistics include 4 extra bytes — presumably the FCS; see e1000x. */
    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    e1000x_inc_reg_if_not_full(core->mac, GPTC);
    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);

    if (core->mac[MRQC] & 1) {
        /* VMDq enabled: also account on the per-pool VF counters. */
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        core->mac[PVFGOTC0 + (pool * 64)] += tot_len;
        core->mac[PVFGPTC0 + (pool * 64)]++;
    }
}
601 
/*
 * Consume one advanced Tx descriptor: accumulate context descriptors and
 * data fragments into @tx, and when a descriptor carrying EOP is seen,
 * parse the assembled packet, apply VLAN insertion and Tx timestamping,
 * send it and update statistics.
 */
static void
igb_process_tx_desc(IGBCore *core,
                    PCIDevice *dev,
                    struct igb_tx *tx,
                    union e1000_adv_tx_desc *tx_desc,
                    int queue_index)
{
    struct e1000_adv_tx_context_desc *tx_ctx_desc;
    uint32_t cmd_type_len;
    uint32_t idx;
    uint64_t buffer_addr;
    uint16_t length;

    cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);

    if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) {
        if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) ==
            E1000_ADVTXD_DTYP_DATA) {
            /* advanced transmit data descriptor */
            if (tx->first) {
                /* Latch the first descriptor's command/offload fields. */
                tx->first_cmd_type_len = cmd_type_len;
                tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
                tx->first = false;
            }
        } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) ==
                   E1000_ADVTXD_DTYP_CTXT) {
            /* advanced transmit context descriptor: IDX picks the slot */
            tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
            idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1;
            tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens);
            tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed);
            tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl);
            tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx);
            return;
        } else {
            /* unknown descriptor type */
            return;
        }
    } else {
        /* legacy descriptor */

        /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
    }

    buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr);
    length = cmd_type_len & 0xFFFF;

    if (!tx->skip_cp) {
        /* Map this fragment; on failure skip the rest of the packet. */
        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
                                             buffer_addr, length)) {
            tx->skip_cp = true;
        }
    }

    if (cmd_type_len & E1000_TXD_CMD_EOP) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            idx = (tx->first_olinfo_status >> 4) & 1;
            igb_tx_insert_vlan(core, queue_index, tx,
                tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT,
                !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE));

            /* Latch a Tx timestamp if requested and none is pending. */
            if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) &&
                (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) &&
                !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) {
                core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID;
                e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH);
            }

            if (igb_tx_pkt_send(core, tx, queue_index)) {
                igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index);
            }
        }

        /* Reset per-packet state for the next packet. */
        tx->first = true;
        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
    }
}
680 
/*
 * Return the EICR bit for the Tx queue's interrupt, looked up in the IVAR
 * table; 0 when the IVAR entry is not valid.
 */
static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n = igb_ivar_entry_tx(queue_idx);
    uint32_t ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}
690 
/*
 * Return the EICR bit for the Rx queue's interrupt, looked up in the IVAR
 * table; 0 when the IVAR entry is not valid.
 */
static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n = igb_ivar_entry_rx(queue_idx);
    uint32_t ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}
700 
701 static inline bool
igb_ring_empty(IGBCore * core,const E1000ERingInfo * r)702 igb_ring_empty(IGBCore *core, const E1000ERingInfo *r)
703 {
704     return core->mac[r->dh] == core->mac[r->dt] ||
705                 core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
706 }
707 
708 static inline uint64_t
igb_ring_base(IGBCore * core,const E1000ERingInfo * r)709 igb_ring_base(IGBCore *core, const E1000ERingInfo *r)
710 {
711     uint64_t bah = core->mac[r->dbah];
712     uint64_t bal = core->mac[r->dbal];
713 
714     return (bah << 32) + bal;
715 }
716 
/* Bus address of the descriptor currently at the ring's head. */
static inline uint64_t
igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r)
{
    return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}
722 
723 static inline void
igb_ring_advance(IGBCore * core,const E1000ERingInfo * r,uint32_t count)724 igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count)
725 {
726     core->mac[r->dh] += count;
727 
728     if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
729         core->mac[r->dh] = 0;
730     }
731 }
732 
733 static inline uint32_t
igb_ring_free_descr_num(IGBCore * core,const E1000ERingInfo * r)734 igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r)
735 {
736     trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
737                                  core->mac[r->dh],  core->mac[r->dt]);
738 
739     if (core->mac[r->dh] <= core->mac[r->dt]) {
740         return core->mac[r->dt] - core->mac[r->dh];
741     }
742 
743     if (core->mac[r->dh] > core->mac[r->dt]) {
744         return core->mac[r->dlen] / E1000_RING_DESC_LEN +
745                core->mac[r->dt] - core->mac[r->dh];
746     }
747 
748     g_assert_not_reached();
749 }
750 
/* A ring with a zero length register is considered disabled. */
static inline bool
igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dlen] > 0;
}
756 
/* A Tx ring: its register indices plus the per-queue Tx packet state. */
typedef struct IGB_TxRing_st {
    const E1000ERingInfo *i;
    struct igb_tx *tx;
} IGB_TxRing;
761 
/*
 * Map a per-queue register index back to its queue number; registers for
 * consecutive queues are spaced 16 register slots apart.
 */
static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    int offset = reg_idx - base_reg_idx;

    return offset / 16;
}
767 
/*
 * Bind @txr to Tx queue @idx: look up the queue's register indices in the
 * static table and attach the corresponding per-queue Tx state.
 */
static inline void
igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)
{
    /* Register indices (BAH, BAL, LEN, head, tail) for each Tx queue. */
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 },
        { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 },
        { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 },
        { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 },
        { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 },
        { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 },
        { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 },
        { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 },
        { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 },
        { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 },
        { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 },
        { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 },
        { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 },
        { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 },
        { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i     = &i[idx];
    txr->tx    = &core->tx[idx];
}
795 
/* An Rx ring, described only by its register indices. */
typedef struct E1000E_RxRing_st {
    const E1000ERingInfo *i;
} E1000E_RxRing;
799 
/*
 * Bind @rxr to Rx queue @idx by looking up the queue's register indices in
 * the static table.
 */
static inline void
igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx)
{
    /* Register indices (BAH, BAL, LEN, head, tail) for each Rx queue. */
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 },
        { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 },
        { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 },
        { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 },
        { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 },
        { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 },
        { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 },
        { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 },
        { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 },
        { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 },
        { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 },
        { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 12 },
        { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 },
        { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 },
        { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i      = &i[idx];
}
826 
/*
 * Perform completion write-back for one processed Tx descriptor.  When the
 * descriptor requested status reporting (RS), either write the current
 * head pointer to the TDWBA address (head write-back, enabled via bit 0 of
 * TDWBAL) or set the DD bit in the descriptor itself.  Returns the EICR
 * bits to raise for this queue, or 0 when no reporting was requested.
 */
static uint32_t
igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
                     union e1000_adv_tx_desc *tx_desc,
                     const E1000ERingInfo *txi)
{
    PCIDevice *d;
    uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
    uint64_t tdwba;

    tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2];
    tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32;

    if (!(cmd_type_len & E1000_TXD_CMD_RS)) {
        return 0;
    }

    /* DMA on behalf of the VF that owns this queue, or the PF itself. */
    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    if (tdwba & 1) {
        /* Head write-back: store the head pointer at the aligned address. */
        uint32_t buffer = cpu_to_le32(core->mac[txi->dh]);
        pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer));
    } else {
        /* Descriptor write-back: set DD in the descriptor's status. */
        uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD;

        tx_desc->wb.status = cpu_to_le32(status);
        pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb),
            &tx_desc->wb, sizeof(tx_desc->wb));
    }

    return igb_tx_wb_eic(core, txi->idx);
}
861 
862 static inline bool
igb_tx_enabled(IGBCore * core,const E1000ERingInfo * txi)863 igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi)
864 {
865     bool vmdq = core->mac[MRQC] & 1;
866     uint16_t qn = txi->idx;
867     uint16_t pool = qn % IGB_NUM_VM_POOLS;
868 
869     return (core->mac[TCTL] & E1000_TCTL_EN) &&
870         (!vmdq || core->mac[VFTE] & BIT(pool)) &&
871         (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
872 }
873 
/*
 * Drain the Tx ring @txr: DMA-read each descriptor, feed it to
 * igb_process_tx_desc(), write back completion status, and finally raise
 * the accumulated EICR causes plus a legacy TXDW interrupt.
 */
static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_adv_tx_desc desc;
    const E1000ERingInfo *txi = txr->i;
    uint32_t eic = 0;

    if (!igb_tx_enabled(core, txi)) {
        trace_e1000e_tx_disabled();
        return;
    }

    /* DMA on behalf of the VF that owns this queue, or the PF itself. */
    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    while (!igb_ring_empty(core, txi)) {
        base = igb_ring_head_descr(core, txi);

        pci_dma_read(d, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                              desc.read.cmd_type_len, desc.wb.status);

        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
        igb_ring_advance(core, txi, 1);
        eic |= igb_txdesc_writeback(core, base, &desc, txi);
    }

    if (eic) {
        igb_raise_interrupts(core, EICR, eic);
        igb_raise_interrupts(core, ICR, E1000_ICR_TXDW);
    }

    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
}
913 
914 static uint32_t
igb_rxbufsize(IGBCore * core,const E1000ERingInfo * r)915 igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r)
916 {
917     uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
918     uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK;
919     if (bsizepkt) {
920         return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT;
921     }
922 
923     return e1000x_rxbufsize(core->mac[RCTL]);
924 }
925 
926 static bool
igb_has_rxbufs(IGBCore * core,const E1000ERingInfo * r,size_t total_size)927 igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size)
928 {
929     uint32_t bufs = igb_ring_free_descr_num(core, r);
930     uint32_t bufsize = igb_rxbufsize(core, r);
931 
932     trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize);
933 
934     return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
935                          bufsize;
936 }
937 
938 static uint32_t
igb_rxhdrbufsize(IGBCore * core,const E1000ERingInfo * r)939 igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r)
940 {
941     uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
942     return (srrctl & E1000_SRRCTL_BSIZEHDRSIZE_MASK) >>
943            E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
944 }
945 
946 void
igb_start_recv(IGBCore * core)947 igb_start_recv(IGBCore *core)
948 {
949     int i;
950 
951     trace_e1000e_rx_start_recv();
952 
953     for (i = 0; i <= core->max_queue_num; i++) {
954         qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
955     }
956 }
957 
/*
 * Return true if the device can accept an incoming packet right now:
 * RX must be ready at the device level and at least one enabled RX
 * queue must have room for a minimal packet.
 */
bool
igb_can_receive(IGBCore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;
        /* Skip queues without the per-queue RXDCTL enable bit. */
        if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);
        /* Space for one byte is enough to report "can receive". */
        if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}
983 
984 ssize_t
igb_receive(IGBCore * core,const uint8_t * buf,size_t size)985 igb_receive(IGBCore *core, const uint8_t *buf, size_t size)
986 {
987     const struct iovec iov = {
988         .iov_base = (uint8_t *)buf,
989         .iov_len = size
990     };
991 
992     return igb_receive_iov(core, &iov, 1);
993 }
994 
995 static inline bool
igb_rx_l3_cso_enabled(IGBCore * core)996 igb_rx_l3_cso_enabled(IGBCore *core)
997 {
998     return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
999 }
1000 
1001 static inline bool
igb_rx_l4_cso_enabled(IGBCore * core)1002 igb_rx_l4_cso_enabled(IGBCore *core)
1003 {
1004     return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
1005 }
1006 
/*
 * Return true if a packet of @size bytes exceeds the configured receive
 * limit.  With long-packet enable (@lpe) the limit is @rlpml, counted
 * including the FCS; otherwise it is the standard MTU plus the L2
 * headers (Ethernet header and @vlan_num VLAN tags).
 */
static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr,
                                size_t size, size_t vlan_num,
                                bool lpe, uint16_t rlpml)
{
    if (lpe) {
        return size + ETH_FCS_LEN > rlpml;
    }

    return size > sizeof(struct eth_header) +
                  sizeof(struct vlan_header) * vlan_num + ETH_MTU;
}
1015 
/*
 * Decide which RX queues should receive the packet described by
 * @iov/@l2_header.  Returns a bitmap of destination queues (0 = drop).
 *
 * Also fills in:
 *   @rss_info     - RSS hash/queue selection result
 *   @etqf         - index of the matching EtherType filter, or 8 if none
 *   @ts           - true if a PTP RX timestamp was latched for this packet
 *   @external_tx  - (VMDq loopback probing) cleared when the packet is
 *                   destined to a local pool; may be NULL
 */
static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov,
                                   size_t iovcnt, size_t iov_ofs,
                                   const L2Header *l2_header, size_t size,
                                   E1000E_RSSInfo *rss_info,
                                   uint16_t *etqf, bool *ts, bool *external_tx)
{
    static const int ta_shift[] = { 4, 3, 2, 0 };
    const struct eth_header *ehdr = &l2_header->eth;
    uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
    uint16_t queues = 0;
    uint16_t oversized = 0;
    size_t vlan_num = 0;
    PTP2 ptp2;
    bool lpe;
    uint16_t rlpml;
    int i;

    memset(rss_info, 0, sizeof(E1000E_RSSInfo));
    *ts = false;

    if (external_tx) {
        *external_tx = true;
    }

    /*
     * Count VLAN tags on the wire.  With CTRL_EXT bit 26 set, an outer
     * tag matching VET[31:16] plus an inner tag matching VET[15:0] is a
     * double-tagged frame; otherwise a single tag matching VET[15:0].
     */
    if (core->mac[CTRL_EXT] & BIT(26)) {
        if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 &&
            be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 2;
        }
    } else {
        if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 1;
        }
    }

    /* Global oversize check; SBP (store bad packets) bypasses it. */
    lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE);
    rlpml = core->mac[RLPML];
    if (!(core->mac[RCTL] & E1000_RCTL_SBP) &&
        igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) {
        trace_e1000x_rx_oversized(size);
        return queues;
    }

    /*
     * EtherType filters: find the first enabled ETQF matching h_proto.
     * A matching 1588 filter latches the PTPv2 RX timestamp (once, until
     * software clears TSYNCRXCTL.VALID) and records the source UUID and
     * sequence id for software.
     */
    for (*etqf = 0; *etqf < 8; (*etqf)++) {
        if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) &&
            be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) {
            if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) &&
                (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) &&
                !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) &&
                iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) &&
                (ptp2.version_ptp & 15) == 2 &&
                ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) {
                e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH);
                *ts = true;
                core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID;
                core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo);
                core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) |
                                     (le16_to_cpu(ptp2.sequence_id) << 16);
            }
            break;
        }
    }

    /* VLAN filter applies to the innermost tag. */
    if (vlan_num &&
        !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) {
        return queues;
    }

    if (core->mac[MRQC] & 1) {
        /* VMDq mode: select destination pools, then map pools to queues. */
        if (is_broadcast_ether_addr(ehdr->h_dest)) {
            /* Broadcast goes to every pool that accepts broadcast (BAM). */
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) {
                    queues |= BIT(i);
                }
            }
        } else {
            /* Exact unicast match against both RA banks; RAH carries the
             * pool bitmap for each valid entry. */
            for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            if (!queues) {
                /* No exact match: consult the MTA/UTA hash table, using
                 * the RCTL.MO-selected bits of the destination address. */
                macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA);

                f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
                f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
                if (macp[f >> 5] & (1 << (f & 0x1f))) {
                    /* Hash hit: deliver to pools with ROMPE set. */
                    for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                        if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) {
                            queues |= BIT(i);
                        }
                    }
                }
            } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) {
                /* Matched unicast stays internal (VMDq loopback). */
                *external_tx = false;
            }
        }

        if (e1000x_vlan_rx_filter_enabled(core->mac)) {
            uint16_t mask = 0;

            if (vlan_num) {
                /* Restrict to pools whose VLVF entry matches the VID. */
                uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK;

                for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
                    if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid &&
                        (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) {
                        uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK;
                        mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT;
                    }
                }
            } else {
                /* Untagged: only pools accepting untagged packets (AUPE). */
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) {
                        mask |= BIT(i);
                    }
                }
            }

            queues &= mask;
        }

        /* Unmatched unicast falls back to the default pool unless disabled. */
        if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx &&
            !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) {
            uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK;
            queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
        }

        queues &= core->mac[VFRE];
        if (queues) {
            /* Per-pool oversize check using each pool's VMOLR LPE/RLPML. */
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE);
                rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK;
                if ((queues & BIT(i)) &&
                    igb_rx_is_oversized(core, ehdr, size, vlan_num,
                                        lpe, rlpml)) {
                    oversized |= BIT(i);
                }
            }
            /* 8.19.37 increment ROC if packet is oversized for all queues */
            if (oversized == queues) {
                trace_e1000x_rx_oversized(size);
                e1000x_inc_reg_if_not_full(core->mac, ROC);
            }
            queues &= ~oversized;
        }

        if (queues) {
            igb_rss_parse_packet(core, core->rx_pkt,
                                 external_tx != NULL, rss_info);
            /* Sec 8.26.1: PQn = VFn + VQn*8 */
            if (rss_info->queue & 1) {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if ((queues & BIT(i)) &&
                        (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) {
                        queues |= BIT(i + IGB_NUM_VM_POOLS);
                        queues &= ~BIT(i);
                    }
                }
            }
        }
    } else {
        /* Non-VMDq: standard group filter, plus the second RA bank. */
        bool accepted = e1000x_rx_group_filter(core->mac, ehdr);
        if (!accepted) {
            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2,
                                                    MAC_ARG(ehdr->h_dest));

                    accepted = true;
                    break;
                }
            }
        }

        if (accepted) {
            igb_rss_parse_packet(core, core->rx_pkt, false, rss_info);
            queues = BIT(rss_info->queue);
        }
    }

    return queues;
}
1221 
1222 static inline void
igb_read_lgcy_rx_descr(IGBCore * core,struct e1000_rx_desc * desc,hwaddr * buff_addr)1223 igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
1224                        hwaddr *buff_addr)
1225 {
1226     *buff_addr = le64_to_cpu(desc->buffer_addr);
1227 }
1228 
1229 static inline void
igb_read_adv_rx_single_buf_descr(IGBCore * core,union e1000_adv_rx_desc * desc,hwaddr * buff_addr)1230 igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
1231                                  hwaddr *buff_addr)
1232 {
1233     *buff_addr = le64_to_cpu(desc->read.pkt_addr);
1234 }
1235 
1236 static inline void
igb_read_adv_rx_split_buf_descr(IGBCore * core,union e1000_adv_rx_desc * desc,hwaddr * buff_addr)1237 igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
1238                                 hwaddr *buff_addr)
1239 {
1240     buff_addr[0] = le64_to_cpu(desc->read.hdr_addr);
1241     buff_addr[1] = le64_to_cpu(desc->read.pkt_addr);
1242 }
1243 
/* Per-descriptor buffer write accounting while DMA-ing one packet. */
typedef struct IGBBAState {
    uint16_t written[IGB_MAX_PS_BUFFERS]; /* bytes written to each buffer */
    uint8_t cur_idx;                      /* buffer currently being filled */
} IGBBAState;
1248 
/* Header-split results, reported in the advanced descriptor write-back. */
typedef struct IGBSplitDescriptorData {
    bool sph;       /* split-header flag: header was separated from payload */
    bool hbo;       /* header-buffer overflow: header exceeded the buffer */
    size_t hdr_len; /* length of the split-off header, in bytes */
} IGBSplitDescriptorData;
1254 
/* Scratch state tracking the DMA of one received packet into guest memory. */
typedef struct IGBPacketRxDMAState {
    size_t size;                      /* NOTE(review): size of the current chunk — confirm in DMA loop */
    size_t total_size;                /* NOTE(review): full packet length — confirm in DMA loop */
    size_t ps_hdr_len;                /* header length chosen by igb_do_ps() */
    size_t desc_size;                 /* NOTE(review): bytes for current descriptor — confirm */
    size_t desc_offset;               /* NOTE(review): packet bytes consumed so far — confirm */
    uint32_t rx_desc_packet_buf_size; /* per-descriptor packet buffer size */
    uint32_t rx_desc_header_buf_size; /* per-descriptor header buffer size */
    struct iovec *iov;                /* current fragment of the source packet */
    size_t iov_ofs;                   /* offset into *iov */
    bool do_ps;                       /* header split is active for this packet */
    bool is_first;                    /* filling the packet's first descriptor */
    IGBBAState bastate;               /* per-descriptor buffer write accounting */
    hwaddr ba[IGB_MAX_PS_BUFFERS];    /* guest addresses: [0] header, [1] packet */
    IGBSplitDescriptorData ps_desc_data; /* split results for write-back */
} IGBPacketRxDMAState;
1271 
1272 static inline void
igb_read_rx_descr(IGBCore * core,union e1000_rx_desc_union * desc,IGBPacketRxDMAState * pdma_st,const E1000ERingInfo * r)1273 igb_read_rx_descr(IGBCore *core,
1274                   union e1000_rx_desc_union *desc,
1275                   IGBPacketRxDMAState *pdma_st,
1276                   const E1000ERingInfo *r)
1277 {
1278     uint32_t desc_type;
1279 
1280     if (igb_rx_use_legacy_descriptor(core)) {
1281         igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]);
1282         pdma_st->ba[0] = 0;
1283         return;
1284     }
1285 
1286     /* advanced header split descriptor */
1287     if (igb_rx_use_ps_descriptor(core, r)) {
1288         igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]);
1289         return;
1290     }
1291 
1292     /* descriptor replication modes not supported */
1293     desc_type = igb_rx_queue_desctyp_get(core, r);
1294     if (desc_type != E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) {
1295         trace_igb_wrn_rx_desc_modes_not_supp(desc_type);
1296     }
1297 
1298     /* advanced single buffer descriptor */
1299     igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]);
1300     pdma_st->ba[0] = 0;
1301 }
1302 
/*
 * Validate L3/L4 checksums in software and fold the result into
 * *status_flags (IPCS/TCPCS/UDPCS plus IPE/TCPE error bits).  Used when
 * the virtio-net header carries no checksum information for the packet.
 */
static void
igb_verify_csum_in_sw(IGBCore *core,
                      struct NetRxPkt *pkt,
                      uint32_t *status_flags,
                      EthL4HdrProto l4hdr_proto)
{
    bool csum_valid;
    uint32_t csum_error;

    if (igb_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            /* Could not compute (e.g. no IP header): leave flags alone. */
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!igb_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
    *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;

    /* UDP packets additionally report UDPCS. */
    if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *status_flags |= E1000_RXD_STAT_UDPCS;
    }
}
1340 
/*
 * Build the status/error word and VLAN tag shared by legacy and
 * advanced RX descriptor write-backs.
 *
 * On return *status_flags is LITTLE-ENDIAN (converted at func_exit);
 * *vlan_tag is only written when a VLAN tag was stripped.  @is_eop
 * false produces just DD, since non-EOP descriptors carry no metadata.
 */
static void
igb_build_rx_metadata_common(IGBCore *core,
                             struct NetRxPkt *pkt,
                             bool is_eop,
                             uint32_t *status_flags,
                             uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6, csum_valid;
    EthL4HdrProto l4hdr_proto;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    /* No checksum info from the source: fall back to software checking. */
    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (igb_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (igb_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_SCTP:
            /* SCTP checksums are always verified in software here. */
            if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
                trace_e1000e_rx_metadata_l4_csum_validation_failed();
                goto func_exit;
            }
            if (!csum_valid) {
                *status_flags |= E1000_RXDEXT_STATERR_TCPE;
            }
            /* fall through */
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

func_exit:
    trace_e1000e_rx_metadata_status_flags(*status_flags);
    *status_flags = cpu_to_le32(*status_flags);
}
1422 
/*
 * Fill the write-back fields of a legacy RX descriptor: length, status,
 * errors and the VLAN tag ("special" field).  @pkt is NULL for
 * descriptors that do not complete a packet (non-EOP).
 */
static inline void
igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                        struct NetRxPkt *pkt,
                        const E1000E_RSSInfo *rss_info,
                        uint16_t length)
{
    uint32_t status_flags;

    /* Legacy descriptors cannot carry RSS results. */
    assert(!rss_info->enabled);

    memset(desc, 0, sizeof(*desc));
    desc->length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, pkt != NULL,
                                 &status_flags,
                                 &desc->special);

    /* status_flags comes back little-endian; split it into the legacy
     * 8-bit errors (bits 31:24) and status (bits 7:0) fields. */
    desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    desc->status = (uint8_t) le32_to_cpu(status_flags);
}
1442 
1443 static bool
igb_rx_ps_descriptor_split_always(IGBCore * core,const E1000ERingInfo * r)1444 igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r)
1445 {
1446     uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
1447     return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1448 }
1449 
1450 static uint16_t
igb_rx_desc_get_packet_type(IGBCore * core,struct NetRxPkt * pkt,uint16_t etqf)1451 igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf)
1452 {
1453     uint16_t pkt_type;
1454     bool hasip4, hasip6;
1455     EthL4HdrProto l4hdr_proto;
1456 
1457     if (etqf < 8) {
1458         pkt_type = BIT(11) | etqf;
1459         return pkt_type;
1460     }
1461 
1462     net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
1463 
1464     if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
1465         eth_ip6_hdr_info *ip6hdr_info = net_rx_pkt_get_ip6_info(pkt);
1466         pkt_type = ip6hdr_info->has_ext_hdrs ? E1000_ADVRXD_PKT_IP6E :
1467                                                E1000_ADVRXD_PKT_IP6;
1468     } else if (hasip4) {
1469         pkt_type = E1000_ADVRXD_PKT_IP4;
1470     } else {
1471         pkt_type = 0;
1472     }
1473 
1474     switch (l4hdr_proto) {
1475     case ETH_L4_HDR_PROTO_TCP:
1476         pkt_type |= E1000_ADVRXD_PKT_TCP;
1477         break;
1478     case ETH_L4_HDR_PROTO_UDP:
1479         pkt_type |= E1000_ADVRXD_PKT_UDP;
1480         break;
1481     case ETH_L4_HDR_PROTO_SCTP:
1482         pkt_type |= E1000_ADVRXD_PKT_SCTP;
1483         break;
1484     default:
1485         break;
1486     }
1487 
1488     return pkt_type;
1489 }
1490 
/*
 * Fill the write-back half of an advanced RX descriptor: length, status,
 * VLAN tag, RSS hash or IP identification, optional timestamp flag and
 * the packet-type field.  @pkt is NULL for non-EOP descriptors, which
 * get only length and the basic status bits.
 */
static inline void
igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                       struct NetRxPkt *pkt,
                       const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts,
                       uint16_t length)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint16_t rss_type = 0, pkt_type;
    bool eop = (pkt != NULL);
    uint32_t adv_desc_status_error = 0;
    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);
    /* status_error is written little-endian by this helper. */
    igb_build_rx_metadata_common(core, pkt, eop,
                                 &desc->wb.upper.status_error,
                                 &desc->wb.upper.vlan);

    if (!eop) {
        return;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    /* RXCSUM.PCSD selects RSS hash reporting over IP identification. */
    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            desc->wb.lower.hi_dword.rss = cpu_to_le32(rss_info->hash);
            rss_type = rss_info->type;
            trace_igb_rx_metadata_rss(desc->wb.lower.hi_dword.rss, rss_type);
        }
    } else if (hasip4) {
            adv_desc_status_error |= E1000_RXD_STAT_IPIDV;
            desc->wb.lower.hi_dword.csum_ip.ip_id =
                cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
            trace_e1000e_rx_metadata_ip_id(
                desc->wb.lower.hi_dword.csum_ip.ip_id);
    }

    /* Bit 16 of status_error flags a latched PTP timestamp. */
    if (ts) {
        adv_desc_status_error |= BIT(16);
    }

    pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);
    desc->wb.lower.lo_dword.pkt_info = cpu_to_le16(rss_type | (pkt_type << 4));
    /* OR in the extra bits, converting to match the LE status_error. */
    desc->wb.upper.status_error |= cpu_to_le32(adv_desc_status_error);
}
1538 
/*
 * Write back an advanced descriptor for a packet-split ring: the common
 * advanced write-back plus the header-split fields (HDR_LEN, SPH, HBO).
 * The reported length covers only the packet buffer when the split was
 * actually performed, otherwise both buffers.
 */
static inline void
igb_write_adv_ps_rx_descr(IGBCore *core,
                          union e1000_adv_rx_desc *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          const E1000ERingInfo *r,
                          uint16_t etqf,
                          bool ts,
                          IGBPacketRxDMAState *pdma_st)
{
    size_t pkt_len;
    uint16_t hdr_info = 0;

    if (pdma_st->do_ps) {
        pkt_len = pdma_st->bastate.written[1];
    } else {
        pkt_len = pdma_st->bastate.written[0] + pdma_st->bastate.written[1];
    }

    igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len);

    hdr_info = (pdma_st->ps_desc_data.hdr_len << E1000_ADVRXD_HDR_LEN_OFFSET) &
               E1000_ADVRXD_ADV_HDR_LEN_MASK;
    hdr_info |= pdma_st->ps_desc_data.sph ? E1000_ADVRXD_HDR_SPH : 0;
    desc->wb.lower.lo_dword.hdr_info = cpu_to_le16(hdr_info);

    /* Flag header-buffer overflow in status_error (little-endian). */
    desc->wb.upper.status_error |= cpu_to_le32(
        pdma_st->ps_desc_data.hbo ? E1000_ADVRXD_ST_ERR_HBO_OFFSET : 0);
}
1568 
1569 static inline void
igb_write_rx_descr(IGBCore * core,union e1000_rx_desc_union * desc,struct NetRxPkt * pkt,const E1000E_RSSInfo * rss_info,uint16_t etqf,bool ts,IGBPacketRxDMAState * pdma_st,const E1000ERingInfo * r)1570 igb_write_rx_descr(IGBCore *core,
1571                    union e1000_rx_desc_union *desc,
1572                    struct NetRxPkt *pkt,
1573                    const E1000E_RSSInfo *rss_info,
1574                    uint16_t etqf,
1575                    bool ts,
1576                    IGBPacketRxDMAState *pdma_st,
1577                    const E1000ERingInfo *r)
1578 {
1579     if (igb_rx_use_legacy_descriptor(core)) {
1580         igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
1581                                 pdma_st->bastate.written[1]);
1582     } else if (igb_rx_use_ps_descriptor(core, r)) {
1583         igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts,
1584                                   pdma_st);
1585     } else {
1586         igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info,
1587                                etqf, ts, pdma_st->bastate.written[1]);
1588     }
1589 }
1590 
/*
 * DMA a completed RX descriptor back to guest memory.  The DD
 * (descriptor done) bit is deliberately published LAST with a second,
 * narrow write: a guest polling DD must never observe it set before the
 * rest of the descriptor contents have landed.  Do not reorder.
 */
static inline void
igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr,
                          union e1000_rx_desc_union *desc, dma_addr_t len)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        /* Write everything with DD clear first... */
        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        /* ...then publish the final status byte including DD. */
        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        union e1000_adv_rx_desc *d = &desc->adv;
        size_t offset =
            offsetof(union e1000_adv_rx_desc, wb.upper.status_error);
        uint32_t status = d->wb.upper.status_error;

        /* Same two-step protocol for the advanced descriptor format. */
        d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->wb.upper.status_error = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    }
}
1622 
/*
 * Account a received packet in the statistics registers: the common
 * e1000x counters always, plus the per-pool VF counters (good octets,
 * good packets, multicast packets) when VMDq is enabled (MRQC bit 0).
 */
static void
igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi,
                    size_t pkt_size, size_t pkt_fcs_size)
{
    eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
    e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);

    if (core->mac[MRQC] & 1) {
        uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;

        /* NOTE(review): the "+ 4" presumably includes the CRC in the
         * octet count — confirm against the PVFGORC definition. */
        core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4;
        core->mac[PVFGPRC0 + (pool * 64)]++;
        if (pkt_type == ETH_PKT_MCAST) {
            core->mac[PVFMPRC0 + (pool * 64)]++;
        }
    }
}
1640 
1641 static inline bool
igb_rx_descr_threshold_hit(IGBCore * core,const E1000ERingInfo * rxi)1642 igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi)
1643 {
1644     return igb_ring_free_descr_num(core, rxi) ==
1645            ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16;
1646 }
1647 
/*
 * Decide whether to perform header split for @pkt on ring @r and, if
 * so, fill pdma_st->ps_hdr_len and pdma_st->ps_desc_data.  Returns true
 * when the packet should be split (header in buffer 0, payload in
 * buffer 1), false when everything goes into the packet buffer.
 */
static bool
igb_do_ps(IGBCore *core,
          const E1000ERingInfo *r,
          struct NetRxPkt *pkt,
          IGBPacketRxDMAState *pdma_st)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    bool fragment;
    bool split_always;
    size_t bheader_size;
    size_t total_pkt_len;

    if (!igb_rx_use_ps_descriptor(core, r)) {
        return false;
    }

    total_pkt_len = net_rx_pkt_get_total_len(pkt);
    bheader_size = igb_rxhdrbufsize(core, r);
    split_always = igb_rx_ps_descriptor_split_always(core, r);

    /* Split-always: a packet fitting in the header buffer goes there whole. */
    if (split_always && total_pkt_len <= bheader_size) {
        pdma_st->ps_hdr_len = total_pkt_len;
        pdma_st->ps_desc_data.hdr_len = total_pkt_len;
        return true;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (hasip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        /* Non-IP: no header boundary to split on. */
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    /* IP fragments are not split when RFCTL disables fragment splitting. */
    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    /* no header splitting for SCTP */
    if (!fragment && (l4hdr_proto == ETH_L4_HDR_PROTO_UDP ||
                      l4hdr_proto == ETH_L4_HDR_PROTO_TCP)) {
        /* Split after the full L4 header. */
        pdma_st->ps_hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        /* Split after the IP header only. */
        pdma_st->ps_hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    pdma_st->ps_desc_data.sph = true;
    pdma_st->ps_desc_data.hdr_len = pdma_st->ps_hdr_len;

    /* Header larger than the header buffer: report HBO, don't split. */
    if (pdma_st->ps_hdr_len > bheader_size) {
        pdma_st->ps_desc_data.hbo = true;
        goto header_not_handled;
    }

    return true;

header_not_handled:
    /* Split-always rings still consume a header buffer's worth. */
    if (split_always) {
        pdma_st->ps_hdr_len = bheader_size;
        return true;
    }

    return false;
}
1716 
1717 static void
igb_truncate_to_descriptor_size(IGBPacketRxDMAState * pdma_st,size_t * size)1718 igb_truncate_to_descriptor_size(IGBPacketRxDMAState *pdma_st, size_t *size)
1719 {
1720     if (pdma_st->do_ps && pdma_st->is_first) {
1721         if (*size > pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len) {
1722             *size = pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len;
1723         }
1724     } else {
1725         if (*size > pdma_st->rx_desc_packet_buf_size) {
1726             *size = pdma_st->rx_desc_packet_buf_size;
1727         }
1728     }
1729 }
1730 
1731 static inline void
igb_write_hdr_frag_to_rx_buffers(IGBCore * core,PCIDevice * d,IGBPacketRxDMAState * pdma_st,const char * data,dma_addr_t data_len)1732 igb_write_hdr_frag_to_rx_buffers(IGBCore *core,
1733                                  PCIDevice *d,
1734                                  IGBPacketRxDMAState *pdma_st,
1735                                  const char *data,
1736                                  dma_addr_t data_len)
1737 {
1738     assert(data_len <= pdma_st->rx_desc_header_buf_size -
1739                        pdma_st->bastate.written[0]);
1740     pci_dma_write(d,
1741                   pdma_st->ba[0] + pdma_st->bastate.written[0],
1742                   data, data_len);
1743     pdma_st->bastate.written[0] += data_len;
1744     pdma_st->bastate.cur_idx = 1;
1745 }
1746 
1747 static void
igb_write_header_to_rx_buffers(IGBCore * core,struct NetRxPkt * pkt,PCIDevice * d,IGBPacketRxDMAState * pdma_st,size_t * copy_size)1748 igb_write_header_to_rx_buffers(IGBCore *core,
1749                                struct NetRxPkt *pkt,
1750                                PCIDevice *d,
1751                                IGBPacketRxDMAState *pdma_st,
1752                                size_t *copy_size)
1753 {
1754     size_t iov_copy;
1755     size_t ps_hdr_copied = 0;
1756 
1757     if (!pdma_st->is_first) {
1758         /* Leave buffer 0 of each descriptor except first */
1759         /* empty                                          */
1760         pdma_st->bastate.cur_idx = 1;
1761         return;
1762     }
1763 
1764     do {
1765         iov_copy = MIN(pdma_st->ps_hdr_len - ps_hdr_copied,
1766                        pdma_st->iov->iov_len - pdma_st->iov_ofs);
1767 
1768         igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st,
1769                                          pdma_st->iov->iov_base,
1770                                          iov_copy);
1771 
1772         *copy_size -= iov_copy;
1773         ps_hdr_copied += iov_copy;
1774 
1775         pdma_st->iov_ofs += iov_copy;
1776         if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
1777             pdma_st->iov++;
1778             pdma_st->iov_ofs = 0;
1779         }
1780     } while (ps_hdr_copied < pdma_st->ps_hdr_len);
1781 
1782     pdma_st->is_first = false;
1783 }
1784 
/*
 * DMA one payload fragment into the descriptor's packet buffers,
 * advancing to the next buffer (bastate.cur_idx) whenever the current
 * one fills up.  The caller guarantees the fragment fits in the
 * remaining buffers (cur_idx never reaches IGB_MAX_PS_BUFFERS).
 */
static void
igb_write_payload_frag_to_rx_buffers(IGBCore *core,
                                     PCIDevice *d,
                                     IGBPacketRxDMAState *pdma_st,
                                     const char *data,
                                     dma_addr_t data_len)
{
    while (data_len > 0) {
        assert(pdma_st->bastate.cur_idx < IGB_MAX_PS_BUFFERS);

        /* Room left in the buffer currently being filled. */
        uint32_t cur_buf_bytes_left =
            pdma_st->rx_desc_packet_buf_size -
            pdma_st->bastate.written[pdma_st->bastate.cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_igb_rx_desc_buff_write(
            pdma_st->bastate.cur_idx,
            pdma_st->ba[pdma_st->bastate.cur_idx],
            pdma_st->bastate.written[pdma_st->bastate.cur_idx],
            data,
            bytes_to_write);

        pci_dma_write(d,
                      pdma_st->ba[pdma_st->bastate.cur_idx] +
                      pdma_st->bastate.written[pdma_st->bastate.cur_idx],
                      data, bytes_to_write);

        pdma_st->bastate.written[pdma_st->bastate.cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        /* Buffer full: continue in the next one. */
        if (pdma_st->bastate.written[pdma_st->bastate.cur_idx] ==
            pdma_st->rx_desc_packet_buf_size) {
            pdma_st->bastate.cur_idx++;
        }
    }
}
1822 
1823 static void
igb_write_payload_to_rx_buffers(IGBCore * core,struct NetRxPkt * pkt,PCIDevice * d,IGBPacketRxDMAState * pdma_st,size_t * copy_size)1824 igb_write_payload_to_rx_buffers(IGBCore *core,
1825                                 struct NetRxPkt *pkt,
1826                                 PCIDevice *d,
1827                                 IGBPacketRxDMAState *pdma_st,
1828                                 size_t *copy_size)
1829 {
1830     static const uint32_t fcs_pad;
1831     size_t iov_copy;
1832 
1833     /* Copy packet payload */
1834     while (*copy_size) {
1835         iov_copy = MIN(*copy_size, pdma_st->iov->iov_len - pdma_st->iov_ofs);
1836         igb_write_payload_frag_to_rx_buffers(core, d,
1837                                              pdma_st,
1838                                              pdma_st->iov->iov_base +
1839                                              pdma_st->iov_ofs,
1840                                              iov_copy);
1841 
1842         *copy_size -= iov_copy;
1843         pdma_st->iov_ofs += iov_copy;
1844         if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
1845             pdma_st->iov++;
1846             pdma_st->iov_ofs = 0;
1847         }
1848     }
1849 
1850     if (pdma_st->desc_offset + pdma_st->desc_size >= pdma_st->total_size) {
1851         /* Simulate FCS checksum presence in the last descriptor */
1852         igb_write_payload_frag_to_rx_buffers(core, d,
1853                                              pdma_st,
1854                                              (const char *) &fcs_pad,
1855                                              e1000x_fcs_len(core->mac));
1856     }
1857 }
1858 
/*
 * DMA as much of the packet as fits into the current descriptor's
 * buffers.  Sets pdma_st->desc_size to the byte count charged to this
 * descriptor (FCS included) and consumes the corresponding packet data.
 */
static void
igb_write_to_rx_buffers(IGBCore *core,
                        struct NetRxPkt *pkt,
                        PCIDevice *d,
                        IGBPacketRxDMAState *pdma_st)
{
    size_t copy_size;

    if (!(pdma_st->ba)[1] || (pdma_st->do_ps && !(pdma_st->ba[0]))) {
        /* as per intel docs; skip descriptors with null buf addr */
        trace_e1000e_rx_null_descriptor();
        return;
    }

    /* All packet bytes already written; nothing left for this descriptor. */
    if (pdma_st->desc_offset >= pdma_st->size) {
        return;
    }

    /* Bytes charged to this descriptor, bounded by its buffer capacity. */
    pdma_st->desc_size = pdma_st->total_size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &pdma_st->desc_size);
    /* Actual packet bytes to copy (total_size additionally covers FCS). */
    copy_size = pdma_st->size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &copy_size);

    /* For PS mode copy the packet header first */
    if (pdma_st->do_ps) {
        igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
    } else {
        /* Non-PS descriptors place all data starting at buffer 1. */
        pdma_st->bastate.cur_idx = 1;
    }

    igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
}
1891 
/*
 * Write one received packet into the guest's RX ring: walk descriptors
 * from the head, DMA packet data into their buffers, write back each
 * descriptor, then update per-ring statistics.
 *
 * @etqf/@ts carry EtherType-filter match and timestamp info for the
 * descriptor write-back; @rss_info supplies the RSS hash/type fields.
 */
static void
igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
                          const E1000E_RxRing *rxr,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t etqf, bool ts)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_rx_desc_union desc;
    const E1000ERingInfo *rxi;
    size_t rx_desc_len;

    IGBPacketRxDMAState pdma_st = {0};
    pdma_st.is_first = true;
    pdma_st.size = net_rx_pkt_get_total_len(pkt);
    /* total_size additionally accounts for the (optional) FCS bytes. */
    pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac);

    rxi = rxr->i;
    rx_desc_len = core->rx_desc_len;
    pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi);
    pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi);
    pdma_st.iov = net_rx_pkt_get_iovec(pkt);
    /* DMA on behalf of the VF owning this queue, or the PF itself. */
    d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st);

    do {
        /* Per-descriptor buffer-fill state starts from scratch. */
        memset(&pdma_st.bastate, 0, sizeof(IGBBAState));
        bool is_last = false;

        if (igb_ring_empty(core, rxi)) {
            return;
        }

        base = igb_ring_head_descr(core, rxi);
        pci_dma_read(d, base, &desc, rx_desc_len);
        trace_e1000e_rx_descr(rxi->idx, base, rx_desc_len);

        igb_read_rx_descr(core, &desc, &pdma_st, rxi);

        igb_write_to_rx_buffers(core, pkt, d, &pdma_st);
        pdma_st.desc_offset += pdma_st.desc_size;
        if (pdma_st.desc_offset >= pdma_st.total_size) {
            is_last = true;
        }

        /* Only the last descriptor carries the packet status/metadata. */
        igb_write_rx_descr(core, &desc,
                           is_last ? pkt : NULL,
                           rss_info,
                           etqf, ts,
                           &pdma_st,
                           rxi);
        igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len);
        igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN);
    } while (pdma_st.desc_offset < pdma_st.total_size);

    igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size);
}
1953 
1954 static bool
igb_rx_strip_vlan(IGBCore * core,const E1000ERingInfo * rxi)1955 igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi)
1956 {
1957     if (core->mac[MRQC] & 1) {
1958         uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
1959         /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
1960         return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
1961                 core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
1962                 core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
1963     }
1964 
1965     return e1000x_vlan_enabled(core->mac);
1966 }
1967 
1968 static inline void
igb_rx_fix_l4_csum(IGBCore * core,struct NetRxPkt * pkt)1969 igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
1970 {
1971     struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
1972 
1973     if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1974         net_rx_pkt_fix_l4_csum(pkt);
1975     }
1976 }
1977 
/*
 * Public RX entry point: hand an iovec-described frame to the core,
 * using the NIC's configured vnet-header setting.
 */
ssize_t
igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt)
{
    return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL);
}
1983 
/*
 * Core RX path: classify the frame, select destination queues, DMA the
 * packet into each enabled queue's ring, and raise the resulting
 * interrupt causes (ICR and EICR) once at the end.
 *
 * Returns the number of bytes consumed from @iov (including any vnet
 * header), or -1 when reception is disabled.  @external_tx, when
 * non-NULL, is used by the loopback path (see igb_receive_assign).
 */
static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx)
{
    uint16_t queues = 0;          /* bitmap of target RX queues */
    uint32_t causes = 0;          /* accumulated ICR causes */
    uint32_t ecauses = 0;         /* accumulated EICR causes */
    union {
        L2Header l2_header;
        uint8_t octets[ETH_ZLEN];
    } buf;
    struct iovec min_iov;
    size_t size, orig_size;
    size_t iov_ofs = 0;
    E1000E_RxRing rxr;
    E1000E_RSSInfo rss_info;
    uint16_t etqf;
    bool ts;
    size_t total_size;
    int strip_vlan_index;
    int i;

    trace_e1000e_rx_receive_iov(iovcnt);

    if (external_tx) {
        *external_tx = true;
    }

    if (!e1000x_hw_rx_enabled(core->mac)) {
        return -1;
    }

    /* Pull virtio header in */
    if (has_vnet) {
        net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
        iov_ofs = sizeof(struct virtio_net_hdr);
    } else {
        net_rx_pkt_unset_vhdr(core->rx_pkt);
    }

    orig_size = iov_size(iov, iovcnt);
    size = orig_size - iov_ofs;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(buf)) {
        /* Short frame: copy into a zero-padded bounce buffer and count RUC. */
        iov_to_buf(iov, iovcnt, iov_ofs, &buf, size);
        memset(&buf.octets[size], 0, sizeof(buf) - size);
        e1000x_inc_reg_if_not_full(core->mac, RUC);
        min_iov.iov_base = &buf;
        min_iov.iov_len = size = sizeof(buf);
        iovcnt = 1;
        iov = &min_iov;
        iov_ofs = 0;
    } else {
        /* Only the L2 header is needed in buf for classification. */
        iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header));
    }

    net_rx_pkt_set_packet_type(core->rx_pkt,
                               get_eth_packet_type(&buf.l2_header.eth));
    net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs);

    /* Run MAC/VLAN/VMDq/RSS filtering to pick the destination queues. */
    queues = igb_receive_assign(core, iov, iovcnt, iov_ofs,
                                &buf.l2_header, size,
                                &rss_info, &etqf, &ts, external_tx);
    if (!queues) {
        trace_e1000e_rx_flt_dropped();
        return orig_size;
    }

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        /* Skip queues that are not targeted or not enabled. */
        if (!(queues & BIT(i)) ||
            !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);

        /*
         * -1: keep the tag; otherwise the index of the VLAN tag to strip.
         * NOTE(review): CTRL_EXT bit 26 appears to select the second tag
         * (double-VLAN) — confirm against the 82576 datasheet.
         */
        if (!igb_rx_strip_vlan(core, rxr.i)) {
            strip_vlan_index = -1;
        } else if (core->mac[CTRL_EXT] & BIT(26)) {
            strip_vlan_index = 1;
        } else {
            strip_vlan_index = 0;
        }

        net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
                                   strip_vlan_index,
                                   core->mac[VET] & 0xffff,
                                   core->mac[VET] >> 16);

        total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
            e1000x_fcs_len(core->mac);

        /* Ring too full for this packet: record overrun and move on. */
        if (!igb_has_rxbufs(core, rxr.i, total_size)) {
            causes |= E1000_ICS_RXO;
            trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
            continue;
        }

        causes |= E1000_ICR_RXDW;

        igb_rx_fix_l4_csum(core, core->rx_pkt);
        igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts);

        /* Check if receive descriptor minimum threshold hit */
        if (igb_rx_descr_threshold_hit(core, rxr.i)) {
            causes |= E1000_ICS_RXDMT0;
        }

        ecauses |= igb_rx_wb_eic(core, rxr.i->idx);

        trace_e1000e_rx_written_to_guest(rxr.i->idx);
    }

    trace_e1000e_rx_interrupt_set(causes);
    igb_raise_interrupts(core, EICR, ecauses);
    igb_raise_interrupts(core, ICR, causes);

    return orig_size;
}
2104 
2105 static inline bool
igb_have_autoneg(IGBCore * core)2106 igb_have_autoneg(IGBCore *core)
2107 {
2108     return core->phy[MII_BMCR] & MII_BMCR_AUTOEN;
2109 }
2110 
/*
 * After auto-negotiation completes, enable TX/RX flow control in CTRL;
 * otherwise just trace that flow control stays unconfigured.
 */
static void igb_update_flowctl_status(IGBCore *core)
{
    bool an_done = igb_have_autoneg(core) &&
                   (core->phy[MII_BMSR] & MII_BMSR_AN_COMP);

    if (an_done) {
        trace_e1000e_link_autoneg_flowctl(true);
        core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
    } else {
        trace_e1000e_link_autoneg_flowctl(false);
    }
}
2120 
/* Take the emulated link down and recompute flow-control state. */
static inline void
igb_link_down(IGBCore *core)
{
    e1000x_update_regs_on_link_down(core->mac, core->phy);
    igb_update_flowctl_status(core);
}
2127 
/*
 * PHY BMCR (control register) write: mask off reserved and self-clearing
 * bits, and restart auto-negotiation when requested and enabled.
 */
static inline void
igb_set_phy_ctrl(IGBCore *core, uint16_t val)
{
    /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
    core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART);

    if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) {
        e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer);
    }
}
2138 
/*
 * Propagate the backend (netdev) link state into the emulated MAC/PHY:
 * re-run auto-negotiation on link-up when it has not completed yet, and
 * raise LSC whenever the STATUS register actually changed.
 */
void igb_core_set_link_status(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);
    uint32_t old_status = core->mac[STATUS];

    trace_e1000e_link_status_changed(nc->link_down ? false : true);

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(core->mac, core->phy);
    } else {
        if (igb_have_autoneg(core) &&
            !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
            /* Autoneg enabled but not complete: (re)start it first. */
            e1000x_restart_autoneg(core->mac, core->phy,
                                   core->autoneg_timer);
        } else {
            e1000x_update_regs_on_link_up(core->mac, core->phy);
            igb_start_recv(core);
        }
    }

    if (core->mac[STATUS] != old_status) {
        igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
    }
}
2163 
/*
 * CTRL write handler: latch the value (RST self-clears), mirror it into
 * CTRL_DUP, and act on software reset / PHY reset requests.
 */
static void
igb_set_ctrl(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_core_ctrl_write(index, val);

    /* RST is self clearing */
    core->mac[CTRL] = val & ~E1000_CTRL_RST;
    core->mac[CTRL_DUP] = core->mac[CTRL];

    trace_e1000e_link_set_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    if (val & E1000_CTRL_RST) {
        trace_e1000e_core_ctrl_sw_reset();
        igb_reset(core, true);
    }

    if (val & E1000_CTRL_PHY_RST) {
        trace_e1000e_core_ctrl_phy_reset();
        /* Completion of PHY reset is reported via STATUS.PHYRA. */
        core->mac[STATUS] |= E1000_STATUS_PHYRA;
    }
}
2191 
/*
 * RFCTL write handler: store the value and warn about filtering
 * features (iSCSI, NFS write/read) this model does not implement.
 */
static void
igb_set_rfctl(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_rx_set_rfctl(val);

    if (!(val & E1000_RFCTL_ISCSI_DIS)) {
        trace_e1000e_wrn_iscsi_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSW_DIS)) {
        trace_e1000e_wrn_nfsw_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSR_DIS)) {
        trace_e1000e_wrn_nfsr_filtering_not_supported();
    }

    core->mac[RFCTL] = val;
}
2211 
2212 static void
igb_calc_rxdesclen(IGBCore * core)2213 igb_calc_rxdesclen(IGBCore *core)
2214 {
2215     if (igb_rx_use_legacy_descriptor(core)) {
2216         core->rx_desc_len = sizeof(struct e1000_rx_desc);
2217     } else {
2218         core->rx_desc_len = sizeof(union e1000_adv_rx_desc);
2219     }
2220     trace_e1000e_rx_desc_len(core->rx_desc_len);
2221 }
2222 
/*
 * RCTL write handler: store the value, reject unsupported descriptor
 * types, and (re)start reception when the enable bit is set.
 */
static void
igb_set_rx_control(IGBCore *core, int index, uint32_t val)
{
    core->mac[RCTL] = val;
    trace_e1000e_rx_set_rctl(core->mac[RCTL]);

    if (val & E1000_RCTL_DTYP_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "igb: RCTL.DTYP must be zero for compatibility");
    }

    if (val & E1000_RCTL_EN) {
        /* Descriptor size depends on the legacy/advanced mode in force. */
        igb_calc_rxdesclen(core);
        igb_start_recv(core);
    }
}
2239 
2240 static inline bool
igb_postpone_interrupt(IGBIntrDelayTimer * timer)2241 igb_postpone_interrupt(IGBIntrDelayTimer *timer)
2242 {
2243     if (timer->running) {
2244         trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
2245 
2246         return true;
2247     }
2248 
2249     if (timer->core->mac[timer->delay_reg] != 0) {
2250         igb_intrmgr_rearm_timer(timer);
2251     }
2252 
2253     return false;
2254 }
2255 
/* Whether delivery on EITR vector @idx must wait for its delay timer. */
static inline bool
igb_eitr_should_postpone(IGBCore *core, int idx)
{
    return igb_postpone_interrupt(&core->eitr[idx]);
}
2261 
/*
 * Fire an MSI-X message for every cause bit in @causes whose EITR
 * moderation timer does not postpone delivery.
 */
static void igb_send_msix(IGBCore *core, uint32_t causes)
{
    int vec;

    for (vec = 0; vec < IGB_INTR_NUM; vec++) {
        /* Postpone check runs (and may arm a timer) only for set bits. */
        if (!(causes & BIT(vec)) || igb_eitr_should_postpone(core, vec)) {
            continue;
        }

        trace_e1000e_irq_msix_notify_vec(vec);
        igb_msix_notify(core, vec);
    }
}
2274 
2275 static inline void
igb_fix_icr_asserted(IGBCore * core)2276 igb_fix_icr_asserted(IGBCore *core)
2277 {
2278     core->mac[ICR] &= ~E1000_ICR_ASSERTED;
2279     if (core->mac[ICR]) {
2280         core->mac[ICR] |= E1000_ICR_ASSERTED;
2281     }
2282 
2283     trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
2284 }
2285 
/*
 * Set cause bits in register @index (ICR or EICR) and deliver the
 * resulting interrupt.  In MSI-X (GPIE.MSIX_MODE) mode, legacy ICR
 * causes are first routed into EICR via IVAR_MISC; otherwise a single
 * MSI-X vector 0 / MSI / legacy IRQ is used.
 */
static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    /* Snapshot causes that were already pending and unmasked. */
    uint32_t old_causes = core->mac[ICR] & core->mac[IMS];
    uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS];
    uint32_t raised_causes;
    uint32_t raised_ecauses;
    uint32_t int_alloc;

    trace_e1000e_irq_set(index << 2,
                         core->mac[index], core->mac[index] | causes);

    core->mac[index] |= causes;

    if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) {
        /* Legacy causes newly raised and unmasked since the snapshot. */
        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;

        /* DRSTA is routed via the low byte of IVAR_MISC. */
        if (raised_causes & E1000_ICR_DRSTA) {
            int_alloc = core->mac[IVAR_MISC] & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }
        /* Check if other bits (excluding the TCP Timer) are enabled. */
        if (raised_causes & ~E1000_ICR_DRSTA) {
            int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }

        raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses;
        if (!raised_ecauses) {
            return;
        }

        igb_send_msix(core, raised_ecauses);
    } else {
        igb_fix_icr_asserted(core);

        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
        if (!raised_causes) {
            return;
        }

        /* Mirror into EICR so the driver also sees the "other" cause. */
        core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER;

        if (msix_enabled(core->owner)) {
            trace_e1000e_irq_msix_notify_vec(0);
            msix_notify(core->owner, 0);
        } else if (msi_enabled(core->owner)) {
            trace_e1000e_irq_msi_notify(raised_causes);
            msi_notify(core->owner, 0);
        } else {
            igb_raise_legacy_irq(core);
        }
    }
}
2343 
/*
 * Clear cause bits in register @index and, outside MSI-X mode, retract
 * the mirrored EICR "other" bit and deassert the legacy IRQ line when
 * no unmasked cause remains pending.
 */
static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    trace_e1000e_irq_clear(index << 2,
                           core->mac[index], core->mac[index] & ~causes);

    core->mac[index] &= ~causes;

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (!(core->mac[ICR] & core->mac[IMS]) &&
        !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
        core->mac[EICR] &= ~E1000_EICR_OTHER;

        if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) {
            igb_lower_legacy_irq(core);
        }
    }
}
2363 
/* EICS write handler: software-set extended interrupt causes. */
static void igb_set_eics(IGBCore *core, int index, uint32_t val)
{
    bool msix_mode = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t valid = msix_mode ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eics(val, msix_mode);
    igb_raise_interrupts(core, EICR, val & valid);
}
2372 
/* EIMS write handler: unmask extended interrupt causes. */
static void igb_set_eims(IGBCore *core, int index, uint32_t val)
{
    bool msix_mode = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t valid = msix_mode ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eims(val, msix_mode);
    igb_raise_interrupts(core, EIMS, val & valid);
}
2381 
/* Raise the mailbox cause towards VF @vfn if its VTIVAR entry is valid. */
static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
{
    uint32_t ent = core->mac[VTIVAR_MISC + vfn];

    if (!(ent & E1000_IVAR_VALID)) {
        return;
    }

    igb_raise_interrupts(core, EICR,
                         (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}
2392 
/* Raise the PF's mailbox interrupt cause (also used for VFLR). */
static void mailbox_interrupt_to_pf(IGBCore *core)
{
    igb_raise_interrupts(core, ICR, E1000_ICR_VMMB);
}
2397 
/*
 * PF-side mailbox control register (P2VMAILBOX[vfn]) write handler:
 * PF->VF signalling bits plus the PFU/VFU buffer-ownership handshake.
 */
static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - P2VMAILBOX0;

    trace_igb_set_pfmailbox(vfn, val);

    /* STS: tell the VF that the PF posted a message. */
    if (val & E1000_P2VMAILBOX_STS) {
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
        mailbox_interrupt_to_vf(core, vfn);
    }

    /* ACK: tell the VF that the PF consumed its message. */
    if (val & E1000_P2VMAILBOX_ACK) {
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
        mailbox_interrupt_to_vf(core, vfn);
    }

    /* Buffer Taken by PF (can be set only if the VFU is cleared). */
    if (val & E1000_P2VMAILBOX_PFU) {
        if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
            core->mac[index] |= E1000_P2VMAILBOX_PFU;
            core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
        }
    } else {
        core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
    }

    /* RVFU: force-release the VF's claim and clear its MBVFICR bits. */
    if (val & E1000_P2VMAILBOX_RVFU) {
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
                                (E1000_MBVFICR_VFREQ_VF1 << vfn));
    }
}
2431 
/*
 * VF-side mailbox control register (V2PMAILBOX[vfn]) write handler:
 * VF->PF request/ack signalling plus the VFU buffer-ownership bit.
 */
static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - V2PMAILBOX0;

    trace_igb_set_vfmailbox(vfn, val);

    /* REQ: the VF posted a message for the PF. */
    if (val & E1000_V2PMAILBOX_REQ) {
        core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    /* ACK: the VF consumed a PF message. */
    if (val & E1000_V2PMAILBOX_ACK) {
        core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    /* Buffer Taken by VF (can be set only if the PFU is cleared). */
    if (val & E1000_V2PMAILBOX_VFU) {
        if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
            core->mac[index] |= E1000_V2PMAILBOX_VFU;
            core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
        }
    } else {
        core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
    }
}
2459 
/*
 * Reset VF @vfn: disable its two queues, clear its RX/TX enable bits,
 * and signal VFLR to the PF (shares the mailbox interrupt cause).
 */
void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
{
    /* Each VF owns queue vfn and queue vfn + IGB_NUM_VM_POOLS. */
    uint16_t qn0 = vfn;
    uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;

    trace_igb_core_vf_reset(vfn);

    /* disable Rx and Tx for the VF*/
    core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[VFRE] &= ~BIT(vfn);
    core->mac[VFTE] &= ~BIT(vfn);
    /* indicate VF reset to PF */
    core->mac[VFLRE] |= BIT(vfn);
    /* VFLRE and mailbox use the same interrupt cause */
    mailbox_interrupt_to_pf(core);
}
2479 
/* Generic "write 1 to clear" register handler. */
static void igb_w1c(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] &= ~val;
}
2484 
/* EIMC write handler: mask extended interrupt causes. */
static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
{
    bool msix_mode = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t valid = msix_mode ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eimc(val, msix_mode);

    /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
    igb_lower_interrupts(core, EIMS, val & valid);
}
2495 
/* EIAC write handler: accumulate auto-clear bits (MSI-X mode only). */
static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
{
    if (!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
        return;
    }

    trace_igb_irq_write_eiac(val);

    /*
     * TODO: When using IOV, the bits that correspond to MSI-X vectors
     * that are assigned to a VF are read-only.
     */
    core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
}
2510 
/*
 * EIAM write handler: enable auto-masking for the written vectors.
 *
 * Fix: the previous code ORed in the *complement* (~(val & mask)),
 * which set every EIAM bit except the ones the driver asked for —
 * inverting the register's meaning.  OR in the masked value directly,
 * matching the e1000e counterpart (e1000e_set_eiam).
 *
 * TODO: When using IOV, the bits that correspond to MSI-X vectors that
 * are assigned to a VF are read-only.
 */
static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);

    core->mac[EIAM] |=
        val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK);

    trace_igb_irq_write_eiam(val, msix);
}
2524 
/* EICR is write-1-to-clear: drop the selected extended cause bits. */
static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t mask;

    /*
     * TODO: In IOV mode, only bit zero of this vector is available for the PF
     * function.
     */
    if (msix) {
        mask = E1000_EICR_MSIX_MASK;
    } else {
        mask = E1000_EICR_LEGACY_MASK;
    }

    trace_igb_irq_write_eicr(val, msix);
    igb_lower_interrupts(core, EICR, val & mask);
}
2538 
/* VF CTRL write: only the reset request bit is acted upon here. */
static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn;

    if (!(val & E1000_CTRL_RST)) {
        return;
    }

    /* Each VF register bank is 0x40 registers apart. */
    vfn = (index - PVTCTRL0) / 0x40;
    igb_core_vf_reset(core, vfn);
}
2548 
/* VF EICS write: mirror the VF's vector set into the PF-global EICS. */
static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEICS0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eics(core, EICS, shifted);
}
2556 
/* VF EIMS write: mirror the VF's vector set into the PF-global EIMS. */
static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEIMS0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eims(core, EIMS, shifted);
}
2564 
/* VF EIMC write: mirror the VF's vector set into the PF-global EIMC. */
static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEIMC0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eimc(core, EIMC, shifted);
}
2572 
/* VF EIAC write: mirror the VF's vector set into the PF-global EIAC. */
static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEIAC0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eiac(core, EIAC, shifted);
}
2580 
/* VF EIAM write: mirror the VF's vector set into the PF-global EIAM. */
static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEIAM0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eiam(core, EIAM, shifted);
}
2588 
/* VF EICR write: mirror the VF's vector set into the PF-global EICR. */
static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
{
    uint16_t vf = (index - PVTEICR0) / 0x40;
    uint32_t shifted = (val & 0x7) << (22 - vf * IGBVF_MSIX_VEC_NUM);

    core->mac[index] = val;
    igb_set_eicr(core, EICR, shifted);
}
2596 
/*
 * VF IVAR write: translate the VF-local vector assignments for queue
 * pair #0 into entries of the PF-global IVAR0 table.
 */
static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - VTIVAR);
    uint16_t qn = vfn;  /* VF n owns queue n for its first queue pair */
    uint8_t ent;
    int n;

    core->mac[index] = val;

    /* Get assigned vector associated with queue Rx#0. */
    if ((val & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_rx(qn);
        /*
         * Convert the VF-local vector number (low 3 bits) to the global
         * MSI-X vector index; the constants 24 and 2 presumably encode the
         * per-VF vector layout (IGBVF_MSIX_VEC_NUM vectors per VF) - see
         * the matching shift in igb_set_vteics() and friends.
         */
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /* Get assigned vector associated with queue Tx#0 */
    ent = val >> 8;
    if ((ent & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_tx(qn);
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /*
     * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
     */
}
2625 
2626 static inline void
igb_autoneg_timer(void * opaque)2627 igb_autoneg_timer(void *opaque)
2628 {
2629     IGBCore *core = opaque;
2630     if (!qemu_get_queue(core->owner_nic)->link_down) {
2631         e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
2632         igb_start_recv(core);
2633 
2634         igb_update_flowctl_status(core);
2635         /* signal link status change to the guest */
2636         igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
2637     }
2638 }
2639 
2640 static inline uint16_t
igb_get_reg_index_with_offset(const uint16_t * mac_reg_access,hwaddr addr)2641 igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
2642 {
2643     uint16_t index = (addr & 0x1ffff) >> 2;
2644     return index + (mac_reg_access[index] & 0xfffe);
2645 }
2646 
/*
 * Per-register access rights for the emulated PHY: PHY_R (readable),
 * PHY_W (writable) or PHY_RW. Registers absent from this table fail both
 * read and write MDIC operations (see igb_set_mdic()).
 */
static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
    [MII_BMCR]                   = PHY_RW,
    [MII_BMSR]                   = PHY_R,
    [MII_PHYID1]                 = PHY_R,
    [MII_PHYID2]                 = PHY_R,
    [MII_ANAR]                   = PHY_RW,
    [MII_ANLPAR]                 = PHY_R,
    [MII_ANER]                   = PHY_R,
    [MII_ANNP]                   = PHY_RW,
    [MII_ANLPRNP]                = PHY_R,
    [MII_CTRL1000]               = PHY_RW,
    [MII_STAT1000]               = PHY_R,
    [MII_EXTSTAT]                = PHY_R,

    [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
    [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
    [IGP01E1000_PHY_PORT_CTRL]   = PHY_RW,
    [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
    [IGP02E1000_PHY_POWER_MGMT]  = PHY_RW,
    [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
};
2668 
2669 static void
igb_phy_reg_write(IGBCore * core,uint32_t addr,uint16_t data)2670 igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
2671 {
2672     assert(addr <= MAX_PHY_REG_ADDRESS);
2673 
2674     if (addr == MII_BMCR) {
2675         igb_set_phy_ctrl(core, data);
2676     } else {
2677         core->phy[addr] = data;
2678     }
2679 }
2680 
/*
 * Emulate an MDIO transaction requested through MDIC. The result (read
 * data and/or the error flag) is latched back into MDIC with READY set,
 * and an MDIO-access-complete interrupt is raised if requested.
 */
static void
igb_set_mdic(IGBCore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        /* Only PHY address 1 is emulated; others report an error. */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!(igb_phy_regcap[addr] & PHY_R)) {
            trace_igb_core_mdic_read_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* XOR clears the request's data field; OR in the PHY value. */
            val = (val ^ data) | core->phy[addr];
            trace_igb_core_mdic_read(addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!(igb_phy_regcap[addr] & PHY_W)) {
            trace_igb_core_mdic_write_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_igb_core_mdic_write(addr, data);
            igb_phy_reg_write(core, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        igb_raise_interrupts(core, ICR, E1000_ICR_MDAC);
    }
}
2712 
2713 static void
igb_set_rdt(IGBCore * core,int index,uint32_t val)2714 igb_set_rdt(IGBCore *core, int index, uint32_t val)
2715 {
2716     core->mac[index] = val & 0xffff;
2717     trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
2718     igb_start_recv(core);
2719 }
2720 
2721 static void
igb_set_status(IGBCore * core,int index,uint32_t val)2722 igb_set_status(IGBCore *core, int index, uint32_t val)
2723 {
2724     if ((val & E1000_STATUS_PHYRA) == 0) {
2725         core->mac[index] &= ~E1000_STATUS_PHYRA;
2726     }
2727 }
2728 
2729 static void
igb_set_ctrlext(IGBCore * core,int index,uint32_t val)2730 igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
2731 {
2732     trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
2733                                   !!(val & E1000_CTRL_EXT_SPD_BYPS),
2734                                   !!(val & E1000_CTRL_EXT_PFRSTD));
2735 
2736     /* Zero self-clearing bits */
2737     val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
2738     core->mac[CTRL_EXT] = val;
2739 
2740     if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) {
2741         for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
2742             core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
2743             core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD;
2744         }
2745     }
2746 }
2747 
2748 static void
igb_set_pbaclr(IGBCore * core,int index,uint32_t val)2749 igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
2750 {
2751     int i;
2752 
2753     core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
2754 
2755     if (!msix_enabled(core->owner)) {
2756         return;
2757     }
2758 
2759     for (i = 0; i < IGB_INTR_NUM; i++) {
2760         if (core->mac[PBACLR] & BIT(i)) {
2761             msix_clr_pending(core->owner, i);
2762         }
2763     }
2764 }
2765 
2766 static void
igb_set_fcrth(IGBCore * core,int index,uint32_t val)2767 igb_set_fcrth(IGBCore *core, int index, uint32_t val)
2768 {
2769     core->mac[FCRTH] = val & 0xFFF8;
2770 }
2771 
2772 static void
igb_set_fcrtl(IGBCore * core,int index,uint32_t val)2773 igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
2774 {
2775     core->mac[FCRTL] = val & 0x8000FFF8;
2776 }
2777 
/*
 * Generate write handlers that keep only the low `num` bits of the value
 * (used for registers whose upper bits are reserved).
 */
#define IGB_LOW_BITS_SET_FUNC(num)                             \
    static void                                                \
    igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
    {                                                          \
        core->mac[index] = val & (BIT(num) - 1);               \
    }

IGB_LOW_BITS_SET_FUNC(4)
IGB_LOW_BITS_SET_FUNC(13)
IGB_LOW_BITS_SET_FUNC(16)
2788 
2789 static void
igb_set_dlen(IGBCore * core,int index,uint32_t val)2790 igb_set_dlen(IGBCore *core, int index, uint32_t val)
2791 {
2792     core->mac[index] = val & 0xffff0;
2793 }
2794 
2795 static void
igb_set_dbal(IGBCore * core,int index,uint32_t val)2796 igb_set_dbal(IGBCore *core, int index, uint32_t val)
2797 {
2798     core->mac[index] = val & E1000_XDBAL_MASK;
2799 }
2800 
2801 static void
igb_set_tdt(IGBCore * core,int index,uint32_t val)2802 igb_set_tdt(IGBCore *core, int index, uint32_t val)
2803 {
2804     IGB_TxRing txr;
2805     int qn = igb_mq_queue_idx(TDT0, index);
2806 
2807     core->mac[index] = val & 0xffff;
2808 
2809     igb_tx_ring_init(core, &txr, qn);
2810     igb_start_xmit(core, &txr);
2811 }
2812 
/* ICS write: raise the written legacy interrupt cause bits. */
static void
igb_set_ics(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    igb_raise_interrupts(core, ICR, val);
}
2819 
/* IMC write: mask (clear) the given bits in IMS. */
static void
igb_set_imc(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    igb_lower_interrupts(core, IMS, val);
}
2826 
/* IMS write: unmask interrupts, limited to the implemented cause bits. */
static void
igb_set_ims(IGBCore *core, int index, uint32_t val)
{
    igb_raise_interrupts(core, IMS, val & 0x77D4FBFD);
}
2832 
static void igb_nsicr(IGBCore *core)
{
    /*
     * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
     * least one bit is set in the IMS and there is a true interrupt as
     * reflected in ICR.INTA.
     */
    bool nsicr = core->mac[GPIE] & E1000_GPIE_NSICR;
    bool asserted = core->mac[IMS] &&
                    (core->mac[ICR] & E1000_ICR_INT_ASSERTED);

    if (nsicr || asserted) {
        igb_lower_interrupts(core, IMS, core->mac[IAM]);
    }
}
2845 
/*
 * ICR is write-1-to-clear. Auto-masking (igb_nsicr) must run first, while
 * the pre-write ICR/IMS state is still visible.
 */
static void igb_set_icr(IGBCore *core, int index, uint32_t val)
{
    igb_nsicr(core);
    igb_lower_interrupts(core, ICR, val);
}
2851 
/* Default read handler: return the stored register value unmodified. */
static uint32_t
igb_mac_readreg(IGBCore *core, int index)
{
    return core->mac[index];
}
2857 
2858 static uint32_t
igb_mac_ics_read(IGBCore * core,int index)2859 igb_mac_ics_read(IGBCore *core, int index)
2860 {
2861     trace_e1000e_irq_read_ics(core->mac[ICS]);
2862     return core->mac[ICS];
2863 }
2864 
2865 static uint32_t
igb_mac_ims_read(IGBCore * core,int index)2866 igb_mac_ims_read(IGBCore *core, int index)
2867 {
2868     trace_e1000e_irq_read_ims(core->mac[IMS]);
2869     return core->mac[IMS];
2870 }
2871 
2872 static uint32_t
igb_mac_swsm_read(IGBCore * core,int index)2873 igb_mac_swsm_read(IGBCore *core, int index)
2874 {
2875     uint32_t val = core->mac[SWSM];
2876     core->mac[SWSM] = val | E1000_SWSM_SMBI;
2877     return val;
2878 }
2879 
2880 static uint32_t
igb_mac_eitr_read(IGBCore * core,int index)2881 igb_mac_eitr_read(IGBCore *core, int index)
2882 {
2883     return core->eitr_guest_value[index - EITR0];
2884 }
2885 
igb_mac_vfmailbox_read(IGBCore * core,int index)2886 static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
2887 {
2888     uint32_t val = core->mac[index];
2889 
2890     core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
2891                           E1000_V2PMAILBOX_RSTD);
2892 
2893     return val;
2894 }
2895 
/*
 * ICR read, applying read-to-clear semantics: the whole register is
 * cleared when GPIE.NSICR is set, when the interrupt mask is empty, when
 * INTA is asserted, or when the device is not in MSI-X mode. Each branch
 * fires its own trace point.
 */
static uint32_t
igb_mac_icr_read(IGBCore *core, int index)
{
    uint32_t ret = core->mac[ICR];

    if (core->mac[GPIE] & E1000_GPIE_NSICR) {
        trace_igb_irq_icr_clear_gpie_nsicr();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) {
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (!msix_enabled(core->owner)) {
        trace_e1000e_irq_icr_clear_nonmsix_icr_read();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    }

    /* A read may also auto-mask interrupts via IAM (see igb_nsicr). */
    igb_nsicr(core);
    return ret;
}
2917 
2918 static uint32_t
igb_mac_read_clr4(IGBCore * core,int index)2919 igb_mac_read_clr4(IGBCore *core, int index)
2920 {
2921     uint32_t ret = core->mac[index];
2922 
2923     core->mac[index] = 0;
2924     return ret;
2925 }
2926 
2927 static uint32_t
igb_mac_read_clr8(IGBCore * core,int index)2928 igb_mac_read_clr8(IGBCore *core, int index)
2929 {
2930     uint32_t ret = core->mac[index];
2931 
2932     core->mac[index] = 0;
2933     core->mac[index - 1] = 0;
2934     return ret;
2935 }
2936 
2937 static uint32_t
igb_get_ctrl(IGBCore * core,int index)2938 igb_get_ctrl(IGBCore *core, int index)
2939 {
2940     uint32_t val = core->mac[CTRL];
2941 
2942     trace_e1000e_link_read_params(
2943         !!(val & E1000_CTRL_ASDE),
2944         (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
2945         !!(val & E1000_CTRL_FRCSPD),
2946         !!(val & E1000_CTRL_FRCDPX),
2947         !!(val & E1000_CTRL_RFCE),
2948         !!(val & E1000_CTRL_TFCE));
2949 
2950     return val;
2951 }
2952 
/*
 * STATUS read: synthesize duplex, speed, SR-IOV and GIO-master fields
 * from the current CTRL/CTRL_EXT state on every read.
 */
static uint32_t igb_get_status(IGBCore *core, int index)
{
    uint32_t res = core->mac[STATUS];
    uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);

    /* Duplex: honour CTRL.FD when forced, otherwise report full duplex. */
    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    /* Speed: honour forced/bypassed selection, defaulting to 1000 Mb/s. */
    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    /* Advertise IOV mode and the VF count when SR-IOV is active. */
    if (num_vfs) {
        res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
        res |= E1000_STATUS_IOV_MODE;
    }

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    return res;
}
2993 
/* Default write handler: store the value with no masking or side effects. */
static void
igb_mac_writereg(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}
2999 
3000 static void
igb_mac_setmacaddr(IGBCore * core,int index,uint32_t val)3001 igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
3002 {
3003     uint32_t macaddr[2];
3004 
3005     core->mac[index] = val;
3006 
3007     macaddr[0] = cpu_to_le32(core->mac[RA]);
3008     macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
3009     qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
3010         (uint8_t *) macaddr);
3011 
3012     trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
3013 }
3014 
3015 static void
igb_set_eecd(IGBCore * core,int index,uint32_t val)3016 igb_set_eecd(IGBCore *core, int index, uint32_t val)
3017 {
3018     static const uint32_t ro_bits = E1000_EECD_PRES          |
3019                                     E1000_EECD_AUTO_RD       |
3020                                     E1000_EECD_SIZE_EX_MASK;
3021 
3022     core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
3023 }
3024 
3025 static void
igb_set_eerd(IGBCore * core,int index,uint32_t val)3026 igb_set_eerd(IGBCore *core, int index, uint32_t val)
3027 {
3028     uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
3029     uint32_t flags = 0;
3030     uint32_t data = 0;
3031 
3032     if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
3033         data = core->eeprom[addr];
3034         flags = E1000_EERW_DONE;
3035     }
3036 
3037     core->mac[EERD] = flags                           |
3038                       (addr << E1000_EERW_ADDR_SHIFT) |
3039                       (data << E1000_EERW_DATA_SHIFT);
3040 }
3041 
3042 static void
igb_set_eitr(IGBCore * core,int index,uint32_t val)3043 igb_set_eitr(IGBCore *core, int index, uint32_t val)
3044 {
3045     uint32_t eitr_num = index - EITR0;
3046 
3047     trace_igb_irq_eitr_set(eitr_num, val);
3048 
3049     core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
3050     core->mac[index] = val & 0x7FFE;
3051 }
3052 
3053 static void
igb_update_rx_offloads(IGBCore * core)3054 igb_update_rx_offloads(IGBCore *core)
3055 {
3056     int cso_state = igb_rx_l4_cso_enabled(core);
3057 
3058     trace_e1000e_rx_set_cso(cso_state);
3059 
3060     if (core->has_vnet) {
3061         qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
3062                          cso_state, 0, 0, 0, 0, 0, 0);
3063     }
3064 }
3065 
/* RXCSUM write: store the value, then refresh backend offload settings. */
static void
igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    igb_update_rx_offloads(core);
}
3072 
3073 static void
igb_set_gcr(IGBCore * core,int index,uint32_t val)3074 igb_set_gcr(IGBCore *core, int index, uint32_t val)
3075 {
3076     uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
3077     core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
3078 }
3079 
/* SYSTIML read: latch the current time into SYSTIML/SYSTIMH first. */
static uint32_t igb_get_systiml(IGBCore *core, int index)
{
    e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
    return core->mac[SYSTIML];
}
3085 
/* RXSATRH read: clears TSYNCRXCTL.VALID so a new RX timestamp can latch. */
static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
{
    core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
    return core->mac[RXSATRH];
}
3091 
/* TXSTMPH read: clears TSYNCTXCTL.VALID so a new TX timestamp can latch. */
static uint32_t igb_get_txstmph(IGBCore *core, int index)
{
    core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
    return core->mac[TXSTMPH];
}
3097 
/* TIMINCA write: delegate to the shared e1000x time-increment helper. */
static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
{
    e1000x_set_timinca(core->mac, &core->timadj, val);
}
3102 
/*
 * TIMADJH write: storing the high word applies the full 64-bit
 * TIMADJL/TIMADJH adjustment to the tracked time offset.
 */
static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
{
    core->mac[TIMADJH] = val;
    core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
}
3108 
/* Shorthand for table entries served by the plain register read helper. */
#define igb_getreg(x)    [x] = igb_mac_readreg
/* Read-handler function type for the MAC register dispatch tables. */
typedef uint32_t (*readops)(IGBCore *, int);
3111 static const readops igb_macreg_readops[] = {
3112     igb_getreg(WUFC),
3113     igb_getreg(MANC),
3114     igb_getreg(TOTL),
3115     igb_getreg(RDT0),
3116     igb_getreg(RDT1),
3117     igb_getreg(RDT2),
3118     igb_getreg(RDT3),
3119     igb_getreg(RDT4),
3120     igb_getreg(RDT5),
3121     igb_getreg(RDT6),
3122     igb_getreg(RDT7),
3123     igb_getreg(RDT8),
3124     igb_getreg(RDT9),
3125     igb_getreg(RDT10),
3126     igb_getreg(RDT11),
3127     igb_getreg(RDT12),
3128     igb_getreg(RDT13),
3129     igb_getreg(RDT14),
3130     igb_getreg(RDT15),
3131     igb_getreg(RDBAH0),
3132     igb_getreg(RDBAH1),
3133     igb_getreg(RDBAH2),
3134     igb_getreg(RDBAH3),
3135     igb_getreg(RDBAH4),
3136     igb_getreg(RDBAH5),
3137     igb_getreg(RDBAH6),
3138     igb_getreg(RDBAH7),
3139     igb_getreg(RDBAH8),
3140     igb_getreg(RDBAH9),
3141     igb_getreg(RDBAH10),
3142     igb_getreg(RDBAH11),
3143     igb_getreg(RDBAH12),
3144     igb_getreg(RDBAH13),
3145     igb_getreg(RDBAH14),
3146     igb_getreg(RDBAH15),
3147     igb_getreg(TDBAL0),
3148     igb_getreg(TDBAL1),
3149     igb_getreg(TDBAL2),
3150     igb_getreg(TDBAL3),
3151     igb_getreg(TDBAL4),
3152     igb_getreg(TDBAL5),
3153     igb_getreg(TDBAL6),
3154     igb_getreg(TDBAL7),
3155     igb_getreg(TDBAL8),
3156     igb_getreg(TDBAL9),
3157     igb_getreg(TDBAL10),
3158     igb_getreg(TDBAL11),
3159     igb_getreg(TDBAL12),
3160     igb_getreg(TDBAL13),
3161     igb_getreg(TDBAL14),
3162     igb_getreg(TDBAL15),
3163     igb_getreg(RDLEN0),
3164     igb_getreg(RDLEN1),
3165     igb_getreg(RDLEN2),
3166     igb_getreg(RDLEN3),
3167     igb_getreg(RDLEN4),
3168     igb_getreg(RDLEN5),
3169     igb_getreg(RDLEN6),
3170     igb_getreg(RDLEN7),
3171     igb_getreg(RDLEN8),
3172     igb_getreg(RDLEN9),
3173     igb_getreg(RDLEN10),
3174     igb_getreg(RDLEN11),
3175     igb_getreg(RDLEN12),
3176     igb_getreg(RDLEN13),
3177     igb_getreg(RDLEN14),
3178     igb_getreg(RDLEN15),
3179     igb_getreg(SRRCTL0),
3180     igb_getreg(SRRCTL1),
3181     igb_getreg(SRRCTL2),
3182     igb_getreg(SRRCTL3),
3183     igb_getreg(SRRCTL4),
3184     igb_getreg(SRRCTL5),
3185     igb_getreg(SRRCTL6),
3186     igb_getreg(SRRCTL7),
3187     igb_getreg(SRRCTL8),
3188     igb_getreg(SRRCTL9),
3189     igb_getreg(SRRCTL10),
3190     igb_getreg(SRRCTL11),
3191     igb_getreg(SRRCTL12),
3192     igb_getreg(SRRCTL13),
3193     igb_getreg(SRRCTL14),
3194     igb_getreg(SRRCTL15),
3195     igb_getreg(LATECOL),
3196     igb_getreg(XONTXC),
3197     igb_getreg(TDFH),
3198     igb_getreg(TDFT),
3199     igb_getreg(TDFHS),
3200     igb_getreg(TDFTS),
3201     igb_getreg(TDFPC),
3202     igb_getreg(WUS),
3203     igb_getreg(RDFH),
3204     igb_getreg(RDFT),
3205     igb_getreg(RDFHS),
3206     igb_getreg(RDFTS),
3207     igb_getreg(RDFPC),
3208     igb_getreg(GORCL),
3209     igb_getreg(MGTPRC),
3210     igb_getreg(EERD),
3211     igb_getreg(EIAC),
3212     igb_getreg(MANC2H),
3213     igb_getreg(RXCSUM),
3214     igb_getreg(GSCL_3),
3215     igb_getreg(GSCN_2),
3216     igb_getreg(FCAH),
3217     igb_getreg(FCRTH),
3218     igb_getreg(FLOP),
3219     igb_getreg(RXSTMPH),
3220     igb_getreg(TXSTMPL),
3221     igb_getreg(TIMADJL),
3222     igb_getreg(RDH0),
3223     igb_getreg(RDH1),
3224     igb_getreg(RDH2),
3225     igb_getreg(RDH3),
3226     igb_getreg(RDH4),
3227     igb_getreg(RDH5),
3228     igb_getreg(RDH6),
3229     igb_getreg(RDH7),
3230     igb_getreg(RDH8),
3231     igb_getreg(RDH9),
3232     igb_getreg(RDH10),
3233     igb_getreg(RDH11),
3234     igb_getreg(RDH12),
3235     igb_getreg(RDH13),
3236     igb_getreg(RDH14),
3237     igb_getreg(RDH15),
3238     igb_getreg(TDT0),
3239     igb_getreg(TDT1),
3240     igb_getreg(TDT2),
3241     igb_getreg(TDT3),
3242     igb_getreg(TDT4),
3243     igb_getreg(TDT5),
3244     igb_getreg(TDT6),
3245     igb_getreg(TDT7),
3246     igb_getreg(TDT8),
3247     igb_getreg(TDT9),
3248     igb_getreg(TDT10),
3249     igb_getreg(TDT11),
3250     igb_getreg(TDT12),
3251     igb_getreg(TDT13),
3252     igb_getreg(TDT14),
3253     igb_getreg(TDT15),
3254     igb_getreg(TNCRS),
3255     igb_getreg(RJC),
3256     igb_getreg(IAM),
3257     igb_getreg(GSCL_2),
3258     igb_getreg(TIPG),
3259     igb_getreg(FLMNGCTL),
3260     igb_getreg(FLMNGCNT),
3261     igb_getreg(TSYNCTXCTL),
3262     igb_getreg(EEMNGDATA),
3263     igb_getreg(CTRL_EXT),
3264     igb_getreg(SYSTIMH),
3265     igb_getreg(EEMNGCTL),
3266     igb_getreg(FLMNGDATA),
3267     igb_getreg(TSYNCRXCTL),
3268     igb_getreg(LEDCTL),
3269     igb_getreg(TCTL),
3270     igb_getreg(TCTL_EXT),
3271     igb_getreg(DTXCTL),
3272     igb_getreg(RXPBS),
3273     igb_getreg(TDH0),
3274     igb_getreg(TDH1),
3275     igb_getreg(TDH2),
3276     igb_getreg(TDH3),
3277     igb_getreg(TDH4),
3278     igb_getreg(TDH5),
3279     igb_getreg(TDH6),
3280     igb_getreg(TDH7),
3281     igb_getreg(TDH8),
3282     igb_getreg(TDH9),
3283     igb_getreg(TDH10),
3284     igb_getreg(TDH11),
3285     igb_getreg(TDH12),
3286     igb_getreg(TDH13),
3287     igb_getreg(TDH14),
3288     igb_getreg(TDH15),
3289     igb_getreg(ECOL),
3290     igb_getreg(DC),
3291     igb_getreg(RLEC),
3292     igb_getreg(XOFFTXC),
3293     igb_getreg(RFC),
3294     igb_getreg(RNBC),
3295     igb_getreg(MGTPTC),
3296     igb_getreg(TIMINCA),
3297     igb_getreg(FACTPS),
3298     igb_getreg(GSCL_1),
3299     igb_getreg(GSCN_0),
3300     igb_getreg(PBACLR),
3301     igb_getreg(FCTTV),
3302     igb_getreg(RXSATRL),
3303     igb_getreg(TORL),
3304     igb_getreg(TDLEN0),
3305     igb_getreg(TDLEN1),
3306     igb_getreg(TDLEN2),
3307     igb_getreg(TDLEN3),
3308     igb_getreg(TDLEN4),
3309     igb_getreg(TDLEN5),
3310     igb_getreg(TDLEN6),
3311     igb_getreg(TDLEN7),
3312     igb_getreg(TDLEN8),
3313     igb_getreg(TDLEN9),
3314     igb_getreg(TDLEN10),
3315     igb_getreg(TDLEN11),
3316     igb_getreg(TDLEN12),
3317     igb_getreg(TDLEN13),
3318     igb_getreg(TDLEN14),
3319     igb_getreg(TDLEN15),
3320     igb_getreg(MCC),
3321     igb_getreg(WUC),
3322     igb_getreg(EECD),
3323     igb_getreg(FCRTV),
3324     igb_getreg(TXDCTL0),
3325     igb_getreg(TXDCTL1),
3326     igb_getreg(TXDCTL2),
3327     igb_getreg(TXDCTL3),
3328     igb_getreg(TXDCTL4),
3329     igb_getreg(TXDCTL5),
3330     igb_getreg(TXDCTL6),
3331     igb_getreg(TXDCTL7),
3332     igb_getreg(TXDCTL8),
3333     igb_getreg(TXDCTL9),
3334     igb_getreg(TXDCTL10),
3335     igb_getreg(TXDCTL11),
3336     igb_getreg(TXDCTL12),
3337     igb_getreg(TXDCTL13),
3338     igb_getreg(TXDCTL14),
3339     igb_getreg(TXDCTL15),
3340     igb_getreg(TXCTL0),
3341     igb_getreg(TXCTL1),
3342     igb_getreg(TXCTL2),
3343     igb_getreg(TXCTL3),
3344     igb_getreg(TXCTL4),
3345     igb_getreg(TXCTL5),
3346     igb_getreg(TXCTL6),
3347     igb_getreg(TXCTL7),
3348     igb_getreg(TXCTL8),
3349     igb_getreg(TXCTL9),
3350     igb_getreg(TXCTL10),
3351     igb_getreg(TXCTL11),
3352     igb_getreg(TXCTL12),
3353     igb_getreg(TXCTL13),
3354     igb_getreg(TXCTL14),
3355     igb_getreg(TXCTL15),
3356     igb_getreg(TDWBAL0),
3357     igb_getreg(TDWBAL1),
3358     igb_getreg(TDWBAL2),
3359     igb_getreg(TDWBAL3),
3360     igb_getreg(TDWBAL4),
3361     igb_getreg(TDWBAL5),
3362     igb_getreg(TDWBAL6),
3363     igb_getreg(TDWBAL7),
3364     igb_getreg(TDWBAL8),
3365     igb_getreg(TDWBAL9),
3366     igb_getreg(TDWBAL10),
3367     igb_getreg(TDWBAL11),
3368     igb_getreg(TDWBAL12),
3369     igb_getreg(TDWBAL13),
3370     igb_getreg(TDWBAL14),
3371     igb_getreg(TDWBAL15),
3372     igb_getreg(TDWBAH0),
3373     igb_getreg(TDWBAH1),
3374     igb_getreg(TDWBAH2),
3375     igb_getreg(TDWBAH3),
3376     igb_getreg(TDWBAH4),
3377     igb_getreg(TDWBAH5),
3378     igb_getreg(TDWBAH6),
3379     igb_getreg(TDWBAH7),
3380     igb_getreg(TDWBAH8),
3381     igb_getreg(TDWBAH9),
3382     igb_getreg(TDWBAH10),
3383     igb_getreg(TDWBAH11),
3384     igb_getreg(TDWBAH12),
3385     igb_getreg(TDWBAH13),
3386     igb_getreg(TDWBAH14),
3387     igb_getreg(TDWBAH15),
3388     igb_getreg(PVTCTRL0),
3389     igb_getreg(PVTCTRL1),
3390     igb_getreg(PVTCTRL2),
3391     igb_getreg(PVTCTRL3),
3392     igb_getreg(PVTCTRL4),
3393     igb_getreg(PVTCTRL5),
3394     igb_getreg(PVTCTRL6),
3395     igb_getreg(PVTCTRL7),
3396     igb_getreg(PVTEIMS0),
3397     igb_getreg(PVTEIMS1),
3398     igb_getreg(PVTEIMS2),
3399     igb_getreg(PVTEIMS3),
3400     igb_getreg(PVTEIMS4),
3401     igb_getreg(PVTEIMS5),
3402     igb_getreg(PVTEIMS6),
3403     igb_getreg(PVTEIMS7),
3404     igb_getreg(PVTEIAC0),
3405     igb_getreg(PVTEIAC1),
3406     igb_getreg(PVTEIAC2),
3407     igb_getreg(PVTEIAC3),
3408     igb_getreg(PVTEIAC4),
3409     igb_getreg(PVTEIAC5),
3410     igb_getreg(PVTEIAC6),
3411     igb_getreg(PVTEIAC7),
3412     igb_getreg(PVTEIAM0),
3413     igb_getreg(PVTEIAM1),
3414     igb_getreg(PVTEIAM2),
3415     igb_getreg(PVTEIAM3),
3416     igb_getreg(PVTEIAM4),
3417     igb_getreg(PVTEIAM5),
3418     igb_getreg(PVTEIAM6),
3419     igb_getreg(PVTEIAM7),
3420     igb_getreg(PVFGPRC0),
3421     igb_getreg(PVFGPRC1),
3422     igb_getreg(PVFGPRC2),
3423     igb_getreg(PVFGPRC3),
3424     igb_getreg(PVFGPRC4),
3425     igb_getreg(PVFGPRC5),
3426     igb_getreg(PVFGPRC6),
3427     igb_getreg(PVFGPRC7),
3428     igb_getreg(PVFGPTC0),
3429     igb_getreg(PVFGPTC1),
3430     igb_getreg(PVFGPTC2),
3431     igb_getreg(PVFGPTC3),
3432     igb_getreg(PVFGPTC4),
3433     igb_getreg(PVFGPTC5),
3434     igb_getreg(PVFGPTC6),
3435     igb_getreg(PVFGPTC7),
3436     igb_getreg(PVFGORC0),
3437     igb_getreg(PVFGORC1),
3438     igb_getreg(PVFGORC2),
3439     igb_getreg(PVFGORC3),
3440     igb_getreg(PVFGORC4),
3441     igb_getreg(PVFGORC5),
3442     igb_getreg(PVFGORC6),
3443     igb_getreg(PVFGORC7),
3444     igb_getreg(PVFGOTC0),
3445     igb_getreg(PVFGOTC1),
3446     igb_getreg(PVFGOTC2),
3447     igb_getreg(PVFGOTC3),
3448     igb_getreg(PVFGOTC4),
3449     igb_getreg(PVFGOTC5),
3450     igb_getreg(PVFGOTC6),
3451     igb_getreg(PVFGOTC7),
3452     igb_getreg(PVFMPRC0),
3453     igb_getreg(PVFMPRC1),
3454     igb_getreg(PVFMPRC2),
3455     igb_getreg(PVFMPRC3),
3456     igb_getreg(PVFMPRC4),
3457     igb_getreg(PVFMPRC5),
3458     igb_getreg(PVFMPRC6),
3459     igb_getreg(PVFMPRC7),
3460     igb_getreg(PVFGPRLBC0),
3461     igb_getreg(PVFGPRLBC1),
3462     igb_getreg(PVFGPRLBC2),
3463     igb_getreg(PVFGPRLBC3),
3464     igb_getreg(PVFGPRLBC4),
3465     igb_getreg(PVFGPRLBC5),
3466     igb_getreg(PVFGPRLBC6),
3467     igb_getreg(PVFGPRLBC7),
3468     igb_getreg(PVFGPTLBC0),
3469     igb_getreg(PVFGPTLBC1),
3470     igb_getreg(PVFGPTLBC2),
3471     igb_getreg(PVFGPTLBC3),
3472     igb_getreg(PVFGPTLBC4),
3473     igb_getreg(PVFGPTLBC5),
3474     igb_getreg(PVFGPTLBC6),
3475     igb_getreg(PVFGPTLBC7),
3476     igb_getreg(PVFGORLBC0),
3477     igb_getreg(PVFGORLBC1),
3478     igb_getreg(PVFGORLBC2),
3479     igb_getreg(PVFGORLBC3),
3480     igb_getreg(PVFGORLBC4),
3481     igb_getreg(PVFGORLBC5),
3482     igb_getreg(PVFGORLBC6),
3483     igb_getreg(PVFGORLBC7),
3484     igb_getreg(PVFGOTLBC0),
3485     igb_getreg(PVFGOTLBC1),
3486     igb_getreg(PVFGOTLBC2),
3487     igb_getreg(PVFGOTLBC3),
3488     igb_getreg(PVFGOTLBC4),
3489     igb_getreg(PVFGOTLBC5),
3490     igb_getreg(PVFGOTLBC6),
3491     igb_getreg(PVFGOTLBC7),
3492     igb_getreg(RCTL),
3493     igb_getreg(MDIC),
3494     igb_getreg(FCRUC),
3495     igb_getreg(VET),
3496     igb_getreg(RDBAL0),
3497     igb_getreg(RDBAL1),
3498     igb_getreg(RDBAL2),
3499     igb_getreg(RDBAL3),
3500     igb_getreg(RDBAL4),
3501     igb_getreg(RDBAL5),
3502     igb_getreg(RDBAL6),
3503     igb_getreg(RDBAL7),
3504     igb_getreg(RDBAL8),
3505     igb_getreg(RDBAL9),
3506     igb_getreg(RDBAL10),
3507     igb_getreg(RDBAL11),
3508     igb_getreg(RDBAL12),
3509     igb_getreg(RDBAL13),
3510     igb_getreg(RDBAL14),
3511     igb_getreg(RDBAL15),
3512     igb_getreg(TDBAH0),
3513     igb_getreg(TDBAH1),
3514     igb_getreg(TDBAH2),
3515     igb_getreg(TDBAH3),
3516     igb_getreg(TDBAH4),
3517     igb_getreg(TDBAH5),
3518     igb_getreg(TDBAH6),
3519     igb_getreg(TDBAH7),
3520     igb_getreg(TDBAH8),
3521     igb_getreg(TDBAH9),
3522     igb_getreg(TDBAH10),
3523     igb_getreg(TDBAH11),
3524     igb_getreg(TDBAH12),
3525     igb_getreg(TDBAH13),
3526     igb_getreg(TDBAH14),
3527     igb_getreg(TDBAH15),
3528     igb_getreg(SCC),
3529     igb_getreg(COLC),
3530     igb_getreg(XOFFRXC),
3531     igb_getreg(IPAV),
3532     igb_getreg(GOTCL),
3533     igb_getreg(MGTPDC),
3534     igb_getreg(GCR),
3535     igb_getreg(MFVAL),
3536     igb_getreg(FUNCTAG),
3537     igb_getreg(GSCL_4),
3538     igb_getreg(GSCN_3),
3539     igb_getreg(MRQC),
3540     igb_getreg(FCT),
3541     igb_getreg(FLA),
3542     igb_getreg(RXDCTL0),
3543     igb_getreg(RXDCTL1),
3544     igb_getreg(RXDCTL2),
3545     igb_getreg(RXDCTL3),
3546     igb_getreg(RXDCTL4),
3547     igb_getreg(RXDCTL5),
3548     igb_getreg(RXDCTL6),
3549     igb_getreg(RXDCTL7),
3550     igb_getreg(RXDCTL8),
3551     igb_getreg(RXDCTL9),
3552     igb_getreg(RXDCTL10),
3553     igb_getreg(RXDCTL11),
3554     igb_getreg(RXDCTL12),
3555     igb_getreg(RXDCTL13),
3556     igb_getreg(RXDCTL14),
3557     igb_getreg(RXDCTL15),
3558     igb_getreg(RXSTMPL),
3559     igb_getreg(TIMADJH),
3560     igb_getreg(FCRTL),
3561     igb_getreg(XONRXC),
3562     igb_getreg(RFCTL),
3563     igb_getreg(GSCN_1),
3564     igb_getreg(FCAL),
3565     igb_getreg(GPIE),
3566     igb_getreg(TXPBS),
3567     igb_getreg(RLPML),
3568 
3569     [TOTH]    = igb_mac_read_clr8,
3570     [GOTCH]   = igb_mac_read_clr8,
3571     [PRC64]   = igb_mac_read_clr4,
3572     [PRC255]  = igb_mac_read_clr4,
3573     [PRC1023] = igb_mac_read_clr4,
3574     [PTC64]   = igb_mac_read_clr4,
3575     [PTC255]  = igb_mac_read_clr4,
3576     [PTC1023] = igb_mac_read_clr4,
3577     [GPRC]    = igb_mac_read_clr4,
3578     [TPT]     = igb_mac_read_clr4,
3579     [RUC]     = igb_mac_read_clr4,
3580     [BPRC]    = igb_mac_read_clr4,
3581     [MPTC]    = igb_mac_read_clr4,
3582     [IAC]     = igb_mac_read_clr4,
3583     [ICR]     = igb_mac_icr_read,
3584     [STATUS]  = igb_get_status,
3585     [ICS]     = igb_mac_ics_read,
3586     /*
3587      * 8.8.10: Reading the IMC register returns the value of the IMS register.
3588      */
3589     [IMC]     = igb_mac_ims_read,
3590     [TORH]    = igb_mac_read_clr8,
3591     [GORCH]   = igb_mac_read_clr8,
3592     [PRC127]  = igb_mac_read_clr4,
3593     [PRC511]  = igb_mac_read_clr4,
3594     [PRC1522] = igb_mac_read_clr4,
3595     [PTC127]  = igb_mac_read_clr4,
3596     [PTC511]  = igb_mac_read_clr4,
3597     [PTC1522] = igb_mac_read_clr4,
3598     [GPTC]    = igb_mac_read_clr4,
3599     [TPR]     = igb_mac_read_clr4,
3600     [ROC]     = igb_mac_read_clr4,
3601     [MPRC]    = igb_mac_read_clr4,
3602     [BPTC]    = igb_mac_read_clr4,
3603     [TSCTC]   = igb_mac_read_clr4,
3604     [CTRL]    = igb_get_ctrl,
3605     [SWSM]    = igb_mac_swsm_read,
3606     [IMS]     = igb_mac_ims_read,
3607     [SYSTIML] = igb_get_systiml,
3608     [RXSATRH] = igb_get_rxsatrh,
3609     [TXSTMPH] = igb_get_txstmph,
3610 
3611     [CRCERRS ... MPC]      = igb_mac_readreg,
3612     [IP6AT ... IP6AT + 3]  = igb_mac_readreg,
3613     [IP4AT ... IP4AT + 6]  = igb_mac_readreg,
3614     [RA ... RA + 31]       = igb_mac_readreg,
3615     [RA2 ... RA2 + 31]     = igb_mac_readreg,
3616     [WUPM ... WUPM + 31]   = igb_mac_readreg,
3617     [MTA ... MTA + E1000_MC_TBL_SIZE - 1]    = igb_mac_readreg,
3618     [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1]  = igb_mac_readreg,
3619     [FFMT ... FFMT + 254]  = igb_mac_readreg,
3620     [MDEF ... MDEF + 7]    = igb_mac_readreg,
3621     [FTFT ... FTFT + 254]  = igb_mac_readreg,
3622     [RETA ... RETA + 31]   = igb_mac_readreg,
3623     [RSSRK ... RSSRK + 9]  = igb_mac_readreg,
3624     [MAVTV0 ... MAVTV3]    = igb_mac_readreg,
3625     [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
3626     [PVTEICR0] = igb_mac_read_clr4,
3627     [PVTEICR1] = igb_mac_read_clr4,
3628     [PVTEICR2] = igb_mac_read_clr4,
3629     [PVTEICR3] = igb_mac_read_clr4,
3630     [PVTEICR4] = igb_mac_read_clr4,
3631     [PVTEICR5] = igb_mac_read_clr4,
3632     [PVTEICR6] = igb_mac_read_clr4,
3633     [PVTEICR7] = igb_mac_read_clr4,
3634 
3635     /* IGB specific: */
3636     [FWSM]       = igb_mac_readreg,
3637     [SW_FW_SYNC] = igb_mac_readreg,
3638     [HTCBDPC]    = igb_mac_read_clr4,
3639     [EICR]       = igb_mac_read_clr4,
3640     [EIMS]       = igb_mac_readreg,
3641     [EIAM]       = igb_mac_readreg,
3642     [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
3643     igb_getreg(IVAR_MISC),
3644     igb_getreg(TSYNCRXCFG),
3645     [ETQF0 ... ETQF0 + 7] = igb_mac_readreg,
3646     igb_getreg(VT_CTL),
3647     [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
3648     [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
3649     igb_getreg(MBVFICR),
3650     [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
3651     igb_getreg(MBVFIMR),
3652     igb_getreg(VFLRE),
3653     igb_getreg(VFRE),
3654     igb_getreg(VFTE),
3655     igb_getreg(QDE),
3656     igb_getreg(DTXSWC),
3657     igb_getreg(RPLOLR),
3658     [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
3659     [VMVIR0 ... VMVIR7] = igb_mac_readreg,
3660     [VMOLR0 ... VMOLR7] = igb_mac_readreg,
3661     [WVBR] = igb_mac_read_clr4,
3662     [RQDPC0] = igb_mac_read_clr4,
3663     [RQDPC1] = igb_mac_read_clr4,
3664     [RQDPC2] = igb_mac_read_clr4,
3665     [RQDPC3] = igb_mac_read_clr4,
3666     [RQDPC4] = igb_mac_read_clr4,
3667     [RQDPC5] = igb_mac_read_clr4,
3668     [RQDPC6] = igb_mac_read_clr4,
3669     [RQDPC7] = igb_mac_read_clr4,
3670     [RQDPC8] = igb_mac_read_clr4,
3671     [RQDPC9] = igb_mac_read_clr4,
3672     [RQDPC10] = igb_mac_read_clr4,
3673     [RQDPC11] = igb_mac_read_clr4,
3674     [RQDPC12] = igb_mac_read_clr4,
3675     [RQDPC13] = igb_mac_read_clr4,
3676     [RQDPC14] = igb_mac_read_clr4,
3677     [RQDPC15] = igb_mac_read_clr4,
3678     [VTIVAR ... VTIVAR + 7] = igb_mac_readreg,
3679     [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg,
3680 };
/* Number of entries in the MAC register read dispatch table. */
enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };
3682 
#define igb_putreg(x)    [x] = igb_mac_writereg
/* Handler type for a MAC register write: (core, register index, value). */
typedef void (*writeops)(IGBCore *, int, uint32_t);
/*
 * Dispatch table for MAC register writes, indexed by register index
 * (byte offset / 4).  Entries declared with igb_putreg() just latch the
 * value into core->mac[]; the remaining entries point at handlers with
 * register-specific side effects (masking, interrupts, queue kicks, ...).
 */
static const writeops igb_macreg_writeops[] = {
    igb_putreg(SWSM),
    igb_putreg(WUFC),
    igb_putreg(RDBAH0),
    igb_putreg(RDBAH1),
    igb_putreg(RDBAH2),
    igb_putreg(RDBAH3),
    igb_putreg(RDBAH4),
    igb_putreg(RDBAH5),
    igb_putreg(RDBAH6),
    igb_putreg(RDBAH7),
    igb_putreg(RDBAH8),
    igb_putreg(RDBAH9),
    igb_putreg(RDBAH10),
    igb_putreg(RDBAH11),
    igb_putreg(RDBAH12),
    igb_putreg(RDBAH13),
    igb_putreg(RDBAH14),
    igb_putreg(RDBAH15),
    igb_putreg(SRRCTL0),
    igb_putreg(SRRCTL1),
    igb_putreg(SRRCTL2),
    igb_putreg(SRRCTL3),
    igb_putreg(SRRCTL4),
    igb_putreg(SRRCTL5),
    igb_putreg(SRRCTL6),
    igb_putreg(SRRCTL7),
    igb_putreg(SRRCTL8),
    igb_putreg(SRRCTL9),
    igb_putreg(SRRCTL10),
    igb_putreg(SRRCTL11),
    igb_putreg(SRRCTL12),
    igb_putreg(SRRCTL13),
    igb_putreg(SRRCTL14),
    igb_putreg(SRRCTL15),
    igb_putreg(RXDCTL0),
    igb_putreg(RXDCTL1),
    igb_putreg(RXDCTL2),
    igb_putreg(RXDCTL3),
    igb_putreg(RXDCTL4),
    igb_putreg(RXDCTL5),
    igb_putreg(RXDCTL6),
    igb_putreg(RXDCTL7),
    igb_putreg(RXDCTL8),
    igb_putreg(RXDCTL9),
    igb_putreg(RXDCTL10),
    igb_putreg(RXDCTL11),
    igb_putreg(RXDCTL12),
    igb_putreg(RXDCTL13),
    igb_putreg(RXDCTL14),
    igb_putreg(RXDCTL15),
    igb_putreg(LEDCTL),
    igb_putreg(TCTL),
    igb_putreg(TCTL_EXT),
    igb_putreg(DTXCTL),
    igb_putreg(RXPBS),
    igb_putreg(RQDPC0),
    igb_putreg(FCAL),
    igb_putreg(FCRUC),
    igb_putreg(WUC),
    igb_putreg(WUS),
    igb_putreg(IPAV),
    igb_putreg(TDBAH0),
    igb_putreg(TDBAH1),
    igb_putreg(TDBAH2),
    igb_putreg(TDBAH3),
    igb_putreg(TDBAH4),
    igb_putreg(TDBAH5),
    igb_putreg(TDBAH6),
    igb_putreg(TDBAH7),
    igb_putreg(TDBAH8),
    igb_putreg(TDBAH9),
    igb_putreg(TDBAH10),
    igb_putreg(TDBAH11),
    igb_putreg(TDBAH12),
    igb_putreg(TDBAH13),
    igb_putreg(TDBAH14),
    igb_putreg(TDBAH15),
    igb_putreg(IAM),
    igb_putreg(MANC),
    igb_putreg(MANC2H),
    igb_putreg(MFVAL),
    igb_putreg(FACTPS),
    igb_putreg(FUNCTAG),
    igb_putreg(GSCL_1),
    igb_putreg(GSCL_2),
    igb_putreg(GSCL_3),
    igb_putreg(GSCL_4),
    igb_putreg(GSCN_0),
    igb_putreg(GSCN_1),
    igb_putreg(GSCN_2),
    igb_putreg(GSCN_3),
    igb_putreg(MRQC),
    igb_putreg(FLOP),
    igb_putreg(FLA),
    igb_putreg(TXDCTL0),
    igb_putreg(TXDCTL1),
    igb_putreg(TXDCTL2),
    igb_putreg(TXDCTL3),
    igb_putreg(TXDCTL4),
    igb_putreg(TXDCTL5),
    igb_putreg(TXDCTL6),
    igb_putreg(TXDCTL7),
    igb_putreg(TXDCTL8),
    igb_putreg(TXDCTL9),
    igb_putreg(TXDCTL10),
    igb_putreg(TXDCTL11),
    igb_putreg(TXDCTL12),
    igb_putreg(TXDCTL13),
    igb_putreg(TXDCTL14),
    igb_putreg(TXDCTL15),
    igb_putreg(TXCTL0),
    igb_putreg(TXCTL1),
    igb_putreg(TXCTL2),
    igb_putreg(TXCTL3),
    igb_putreg(TXCTL4),
    igb_putreg(TXCTL5),
    igb_putreg(TXCTL6),
    igb_putreg(TXCTL7),
    igb_putreg(TXCTL8),
    igb_putreg(TXCTL9),
    igb_putreg(TXCTL10),
    igb_putreg(TXCTL11),
    igb_putreg(TXCTL12),
    igb_putreg(TXCTL13),
    igb_putreg(TXCTL14),
    igb_putreg(TXCTL15),
    igb_putreg(TDWBAL0),
    igb_putreg(TDWBAL1),
    igb_putreg(TDWBAL2),
    igb_putreg(TDWBAL3),
    igb_putreg(TDWBAL4),
    igb_putreg(TDWBAL5),
    igb_putreg(TDWBAL6),
    igb_putreg(TDWBAL7),
    igb_putreg(TDWBAL8),
    igb_putreg(TDWBAL9),
    igb_putreg(TDWBAL10),
    igb_putreg(TDWBAL11),
    igb_putreg(TDWBAL12),
    igb_putreg(TDWBAL13),
    igb_putreg(TDWBAL14),
    igb_putreg(TDWBAL15),
    igb_putreg(TDWBAH0),
    igb_putreg(TDWBAH1),
    igb_putreg(TDWBAH2),
    igb_putreg(TDWBAH3),
    igb_putreg(TDWBAH4),
    igb_putreg(TDWBAH5),
    igb_putreg(TDWBAH6),
    igb_putreg(TDWBAH7),
    igb_putreg(TDWBAH8),
    igb_putreg(TDWBAH9),
    igb_putreg(TDWBAH10),
    igb_putreg(TDWBAH11),
    igb_putreg(TDWBAH12),
    igb_putreg(TDWBAH13),
    igb_putreg(TDWBAH14),
    igb_putreg(TDWBAH15),
    igb_putreg(TIPG),
    igb_putreg(RXSTMPH),
    igb_putreg(RXSTMPL),
    igb_putreg(RXSATRL),
    igb_putreg(RXSATRH),
    igb_putreg(TXSTMPL),
    igb_putreg(TXSTMPH),
    igb_putreg(SYSTIML),
    igb_putreg(SYSTIMH),
    igb_putreg(TIMADJL),
    igb_putreg(TSYNCRXCTL),
    igb_putreg(TSYNCTXCTL),
    igb_putreg(EEMNGCTL),
    igb_putreg(GPIE),
    igb_putreg(TXPBS),
    igb_putreg(RLPML),
    igb_putreg(VET),

    /* Registers with side effects or restricted-width write handlers. */
    [TDH0]     = igb_set_16bit,
    [TDH1]     = igb_set_16bit,
    [TDH2]     = igb_set_16bit,
    [TDH3]     = igb_set_16bit,
    [TDH4]     = igb_set_16bit,
    [TDH5]     = igb_set_16bit,
    [TDH6]     = igb_set_16bit,
    [TDH7]     = igb_set_16bit,
    [TDH8]     = igb_set_16bit,
    [TDH9]     = igb_set_16bit,
    [TDH10]    = igb_set_16bit,
    [TDH11]    = igb_set_16bit,
    [TDH12]    = igb_set_16bit,
    [TDH13]    = igb_set_16bit,
    [TDH14]    = igb_set_16bit,
    [TDH15]    = igb_set_16bit,
    /* Writing a TX tail pointer may kick off transmission. */
    [TDT0]     = igb_set_tdt,
    [TDT1]     = igb_set_tdt,
    [TDT2]     = igb_set_tdt,
    [TDT3]     = igb_set_tdt,
    [TDT4]     = igb_set_tdt,
    [TDT5]     = igb_set_tdt,
    [TDT6]     = igb_set_tdt,
    [TDT7]     = igb_set_tdt,
    [TDT8]     = igb_set_tdt,
    [TDT9]     = igb_set_tdt,
    [TDT10]    = igb_set_tdt,
    [TDT11]    = igb_set_tdt,
    [TDT12]    = igb_set_tdt,
    [TDT13]    = igb_set_tdt,
    [TDT14]    = igb_set_tdt,
    [TDT15]    = igb_set_tdt,
    [MDIC]     = igb_set_mdic,
    [ICS]      = igb_set_ics,
    [RDH0]     = igb_set_16bit,
    [RDH1]     = igb_set_16bit,
    [RDH2]     = igb_set_16bit,
    [RDH3]     = igb_set_16bit,
    [RDH4]     = igb_set_16bit,
    [RDH5]     = igb_set_16bit,
    [RDH6]     = igb_set_16bit,
    [RDH7]     = igb_set_16bit,
    [RDH8]     = igb_set_16bit,
    [RDH9]     = igb_set_16bit,
    [RDH10]    = igb_set_16bit,
    [RDH11]    = igb_set_16bit,
    [RDH12]    = igb_set_16bit,
    [RDH13]    = igb_set_16bit,
    [RDH14]    = igb_set_16bit,
    [RDH15]    = igb_set_16bit,
    /* Writing an RX tail pointer makes new buffers available. */
    [RDT0]     = igb_set_rdt,
    [RDT1]     = igb_set_rdt,
    [RDT2]     = igb_set_rdt,
    [RDT3]     = igb_set_rdt,
    [RDT4]     = igb_set_rdt,
    [RDT5]     = igb_set_rdt,
    [RDT6]     = igb_set_rdt,
    [RDT7]     = igb_set_rdt,
    [RDT8]     = igb_set_rdt,
    [RDT9]     = igb_set_rdt,
    [RDT10]    = igb_set_rdt,
    [RDT11]    = igb_set_rdt,
    [RDT12]    = igb_set_rdt,
    [RDT13]    = igb_set_rdt,
    [RDT14]    = igb_set_rdt,
    [RDT15]    = igb_set_rdt,
    [IMC]      = igb_set_imc,
    [IMS]      = igb_set_ims,
    [ICR]      = igb_set_icr,
    [EECD]     = igb_set_eecd,
    [RCTL]     = igb_set_rx_control,
    [CTRL]     = igb_set_ctrl,
    [EERD]     = igb_set_eerd,
    [TDFH]     = igb_set_13bit,
    [TDFT]     = igb_set_13bit,
    [TDFHS]    = igb_set_13bit,
    [TDFTS]    = igb_set_13bit,
    [TDFPC]    = igb_set_13bit,
    [RDFH]     = igb_set_13bit,
    [RDFT]     = igb_set_13bit,
    [RDFHS]    = igb_set_13bit,
    [RDFTS]    = igb_set_13bit,
    [RDFPC]    = igb_set_13bit,
    [GCR]      = igb_set_gcr,
    [RXCSUM]   = igb_set_rxcsum,
    [TDLEN0]   = igb_set_dlen,
    [TDLEN1]   = igb_set_dlen,
    [TDLEN2]   = igb_set_dlen,
    [TDLEN3]   = igb_set_dlen,
    [TDLEN4]   = igb_set_dlen,
    [TDLEN5]   = igb_set_dlen,
    [TDLEN6]   = igb_set_dlen,
    [TDLEN7]   = igb_set_dlen,
    [TDLEN8]   = igb_set_dlen,
    [TDLEN9]   = igb_set_dlen,
    [TDLEN10]  = igb_set_dlen,
    [TDLEN11]  = igb_set_dlen,
    [TDLEN12]  = igb_set_dlen,
    [TDLEN13]  = igb_set_dlen,
    [TDLEN14]  = igb_set_dlen,
    [TDLEN15]  = igb_set_dlen,
    [RDLEN0]   = igb_set_dlen,
    [RDLEN1]   = igb_set_dlen,
    [RDLEN2]   = igb_set_dlen,
    [RDLEN3]   = igb_set_dlen,
    [RDLEN4]   = igb_set_dlen,
    [RDLEN5]   = igb_set_dlen,
    [RDLEN6]   = igb_set_dlen,
    [RDLEN7]   = igb_set_dlen,
    [RDLEN8]   = igb_set_dlen,
    [RDLEN9]   = igb_set_dlen,
    [RDLEN10]  = igb_set_dlen,
    [RDLEN11]  = igb_set_dlen,
    [RDLEN12]  = igb_set_dlen,
    [RDLEN13]  = igb_set_dlen,
    [RDLEN14]  = igb_set_dlen,
    [RDLEN15]  = igb_set_dlen,
    [TDBAL0]   = igb_set_dbal,
    [TDBAL1]   = igb_set_dbal,
    [TDBAL2]   = igb_set_dbal,
    [TDBAL3]   = igb_set_dbal,
    [TDBAL4]   = igb_set_dbal,
    [TDBAL5]   = igb_set_dbal,
    [TDBAL6]   = igb_set_dbal,
    [TDBAL7]   = igb_set_dbal,
    [TDBAL8]   = igb_set_dbal,
    [TDBAL9]   = igb_set_dbal,
    [TDBAL10]  = igb_set_dbal,
    [TDBAL11]  = igb_set_dbal,
    [TDBAL12]  = igb_set_dbal,
    [TDBAL13]  = igb_set_dbal,
    [TDBAL14]  = igb_set_dbal,
    [TDBAL15]  = igb_set_dbal,
    [RDBAL0]   = igb_set_dbal,
    [RDBAL1]   = igb_set_dbal,
    [RDBAL2]   = igb_set_dbal,
    [RDBAL3]   = igb_set_dbal,
    [RDBAL4]   = igb_set_dbal,
    [RDBAL5]   = igb_set_dbal,
    [RDBAL6]   = igb_set_dbal,
    [RDBAL7]   = igb_set_dbal,
    [RDBAL8]   = igb_set_dbal,
    [RDBAL9]   = igb_set_dbal,
    [RDBAL10]  = igb_set_dbal,
    [RDBAL11]  = igb_set_dbal,
    [RDBAL12]  = igb_set_dbal,
    [RDBAL13]  = igb_set_dbal,
    [RDBAL14]  = igb_set_dbal,
    [RDBAL15]  = igb_set_dbal,
    [STATUS]   = igb_set_status,
    [PBACLR]   = igb_set_pbaclr,
    [CTRL_EXT] = igb_set_ctrlext,
    [FCAH]     = igb_set_16bit,
    [FCT]      = igb_set_16bit,
    [FCTTV]    = igb_set_16bit,
    [FCRTV]    = igb_set_16bit,
    [FCRTH]    = igb_set_fcrth,
    [FCRTL]    = igb_set_fcrtl,
    [CTRL_DUP] = igb_set_ctrl,
    [RFCTL]    = igb_set_rfctl,
    [TIMINCA]  = igb_set_timinca,
    [TIMADJH]  = igb_set_timadjh,

    /* Multi-register tables (addresses, filters, RSS, interrupt throttle). */
    [IP6AT ... IP6AT + 3]    = igb_mac_writereg,
    [IP4AT ... IP4AT + 6]    = igb_mac_writereg,
    [RA]                     = igb_mac_writereg,
    [RA + 1]                 = igb_mac_setmacaddr,
    [RA + 2 ... RA + 31]     = igb_mac_writereg,
    [RA2 ... RA2 + 31]       = igb_mac_writereg,
    [WUPM ... WUPM + 31]     = igb_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
    [FFMT ... FFMT + 254]    = igb_set_4bit,
    [MDEF ... MDEF + 7]      = igb_mac_writereg,
    [FTFT ... FTFT + 254]    = igb_mac_writereg,
    [RETA ... RETA + 31]     = igb_mac_writereg,
    [RSSRK ... RSSRK + 9]    = igb_mac_writereg,
    [MAVTV0 ... MAVTV3]      = igb_mac_writereg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,

    /* IGB specific: */
    [FWSM]     = igb_mac_writereg,
    [SW_FW_SYNC] = igb_mac_writereg,
    [EICR] = igb_set_eicr,
    [EICS] = igb_set_eics,
    [EIAC] = igb_set_eiac,
    [EIAM] = igb_set_eiam,
    [EIMC] = igb_set_eimc,
    [EIMS] = igb_set_eims,
    [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
    igb_putreg(IVAR_MISC),
    igb_putreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_writereg,
    igb_putreg(VT_CTL),
    /* SR-IOV mailbox registers (PF<->VF communication). */
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
    [MBVFICR] = igb_w1c,
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
    igb_putreg(MBVFIMR),
    [VFLRE] = igb_w1c,
    igb_putreg(VFRE),
    igb_putreg(VFTE),
    igb_putreg(QDE),
    igb_putreg(DTXSWC),
    igb_putreg(RPLOLR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
    [VMVIR0 ... VMVIR7] = igb_mac_writereg,
    [VMOLR0 ... VMOLR7] = igb_mac_writereg,
    [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    /* Per-VF views of the control and interrupt registers. */
    [PVTCTRL0] = igb_set_vtctrl,
    [PVTCTRL1] = igb_set_vtctrl,
    [PVTCTRL2] = igb_set_vtctrl,
    [PVTCTRL3] = igb_set_vtctrl,
    [PVTCTRL4] = igb_set_vtctrl,
    [PVTCTRL5] = igb_set_vtctrl,
    [PVTCTRL6] = igb_set_vtctrl,
    [PVTCTRL7] = igb_set_vtctrl,
    [PVTEICS0] = igb_set_vteics,
    [PVTEICS1] = igb_set_vteics,
    [PVTEICS2] = igb_set_vteics,
    [PVTEICS3] = igb_set_vteics,
    [PVTEICS4] = igb_set_vteics,
    [PVTEICS5] = igb_set_vteics,
    [PVTEICS6] = igb_set_vteics,
    [PVTEICS7] = igb_set_vteics,
    [PVTEIMS0] = igb_set_vteims,
    [PVTEIMS1] = igb_set_vteims,
    [PVTEIMS2] = igb_set_vteims,
    [PVTEIMS3] = igb_set_vteims,
    [PVTEIMS4] = igb_set_vteims,
    [PVTEIMS5] = igb_set_vteims,
    [PVTEIMS6] = igb_set_vteims,
    [PVTEIMS7] = igb_set_vteims,
    [PVTEIMC0] = igb_set_vteimc,
    [PVTEIMC1] = igb_set_vteimc,
    [PVTEIMC2] = igb_set_vteimc,
    [PVTEIMC3] = igb_set_vteimc,
    [PVTEIMC4] = igb_set_vteimc,
    [PVTEIMC5] = igb_set_vteimc,
    [PVTEIMC6] = igb_set_vteimc,
    [PVTEIMC7] = igb_set_vteimc,
    [PVTEIAC0] = igb_set_vteiac,
    [PVTEIAC1] = igb_set_vteiac,
    [PVTEIAC2] = igb_set_vteiac,
    [PVTEIAC3] = igb_set_vteiac,
    [PVTEIAC4] = igb_set_vteiac,
    [PVTEIAC5] = igb_set_vteiac,
    [PVTEIAC6] = igb_set_vteiac,
    [PVTEIAC7] = igb_set_vteiac,
    [PVTEIAM0] = igb_set_vteiam,
    [PVTEIAM1] = igb_set_vteiam,
    [PVTEIAM2] = igb_set_vteiam,
    [PVTEIAM3] = igb_set_vteiam,
    [PVTEIAM4] = igb_set_vteiam,
    [PVTEIAM5] = igb_set_vteiam,
    [PVTEIAM6] = igb_set_vteiam,
    [PVTEIAM7] = igb_set_vteiam,
    [PVTEICR0] = igb_set_vteicr,
    [PVTEICR1] = igb_set_vteicr,
    [PVTEICR2] = igb_set_vteicr,
    [PVTEICR3] = igb_set_vteicr,
    [PVTEICR4] = igb_set_vteicr,
    [PVTEICR5] = igb_set_vteicr,
    [PVTEICR6] = igb_set_vteicr,
    [PVTEICR7] = igb_set_vteicr,
    [VTIVAR ... VTIVAR + 7] = igb_set_vtivar,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
};
/* Number of entries in the MAC register write dispatch table. */
enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };
4131 
/* Lowest bit of mac_reg_access[]: register is only trivially implemented. */
enum { MAC_ACCESS_PARTIAL = 1 };
4133 
4134 /*
4135  * The array below combines alias offsets of the index values for the
4136  * MAC registers that have aliases, with the indication of not fully
4137  * implemented registers (lowest bit). This combination is possible
4138  * because all of the offsets are even.
4139  */
4140 static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
4141     /* Alias index offsets */
4142     [FCRTL_A] = 0x07fe,
4143     [RDFH_A]  = 0xe904, [RDFT_A]  = 0xe904,
4144     [TDFH_A]  = 0xed00, [TDFT_A]  = 0xed00,
4145     [RA_A ... RA_A + 31]      = 0x14f0,
4146     [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
4147 
4148     [RDBAL0_A] = 0x2600,
4149     [RDBAH0_A] = 0x2600,
4150     [RDLEN0_A] = 0x2600,
4151     [SRRCTL0_A] = 0x2600,
4152     [RDH0_A] = 0x2600,
4153     [RDT0_A] = 0x2600,
4154     [RXDCTL0_A] = 0x2600,
4155     [RXCTL0_A] = 0x2600,
4156     [RQDPC0_A] = 0x2600,
4157     [RDBAL1_A] = 0x25D0,
4158     [RDBAL2_A] = 0x25A0,
4159     [RDBAL3_A] = 0x2570,
4160     [RDBAH1_A] = 0x25D0,
4161     [RDBAH2_A] = 0x25A0,
4162     [RDBAH3_A] = 0x2570,
4163     [RDLEN1_A] = 0x25D0,
4164     [RDLEN2_A] = 0x25A0,
4165     [RDLEN3_A] = 0x2570,
4166     [SRRCTL1_A] = 0x25D0,
4167     [SRRCTL2_A] = 0x25A0,
4168     [SRRCTL3_A] = 0x2570,
4169     [RDH1_A] = 0x25D0,
4170     [RDH2_A] = 0x25A0,
4171     [RDH3_A] = 0x2570,
4172     [RDT1_A] = 0x25D0,
4173     [RDT2_A] = 0x25A0,
4174     [RDT3_A] = 0x2570,
4175     [RXDCTL1_A] = 0x25D0,
4176     [RXDCTL2_A] = 0x25A0,
4177     [RXDCTL3_A] = 0x2570,
4178     [RXCTL1_A] = 0x25D0,
4179     [RXCTL2_A] = 0x25A0,
4180     [RXCTL3_A] = 0x2570,
4181     [RQDPC1_A] = 0x25D0,
4182     [RQDPC2_A] = 0x25A0,
4183     [RQDPC3_A] = 0x2570,
4184     [TDBAL0_A] = 0x2A00,
4185     [TDBAH0_A] = 0x2A00,
4186     [TDLEN0_A] = 0x2A00,
4187     [TDH0_A] = 0x2A00,
4188     [TDT0_A] = 0x2A00,
4189     [TXCTL0_A] = 0x2A00,
4190     [TDWBAL0_A] = 0x2A00,
4191     [TDWBAH0_A] = 0x2A00,
4192     [TDBAL1_A] = 0x29D0,
4193     [TDBAL2_A] = 0x29A0,
4194     [TDBAL3_A] = 0x2970,
4195     [TDBAH1_A] = 0x29D0,
4196     [TDBAH2_A] = 0x29A0,
4197     [TDBAH3_A] = 0x2970,
4198     [TDLEN1_A] = 0x29D0,
4199     [TDLEN2_A] = 0x29A0,
4200     [TDLEN3_A] = 0x2970,
4201     [TDH1_A] = 0x29D0,
4202     [TDH2_A] = 0x29A0,
4203     [TDH3_A] = 0x2970,
4204     [TDT1_A] = 0x29D0,
4205     [TDT2_A] = 0x29A0,
4206     [TDT3_A] = 0x2970,
4207     [TXDCTL0_A] = 0x2A00,
4208     [TXDCTL1_A] = 0x29D0,
4209     [TXDCTL2_A] = 0x29A0,
4210     [TXDCTL3_A] = 0x2970,
4211     [TXCTL1_A] = 0x29D0,
4212     [TXCTL2_A] = 0x29A0,
4213     [TXCTL3_A] = 0x29D0,
4214     [TDWBAL1_A] = 0x29D0,
4215     [TDWBAL2_A] = 0x29A0,
4216     [TDWBAL3_A] = 0x2970,
4217     [TDWBAH1_A] = 0x29D0,
4218     [TDWBAH2_A] = 0x29A0,
4219     [TDWBAH3_A] = 0x2970,
4220 
4221     /* Access options */
4222     [RDFH]  = MAC_ACCESS_PARTIAL,    [RDFT]  = MAC_ACCESS_PARTIAL,
4223     [RDFHS] = MAC_ACCESS_PARTIAL,    [RDFTS] = MAC_ACCESS_PARTIAL,
4224     [RDFPC] = MAC_ACCESS_PARTIAL,
4225     [TDFH]  = MAC_ACCESS_PARTIAL,    [TDFT]  = MAC_ACCESS_PARTIAL,
4226     [TDFHS] = MAC_ACCESS_PARTIAL,    [TDFTS] = MAC_ACCESS_PARTIAL,
4227     [TDFPC] = MAC_ACCESS_PARTIAL,    [EECD]  = MAC_ACCESS_PARTIAL,
4228     [FLA]   = MAC_ACCESS_PARTIAL,
4229     [FCAL]  = MAC_ACCESS_PARTIAL,    [FCAH]  = MAC_ACCESS_PARTIAL,
4230     [FCT]   = MAC_ACCESS_PARTIAL,    [FCTTV] = MAC_ACCESS_PARTIAL,
4231     [FCRTV] = MAC_ACCESS_PARTIAL,    [FCRTL] = MAC_ACCESS_PARTIAL,
4232     [FCRTH] = MAC_ACCESS_PARTIAL,
4233     [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
4234 };
4235 
4236 void
igb_core_write(IGBCore * core,hwaddr addr,uint64_t val,unsigned size)4237 igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
4238 {
4239     uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
4240 
4241     if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
4242         if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
4243             trace_e1000e_wrn_regs_write_trivial(index << 2);
4244         }
4245         trace_e1000e_core_write(index << 2, size, val);
4246         igb_macreg_writeops[index](core, index, val);
4247     } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
4248         trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
4249     } else {
4250         trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
4251     }
4252 }
4253 
4254 uint64_t
igb_core_read(IGBCore * core,hwaddr addr,unsigned size)4255 igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
4256 {
4257     uint64_t val;
4258     uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
4259 
4260     if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
4261         if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
4262             trace_e1000e_wrn_regs_read_trivial(index << 2);
4263         }
4264         val = igb_macreg_readops[index](core, index);
4265         trace_e1000e_core_read(index << 2, size, val);
4266         return val;
4267     } else {
4268         trace_e1000e_wrn_regs_read_unknown(index << 2, size);
4269     }
4270     return 0;
4271 }
4272 
4273 static void
igb_autoneg_resume(IGBCore * core)4274 igb_autoneg_resume(IGBCore *core)
4275 {
4276     if (igb_have_autoneg(core) &&
4277         !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
4278         qemu_get_queue(core->owner_nic)->link_down = false;
4279         timer_mod(core->autoneg_timer,
4280                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
4281     }
4282 }
4283 
4284 void
igb_core_pci_realize(IGBCore * core,const uint16_t * eeprom_templ,uint32_t eeprom_size,const uint8_t * macaddr)4285 igb_core_pci_realize(IGBCore        *core,
4286                      const uint16_t *eeprom_templ,
4287                      uint32_t        eeprom_size,
4288                      const uint8_t  *macaddr)
4289 {
4290     int i;
4291 
4292     core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
4293                                        igb_autoneg_timer, core);
4294     igb_intrmgr_pci_realize(core);
4295 
4296     for (i = 0; i < IGB_NUM_QUEUES; i++) {
4297         net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
4298     }
4299 
4300     net_rx_pkt_init(&core->rx_pkt);
4301 
4302     e1000x_core_prepare_eeprom(core->eeprom,
4303                                eeprom_templ,
4304                                eeprom_size,
4305                                PCI_DEVICE_GET_CLASS(core->owner)->device_id,
4306                                macaddr);
4307     igb_update_rx_offloads(core);
4308 }
4309 
4310 void
igb_core_pci_uninit(IGBCore * core)4311 igb_core_pci_uninit(IGBCore *core)
4312 {
4313     int i;
4314 
4315     timer_free(core->autoneg_timer);
4316 
4317     igb_intrmgr_pci_unint(core);
4318 
4319     for (i = 0; i < IGB_NUM_QUEUES; i++) {
4320         net_tx_pkt_uninit(core->tx[i].tx_pkt);
4321     }
4322 
4323     net_rx_pkt_uninit(core->rx_pkt);
4324 }
4325 
/*
 * PHY register reset values, indexed by MII register number.  Applied
 * on PHY reset; models a gigabit copper PHY (IGP03E1000) with
 * auto-negotiation enabled and all 10/100/1000 abilities advertised.
 */
static const uint16_t
igb_phy_reg_init[] = {
    /* Basic control: default to 1000 Mb/s full duplex with autoneg on. */
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD        |
                 MII_BMCR_AUTOEN,

    /* Basic status: link up, autoneg capable, 10/100 abilities. */
    [MII_BMSR] = MII_BMSR_EXTCAP    |
                 MII_BMSR_LINK_ST   |
                 MII_BMSR_AUTONEG   |
                 MII_BMSR_MFPS      |
                 MII_BMSR_EXTSTAT   |
                 MII_BMSR_10T_HD    |
                 MII_BMSR_10T_FD    |
                 MII_BMSR_100TX_HD  |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1]            = IGP03E1000_E_PHY_ID >> 16,
    [MII_PHYID2]            = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
    /* Autoneg advertisement: all 10/100 modes plus pause frames. */
    [MII_ANAR]              = MII_ANAR_CSMACD | MII_ANAR_10 |
                              MII_ANAR_10FD | MII_ANAR_TX |
                              MII_ANAR_TXFD | MII_ANAR_PAUSE |
                              MII_ANAR_PAUSE_ASYM,
    /* Simulated link partner ability: everything we advertise. */
    [MII_ANLPAR]            = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                              MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                              MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
    [MII_ANER]              = MII_ANER_NP | MII_ANER_NWAY,
    [MII_ANNP]              = 0x1 | MII_ANNP_MP,
    /* 1000BASE-T control/status: both duplexes, local/remote rx OK. */
    [MII_CTRL1000]          = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                              MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
    [MII_STAT1000]          = MII_STAT1000_HALF | MII_STAT1000_FULL |
                              MII_STAT1000_ROK | MII_STAT1000_LOK,
    [MII_EXTSTAT]           = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

    /* Vendor-specific (IGP) registers: report a 1000 Mb/s link. */
    [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
    [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
    [IGP02E1000_PHY_POWER_MGMT]  = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
                                   IGP01E1000_PSCFR_SMART_SPEED
};
4364 
/*
 * Power-on default values for the MAC register file.  Registers not
 * listed here are reset to zero (see igb_reset(), which zero-fills the
 * remainder of core->mac[]).  Bit-field meanings below follow the macro
 * names; consult the 82576 datasheet to confirm exact field layouts.
 */
static const uint32_t igb_mac_reg_init[] = {
    [LEDCTL]        = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
    [EEMNGCTL]      = BIT(31),
    /* TX queue 0 is enabled out of reset; the others stay disabled. */
    [TXDCTL0]       = E1000_TXDCTL_QUEUE_ENABLE,
    /*
     * Per-queue RX descriptor control.  Only queue 0 is enabled by
     * default; all 16 queues share the same 1 << 16 default
     * (NOTE(review): presumably a descriptor write-back threshold
     * field — confirm against the datasheet).
     */
    [RXDCTL0]       = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
    [RXDCTL1]       = 1 << 16,
    [RXDCTL2]       = 1 << 16,
    [RXDCTL3]       = 1 << 16,
    [RXDCTL4]       = 1 << 16,
    [RXDCTL5]       = 1 << 16,
    [RXDCTL6]       = 1 << 16,
    [RXDCTL7]       = 1 << 16,
    [RXDCTL8]       = 1 << 16,
    [RXDCTL9]       = 1 << 16,
    [RXDCTL10]      = 1 << 16,
    [RXDCTL11]      = 1 << 16,
    [RXDCTL12]      = 1 << 16,
    [RXDCTL13]      = 1 << 16,
    [RXDCTL14]      = 1 << 16,
    [RXDCTL15]      = 1 << 16,
    [TIPG]          = 0x08 | (0x04 << 10) | (0x06 << 20),
    [CTRL]          = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
                      E1000_CTRL_ADVD3WUC,
    [STATUS]        = E1000_STATUS_PHYRA | BIT(31),
    [EECD]          = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
                      (2 << E1000_EECD_SIZE_EX_SHIFT),
    /* PCIe control/status: completion timeout and L0s/L1 latency bits. */
    [GCR]           = E1000_L0S_ADJUST |
                      E1000_GCR_CMPL_TMOUT_RESEND |
                      E1000_GCR_CAP_VER2 |
                      E1000_L1_ENTRY_LATENCY_MSB |
                      E1000_L1_ENTRY_LATENCY_LSB,
    /* RX checksum offload for IP and TCP/UDP enabled by default. */
    [RXCSUM]        = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [TXPBS]         = 0x28,
    [RXPBS]         = 0x40,
    [TCTL]          = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
                      (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
    [TCTL_EXT]      = 0x40 | (0x42 << 10),
    [DTXCTL]        = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
    /* VLAN ethertype (0x8100) in both the low and high 16 bits. */
    [VET]           = ETH_P_VLAN | (ETH_P_VLAN << 16),

    /*
     * SR-IOV defaults: every VF mailbox starts with RSTI set so a VF
     * can see that a PF reset happened; all VF RX/TX enables and the
     * mailbox interrupt mask default to all-ones.
     */
    [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI,
    [MBVFIMR]       = 0xFF,
    [VFRE]          = 0xFF,
    [VFTE]          = 0xFF,
    [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
    [RPLOLR]        = E1000_RPLOLR_STRCRC,
    [RLPML]         = 0x2600,
    /*
     * Per-queue TX DCA control: data, write-back and descriptor
     * relaxed-ordering enables are set for all 16 queues.
     */
    [TXCTL0]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL1]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL2]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL3]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL4]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL5]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL6]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL7]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL8]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL9]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL10]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL11]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL12]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL13]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL14]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL15]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
};
4461 
/*
 * Bring the device core back to its reset state.
 *
 * @core: device core state
 * @sw:   true for a software-initiated reset, which leaves the packet
 *        buffer size registers (RXPBS/TXPBS) and the EITR interrupt
 *        throttling registers untouched; false resets everything.
 */
static void igb_reset(IGBCore *core, bool sw)
{
    timer_del(core->autoneg_timer);

    igb_intrmgr_reset(core);

    /* Reload the PHY register file from its power-on defaults. */
    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);

    for (int i = 0; i < E1000E_MAC_SIZE; i++) {
        /* Registers a software reset must preserve. */
        bool preserved = sw &&
            (i == RXPBS || i == TXPBS ||
             (i >= EITR0 && i < EITR0 + IGB_INTR_NUM));

        if (!preserved) {
            core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
                           igb_mac_reg_init[i] : 0;
        }
    }

    if (qemu_get_queue(core->owner_nic)->link_down) {
        igb_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    /* Set RSTI in every mailbox so VFs can see a PF reset is in progress. */
    for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI;
    }

    /* Clear per-queue TX offload context and packet-assembly state. */
    for (size_t n = 0; n < ARRAY_SIZE(core->tx); n++) {
        struct igb_tx *txr = &core->tx[n];

        memset(txr->ctx, 0, sizeof(txr->ctx));
        txr->first = true;
        txr->skip_cp = false;
    }
}
4503 
4504 void
igb_core_reset(IGBCore * core)4505 igb_core_reset(IGBCore *core)
4506 {
4507     igb_reset(core, false);
4508 }
4509 
/* Prepare the core state for migration (called before vmstate save). */
void igb_core_pre_save(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * With the link down and auto-negotiation supported and ongoing,
     * finish auto-negotiation right away: MII_BMSR_AN_COMP is then
     * usable on the destination to infer the link status after load.
     */
    if (nc->link_down && igb_have_autoneg(core)) {
        core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
        igb_update_flowctl_status(core);
    }

    /*
     * Flag TX queues that still hold a partially assembled packet;
     * skip_cp is presumably consumed by the TX path after load to
     * drop the unfinished packet — confirm against igb_process_tx_desc.
     */
    for (size_t qi = 0; qi < ARRAY_SIZE(core->tx); qi++) {
        if (net_tx_pkt_has_fragments(core->tx[qi].tx_pkt)) {
            core->tx[qi].skip_cp = true;
        }
    }
}
4531 
4532 int
igb_core_post_load(IGBCore * core)4533 igb_core_post_load(IGBCore *core)
4534 {
4535     NetClientState *nc = qemu_get_queue(core->owner_nic);
4536 
4537     /*
4538      * nc.link_down can't be migrated, so infer link_down according
4539      * to link status bit in core.mac[STATUS].
4540      */
4541     nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
4542 
4543     /*
4544      * we need to restart intrmgr timers, as an older version of
4545      * QEMU can have stopped them before migration
4546      */
4547     igb_intrmgr_resume(core);
4548     igb_autoneg_resume(core);
4549 
4550     return 0;
4551 }
4552