/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "trace.h"

#include <zlib.h> /* for crc32 */

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    snprintf(tmp, sizeof(tmp), "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII PHY could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    imx_phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}

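/*
 * MDIO access helpers: the "reg" argument packs the PHY address in its
 * upper bits (reg / 32) and the register number in its lower five bits
 * (reg % 32), matching the PA/RA fields the caller extracts from
 * ENET_MMFR. Accesses to a PHY address other than ours are forwarded to
 * the optional "phy-consumer" device if it owns that address.
 */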
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_read_num(phy, s->phy_num);
            return 0xffff;
        }
    }

    reg %= 32;

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* ID1 */
        val = 0x0007;
        break;
    case 3:     /* ID2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}

static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_write_num(phy, s->phy_num);
            return;
        }
    }

    reg %= 32;

    trace_imx_phy_write(val, phy, reg);

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            imx_phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        imx_phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

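/*
 * Buffer descriptors live in guest memory and are accessed through DMA.
 * The legacy FEC uses the short IMXFECBufDesc layout (flags, length, data
 * pointer), while the ENET variant uses the enhanced IMXENETBufDesc layout
 * that carries additional option/status fields.
 */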
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of QEMU had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of QEMU happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In QEMU, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in QEMU.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, QEMU versions with the swapped interrupt assignment work,
     *   albeit accidentally, but QEMU versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * QEMU supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

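/*
 * Walk the legacy FEC transmit ring: gather buffers from guest memory into
 * s->frame until a descriptor with the "last in frame" bit is seen, hand
 * the assembled frame to the network backend, clear the READY bit in each
 * descriptor, and stop when a not-ready descriptor is reached or
 * IMX_MAX_DESC descriptors have been processed.
 */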
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;

            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

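/*
 * On the ENET variant with 1588 support enabled (ENET_ECR_EN1588), use the
 * enhanced descriptor format and the per-ring TDAR registers; otherwise
 * fall back to the legacy FEC transmit path.
 */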
static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

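/*
 * RDAR reflects whether the next receive descriptor is empty and thus
 * available to the device. When it is, optionally flush any packets the
 * network backend queued while reception was stalled.
 */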
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
}

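/*
 * Receive path: copy the incoming frame into the guest's receive buffers,
 * appending the Ethernet FCS (CRC32) that the backend does not provide,
 * spreading the data across as many ENET_MRBR-sized buffers as needed and
 * raising RXB/RXF interrupts as buffers and frames complete.
 */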
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};


static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    device_class_set_legacy_reset(dc, imx_eth_reset);
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)