1 /*
2 * Cisco router simulation platform.
3 * Copyright (C) 2005,2006 Christophe Fillot. All rights reserved.
4 *
5 * Serial Interfaces (Mueslix).
6 *
7 * Note: "debug serial mueslix" gives more technical info.
8 *
9 * Chip mode: Cisco models 36xx and 72xx don't seem to use the same microcode,
10 * so there are code variants to make things work properly.
11 *
12 * Chip mode 0 => 3600
13 * Chip mode 1 => 7200
14 *
15 * 2 points noticed until now:
16 * - RX/TX ring wrapping checks are done differently,
17 * - TX packet sizes are not specified in the same way.
18 *
19 * Test methodology:
20 * - Connect two virtual routers together ;
21 * - Do pings by sending 10 packets by 10 packets. If this stops working,
22 * count the number of transmitted packets and check with RX/TX rings
 * sizes. This is probably a ring wrapping problem.
24 * - Do multiple pings with various sizes (padding checks);
25 * - Check if CDP is working, with various hostname sizes. Since CDP
26 * contains a checksum, it is a good way to determine if packets are
27 * sent/received correctly.
28 * - Do a Telnet from both virtual router to the other one, and do a
29 * "sh run".
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <assert.h>
38
39 #include "cpu.h"
40 #include "vm.h"
41 #include "dynamips.h"
42 #include "memory.h"
43 #include "device.h"
44 #include "net.h"
45 #include "net_io.h"
46 #include "ptask.h"
47 #include "dev_mueslix.h"
48
49 /* Debugging flags */
50 #define DEBUG_ACCESS 0
51 #define DEBUG_UNKNOWN 0
52 #define DEBUG_PCI_REGS 0
53 #define DEBUG_TRANSMIT 0
54 #define DEBUG_RECEIVE 0
55
56 /* Mueslix PCI vendor/product codes */
57 #define MUESLIX_PCI_VENDOR_ID 0x1137
58 #define MUESLIX_PCI_PRODUCT_ID 0x0001
59
60 /* Number of channels (4 interfaces) */
61 #define MUESLIX_NR_CHANNELS 4
62 #define MUESLIX_CHANNEL_LEN 0x100
63
64 /* RX/TX status for a channel */
65 #define MUESLIX_CHANNEL_STATUS_RX 0x01
66 #define MUESLIX_CHANNEL_STATUS_TX 0x02
67
68 /* RX/TX enable masks (XXX check if bit position is correct) */
69 #define MUESLIX_TX_ENABLE 0x01
70 #define MUESLIX_RX_ENABLE 0x02
71
72 /* RX/TX IRQ masks */
73 #define MUESLIX_TX_IRQ 0x01
74 #define MUESLIX_RX_IRQ 0x10
75
76 /* Addresses of ports */
77 #define MUESLIX_CHANNEL0_OFFSET 0x100
78 #define MUESLIX_CHANNEL1_OFFSET 0x200
79 #define MUESLIX_CHANNEL2_OFFSET 0x300
80 #define MUESLIX_CHANNEL3_OFFSET 0x400
81
82 /* TPU Registers */
83 #define MUESLIX_TPU_CMD_OFFSET 0x2c24
84 #define MUESLIX_TPU_CMD_RSP_OFFSET 0x2c2c
85
86 /* General and channels registers */
87 #define MUESLIX_GEN_CHAN_LEN 0x500
88
89 /* TPU microcode */
90 #define MUESLIX_UCODE_OFFSET 0x2000
91 #define MUESLIX_UCODE_LEN 0x800
92
93 /* TPU Xmem and YMem */
94 #define MUESLIX_XMEM_OFFSET 0x2a00
95 #define MUESLIX_YMEM_OFFSET 0x2b00
96 #define MUESLIX_XYMEM_LEN 0x100
97
98 /* Maximum packet size */
99 #define MUESLIX_MAX_PKT_SIZE 18000
100
101 /* Send up to 16 packets in a TX ring scan pass */
102 #define MUESLIX_TXRING_PASS_COUNT 16
103
104 /* RX descriptors */
105 #define MUESLIX_RXDESC_OWN 0x80000000 /* Ownership */
106 #define MUESLIX_RXDESC_FS 0x40000000 /* First Segment */
107 #define MUESLIX_RXDESC_LS 0x20000000 /* Last Segment */
108 #define MUESLIX_RXDESC_OVERRUN 0x10000000 /* Overrun */
109 #define MUESLIX_RXDESC_IGNORED 0x08000000 /* Ignored */
110 #define MUESLIX_RXDESC_ABORT 0x04000000 /* Abort */
111 #define MUESLIX_RXDESC_CRC 0x02000000 /* CRC error */
112 #define MUESLIX_RXDESC_LEN_MASK 0xffff
113
114 /* TX descriptors */
115 #define MUESLIX_TXDESC_OWN 0x80000000 /* Ownership */
116 #define MUESLIX_TXDESC_FS 0x40000000 /* First Segment */
117 #define MUESLIX_TXDESC_LS 0x20000000 /* Last Segment */
#define MUESLIX_TXDESC_SUB       0x00100000  /* Length subtractor ? */
#define MUESLIX_TXDESC_SUB_LEN   0x03000000  /* Length subtractor ? */
120 #define MUESLIX_TXDESC_SUB_SHIFT 24
121 #define MUESLIX_TXDESC_PAD 0x00c00000 /* Sort of padding info ? */
122 #define MUESLIX_TXDESC_PAD_SHIFT 22
123
124 #define MUESLIX_TXDESC_LEN_MASK 0xffff
125
/* RX Descriptor: 2 words in VM memory.
 * rdes[0] = status/flags (MUESLIX_RXDESC_*) + buffer length in low 16 bits,
 * rdes[1] = physical address of the data buffer. */
struct rx_desc {
   m_uint32_t rdes[2];
};
130
/* TX Descriptor: 2 words in VM memory.
 * tdes[0] = status/flags (MUESLIX_TXDESC_*) + length info,
 * tdes[1] = physical address of the data buffer. */
struct tx_desc {
   m_uint32_t tdes[2];
};
135
136 /* Forward declaration of Mueslix data */
137 typedef struct mueslix_data mueslix_data_t;
138
/* Mueslix channel (one per serial interface, 4 per chip) */
struct mueslix_channel {
   /* Channel ID (0..MUESLIX_NR_CHANNELS-1) */
   u_int id;

   /* Channel status (0=disabled), set by TPU enable/disable commands */
   u_int status;

   /* Clock parameters (written through channel regs 0x40/0x44) */
   u_int clk_shift,clk_div;
   u_int clk_rate;

   /* CRC control register (channel reg 0x00) */
   u_int crc_ctrl_reg;

   /* CRC size in bytes, derived from crc_ctrl_reg (2 or 4) */
   u_int crc_size;

   /* NetIO descriptor (NULL when no NIO is bound) */
   netio_desc_t *nio;

   /* TX ring scanners task id (ptask running dev_mueslix_handle_txring) */
   ptask_id_t tx_tid;

   /* physical addresses for start and end of RX/TX rings */
   m_uint32_t rx_start,rx_end,tx_start,tx_end;

   /* physical addresses of current RX and TX descriptors */
   m_uint32_t rx_current,tx_current;

   /* Parent mueslix structure */
   mueslix_data_t *parent;
};
172
/* Mueslix Data: per-chip state */
struct mueslix_data {
   char *name;

   /* Lock protecting registers and ring state (MUESLIX_LOCK/UNLOCK) */
   pthread_mutex_t lock;

   /* IRQ status and mask (global regs 0x00 and 0x10) */
   m_uint32_t irq_status,irq_mask;
   u_int irq_clearing_count;   /* see dev_mueslix_update_irq_status() */

   /* TPU options (global reg 0x2c00) */
   m_uint32_t tpu_options;

   /* Virtual machine */
   vm_instance_t *vm;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Chip mode:
    *
    * 0=increment ring pointers before check + direct TX size,
    * 1=increment ring pointers after check + "complex" TX size.
    */
   int chip_mode;

   /* Channels */
   struct mueslix_channel channel[MUESLIX_NR_CHANNELS];
   /* 2 bits per channel (RX/TX enable), written through global reg 0x14 */
   m_uint32_t channel_enable_mask;

   /* TPU microcode (memory-backed region at MUESLIX_UCODE_OFFSET) */
   u_char ucode[MUESLIX_UCODE_LEN];

   /* TPU Xmem and Ymem (memory-backed regions) */
   u_char xmem[MUESLIX_XYMEM_LEN];
   u_char ymem[MUESLIX_XYMEM_LEN];
};
214
/* Offsets of the 4 channels inside the chip's register window
 * (each channel occupies MUESLIX_CHANNEL_LEN = 0x100 bytes) */
static m_uint32_t channel_offset[MUESLIX_NR_CHANNELS] = {
   MUESLIX_CHANNEL0_OFFSET, MUESLIX_CHANNEL1_OFFSET,
   MUESLIX_CHANNEL2_OFFSET, MUESLIX_CHANNEL3_OFFSET,
};
220
221 /* Lock/Unlock primitives */
222 #define MUESLIX_LOCK(d) pthread_mutex_lock(&(d)->lock)
223 #define MUESLIX_UNLOCK(d) pthread_mutex_unlock(&(d)->lock)
224
225 /* Log a Mueslix message */
226 #define MUESLIX_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
227
228 /* Returns TRUE if RX/TX is enabled for a channel */
dev_mueslix_is_rx_tx_enabled(struct mueslix_data * d,u_int id)229 static inline int dev_mueslix_is_rx_tx_enabled(struct mueslix_data *d,u_int id)
230 {
231 /* 2 bits for RX/TX, 4 channels max */
232 return((d->channel_enable_mask >> (id << 1)) & 0x03);
233 }
234
235 /* Update IRQ status */
dev_mueslix_update_irq_status(struct mueslix_data * d)236 static inline void dev_mueslix_update_irq_status(struct mueslix_data *d)
237 {
238 if (d->irq_status & d->irq_mask)
239 pci_dev_trigger_irq(d->vm,d->pci_dev);
240 else {
241 if (++d->irq_clearing_count == 3) {
242 pci_dev_clear_irq(d->vm,d->pci_dev);
243 d->irq_clearing_count = 0;
244 }
245 }
246 }
247
248 /* Compute clock rate */
dev_mueslix_update_clk_rate(struct mueslix_channel * channel)249 static void dev_mueslix_update_clk_rate(struct mueslix_channel *channel)
250 {
251 u_int clk_shift = channel->clk_shift;
252
253 if (clk_shift == 8)
254 clk_shift = 0;
255
256 channel->clk_rate = (8064000 >> clk_shift) / (channel->clk_div + 1);
257 MUESLIX_LOG(channel->parent,"channel %u: clock rate set to %u\n",
258 channel->id,channel->clk_rate);
259
260 /* Apply the bandwidth constraint to the NIO */
261 if (channel->nio != NULL)
262 netio_set_bandwidth(channel->nio,(channel->clk_rate+1000)/1000);
263 }
264
/*
 * Access to channel registers.
 *
 * Handles a read or write (op_type == MTS_READ / MTS_WRITE) of op_size
 * bytes at "offset" within one channel's 0x100-byte register window;
 * *data holds the value written, or receives the value read.
 * Register meanings were determined empirically; some are still guesses.
 */
void dev_mueslix_chan_access(cpu_gen_t *cpu,struct mueslix_channel *channel,
                             m_uint32_t offset,u_int op_size,u_int op_type,
                             m_uint64_t *data)
{
   switch(offset) {
      case 0x00: /* CRC control register ? */
         if (op_type == MTS_READ) {
            *data = channel->crc_ctrl_reg;
         } else {
            channel->crc_ctrl_reg = *data;

            switch(channel->crc_ctrl_reg) {
               case 0x08:
               case 0x0a:
                  /* 0x08 -> crc_size 2, 0x0a -> crc_size 4 */
                  channel->crc_size = channel->crc_ctrl_reg - 0x06;
                  break;

               default:
                  MUESLIX_LOG(channel->parent,"channel %u: unknown value "
                              "for CRC ctrl reg 0x%4.4x\n",
                              channel->id,channel->crc_ctrl_reg);

                  /* fall back to a 2-byte CRC */
                  channel->crc_size = 2;
            }
            MUESLIX_LOG(channel->parent,
                        "channel %u: CRC size set to 0x%4.4x\n",
                        channel->id,channel->crc_size);
         }
         break;

      case 0x40: /* clock shift */
         if (op_type == MTS_READ)
            *data = channel->clk_shift;
         else
            channel->clk_shift = *data;

         /* Recompute clock rate */
         dev_mueslix_update_clk_rate(channel);
         break;

      case 0x44: /* clock divisor (rate is only recomputed on 0x40 access) */
         if (op_type == MTS_READ)
            *data = channel->clk_div;
         else
            channel->clk_div = *data;
         break;

      case 0x60: /* signals ? */
         /* report everything asserted, but only when a NIO is attached */
         if ((op_type == MTS_READ) && (channel->nio != NULL))
            *data = 0xFFFFFFFF;
         break;

      case 0x64: /* port status - cable type and probably other things */
         if (op_type == MTS_READ)
            *data = 0x7B;
         break;

      case 0x90: /* has influence on clock rate */
         if (op_type == MTS_READ)
            *data = 0x11111111;
         break;

      case 0x80: /* TX start */
         /* writing also resets the TX scan position to the ring head */
         if (op_type == MTS_WRITE)
            channel->tx_start = channel->tx_current = *data;
         else
            *data = channel->tx_start;
         break;

      case 0x84: /* TX end */
         if (op_type == MTS_WRITE)
            channel->tx_end = *data;
         else
            *data = channel->tx_end;
         break;

      case 0x88: /* RX start */
         /* writing also resets the RX scan position to the ring head */
         if (op_type == MTS_WRITE)
            channel->rx_start = channel->rx_current = *data;
         else
            *data = channel->rx_start;
         break;

      case 0x8c: /* RX end */
         if (op_type == MTS_WRITE)
            channel->rx_end = *data;
         else
            *data = channel->rx_end;
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_WRITE) {
            MUESLIX_LOG(channel->parent,"channel %u: "
                        "write to unknown addr 0x%4.4x, value=0x%llx\n",
                        channel->id,offset,*data);
         }
#endif
   }
}
368
369 /* Handle TPU commands for chip mode 0 (3600) */
tpu_cm0_handle_cmd(struct mueslix_data * d,u_int cmd)370 static void tpu_cm0_handle_cmd(struct mueslix_data *d,u_int cmd)
371 {
372 struct mueslix_channel *channel;
373 u_int opcode,channel_id;
374
375 opcode = (cmd >> 12) & 0xFF;
376 channel_id = cmd & 0x03;
377 channel = &d->channel[channel_id];
378
379 switch(opcode) {
380 case 0x10:
381 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
382 channel->status = 0;
383 break;
384 case 0x00:
385 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
386 channel->status = 1;
387 break;
388 default:
389 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
390 }
391 }
392
393 /* Handle TPU commands for chip mode 1 (7200) */
tpu_cm1_handle_cmd(struct mueslix_data * d,u_int cmd)394 static void tpu_cm1_handle_cmd(struct mueslix_data *d,u_int cmd)
395 {
396 struct mueslix_channel *channel;
397 u_int opcode,channel_id;
398
399 opcode = (cmd >> 12) & 0xFF;
400 channel_id = cmd & 0x03;
401 channel = &d->channel[channel_id];
402
403 switch(opcode) {
404 case 0x50:
405 case 0x30:
406 MUESLIX_LOG(d,"channel %u disabled\n",channel_id);
407 channel->status = 0;
408 break;
409 case 0x00:
410 MUESLIX_LOG(d,"channel %u enabled\n",channel_id);
411 channel->status = 1;
412 break;
413 default:
414 MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd);
415 }
416 }
417
/*
 * dev_mueslix_access()
 *
 * Main MMIO handler for the Mueslix register window. Memory-backed
 * regions (microcode, XMem, YMem) return a direct pointer; all other
 * registers are handled here or dispatched to dev_mueslix_chan_access()
 * and return NULL.
 */
void *dev_mueslix_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct mueslix_data *d = dev->priv_data;
   int i;

#if DEBUG_ACCESS >= 2
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
              "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
   }
#endif

   /* Returns 0 if we don't know the offset */
   if (op_type == MTS_READ)
      *data = 0x00000000;

   /* Handle microcode access (direct pointer, no lock needed) */
   if ((offset >= MUESLIX_UCODE_OFFSET) &&
       (offset < (MUESLIX_UCODE_OFFSET + MUESLIX_UCODE_LEN)))
      return(d->ucode + offset - MUESLIX_UCODE_OFFSET);

   /* Handle TPU XMem access */
   if ((offset >= MUESLIX_XMEM_OFFSET) &&
       (offset < (MUESLIX_XMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->xmem + offset - MUESLIX_XMEM_OFFSET);

   /* Handle TPU YMem access */
   if ((offset >= MUESLIX_YMEM_OFFSET) &&
       (offset < (MUESLIX_YMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->ymem + offset - MUESLIX_YMEM_OFFSET);

   /* Handle channel access (dispatch to the matching channel window) */
   for(i=0;i<MUESLIX_NR_CHANNELS;i++)
      if ((offset >= channel_offset[i]) &&
          (offset < (channel_offset[i] + MUESLIX_CHANNEL_LEN)))
      {
         MUESLIX_LOCK(d);
         dev_mueslix_chan_access(cpu,&d->channel[i],
                                 offset - channel_offset[i],
                                 op_size,op_type,data);
         MUESLIX_UNLOCK(d);
         return NULL;
      }

   MUESLIX_LOCK(d);

   /* Generic case */
   switch(offset) {
      /* this reg is accessed when an interrupt occurs;
         writing a bit acknowledges (clears) that status bit */
      case 0x0:
         if (op_type == MTS_READ) {
            *data = d->irq_status;
         } else {
            d->irq_status &= ~(*data);
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* Maybe interrupt mask */
      case 0x10:
         if (op_type == MTS_READ) {
            *data = d->irq_mask;
         } else {
            d->irq_mask = *data;
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* per-channel RX/TX enable bits (2 bits per channel) */
      case 0x14:
         if (op_type == MTS_READ)
            *data = d->channel_enable_mask;
         else {
#if DEBUG_ACCESS
            cpu_log(cpu,d->name,
                    "channel_enable_mask = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
#endif
            d->channel_enable_mask = *data;
         }
         break;

      case 0x18:
         if (op_type == MTS_READ)
            *data = 0x7F7F7F7F;
         break;

      case 0x48:
         if (op_type == MTS_READ)
            *data = 0x00000000;
         break;

      case 0x7c:
         if (op_type == MTS_READ)
            *data = 0x492;
         break;

      case 0x2c00:
         if (op_type == MTS_READ)
            *data = d->tpu_options;
         else
            d->tpu_options = *data;
         break;

      /* cmd reg: dispatch to the chip-mode-specific TPU handler */
      case MUESLIX_TPU_CMD_OFFSET:
#if DEBUG_ACCESS
         if (op_type == MTS_WRITE) {
            cpu_log(cpu,d->name,"cmd_reg = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
         }
#endif
         switch(d->chip_mode) {
            case 0:  /* 3600 */
               tpu_cm0_handle_cmd(d,*data);
               break;
            case 1:  /* 7200 */
               tpu_cm1_handle_cmd(d,*data);
               break;
         }
         break;

      /*
       * cmd_rsp reg, it seems that 0xFFFF means OK
       * (seen on a "sh contr se1/0" with "debug serial mueslix" enabled).
       */
      case MUESLIX_TPU_CMD_RSP_OFFSET:
         if (op_type == MTS_READ)
            *data = 0xFFFF;
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,
                    "read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),op_size);
         } else {
            cpu_log(cpu,d->name,
                    "write to unknown addr 0x%x, value=0x%llx, "
                    "pc=0x%llx (size=%u)\n",
                    offset,*data,cpu_get_pc(cpu),op_size);
         }
#endif
   }

   MUESLIX_UNLOCK(d);
   return NULL;
}
573
574 /*
575 * Get the address of the next RX descriptor.
576 */
rxdesc_get_next(struct mueslix_channel * channel,m_uint32_t rxd_addr)577 static m_uint32_t rxdesc_get_next(struct mueslix_channel *channel,
578 m_uint32_t rxd_addr)
579 {
580 m_uint32_t nrxd_addr;
581
582 switch(channel->parent->chip_mode) {
583 case 0:
584 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
585 if (nrxd_addr == channel->rx_end)
586 nrxd_addr = channel->rx_start;
587 break;
588
589 case 1:
590 default:
591 if (rxd_addr == channel->rx_end)
592 nrxd_addr = channel->rx_start;
593 else
594 nrxd_addr = rxd_addr + sizeof(struct rx_desc);
595 break;
596 }
597
598 return(nrxd_addr);
599 }
600
601 /* Read an RX descriptor */
rxdesc_read(struct mueslix_data * d,m_uint32_t rxd_addr,struct rx_desc * rxd)602 static void rxdesc_read(struct mueslix_data *d,m_uint32_t rxd_addr,
603 struct rx_desc *rxd)
604 {
605 #if DEBUG_RECEIVE
606 MUESLIX_LOG(d,"reading RX descriptor at address 0x%x\n",rxd_addr);
607 #endif
608
609 /* get the next descriptor from VM physical RAM */
610 physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc));
611
612 /* byte-swapping */
613 rxd->rdes[0] = vmtoh32(rxd->rdes[0]);
614 rxd->rdes[1] = vmtoh32(rxd->rdes[1]);
615 }
616
617 /*
618 * Try to acquire the specified RX descriptor. Returns TRUE if we have it.
619 * It assumes that the byte-swapping is done.
620 */
rxdesc_acquire(m_uint32_t rdes0)621 static inline int rxdesc_acquire(m_uint32_t rdes0)
622 {
623 return(rdes0 & MUESLIX_RXDESC_OWN);
624 }
625
/* Put a packet in buffer of a descriptor.
 *
 * Copies up to the descriptor's advertised buffer length from *pkt into
 * the VM buffer pointed to by rdes[1], advances the caller's cursor
 * (*pkt, *pkt_len) and returns the number of bytes consumed. */
static ssize_t rxdesc_put_pkt(struct mueslix_data *d,struct rx_desc *rxd,
                              u_char **pkt,ssize_t *pkt_len)
{
   ssize_t len,cp_len;

   /* buffer length advertised by the descriptor (low 16 bits of rdes[0]) */
   len = rxd->rdes[0] & MUESLIX_RXDESC_LEN_MASK;

   /* compute the data length to copy */
   cp_len = m_min(len,*pkt_len);

#if DEBUG_RECEIVE
   MUESLIX_LOG(d,"copying %d bytes at 0x%x\n",cp_len,rxd->rdes[1]);
#endif

   /* copy packet data to the VM physical RAM (rdes[1] = buffer address) */
   physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[1],cp_len);

   *pkt += cp_len;
   *pkt_len -= cp_len;
   return(cp_len);
}
648
/*
 * Put a packet in the RX ring of the Mueslix specified channel.
 *
 * Walks the RX descriptor ring, spreading the packet over as many
 * descriptors as needed, then updates descriptor status words in VM
 * memory and raises the channel's RX IRQ. The first descriptor's status
 * is written last so the guest driver never sees a half-filled chain.
 */
static void dev_mueslix_receive_pkt(struct mueslix_channel *channel,
                                    u_char *pkt,ssize_t pkt_len)
{
   struct mueslix_data *d = channel->parent;
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t cp_len,tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   int i;

   /* ring not programmed, channel disabled or no NIO: drop silently */
   if ((channel->rx_start == 0) || (channel->status == 0) ||
       (channel->nio == NULL))
      return;

   /* Don't make anything if RX is not enabled for this channel */
   if (!(dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE))
      return;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,MUESLIX_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,channel->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return;

   /* Remember the first RX descriptor address */
   rx_start = channel->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      cp_len = rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(channel,channel->rx_current);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         rxdc->rdes[0] = MUESLIX_RXDESC_LS;
         /* stored length includes the CRC; the extra +1 matches observed
            driver behavior — NOTE(review): exact reason unconfirmed */
         rxdc->rdes[0] |= cp_len + channel->crc_size + 1;

         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

         channel->rx_current = rxdn_addr;
         break;
      }

#if DEBUG_RECEIVE
      MUESLIX_LOG(d,"trying to acquire new descriptor at 0x%x\n",rxdn_addr);
#endif

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      if (!rxdesc_acquire(rxdn_rdes0))
         /* no more descriptors: close the chain with an overrun flag */
         rxdc->rdes[0] = MUESLIX_RXDESC_LS | MUESLIX_RXDESC_OVERRUN;
      else
         rxdc->rdes[0] = 0x00000000;   /* ok, no special flag */

      rxdc->rdes[0] |= cp_len;

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      channel->rx_current = rxdn_addr;

      /* stop on overrun: the remaining bytes are lost */
      if (rxdc->rdes[0] & MUESLIX_RXDESC_LS)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor (written last, releases the chain) */
   rxd0.rdes[0] |= MUESLIX_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready (XXX something to do ?) */

   /* Generate IRQ on CPU (RX IRQ bit is per-channel) */
   d->irq_status |= MUESLIX_RX_IRQ << channel->id;
   dev_mueslix_update_irq_status(d);
}
742
743 /* Handle the Mueslix RX ring of the specified channel */
dev_mueslix_handle_rxring(netio_desc_t * nio,u_char * pkt,ssize_t pkt_len,struct mueslix_channel * channel)744 static int dev_mueslix_handle_rxring(netio_desc_t *nio,
745 u_char *pkt,ssize_t pkt_len,
746 struct mueslix_channel *channel)
747 {
748 struct mueslix_data *d = channel->parent;
749
750 #if DEBUG_RECEIVE
751 MUESLIX_LOG(d,"channel %u: receiving a packet of %d bytes\n",
752 channel->id,pkt_len);
753 mem_dump(log_file,pkt,pkt_len);
754 #endif
755
756 MUESLIX_LOCK(d);
757 if (dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE)
758 dev_mueslix_receive_pkt(channel,pkt,pkt_len);
759 MUESLIX_UNLOCK(d);
760 return(TRUE);
761 }
762
763 /* Read a TX descriptor */
txdesc_read(struct mueslix_data * d,m_uint32_t txd_addr,struct tx_desc * txd)764 static void txdesc_read(struct mueslix_data *d,m_uint32_t txd_addr,
765 struct tx_desc *txd)
766 {
767 /* get the next descriptor from VM physical RAM */
768 physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc));
769
770 /* byte-swapping */
771 txd->tdes[0] = vmtoh32(txd->tdes[0]);
772 txd->tdes[1] = vmtoh32(txd->tdes[1]);
773 }
774
775 /* Set the address of the next TX descriptor */
txdesc_set_next(struct mueslix_channel * channel)776 static void txdesc_set_next(struct mueslix_channel *channel)
777 {
778 switch(channel->parent->chip_mode) {
779 case 0:
780 channel->tx_current += sizeof(struct tx_desc);
781
782 if (channel->tx_current == channel->tx_end)
783 channel->tx_current = channel->tx_start;
784 break;
785
786 case 1:
787 default:
788 if (channel->tx_current == channel->tx_end)
789 channel->tx_current = channel->tx_start;
790 else
791 channel->tx_current += sizeof(struct tx_desc);
792 }
793 }
794
795 /* Handle the TX ring of a specific channel (single packet) */
dev_mueslix_handle_txring_single(struct mueslix_channel * channel)796 static int dev_mueslix_handle_txring_single(struct mueslix_channel *channel)
797 {
798 struct mueslix_data *d = channel->parent;
799 u_char pkt[MUESLIX_MAX_PKT_SIZE],*pkt_ptr;
800 m_uint32_t tx_start,clen,sub_len,tot_len,pad;
801 struct tx_desc txd0,ctxd,*ptxd;
802 int done = FALSE;
803
804 if ((channel->tx_start == 0) || (channel->status == 0))
805 return(FALSE);
806
807 /* Check if the NIO can transmit */
808 if (!netio_can_transmit(channel->nio))
809 return(FALSE);
810
811 /* Copy the current txring descriptor */
812 tx_start = channel->tx_current;
813 ptxd = &txd0;
814 txdesc_read(d,channel->tx_current,ptxd);
815
816 /* If we don't own the descriptor, we cannot transmit */
817 if (!(txd0.tdes[0] & MUESLIX_TXDESC_OWN))
818 return(FALSE);
819
820 #if DEBUG_TRANSMIT
821 MUESLIX_LOG(d,"mueslix_handle_txring: 1st desc: "
822 "tdes[0]=0x%x, tdes[1]=0x%x\n",
823 ptxd->tdes[0],ptxd->tdes[1]);
824 #endif
825
826 pkt_ptr = pkt;
827 tot_len = 0;
828
829 do {
830 #if DEBUG_TRANSMIT
831 MUESLIX_LOG(d,"mueslix_handle_txring: loop: "
832 "tdes[0]=0x%x, tdes[1]=0x%x\n",
833 ptxd->tdes[0],ptxd->tdes[1]);
834 #endif
835
836 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_OWN)) {
837 MUESLIX_LOG(d,"mueslix_handle_txring: descriptor not owned!\n");
838 return(FALSE);
839 }
840
841 switch(channel->parent->chip_mode) {
842 case 0:
843 clen = ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK;
844 break;
845
846 case 1:
847 default:
848 clen = (ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK) << 2;
849
850 if (ptxd->tdes[0] & MUESLIX_TXDESC_SUB) {
851 sub_len = ptxd->tdes[0] & MUESLIX_TXDESC_SUB_LEN;
852 sub_len >>= MUESLIX_TXDESC_SUB_SHIFT;
853 clen -= sub_len;
854 }
855 }
856
857 /* Be sure that we have length not null */
858 if (clen != 0) {
859 //printf("pkt_ptr = %p, ptxd->tdes[1] = 0x%x, clen = %d\n",
860 // pkt_ptr, ptxd->tdes[1], clen);
861 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[1],clen);
862 }
863
864 pkt_ptr += clen;
865 tot_len += clen;
866
867 /* Clear the OWN bit if this is not the first descriptor */
868 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_FS))
869 physmem_copy_u32_to_vm(d->vm,channel->tx_current,0);
870
871 /* Go to the next descriptor */
872 txdesc_set_next(channel);
873
874 /* Copy the next txring descriptor */
875 if (!(ptxd->tdes[0] & MUESLIX_TXDESC_LS)) {
876 txdesc_read(d,channel->tx_current,&ctxd);
877 ptxd = &ctxd;
878 } else
879 done = TRUE;
880 }while(!done);
881
882 if (tot_len != 0) {
883 #if DEBUG_TRANSMIT
884 MUESLIX_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
885 tot_len,txd0.tdes[0]);
886 mem_dump(log_file,pkt,tot_len);
887 #endif
888
889 pad = ptxd->tdes[0] & MUESLIX_TXDESC_PAD;
890 pad >>= MUESLIX_TXDESC_PAD_SHIFT;
891 tot_len -= (4 - pad) & 0x03;
892
893 /* send it on wire */
894 netio_send(channel->nio,pkt,tot_len);
895 }
896
897 /* Clear the OWN flag of the first descriptor */
898 physmem_copy_u32_to_vm(d->vm,tx_start,0);
899
900 /* Interrupt on completion ? */
901 d->irq_status |= MUESLIX_TX_IRQ << channel->id;
902 dev_mueslix_update_irq_status(d);
903 return(TRUE);
904 }
905
906 /* Handle the TX ring of a specific channel */
dev_mueslix_handle_txring(struct mueslix_channel * channel)907 static int dev_mueslix_handle_txring(struct mueslix_channel *channel)
908 {
909 struct mueslix_data *d = channel->parent;
910 int res,i;
911
912 if (!dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_TX_ENABLE)
913 return(FALSE);
914
915 for(i=0;i<MUESLIX_TXRING_PASS_COUNT;i++) {
916 MUESLIX_LOCK(d);
917 res = dev_mueslix_handle_txring_single(channel);
918 MUESLIX_UNLOCK(d);
919
920 if (!res)
921 break;
922 }
923
924 netio_clear_bw_stat(channel->nio);
925 return(TRUE);
926 }
927
928 /* pci_mueslix_read() */
pci_mueslix_read(cpu_gen_t * cpu,struct pci_device * dev,int reg)929 static m_uint32_t pci_mueslix_read(cpu_gen_t *cpu,struct pci_device *dev,
930 int reg)
931 {
932 struct mueslix_data *d = dev->priv_data;
933
934 switch(reg) {
935 case 0x08: /* Rev ID */
936 return(0x2800001);
937 case PCI_REG_BAR0:
938 return(d->dev->phys_addr);
939 default:
940 return(0);
941 }
942 }
943
944 /* pci_mueslix_write() */
pci_mueslix_write(cpu_gen_t * cpu,struct pci_device * dev,int reg,m_uint32_t value)945 static void pci_mueslix_write(cpu_gen_t *cpu,struct pci_device *dev,
946 int reg,m_uint32_t value)
947 {
948 struct mueslix_data *d = dev->priv_data;
949
950 switch(reg) {
951 case PCI_REG_BAR0:
952 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
953 MUESLIX_LOG(d,"registers are mapped at 0x%x\n",value);
954 break;
955 }
956 }
957
958 /* Initialize a Mueslix chip */
959 struct mueslix_data *
dev_mueslix_init(vm_instance_t * vm,char * name,int chip_mode,struct pci_bus * pci_bus,int pci_device,int irq)960 dev_mueslix_init(vm_instance_t *vm,char *name,int chip_mode,
961 struct pci_bus *pci_bus,int pci_device,int irq)
962 {
963 struct pci_device *pci_dev;
964 struct mueslix_data *d;
965 struct vdevice *dev;
966 int i;
967
968 /* Allocate the private data structure for Mueslix chip */
969 if (!(d = malloc(sizeof(*d)))) {
970 fprintf(stderr,"%s (Mueslix): out of memory\n",name);
971 return NULL;
972 }
973
974 memset(d,0,sizeof(*d));
975 pthread_mutex_init(&d->lock,NULL);
976 d->chip_mode = chip_mode;
977
978 for(i=0;i<MUESLIX_NR_CHANNELS;i++) {
979 d->channel[i].id = i;
980 d->channel[i].parent = d;
981 }
982
983 /* Add as PCI device */
984 pci_dev = pci_dev_add(pci_bus,name,
985 MUESLIX_PCI_VENDOR_ID,MUESLIX_PCI_PRODUCT_ID,
986 pci_device,0,irq,
987 d,NULL,pci_mueslix_read,pci_mueslix_write);
988
989 if (!pci_dev) {
990 fprintf(stderr,"%s (Mueslix): unable to create PCI device.\n",name);
991 return NULL;
992 }
993
994 /* Create the device itself */
995 if (!(dev = dev_create(name))) {
996 fprintf(stderr,"%s (Mueslix): unable to create device.\n",name);
997 return NULL;
998 }
999
1000 d->name = name;
1001 d->pci_dev = pci_dev;
1002 d->vm = vm;
1003
1004 dev->phys_addr = 0;
1005 dev->phys_len = 0x4000;
1006 dev->handler = dev_mueslix_access;
1007 dev->priv_data = d;
1008
1009 /* Store device info */
1010 dev->priv_data = d;
1011 d->dev = dev;
1012 return(d);
1013 }
1014
1015 /* Remove a Mueslix device */
dev_mueslix_remove(struct mueslix_data * d)1016 void dev_mueslix_remove(struct mueslix_data *d)
1017 {
1018 if (d != NULL) {
1019 pci_dev_remove(d->pci_dev);
1020 vm_unbind_device(d->vm,d->dev);
1021 cpu_group_rebuild_mts(d->vm->cpu_group);
1022 free(d->dev);
1023 free(d);
1024 }
1025 }
1026
1027 /* Bind a NIO to a Mueslix channel */
dev_mueslix_set_nio(struct mueslix_data * d,u_int channel_id,netio_desc_t * nio)1028 int dev_mueslix_set_nio(struct mueslix_data *d,u_int channel_id,
1029 netio_desc_t *nio)
1030 {
1031 struct mueslix_channel *channel;
1032
1033 if (channel_id >= MUESLIX_NR_CHANNELS)
1034 return(-1);
1035
1036 channel = &d->channel[channel_id];
1037
1038 /* check that a NIO is not already bound */
1039 if (channel->nio != NULL)
1040 return(-1);
1041
1042 /* define the new NIO */
1043 channel->nio = nio;
1044 channel->tx_tid = ptask_add((ptask_callback)dev_mueslix_handle_txring,
1045 channel,NULL);
1046 netio_rxl_add(nio,(netio_rx_handler_t)dev_mueslix_handle_rxring,
1047 channel,NULL);
1048 return(0);
1049 }
1050
1051 /* Unbind a NIO from a Mueslix channel */
dev_mueslix_unset_nio(struct mueslix_data * d,u_int channel_id)1052 int dev_mueslix_unset_nio(struct mueslix_data *d,u_int channel_id)
1053 {
1054 struct mueslix_channel *channel;
1055
1056 if (channel_id >= MUESLIX_NR_CHANNELS)
1057 return(-1);
1058
1059 channel = &d->channel[channel_id];
1060
1061 if (channel->nio) {
1062 ptask_remove(channel->tx_tid);
1063 netio_rxl_remove(channel->nio);
1064 channel->nio = NULL;
1065 }
1066 return(0);
1067 }
1068