/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <ulp/toecore/toedev.h>
#include <sys/mbufq.h>
#else
#include "cxgb_osdep.h"
#include "cxgb_mbuf.h"
#include "cxgb_toedev.h"
#endif

struct adapter;
struct sge_qset;
extern int cxgb_debug;

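/*
 * With DEBUG_LOCKING defined, lock creation and destruction are traced to
 * the console so lock lifetime problems can be narrowed down to a file and
 * line; otherwise the wrappers collapse to the plain mtx/sx primitives.
 */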
#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
    do { \
        printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
        mtx_init((lock), lockname, class, flags);       \
    } while (0)

#define MTX_DESTROY(lock) \
    do { \
        printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
        mtx_destroy((lock));                    \
    } while (0)

#define SX_INIT(lock, lockname) \
    do { \
        printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
        sx_init((lock), lockname);      \
    } while (0)

#define SX_DESTROY(lock) \
    do { \
        printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
        sx_destroy((lock));                 \
    } while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif

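/* Per-port autoconf glue: ties the port device to its parent adapter. */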
struct port_device {
	struct device	original;
	device_t	dev;
	struct adapter	*parent;
	int		port_number;
};

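/*
 * Software state for a single network port: PHY, MAC, link settings and
 * the queue sets assigned to it.
 */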
struct port_info {
    struct adapter  *adapter;
    struct ifnet    *ifp;
    struct port_device *pd;
    int     port;
    int     if_flags;
    const struct port_type_info *port_type;
    struct cphy phy;
    struct cmac mac;
    struct link_config link_config;
    struct ifmedia  media;
#ifdef USE_SX
    struct sx   lock;
#else
    struct mtx  lock;
#endif
    uint8_t     port_id;
    uint8_t     tx_chan;
    uint8_t     txpkt_intf;
    uint8_t     nqsets;
    uint8_t     first_qset;

    uint8_t     hw_addr[ETHER_ADDR_LEN];
    struct cxgb_task start_task;
    struct cxgb_task timer_reclaim_task;
    struct cdev     *port_cdev;

#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
    char            lockbuf[PORT_NAME_LEN];
    char            taskqbuf[TASKQ_NAME_LEN];
};

enum {              /* adapter flags */
    FULL_INIT_DONE  = (1 << 0),
    USING_MSI   = (1 << 1),
    USING_MSIX  = (1 << 2),
    QUEUES_BOUND    = (1 << 3),
    FW_UPTODATE     = (1 << 4),
    TPS_UPTODATE    = (1 << 5),
};


#define FL_Q_SIZE   4096
#define JUMBO_Q_SIZE    512
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE   1024


/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 * XXX TOE is not implemented yet, so the extra queues are just placeholders.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };


/* careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
    LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

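/*
 * State for one in-progress LRO session: the mbuf chain being accumulated
 * plus the TCP sequence and length bookkeeping needed to decide whether the
 * next packet can be merged into it.
 */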
struct t3_lro_session {
    struct mbuf *head;
    struct mbuf *tail;
    uint32_t seq;
    uint16_t ip_len;
    uint16_t mss;
    uint16_t vtag;
    uint8_t npkts;
};

struct lro_state {
    unsigned short enabled;
    unsigned short active_idx;
    unsigned int nactive;
    struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

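/*
 * SGE response queue: the ring the hardware posts RX and completion events
 * to, together with the DMA resources backing it and counters for the
 * events it has handled.
 */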
struct sge_rspq {
    uint32_t    credits;
    uint32_t    size;
    uint32_t    cidx;
    uint32_t    gen;
    uint32_t    polling;
    uint32_t    holdoff_tmr;
    uint32_t    next_holdoff;
    uint32_t    imm_data;
    struct rsp_desc *desc;
    uint32_t    cntxt_id;
    struct mtx      lock;
    struct mbuf     *rx_head;    /* offload packet receive queue head */
    struct mbuf     *rx_tail;    /* offload packet receive queue tail */

    uint32_t        offload_pkts;
    uint32_t        offload_bundles;
    uint32_t        pure_rsps;
    uint32_t        unhandled_irqs;

    bus_addr_t  phys_addr;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;

    struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN  32
    char            lockbuf[RSPQ_NAME_LEN];
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

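/*
 * SGE free list: a ring of receive buffers handed to the hardware, with one
 * software descriptor per hardware descriptor and the DMA tags used to map
 * the buffers.
 */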
struct sge_fl {
    uint32_t    buf_size;
    uint32_t    credits;
    uint32_t    size;
    uint32_t    cidx;
    uint32_t    pidx;
    uint32_t    gen;
    struct rx_desc  *desc;
    struct rx_sw_desc *sdesc;
    bus_addr_t  phys_addr;
    uint32_t    cntxt_id;
    uint64_t    empty;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;
    bus_dma_tag_t   entry_tag;
    int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

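/*
 * SGE transmit queue: descriptor ring state, reclaim bookkeeping and the
 * software send queue used when the ring backs up.
 */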
struct sge_txq {
    uint64_t    flags;
    uint32_t    in_use;
    uint32_t    size;
    uint32_t    processed;
    uint32_t    cleaned;
    uint32_t    stop_thres;
    uint32_t    cidx;
    uint32_t    pidx;
    uint32_t    gen;
    uint32_t    unacked;
    struct tx_desc  *desc;
    struct tx_sw_desc *sdesc;
    uint32_t    token;
    bus_addr_t  phys_addr;
    struct cxgb_task qresume_task;
    struct cxgb_task qreclaim_task;
    struct port_info *port;
    uint32_t    cntxt_id;
    uint64_t    stops;
    uint64_t    restarts;
    bus_dma_tag_t   desc_tag;
    bus_dmamap_t    desc_map;
    bus_dma_tag_t   entry_tag;
    struct mbuf_head sendq;
    struct mtx      lock;
#define TXQ_NAME_LEN  32
    char            lockbuf[TXQ_NAME_LEN];
};


enum {
    SGE_PSTAT_TSO,              /* # of TSO requests */
    SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
    SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
    SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
    SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
    SGE_PSTATS_LRO_QUEUED,      /* # of LRO appended packets */
    SGE_PSTATS_LRO_FLUSHED,     /* # of LRO flushed packets */
    SGE_PSTATS_LRO_X_STREAMS,   /* # of exceeded LRO contexts */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

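/*
 * A queue set bundles one response queue with its free lists and Tx queues;
 * each port owns one or more queue sets.
 */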
struct sge_qset {
    struct sge_rspq     rspq;
    struct sge_fl       fl[SGE_RXQ_PER_SET];
    struct lro_state    lro;
    struct sge_txq      txq[SGE_TXQ_PER_SET];
    uint32_t            txq_stopped;       /* which Tx queues are stopped */
    uint64_t            port_stats[SGE_PSTAT_MAX];
    struct port_info    *port;
    int                 idx; /* qset # */
};

struct sge {
    struct sge_qset         qs[SGE_QSETS];
    struct mtx              reg_lock;
};

struct filter_info;

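/*
 * Per-adapter software state: PCI and DMA resources, interrupt plumbing,
 * the SGE, the on-chip memory controllers and one port_info per port.
 */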
struct adapter {
    struct device original;
    device_t        dev; /* so we have a compatible pointer */
    int         flags;
    TAILQ_ENTRY(adapter)    adapter_entry;

    /* PCI register resources */
    int         regs_rid;
    struct resource     *regs_res;
    bus_space_handle_t  bh;
    bus_space_tag_t     bt;
    bus_size_t          mmio_len;
    uint32_t            link_width;
    struct pci_attach_args pa;
    uint32_t            bar0;
    bus_space_handle_t  bar0_handle;
    pci_intr_handle_t   intr_handle;
    void               *intr_cookie;

    /* DMA resources */
    bus_dma_tag_t       parent_dmat;
    bus_dma_tag_t       rx_dmat;
    bus_dma_tag_t       rx_jumbo_dmat;
    bus_dma_tag_t       tx_dmat;

    /* Interrupt resources */
    int         irq_rid;

    uint32_t        msix_regs_rid;
    struct resource     *msix_regs_res;

    struct resource     *msix_irq_res[SGE_QSETS];
    int         msix_irq_rid[SGE_QSETS];
    void            *msix_intr_tag[SGE_QSETS];
    uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
    uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */

    struct filter_info      *filters;

    /* Tasks */
    struct cxgb_task    ext_intr_task;
    struct cxgb_task    slow_intr_task;
    struct cxgb_task    tick_task;
    struct callout      cxgb_tick_ch;
    struct callout      sge_timer_ch;

    /* Register lock for use by the hardware layer */
    struct mtx      mdio_lock;
    struct mtx      elmer_lock;

    /* Bookkeeping for the hardware layer */
    struct adapter_params  params;
    unsigned int slow_intr_mask;
    unsigned long irq_stats[IRQ_NUM_STATS];

    struct sge              sge;
    struct mc7              pmrx;
    struct mc7              pmtx;
    struct mc7              cm;
    struct mc5              mc5;

    struct port_info    port[MAX_NPORTS];
    device_t        portdev[MAX_NPORTS];
    struct toedev           tdev;
    char                    fw_version[64];
    uint32_t                open_device_map;
    uint32_t                registered_device_map;
#ifdef USE_SX
    struct sx               lock;
#else
    struct mtx              lock;
#endif
    int                     (*cxgb_intr)(void *);
    int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN   32
    char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
    char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
    uint32_t                idx;
    struct port_info        *port;
};


#define MDIO_LOCK(adapter)  mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)    mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)  mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)    mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)          sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)        sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)             sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)           sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)          mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)        mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)  mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)    mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif

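/*
 * Register and PCI config space accessors.  MMIO accesses go through the
 * adapter's bus_space tag and handle; config space accesses go through the
 * pci_attach_args saved at attach time.
 */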
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
    return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
    bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
    *val = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
    pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg, val);
}

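/*
 * NetBSD's pci_conf_read/pci_conf_write operate on 32-bit words, so 16-bit
 * config accesses are synthesized with a read-modify-write of the
 * containing aligned word.
 */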
static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
    uint32_t temp;

    temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg & 0xfc);
    if (reg & 0x2)
        *val = (temp >> 16) & 0xffff;
    else
        *val = temp & 0xffff;
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
    uint32_t temp = pci_conf_read(adapter->pa.pa_pc, adapter->pa.pa_tag, reg & 0xfc);

    if (reg & 0x2)
        temp = (temp & 0xffff) | ((uint32_t)val << 16);
    else
        temp = (temp & 0xffff0000) | val;
    pci_conf_write(adapter->pa.pa_pc, adapter->pa.pa_tag, reg & 0xfc, temp);
}

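/*
 * Simple iterator over a port's rx-mode addresses: the first call returns
 * the port's hardware address, later calls return NULL.
 */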
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
    uint8_t *macaddr = NULL;

    if (rm->idx == 0)
        macaddr = rm->port->hw_addr;

    rm->idx++;
    return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
    rm->idx = 0;
    rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
    return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
            int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct toedev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
int t3b_intr(void *data);
int t3_intr_msi(void *data);
int t3_intr_msix(void *data);
int t3_encap(struct port_info *, struct mbuf **, int *free);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

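/*
 * Local container_of: recover a pointer to the enclosing structure from a
 * pointer to one of its members; undefined again once the helpers below no
 * longer need it.
 */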
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
    return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
    return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
    return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct toedev *d)
{
    return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
    return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}


#endif /* _CXGB_ADAPTER_H_ */