1 /*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
27 *
28 * $FreeBSD: releng/5.4/sys/dev/gem/if_gemvar.h 141016 2005-01-30 01:00:13Z imp $
29 */
30
31 #ifndef _IF_GEMVAR_H
32 #define _IF_GEMVAR_H
33
34
35 #include <sys/queue.h>
36 #include <sys/callout.h>
37
38 /*
39 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
40 */
41
42 /*
43 * Transmit descriptor list size. This is arbitrary, but allocate
44 * enough descriptors for 64 pending transmissions and 16 segments
45 * per packet. This limit is not actually enforced (packets with more segments
46 * can be sent, depending on the busdma backend); it is however used as an
47 * estimate for the tx window size.
48 */
49 #define GEM_NTXSEGS 16
50
51 #define GEM_TXQUEUELEN 64
52 #define GEM_NTXDESC (GEM_TXQUEUELEN * GEM_NTXSEGS)
53 #define GEM_MAXTXFREE (GEM_NTXDESC - 1)
54 #define GEM_NTXDESC_MASK (GEM_NTXDESC - 1)
55 #define GEM_NEXTTX(x) ((x + 1) & GEM_NTXDESC_MASK)
56
57 /*
58 * Receive descriptor list size. We have one Rx buffer per incoming
59 * packet, so this logic is a little simpler.
60 */
61 #define GEM_NRXDESC 128
62 #define GEM_NRXDESC_MASK (GEM_NRXDESC - 1)
63 #define GEM_PREVRX(x) ((x - 1) & GEM_NRXDESC_MASK)
64 #define GEM_NEXTRX(x) ((x + 1) & GEM_NRXDESC_MASK)
65
66 /*
67 * How many ticks to wait until to retry on a RX descriptor that is still owned
68 * by the hardware.
69 */
70 #define GEM_RXOWN_TICKS (hz / 50)
71
72 /*
73 * Control structures are DMA'd to the GEM chip. We allocate them in
74 * a single clump that maps to a single DMA segment to make several things
75 * easier.
76 */
77 struct gem_control_data {
78 /*
79 * The transmit descriptors.
80 */
81 struct gem_desc gcd_txdescs[GEM_NTXDESC];
82
83 /*
84 * The receive descriptors.
85 */
86 struct gem_desc gcd_rxdescs[GEM_NRXDESC];
87 };
88
89 #define GEM_CDOFF(x) offsetof(struct gem_control_data, x)
90 #define GEM_CDTXOFF(x) GEM_CDOFF(gcd_txdescs[(x)])
91 #define GEM_CDRXOFF(x) GEM_CDOFF(gcd_rxdescs[(x)])
92
93 /*
94 * Software state for transmit job mbufs (may be elements of mbuf chains).
95 */
96 struct gem_txsoft {
97 struct mbuf *txs_mbuf; /* head of our mbuf chain */
98 bus_dmamap_t txs_dmamap; /* our DMA map */
99 int txs_firstdesc; /* first descriptor in packet */
100 int txs_lastdesc; /* last descriptor in packet */
101 int txs_ndescs; /* number of descriptors */
102 STAILQ_ENTRY(gem_txsoft) txs_q;
103 };
104
105 STAILQ_HEAD(gem_txsq, gem_txsoft);
106
/*
 * Argument structure for busdma callback: bundles the softc and the
 * txsoft being loaded so the callback can reach both through one pointer.
 */
struct gem_txdma {
	struct gem_softc *txd_sc;	/* owning device softc */
	struct gem_txsoft *txd_txs;	/* transmit job being mapped */
};
112
113 /*
114 * Software state for receive jobs.
115 */
116 struct gem_rxsoft {
117 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
118 bus_dmamap_t rxs_dmamap; /* our DMA map */
119 bus_addr_t rxs_paddr; /* physical address of the segment */
120 };
121
122 /*
123 * Software state per device.
124 */
125 struct gem_softc {
126 struct arpcom sc_arpcom; /* arp common data */
127 device_t sc_miibus;
128 struct mii_data *sc_mii; /* MII media control */
129 device_t sc_dev; /* generic device information */
130 struct callout sc_tick_ch; /* tick callout */
131 struct callout sc_rx_ch; /* delayed rx callout */
132
133 /* The following bus handles are to be provided by the bus front-end */
134 bus_space_tag_t sc_bustag; /* bus tag */
135 bus_dma_tag_t sc_pdmatag; /* parent bus dma tag */
136 bus_dma_tag_t sc_rdmatag; /* RX bus dma tag */
137 bus_dma_tag_t sc_tdmatag; /* TX bus dma tag */
138 bus_dma_tag_t sc_cdmatag; /* control data bus dma tag */
139 bus_dmamap_t sc_dmamap; /* bus dma handle */
140 bus_space_handle_t sc_h; /* bus space handle for all regs */
141
142 int sc_phys[2]; /* MII instance -> PHY map */
143
144 int sc_mif_config; /* Selected MII reg setting */
145
146 int sc_pci; /* XXXXX -- PCI buses are LE. */
147 u_int sc_variant; /* which GEM are we dealing with? */
148 #define GEM_UNKNOWN 0 /* don't know */
149 #define GEM_SUN_GEM 1 /* Sun GEM variant */
150 #define GEM_APPLE_GMAC 2 /* Apple GMAC variant */
151
152 u_int sc_flags; /* */
153 #define GEM_GIGABIT 0x0001 /* has a gigabit PHY */
154
155 /*
156 * Ring buffer DMA stuff.
157 */
158 bus_dma_segment_t sc_cdseg; /* control data memory */
159 int sc_cdnseg; /* number of segments */
160 bus_dmamap_t sc_cddmamap; /* control data DMA map */
161 bus_addr_t sc_cddma;
162
163 /*
164 * Software state for transmit and receive descriptors.
165 */
166 struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
167 struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];
168
169 /*
170 * Control data structures.
171 */
172 struct gem_control_data *sc_control_data;
173 #define sc_txdescs sc_control_data->gcd_txdescs
174 #define sc_rxdescs sc_control_data->gcd_rxdescs
175
176 int sc_txfree; /* number of free Tx descriptors */
177 int sc_txnext; /* next ready Tx descriptor */
178 int sc_txwin; /* Tx descriptors since last Tx int */
179
180 struct gem_txsq sc_txfreeq; /* free Tx descsofts */
181 struct gem_txsq sc_txdirtyq; /* dirty Tx descsofts */
182
183 int sc_rxptr; /* next ready RX descriptor/descsoft */
184 int sc_rxfifosize; /* Rx FIFO size (bytes) */
185
186 /* ========== */
187 int sc_inited;
188 int sc_debug;
189 int sc_ifflags;
190 };
191
/*
 * Descriptor byte-order conversion: the chip uses little-endian
 * descriptors behind PCI bridges (sc_pci != 0) and big-endian
 * otherwise, per the XXXXX note on sc_pci above.
 */
#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))

/* Bus address of Tx/Rx descriptor x within the control-data clump. */
#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

/*
 * Sync the control-data DMA map.  Expands to a bare expression — no
 * trailing ';' (which broke unbraced if/else use) and no dangling '\'
 * continuation (which silently absorbed the following source line).
 * Callers supply their own semicolon as before.
 */
#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops))
/*
 * Reinitialize Rx descriptor x: rewind the mbuf's data pointer to the
 * start of its external buffer, point the descriptor at the buffer's
 * cached bus address, and hand ownership back to the chip (GEM_RD_OWN)
 * with the buffer size encoded in the flags word.  The sc argument is
 * parenthesized at every use, matching the other macros in this file.
 */
#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct gem_desc *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr);			\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
	    (((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)			\
	    & GEM_RD_BUFSIZE) | GEM_RD_OWN);				\
} while (0)
215
#ifdef _KERNEL
extern devclass_t gem_devclass;

/* Driver entry points shared by the PCI and SBus front-ends. */
int	gem_attach(struct gem_softc *);
void	gem_detach(struct gem_softc *);
void	gem_suspend(struct gem_softc *);
void	gem_resume(struct gem_softc *);
void	gem_intr(void *);

/* ifmedia change/status handlers. */
int	gem_mediachange(struct ifnet *);
void	gem_mediastatus(struct ifnet *, struct ifmediareq *);

/* Reset the chip. */
void	gem_reset(struct gem_softc *);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t, int, int);
int	gem_mii_writereg(device_t, int, int, int);
void	gem_mii_statchg(device_t);

#endif /* _KERNEL */
236
237
238 #endif