/*	$NetBSD: if_le_ioasic.c,v 1.21 2002/10/02 16:53:03 thorpej Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * LANCE on DEC IOASIC (the baseboard I/O control ASIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.21 2002/10/02 16:53:03 thorpej Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match __P((struct device *, struct cfdata *, void *));
static void le_ioasic_attach __P((struct device *, struct device *, void *));
CFATTACH_DECL(le_ioasic, sizeof(struct le_ioasic_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);

static void le_ioasic_copytobuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap2 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copytobuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_copyfrombuf_gap16 __P((struct lance_softc *, void *,
	    int, int));
static void le_ioasic_zerobuf_gap16 __P((struct lance_softc *, int, int));

static int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;
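
	/*
	 * TC ROM module names are TC_ROM_LLEN (8) characters, padded
	 * with trailing blanks, hence the "PMAD-BA " string below.
	 */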
	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs a 128KB chunk aligned on a 128KB boundary */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
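
/*
 * The 128KB figure is consistent with the gap layouts below:
 * presumably the chip-visible 64KB of buffer doubles to 128KB of host
 * memory once the pad bytes are interleaved.  (An inference from the
 * code, not from chip documentation.)
 */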

static void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		/* don't leak the map created above */
		bus_dmamap_destroy(dmat, sc->sc_dmamap);
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);
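
	/*
	 * From here on the LANCE's DMA goes through this 128KB region:
	 * the DMAPTR register holds the (converted) base address and
	 * the IOASIC_CSR_DMAEN_LANCE bit gates the chip's DMA path.
	 */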

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}
/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips: the IOASIC interleaves pad bytes between the bytes the chip
 * sees, so chip buffer offsets do not map 1:1 onto host memory.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * writing one extra byte past the requested length when len is odd.
 */
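
/*
 * Worked example of the layout (derived from the code below): chip
 * buffer byte boff lives at host u_int16_t index boff, because each
 * pair of data bytes is followed by a pair of pad bytes:
 *
 *	host byte:	0  1  2  3  4  5  6  7  8  9 ...
 *	contents:	d0 d1 -- -- d2 d3 -- -- d4 d5 ...
 *
 * e.g. chip byte 2 (d2) sits at host byte 4 = 2 * sizeof(u_int16_t),
 * which is why the loops below index u_int16_t pointers by boff and
 * step them by 2 (4 host bytes) for every 2 data bytes.
 */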

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
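
/*
 * Worked example (an illustrative sketch, not used by the driver):
 * chip buffer byte boff lives at host byte
 *
 *	((boff << 1) & ~0x1f) + (boff & 0xf)
 *
 * since every 16 chip bytes occupy 32 host bytes.  For boff = 21:
 * (21 << 1) & ~0x1f = 32 and 21 & 0xf = 5, so host byte 37, i.e.
 * byte 5 of the second 32-byte group.  A hypothetical helper:
 */
#if 0
static caddr_t
le_ioasic_gap16_addr(caddr_t buf, int boff)
{

	/* group base (steps of 32) plus offset within the data half */
	return buf + ((boff << 1) & ~0x1f) + (boff & 0xf);
}
#endif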

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct lance_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
	switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
	case 2:
		/* Ethernet headers make this the dominant case. */
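		/*
		 * Note: the word merges below assume a little-endian
		 * CPU (true of both the MIPS and Alpha machines that
		 * carry an IOASIC).
		 */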
		do {
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int16_t t0;
			u_int32_t t1, t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t *)from;
			t1 = *(u_int32_t *)(from + 2);
			t2 = *(u_int32_t *)(from + 6);
			t3 = *(u_int32_t *)(from + 10);
			t4 = *(u_int16_t *)(from + 14);

			/* DMA buffer is uncached on mips */
			dst[0] = t0 | (t1 << 16);
			dst[1] = (t1 >> 16) | (t2 << 16);
			dst[2] = (t2 >> 16) | (t3 << 16);
			dst[3] = (t3 >> 16) | (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	case 0:
		do {
			u_int32_t *src = (u_int32_t *)from;
			u_int32_t *dst = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

	default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct lance_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so source of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet
		 * headers make this the dominant case (98% or more).
		 */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *)(to + 0) = (u_short)t0;
			*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
			*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
			*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
			*(u_int16_t *)(to + 14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned copy.  Rare. */
		do {
			u_int32_t *src = (u_int32_t *)bptr;
			u_int32_t *dst = (u_int32_t *)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len > 16);
		break;
	}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct lance_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
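
/*
 * None of the routines above are called directly from this file: the
 * MI lance/am7990 code reaches them through the sc_copytodesc,
 * sc_copyfromdesc, sc_copytobuf, sc_copyfrombuf and sc_zerobuf hooks
 * initialized in le_ioasic_attach().
 */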