FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/hd64570.c
1 /* $NetBSD: hd64570.c,v 1.26 2004/02/24 15:05:54 wiz Exp $ */
2
3 /*
4 * Copyright (c) 1999 Christian E. Hopps
5 * Copyright (c) 1998 Vixie Enterprises
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of Vixie Enterprises nor the names
18 * of its contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
22 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * This software has been written for Vixie Enterprises by Michael Graff
36 * <explorer@flame.org>. To learn more about Vixie Enterprises, see
37 * ``http://www.vix.com''.
38 */
39
40 /*
41 * TODO:
42 *
43 * o teach the receive logic about errors, and about long frames that
44 * span more than one input buffer. (Right now, receive/transmit is
45 * limited to one descriptor's buffer space, which is MTU + 4 bytes.
46 * This is currently 1504, which is large enough to hold the HDLC
47 * header and the packet itself. Packets which are too long are
48 * silently dropped on transmit and silently dropped on receive.
49 * o write code to handle the msci interrupts, needed only for CD
50 * and CTS changes.
51 * o consider switching back to a "queue tx with DMA active" model which
52 * should help sustain outgoing traffic
53 * o through clever use of bus_dma*() functions, it should be possible
54 * to map the mbuf's data area directly into a descriptor transmit
55 * buffer, removing the need to allocate extra memory. If, however,
56 * we run out of descriptors for this, we will need to then allocate
57 * one large mbuf, copy the fragmented chain into it, and put it onto
58 * a single descriptor.
59 * o use bus_dmamap_sync() with the right offset and lengths, rather
60 * than cheating and always sync'ing the whole region.
61 *
62 * o perhaps allow rx and tx to be in more than one page
63 * if not using DMA. currently the assumption is that
64 * rx uses a page and tx uses a page.
65 */
66
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.26 2004/02/24 15:05:54 wiz Exp $");
69
70 #include "bpfilter.h"
71 #include "opt_inet.h"
72 #include "opt_iso.h"
73
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/device.h>
77 #include <sys/mbuf.h>
78 #include <sys/socket.h>
79 #include <sys/sockio.h>
80 #include <sys/kernel.h>
81
82 #include <net/if.h>
83 #include <net/if_types.h>
84 #include <net/netisr.h>
85
86 #if defined(INET) || defined(INET6)
87 #include <netinet/in.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/in_var.h>
90 #include <netinet/ip.h>
91 #ifdef INET6
92 #include <netinet6/in6_var.h>
93 #endif
94 #endif
95
96 #ifdef ISO
97 #include <net/if_llc.h>
98 #include <netiso/iso.h>
99 #include <netiso/iso_var.h>
100 #endif
101
102 #if NBPFILTER > 0
103 #include <net/bpf.h>
104 #endif
105
106 #include <machine/cpu.h>
107 #include <machine/bus.h>
108 #include <machine/intr.h>
109
110 #include <dev/pci/pcivar.h>
111 #include <dev/pci/pcireg.h>
112 #include <dev/pci/pcidevs.h>
113
114 #include <dev/ic/hd64570reg.h>
115 #include <dev/ic/hd64570var.h>
116
117 #define SCA_DEBUG_RX 0x0001
118 #define SCA_DEBUG_TX 0x0002
119 #define SCA_DEBUG_CISCO 0x0004
120 #define SCA_DEBUG_DMA 0x0008
121 #define SCA_DEBUG_RXPKT 0x0010
122 #define SCA_DEBUG_TXPKT 0x0020
123 #define SCA_DEBUG_INTR 0x0040
124 #define SCA_DEBUG_CLOCK 0x0080
125
126 #if 0
127 #define SCA_DEBUG_LEVEL ( 0xFFFF )
128 #else
129 #define SCA_DEBUG_LEVEL 0
130 #endif
131
/* Runtime-tunable mask of SCA_DEBUG_* bits controlling SCA_DPRINTF output. */
u_int32_t sca_debug = SCA_DEBUG_LEVEL;
133
134 #if SCA_DEBUG_LEVEL > 0
135 #define SCA_DPRINTF(l, x) do { \
136 if ((l) & sca_debug) \
137 printf x;\
138 } while (0)
139 #else
140 #define SCA_DPRINTF(l, x)
141 #endif
142
143 #if 0
144 #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */
145 #endif
146
147 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
148 static inline u_int8_t msci_read_1(sca_port_t *, u_int);
149
150 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
151 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
152 static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
153 static inline u_int16_t dmac_read_2(sca_port_t *, u_int);
154
155 static void sca_msci_init(struct sca_softc *, sca_port_t *);
156 static void sca_dmac_init(struct sca_softc *, sca_port_t *);
157 static void sca_dmac_rxinit(sca_port_t *);
158
159 static int sca_dmac_intr(sca_port_t *, u_int8_t);
160 static int sca_msci_intr(sca_port_t *, u_int8_t);
161
162 static void sca_get_packets(sca_port_t *);
163 static int sca_frame_avail(sca_port_t *);
164 static void sca_frame_process(sca_port_t *);
165 static void sca_frame_read_done(sca_port_t *);
166
167 static void sca_port_starttx(sca_port_t *);
168
169 static void sca_port_up(sca_port_t *);
170 static void sca_port_down(sca_port_t *);
171
172 static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
173 struct rtentry *));
174 static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
175 static void sca_start __P((struct ifnet *));
176 static void sca_watchdog __P((struct ifnet *));
177
178 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);
179
180 #if SCA_DEBUG_LEVEL > 0
181 static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
182 #endif
183
184
185 #define sca_read_1(sc, reg) (sc)->sc_read_1(sc, reg)
186 #define sca_read_2(sc, reg) (sc)->sc_read_2(sc, reg)
187 #define sca_write_1(sc, reg, val) (sc)->sc_write_1(sc, reg, val)
188 #define sca_write_2(sc, reg, val) (sc)->sc_write_2(sc, reg, val)
189
190 #define sca_page_addr(sc, addr) ((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
191
192 static inline void
193 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
194 {
195 sca_write_1(scp->sca, scp->msci_off + reg, val);
196 }
197
198 static inline u_int8_t
199 msci_read_1(sca_port_t *scp, u_int reg)
200 {
201 return sca_read_1(scp->sca, scp->msci_off + reg);
202 }
203
204 static inline void
205 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
206 {
207 sca_write_1(scp->sca, scp->dmac_off + reg, val);
208 }
209
210 static inline void
211 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
212 {
213 sca_write_2(scp->sca, scp->dmac_off + reg, val);
214 }
215
216 static inline u_int8_t
217 dmac_read_1(sca_port_t *scp, u_int reg)
218 {
219 return sca_read_1(scp->sca, scp->dmac_off + reg);
220 }
221
222 static inline u_int16_t
223 dmac_read_2(sca_port_t *scp, u_int reg)
224 {
225 return sca_read_2(scp->sca, scp->dmac_off + reg);
226 }
227
228 /*
229 * read the chain pointer
230 */
231 static inline u_int16_t
232 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
233 {
234 if (sc->sc_usedma)
235 return ((dp)->sd_chainp);
236 return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
237 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
238 }
239
240 /*
241 * write the chain pointer
242 */
243 static inline void
244 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
245 {
246 if (sc->sc_usedma)
247 (dp)->sd_chainp = cp;
248 else
249 bus_space_write_2(sc->scu_memt, sc->scu_memh,
250 sca_page_addr(sc, dp)
251 + offsetof(struct sca_desc, sd_chainp), cp);
252 }
253
254 /*
255 * read the buffer pointer
256 */
257 static inline u_int32_t
258 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
259 {
260 u_int32_t address;
261
262 if (sc->sc_usedma)
263 address = dp->sd_bufp | dp->sd_hbufp << 16;
264 else {
265 address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
266 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
267 address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
268 sca_page_addr(sc, dp)
269 + offsetof(struct sca_desc, sd_hbufp)) << 16;
270 }
271 return (address);
272 }
273
274 /*
275 * write the buffer pointer
276 */
277 static inline void
278 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
279 {
280 if (sc->sc_usedma) {
281 dp->sd_bufp = bufp & 0xFFFF;
282 dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
283 } else {
284 bus_space_write_2(sc->scu_memt, sc->scu_memh,
285 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
286 bufp & 0xFFFF);
287 bus_space_write_1(sc->scu_memt, sc->scu_memh,
288 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
289 (bufp & 0x00FF0000) >> 16);
290 }
291 }
292
293 /*
294 * read the buffer length
295 */
296 static inline u_int16_t
297 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
298 {
299 if (sc->sc_usedma)
300 return ((dp)->sd_buflen);
301 return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
302 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
303 }
304
305 /*
306 * write the buffer length
307 */
308 static inline void
309 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
310 {
311 if (sc->sc_usedma)
312 (dp)->sd_buflen = len;
313 else
314 bus_space_write_2(sc->scu_memt, sc->scu_memh,
315 sca_page_addr(sc, dp)
316 + offsetof(struct sca_desc, sd_buflen), len);
317 }
318
319 /*
320 * read the descriptor status
321 */
322 static inline u_int8_t
323 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
324 {
325 if (sc->sc_usedma)
326 return ((dp)->sd_stat);
327 return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
328 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
329 }
330
331 /*
332 * write the descriptor status
333 */
334 static inline void
335 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
336 {
337 if (sc->sc_usedma)
338 (dp)->sd_stat = stat;
339 else
340 bus_space_write_1(sc->scu_memt, sc->scu_memh,
341 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
342 stat);
343 }
344
/*
 * One-time chip-global initialization of the SCA: sanity-check the
 * configured port count, quiesce all DMA and interrupt sources, and
 * program the interrupt and wait-state configuration.  Per-port setup
 * is done later by sca_port_attach().
 */
void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check: check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can\'t handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system: MSCI priority, no interrupt
	 * acknowledge cycle, vector output taken from the IVR
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels (DSR cleared, abort set)
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}
409
410 /*
411 * initialize the port and attach it to the networking layer
412 */
413 void
414 sca_port_attach(struct sca_softc *sc, u_int port)
415 {
416 sca_port_t *scp = &sc->sc_ports[port];
417 struct ifnet *ifp;
418 static u_int ntwo_unit = 0;
419
420 scp->sca = sc; /* point back to the parent */
421
422 scp->sp_port = port;
423
424 if (port == 0) {
425 scp->msci_off = SCA_MSCI_OFF_0;
426 scp->dmac_off = SCA_DMAC_OFF_0;
427 if(sc->sc_parent != NULL)
428 ntwo_unit=sc->sc_parent->dv_unit * 2 + 0;
429 else
430 ntwo_unit = 0; /* XXX */
431 } else {
432 scp->msci_off = SCA_MSCI_OFF_1;
433 scp->dmac_off = SCA_DMAC_OFF_1;
434 if(sc->sc_parent != NULL)
435 ntwo_unit=sc->sc_parent->dv_unit * 2 + 1;
436 else
437 ntwo_unit = 1; /* XXX */
438 }
439
440 sca_msci_init(sc, scp);
441 sca_dmac_init(sc, scp);
442
443 /*
444 * attach to the network layer
445 */
446 ifp = &scp->sp_if;
447 sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
448 ifp->if_softc = scp;
449 ifp->if_mtu = SCA_MTU;
450 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
451 ifp->if_type = IFT_PTPSERIAL;
452 ifp->if_hdrlen = HDLC_HDRLEN;
453 ifp->if_ioctl = sca_ioctl;
454 ifp->if_output = sca_output;
455 ifp->if_watchdog = sca_watchdog;
456 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
457 scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
458 #ifdef SCA_USE_FASTQ
459 scp->fastq.ifq_maxlen = IFQ_MAXLEN;
460 #endif
461 IFQ_SET_READY(&ifp->if_snd);
462 if_attach(ifp);
463 if_alloc_sadl(ifp);
464
465 #if NBPFILTER > 0
466 bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
467 #endif
468
469 if (sc->sc_parent == NULL)
470 printf("%s: port %d\n", ifp->if_xname, port);
471 else
472 printf("%s at %s port %d\n",
473 ifp->if_xname, sc->sc_parent->dv_xname, port);
474
475 /*
476 * reset the last seen times on the cisco keepalive protocol
477 */
478 scp->cka_lasttx = time.tv_usec;
479 scp->cka_lastrx = 0;
480 }
481
482 #if 0
483 /*
484 * returns log2(div), sets 'tmc' for the required freq 'hz'
485 */
486 static u_int8_t
487 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
488 {
489 u_int32_t tmc, div;
490 u_int32_t clock;
491
492 /* clock hz = (chipclock / tmc) / 2^(div); */
493 /*
494 * TD == tmc * 2^(n)
495 *
496 * note:
497 * 1 <= TD <= 256 TD is inc of 1
498 * 2 <= TD <= 512 TD is inc of 2
499 * 4 <= TD <= 1024 TD is inc of 4
500 * ...
501 * 512 <= TD <= 256*512 TD is inc of 512
502 *
503 * so note there are overlaps. We lose prec
504 * as div increases so we wish to minize div.
505 *
506 * basically we want to do
507 *
508 * tmc = chip / hz, but have tmc <= 256
509 */
510
511 /* assume system clock is 9.8304MHz or 9830400Hz */
512 clock = clock = 9830400 >> 1;
513
514 /* round down */
515 div = 0;
516 while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
517 clock >>= 1;
518 div++;
519 }
520 if (clock / tmc > hz)
521 tmc++;
522 if (!tmc)
523 tmc = 1;
524
525 if (div > SCA_RXS_DIV_512) {
526 /* set to maximums */
527 div = SCA_RXS_DIV_512;
528 tmc = 0;
529 }
530
531 *tmcp = (tmc & 0xFF); /* 0 == 256 */
532 return (div & 0xFF);
533 }
534 #endif
535
/*
 * initialize the port's MSCI
 *
 * Programs the serial controller channel for HDLC mode with CCITT
 * CRC, full-duplex NRZ operation, sets the clock sources from the
 * per-port settings, and enables the transmit interrupt sources the
 * driver uses.  Register ordering follows the reset/configure/reset
 * sequence required by the chip, so do not reorder writes.
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	/* mode 0: HDLC framing, single CCITT CRC appended to frames */
	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	/* mode 1: no address checking on received frames */
	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	/* mode 2: full duplex, ADPLL x8 sampling, NRZ encoding */
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources from the values configured for this port
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	/* clear the station address registers */
	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	/* idle pattern 0x7e (the HDLC flag byte) */
	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * the correct values here are important for avoiding underruns
	 * for any value less than or equal to TRC0 txrdy is activated
	 * which will start the dmac transfer to the fifo.
	 * for buffer size >= TRC1 + 1 txrdy is cleared which will stop DMA.
	 *
	 * thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns so we want a fairly full fifo to still
	 * cause the dmac to start.  for cards with on board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}
622
/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 *
 * Also programs the transmit DMAC's mode, interrupt and chain-pointer
 * base registers, and finishes by (re)initializing the receive DMA
 * logic via sca_dmac_rxinit().
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architecural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architecural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architecural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architecural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
753
/*
 * reset and reinitialize the receive DMA logic
 *
 * Disables the receive DMAC, resets the software ring indices, loads
 * the chain-pointer base / current / error descriptor addresses, and
 * re-enables receive DMA with its interrupt sources.
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	/* per-buffer receive length */
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}
793
/*
 * Queue the packet for our start routine to transmit
 *
 * if_output handler: prepends a cisco HDLC header appropriate to the
 * destination address family, enqueues the mbuf (optionally on the
 * low-delay fast queue), and kicks sca_start().  rt0 is accepted for
 * the if_output signature but unused.  Returns 0 or an errno;
 * consumes the mbuf on both success and failure.
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		/* low-delay TOS packets go on the fast queue */
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish the HDLC header: address and reserved fields */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 * Save flags/length first: the mbuf is not ours once enqueued.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
925
/*
 * Interface ioctl handler: brings the port up/down on address
 * assignment and flag changes, and accepts (but does not otherwise
 * manage) multicast group requests for the supported address
 * families.  Runs at splnet().  Returns 0 or an errno.
 */
static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	/* addr is interpreted per-command; alias it both ways up front */
	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			/* supported family: mark up and start the port */
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT; /* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}
1015
/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 *
 * Drains the link-management queue (and optionally the fast queue)
 * ahead of the regular send queue, copying each packet into a
 * transmit descriptor buffer.  Packets longer than one descriptor
 * buffer (SCA_BSIZE) are silently dropped.  When at least one packet
 * was loaded, the final descriptor gets EOT set and the DMAC is
 * kicked via sca_port_starttx().
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	/* link-management packets first, then (fast queue), then if_snd */
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	/* pick the next free descriptor slot in the ring */
	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				/* too long for one buffer: silently drop */
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	/* the data has been copied out; the mbuf chain is ours to free */
	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}
1193
/*
 * Interface watchdog hook.
 *
 * Intentionally a no-op: transmit completion and stuck-transmitter
 * recovery are handled from the TX DMA status interrupt path
 * (sca_dmac_intr resets sp_txcur/sp_txinuse on COF/BOF/EOT), so there
 * is nothing for a timer-driven watchdog to do.  The function exists
 * only so the ifnet watchdog pointer has a valid target.
 *
 * Converted from the old K&R-style definition to an ANSI prototype
 * for consistency with the rest of this file; no behavior change.
 */
static void
sca_watchdog(struct ifnet *ifp)
{
}
1199
/*
 * Main hardware interrupt handler.
 *
 * Reads the three SCA interrupt status registers and dispatches each
 * pending source to the per-port DMAC and MSCI handlers, looping until
 * all three status registers read back zero.
 *
 * Returns non-zero if at least one interrupt source was processed, so
 * the bus-level interrupt glue can tell whether the interrupt was ours.
 */
int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;	/* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		/* nothing pending anywhere -- we are done */
		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt: ISR1 low nibble is port 0,
		 * high nibble is port 1
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * MSCI interrupts: same nibble split on ISR0
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
					     (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}
1254
/*
 * Handle a DMAC interrupt for one port.
 *
 * "isr" is this port's nibble of ISR1, already shifted so the port-0
 * bit definitions apply.  The transmit and receive channels are
 * serviced independently; for each, DSR is read and written back with
 * SCA_DSR_DEWD set (acknowledging the interrupt, per the original
 * in-line comment) until no interesting status bits remain.
 *
 * Returns the number of DSR service iterations performed (non-zero
 * means we did some work).
 */
static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("TX INTERRUPT port %d\n", scp->sp_port));

		/* dummy value so the loop is entered at least once */
		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow -- recover by
			 * resetting the software TX ring state
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
					    ("Transmit completed. cda %x eda %x dsr %x\n",
					     dmac_read_2(scp, SCA_CDAL1),
					     dmac_read_2(scp, SCA_EDAL1),
					     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
			    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		/* dummy value so the loop is entered at least once */
		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame -- pull completed packets off
			 * the receive ring
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow -- reinitialize
			 * the receive DMA ring from scratch
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}
1400
/*
 * Handle an MSCI (serial controller) interrupt for one port.
 *
 * Only transmit underrun (ST1 UDRN) is acted upon: the transmit ready
 * control threshold (TRC0) is raised by two, capped at 0x1f, to make
 * another underrun less likely.  All other ST1 conditions are cleared
 * and otherwise ignored.
 *
 * NOTE(review): always returns 0, so sca_hardintr's "ret" never counts
 * MSCI work -- presumably intentional, but worth confirming against
 * the bus glue's shared-interrupt expectations.
 */
static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt by writing the status back */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				/* bump threshold by 2, clamp at the maximum */
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
					    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}
1427
/*
 * Drain all completed receive frames from this port's descriptor ring.
 *
 * Brackets the work with the appropriate bus access setup: a DMA map
 * sync in DMA mode, or turning on the shared-memory page window and
 * selecting the RX descriptor page otherwise.  Each available frame is
 * processed (handed up the stack) and its descriptor retired.
 */
static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
				0, sc->scu_allocsize,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	/* undo the access setup from above */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
				0, sc->scu_allocsize,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
1462
/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * Returns 1 when sp_rxstart points at the first descriptor of a
 * complete, error-free frame; 0 when no such frame is available.
 * Errored and overlong frames are consumed (descriptors retired via
 * sca_frame_read_done) and counted in if_ierrors along the way.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.  CDAL0 holds only
	 * the low 16 bits of the current descriptor address.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor by taking its
	 * byte offset from the start of the ring
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
		    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
		     cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion -- hardware pointer outside our ring */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	/* sca_frame_read_done() advances sp_rxstart each iteration */
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
			    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
			    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
			    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}
1552
/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * The frame at sp_rxstart is copied into a freshly allocated mbuf and
 * dispatched on its HDLC protocol field: IP/IPv6/ISO frames are queued
 * for the matching netisr, CISCO keepalives are answered in place, and
 * anything else is counted and dropped.  The caller (sca_get_packets)
 * retires the descriptor afterwards.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	/*
	 * uptime scaled by 1000 (one-second resolution only); used to
	 * stamp CISCO keepalive replies below
	 */
	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
		    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
		     (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	/* copy the frame out of the (possibly shared-memory) rx buffer */
	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 * (m_pullup frees the chain on failure)
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	/* tap the frame, HDLC header included */
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		/* strip the HDLC header before handing to the IP input queue */
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		/* ISO frames carry the larger LLC header -- re-check length */
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.  The
			 * received packet is rewritten in place into
			 * the keepalive reply and queued for output.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet on the intr queue selected above */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}
1751
#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 *
 * Debug-only.  Leading zero bytes are suppressed; at most 256 bytes
 * are dumped, 16 per line.  Reads go through bus_space when the board
 * is in shared-memory (non-DMA) mode.
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;	/* still skipping the leading zero run */
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		     : bus_space_read_1(sc->scu_memt, sc->scu_memh,
					sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		/* newline before each group of 16 bytes */
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
					   sca_page_addr(sc, p))));
		p++;
	}

	/*
	 * NOTE(review): "!= 1" looks like it was meant to be "!= 0"
	 * (emit a trailing newline unless one was just printed) --
	 * harmless either way in debug output, but worth confirming.
	 */
	if (i % 16 != 1)
		printf("\n");
}
#endif
1795
/*
 * adjust things because we have just read the current starting
 * frame
 *
 * Advances sp_rxstart to the next ring slot and hands the slot just
 * consumed back to the hardware by making it the end (error stop)
 * descriptor, so the DMAC never wraps onto unprocessed frames.
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor (low 16 bits of its address) */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}
1816
/*
 * set a port to the "up" state
 *
 * Marks the interface running, raises the modem control signals
 * (DTR via the board callback, RTS via the MSCI control register),
 * unmasks this port's MSCI and DMAC interrupts, enables the
 * transmitter and receiver, and resets the software TX ring and
 * keepalive state.
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	/* unmask the whole nibble for this port in IER0/IER1 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state; cka_lasttx is seeded from the clock so
	 * keepalive sequence numbers differ across re-ups
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}
1891
/*
 * set a port to the "down" state
 *
 * Mirror image of sca_port_up(): drops DTR and RTS, masks this port's
 * interrupt nibble in IER0/IER1, disables the receiver and
 * transmitter, and clears the interface running/active flags.
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	/* clear this port's nibble, keep the other port's bits intact */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}
1950
/*
 * disable all DMA and interrupts for all ports at once.
 *
 * Used on shutdown/detach: clears the master DMA enable and both
 * interrupt enable registers so the chip cannot touch memory or raise
 * interrupts afterwards.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}
1964
/*
 * If there are packets to transmit, start the transmit DMA logic.
 *
 * No-op when the port is already marked active or nothing is queued.
 * Otherwise marks the port active, programs the DMAC's current (CDAL1)
 * and end (EDAL1) descriptor addresses -- low 16 bits only -- to cover
 * everything queued so far, and sets DSR_DE to kick off the transfer.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 * The end descriptor is one past the last in-use slot.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
		    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}
2004
/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 *
 * Returns NULL if the mbuf (or required cluster) cannot be allocated,
 * or if len exceeds MCLBYTES.  The caller owns (and must free) the
 * returned mbuf.  When "p" points into shared card memory (non-DMA
 * mode) the copy goes through bus_space.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		/* too big for any backing store we have */
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, caddr_t), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}
2047
/*
 * get the base clock
 *
 * Measures the SCA's base clock frequency empirically: runs timer 0
 * free (no prescale, no interrupt) for roughly 3/4 of a wall-clock
 * second as measured by microtime(), counts completed 0xffff rollovers
 * plus the residual count, then scales the tick total to Hz and stores
 * it (rounded to the nearest 200 Hz) in sc->sc_baseclock.
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;	/* number of full 0xffff periods observed */
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin until the timer signals a compare match */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer (reading the count clears the match) */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz (we measured 3/4 s, so scale by 4/3) */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
		    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}
2116
2117 /*
2118 * print the information about the clock on the ports
2119 */
2120 void
2121 sca_print_clock_info(struct sca_softc *sc)
2122 {
2123 struct sca_port *scp;
2124 u_int32_t mhz, div;
2125 int i;
2126
2127 printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
2128 sc->sc_baseclock);
2129
2130 /* print the information about the port clock selection */
2131 for (i = 0; i < sc->sc_numports; i++) {
2132 scp = &sc->sc_ports[i];
2133 mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
2134 div = scp->sp_rxs & SCA_RXS_DIV_MASK;
2135
2136 printf("%s: rx clock: ", scp->sp_if.if_xname);
2137 switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
2138 case SCA_RXS_CLK_LINE:
2139 printf("line");
2140 break;
2141 case SCA_RXS_CLK_LINE_SN:
2142 printf("line with noise suppression");
2143 break;
2144 case SCA_RXS_CLK_INTERNAL:
2145 printf("internal %d Hz", (mhz >> div));
2146 break;
2147 case SCA_RXS_CLK_ADPLL_OUT:
2148 printf("adpll using internal %d Hz", (mhz >> div));
2149 break;
2150 case SCA_RXS_CLK_ADPLL_IN:
2151 printf("adpll using line clock");
2152 break;
2153 }
2154 printf(" tx clock: ");
2155 div = scp->sp_txs & SCA_TXS_DIV_MASK;
2156 switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
2157 case SCA_TXS_CLK_LINE:
2158 printf("line\n");
2159 break;
2160 case SCA_TXS_CLK_INTERNAL:
2161 printf("internal %d Hz\n", (mhz >> div));
2162 break;
2163 case SCA_TXS_CLK_RXCLK:
2164 printf("rxclock\n");
2165 break;
2166 }
2167 if (scp->sp_eclock)
2168 printf("%s: outputting line clock\n",
2169 scp->sp_if.if_xname);
2170 }
2171 }
2172
Cache object: 11c5130094fe564e0f24cd7294298f7f
|