FreeBSD/Linux Kernel Cross Reference
sys/dev/qbus/if_dmc.c
1 /* $NetBSD: if_dmc.c,v 1.7 2003/08/07 16:31:14 agc Exp $ */
2 /*
3 * Copyright (c) 1982, 1986 Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * @(#)if_dmc.c 7.10 (Berkeley) 12/16/90
31 */
32
33 /*
34 * DMC11 device driver, internet version
35 *
36 * Bill Nesheim
37 * Cornell University
38 *
39 * Lou Salkind
40 * New York University
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.7 2003/08/07 16:31:14 agc Exp $");
45
46 #undef DMCDEBUG /* for base table dump on fatal error */
47
48 #include "opt_inet.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/ioctl.h>
54 #include <sys/socket.h>
55 #include <sys/syslog.h>
56 #include <sys/device.h>
57
58 #include <net/if.h>
59 #include <net/netisr.h>
60
61 #ifdef INET
62 #include <netinet/in.h>
63 #include <netinet/in_var.h>
64 #endif
65
66 #include <machine/bus.h>
67
68 #include <dev/qbus/ubareg.h>
69 #include <dev/qbus/ubavar.h>
70 #include <dev/qbus/if_uba.h>
71
72 #include <dev/qbus/if_dmcreg.h>
73
74
/*
 * output timeout value, sec.; should depend on line speed.
 */
static int dmc_timeout = 20;

/* Buffer pool sizes: 7 receive, 3 transmit, plus slack for control cmds. */
#define NRCV 7
#define NXMT 3
#define NCMDS (NRCV+NXMT+4) /* size of command queue */

/*
 * CSR access shorthands.  All of these expand references to a local
 * `sc' (struct dmc_softc *) that must be in scope at the use site.
 */
#define DMC_WBYTE(csr, val) \
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_WWORD(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define DMC_RBYTE(csr) \
	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
#define DMC_RWORD(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)


#ifdef DMCDEBUG
#define printd if(dmcdebug)printf
int dmcdebug = 0;
#endif

/* error reporting intervals: log every Nth occurrence of each soft error */
#define DMC_RPNBFS 50
#define DMC_RPDSC 1
#define DMC_RPTMO 10
#define DMC_RPDCK 10
104
/* One pending request for the DMC command port (see dmcload/dmcrint). */
struct dmc_command {
	char qp_cmd;		/* command */
	short qp_ubaddr;	/* buffer address */
	short qp_cc;		/* character count || XMEM */
	struct dmc_command *qp_next;	/* next command on queue */
};

/* Software bookkeeping for one receive or transmit buffer. */
struct dmcbufs {
	int ubinfo;		/* from uballoc */
	short cc;		/* buffer size */
	short flags;		/* access control */
};
/* dmcbufs flags values */
#define DBUF_OURS	0	/* buffer is available */
#define DBUF_DMCS	1	/* buffer claimed by somebody */
#define DBUF_XMIT	4	/* transmit buffer */
#define DBUF_RCV	8	/* receive buffer */
122
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface structures
 * for each, which
 * contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
	struct device sc_dev;		/* Configuration common part */
	struct ifnet sc_if;		/* network-visible interface */
	short sc_oused;			/* output buffers currently in use */
	short sc_iused;			/* input buffers given to DMC */
	short sc_flag;			/* flags (DMC_RUNNING etc., below) */
	struct ubinfo sc_ui;		/* UBA mapping info for base table */
	int sc_errors[4];		/* non-fatal error counters */
	bus_space_tag_t sc_iot;
	bus_addr_t sc_ioh;
	bus_dma_tag_t sc_dmat;
	struct evcnt sc_rintrcnt;	/* Interrupt counting */
	struct evcnt sc_tintrcnt;	/* Interrupt counting */
/* Aliases for the individual soft-error counters in sc_errors[]. */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc sc_errors[3]
	struct dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct ifubinfo sc_ifuba;	/* UNIBUS resources */
	struct ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
	struct ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
	/* command queue stuff */
	struct dmc_command sc_cmdbuf[NCMDS];
	struct dmc_command *sc_qhead;	/* head of command queue */
	struct dmc_command *sc_qtail;	/* tail of command queue */
	struct dmc_command *sc_qactive;	/* command in progress */
	struct dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
	struct dmc_base {
		short d_base[128];	/* DMC base table */
	} dmc_base;
};
170
/* Autoconfiguration glue and interface entry points. */
static int dmcmatch(struct device *, struct cfdata *, void *);
static void dmcattach(struct device *, struct device *, void *);
static int dmcinit(struct ifnet *);
static void dmcrint(void *);
static void dmcxint(void *);
static void dmcdown(struct dmc_softc *sc);
static void dmcrestart(struct dmc_softc *);
static void dmcload(struct dmc_softc *, int, u_short, u_short);
static void dmcstart(struct ifnet *);
static void dmctimeout(struct ifnet *);
static int dmcioctl(struct ifnet *, u_long, caddr_t);
static int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	struct rtentry *);
static void dmcreset(struct device *);

CFATTACH_DECL(dmc, sizeof(struct dmc_softc),
	dmcmatch, dmcattach, NULL, NULL);

/* flags (values for sc_flag) */
#define DMC_RUNNING	0x01	/* device initialized */
#define DMC_BMAPPED	0x02	/* base table mapped */
#define DMC_RESTART	0x04	/* software restart in progress */
#define DMC_ONLINE	0x08	/* device running (had a RDYO) */
194
195
/*
 * queue manipulation macros.
 * Multi-statement and NOT wrapped in do { } while (0); callers must
 * brace them if used under an unbraced if/else (existing call sites
 * either brace them or use them as plain statements).
 */
#define QUEUE_AT_HEAD(qp, head, tail) \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head)

#define QUEUE_AT_TAIL(qp, head, tail) \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp)

#define DEQUEUE(head, tail) \
	(head) = (head)->qp_next;\
	if ((head) == (struct dmc_command *) 0)\
		(tail) = (head)
215
/*
 * Probe: master-clear the device and check that the RUN bit comes up.
 * Also fires off an input request with interrupts enabled so the
 * bus autoconfiguration code can discover the interrupt vector.
 */
int
dmcmatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct uba_attach_args *ua = aux;
	/* temporary softc on the stack so the DMC_* CSR macros work */
	struct dmc_softc ssc;
	struct dmc_softc *sc = &ssc;
	int i;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;

	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	/* busy-wait (bounded) for the microprocessor to come out of MCLR */
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request an input transfer with interrupts on, to fire the vector */
	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
	/* let's be paranoid */
	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
	DELAY(1000000);
	/* clear the device again so attach starts from a known state */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	return (1);
}
243
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
dmcattach(struct device *parent, struct device *self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct dmc_softc *sc = (struct dmc_softc *)self;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/* fill in the ifnet; dmcinit is called later via if_init */
	strcpy(sc->sc_if.if_xname, sc->sc_dev.dv_xname);
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_watchdog = dmctimeout;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_if.if_softc = sc;
	IFQ_SET_READY(&sc->sc_if.if_snd);

	/* two vectors: receive (ready-in) at cvec, transmit at cvec+4 */
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
	    &sc->sc_rintrcnt);
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
	    &sc->sc_tintrcnt);
	uba_reset_establish(dmcreset, &sc->sc_dev);
	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");
	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    sc->sc_dev.dv_xname, "intr");

	if_attach(&sc->sc_if);
}
281
282 /*
283 * Reset of interface after UNIBUS reset.
284 * If interface is on specified UBA, reset its state.
285 */
286 void
287 dmcreset(struct device *dev)
288 {
289 struct dmc_softc *sc = (struct dmc_softc *)dev;
290
291 sc->sc_flag = 0;
292 sc->sc_if.if_flags &= ~IFF_RUNNING;
293 dmcinit(&sc->sc_if);
294 }
295
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 * Maps the base table, allocates UNIBUS buffers on first call,
 * resets the command queues, and hands all receive buffers to the
 * device.  Returns 0 in all cases (errors clear IFF_UP instead).
 */
int
dmcinit(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct ifrw *ifrw;
	struct ifxmt *ifxp;
	struct dmcbufs *rp;
	struct dmc_command *qp;
	struct ifaddr *ifa;
	struct cfdata *ui = sc->sc_dev.dv_cfdata;
	int base;
	int s;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)
		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
			break;
	if (ifa == (struct ifaddr *) 0)
		return 0;

	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return 0;
	}
	/* map base table into UNIBUS space (once) */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ui.ui_size = sizeof(struct dmc_base);
		sc->sc_ui.ui_vaddr = (caddr_t)&sc->dmc_base;
		uballoc((void *)sc->sc_dev.dv_parent, &sc->sc_ui, 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (if_ubaminit(&sc->sc_ifuba, (void *)sc->sc_dev.dv_parent,
		    sizeof(struct dmc_header) + DMCMTU,
		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
			printf("%s: can't allocate uba resources\n",
			    sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_UP;
			return 0;
		}
		ifp->if_flags |= IFF_RUNNING;
	}
	sc->sc_flag &= ~DMC_ONLINE;
	sc->sc_flag |= DMC_RUNNING;
	/*
	 * Limit packets enqueued until we see if we're on the air.
	 * (dmcxint opens the queue back up on the first completed write.)
	 */
	ifp->if_snd.ifq_maxlen = 3;

	/* initialize buffer pool */
	/* receives */
	ifrw = &sc->sc_ifr[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifw[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->ifw_info;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
	 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in: tell the device where its base table lives */
	base = sc->sc_ui.ui_baddr;
	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->cf_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->cf_flags == 1)
		/* use MAINTENENCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->cf_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->cf_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
	s = splnet();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
	return 0;
}
413
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
void
dmcstart(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	struct dmcbufs *rp;
	int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	/* n tracks rp's index so sc_xbufs[n] and sc_ifw[n] stay paired */
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
			rp->cc &= DMC_CCOUNT;
			/* arm the watchdog when the first buffer goes out */
			if (++sc->sc_oused == 1)
				sc->sc_if.if_timer = dmc_timeout;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}
456
/*
 * Utility routine to load the DMC device registers.
 * Grabs a free command slot, fills it in, and either queues it
 * (command port busy) or starts it immediately via dmcrint.
 * Reads jump the queue so receive buffers are replenished first.
 */
void
dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
{
	struct dmc_command *qp;
	int sps;

	sps = splnet();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
		dmcrint(sc);
	}
	splx(sps);
}
491
492 /*
493 * DMC interface receiver interrupt.
494 * Ready to accept another command,
495 * pull one off the command queue.
496 */
497 void
498 dmcrint(void *arg)
499 {
500 struct dmc_softc *sc = arg;
501 struct dmc_command *qp;
502 int n;
503
504 if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
505 printf("%s: dmcrint no command\n", sc->sc_dev.dv_xname);
506 return;
507 }
508 while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
509 DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
510 DMC_WWORD(DMC_SEL6, qp->qp_cc);
511 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
512 /* free command buffer */
513 QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
514 while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
515 /*
516 * Can't check for RDYO here 'cause
517 * this routine isn't reentrant!
518 */
519 DELAY(5);
520 }
521 /* move on to next command */
522 if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
523 break; /* all done */
524 /* more commands to do, start the next one */
525 qp = sc->sc_qactive;
526 DEQUEUE(sc->sc_qhead, sc->sc_qtail);
527 DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
528 n = RDYSCAN;
529 while (n-- > 0)
530 if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
531 (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
532 break;
533 }
534 if (sc->sc_qactive) {
535 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
536 /* VMS does it twice !*$%@# */
537 DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & (DMC_IEI|DMC_RQI));
538 }
539
540 }
541
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
void
dmcxint(void *a)
{
	struct dmc_softc *sc = a;

	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len, s;
	struct ifrw *ifrw;
	struct dmcbufs *rp;
	struct ifxmt *ifxp;
	struct dmc_header *dh;
	char buf[64];

	ifp = &sc->sc_if;

	/* drain every completion the device has posted (RDYO set) */
	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {

		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
		/* release port */
		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw= &sc->sc_ifr[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("%s: done unalloc rbuf\n",
				    sc->sc_dev.dv_xname);

			/* arg low bits carry the received byte count */
			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
#ifdef DMCDEBUG
				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr, len);
#endif
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
			if (m == 0)
				goto setup;
			/* Shave off dmc_header */
			m_adj(m, sizeof(struct dmc_header));
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			s = splnet();
			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);
			splx(s);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info;

			/* hand the buffer back to the device for another read */
			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifw[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("%s: bad packet address 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("%s: unallocated packet 0x%x\n",
				    sc->sc_dev.dv_xname, pkaddr);
			/* mark buffer free */
			if_ubaend(&sc->sc_ifuba, ifxp);
			rp->flags &= ~DBUF_DMCS;
			/* rearm or cancel the output watchdog */
			if (--sc->sc_oused == 0)
				sc->sc_if.if_timer = 0;
			else
				sc->sc_if.if_timer = dmc_timeout;
			if ((sc->sc_flag & DMC_ONLINE) == 0) {
				extern int ifqmaxlen;

				/*
				 * We're on the air.
				 * Open the queue to the usual value.
				 */
				sc->sc_flag |= DMC_ONLINE;
				ifp->if_snd.ifq_maxlen = ifqmaxlen;
			}
			break;

		case DMC_CNTLO:
			/* control-out: fatal errors restart, others counted */
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				if (arg != DMC_START) {
					bitmask_snprintf(arg, CNTLO_BITS,
					    buf, sizeof(buf));
					log(LOG_ERR,
					    "%s: fatal error, flags=%s\n",
					    sc->sc_dev.dv_xname, buf);
				}
				dmcrestart(sc);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
#ifdef DMCDEBUG
			bitmask_snprintf(arg, CNTLO_BITS, buf, sizeof(buf));
			printd("%s: soft error, flags=%s\n",
			    sc->sc_dev.dv_xname, buf);
#endif
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ui.ui_baddr;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("%s: bad control %o\n",
			    sc->sc_dev.dv_xname, cmd);
			break;
		}
	}
	/* kick the transmitter in case output is queued */
	dmcstart(ifp);
}
758
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 *
 * Returns 0 on success or an errno; on error the mbuf is freed here.
 */
int
dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	int type, error, s;
	struct mbuf *m = m0;
	struct dmc_header *dh;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto bad;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		type = DMC_IPTYPE;
		break;
#endif

	case AF_UNSPEC:
		/* raw mode: caller supplies a prebuilt dmc_header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		break;

	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
	if (m == 0) {
		/* M_PREPEND freed the chain on failure */
		error = ENOBUFS;
		goto bad;
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splnet();
	IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error) {
		/* mbuf is already freed */
		splx(s);
		return (error);
	}
	dmcstart(ifp);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
831
832
833 /*
834 * Process an ioctl request.
835 */
836 /* ARGSUSED */
837 int
838 dmcioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
839 {
840 int s = splnet(), error = 0;
841 register struct dmc_softc *sc = ifp->if_softc;
842
843 switch (cmd) {
844
845 case SIOCSIFADDR:
846 ifp->if_flags |= IFF_UP;
847 if ((ifp->if_flags & IFF_RUNNING) == 0)
848 dmcinit(ifp);
849 break;
850
851 case SIOCSIFDSTADDR:
852 if ((ifp->if_flags & IFF_RUNNING) == 0)
853 dmcinit(ifp);
854 break;
855
856 case SIOCSIFFLAGS:
857 if ((ifp->if_flags & IFF_UP) == 0 &&
858 sc->sc_flag & DMC_RUNNING)
859 dmcdown(sc);
860 else if (ifp->if_flags & IFF_UP &&
861 (sc->sc_flag & DMC_RUNNING) == 0)
862 dmcrestart(sc);
863 break;
864
865 default:
866 error = EINVAL;
867 }
868 splx(s);
869 return (error);
870 }
871
/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
void
dmcrestart(struct dmc_softc *sc)
{
	int s, i;

#ifdef DMCDEBUG
	/* dump base table */
	/*
	 * NOTE(review): this debug code still uses the pre-softc
	 * `dmc_base[unit]' naming and will not compile with DMCDEBUG
	 * defined — should be sc->dmc_base.d_base[i]; confirm before use.
	 */
	printf("%s base table:\n", sc->sc_dev.dv_xname);
	for (i = 0; i < sizeof (struct dmc_base); i++)
		printf("%o\n" ,dmc_base[unit].d_base[i]);
#endif

	dmcdown(sc);

	/*
	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
		log(LOG_ERR, "%s: M820 Test Failed\n", sc->sc_dev.dv_xname);
		return;
	}

	/* restart DMC */
	dmcinit(&sc->sc_if);
	sc->sc_flag &= ~DMC_RESTART;
	s = splnet();
	dmcstart(&sc->sc_if);
	splx(s);
	sc->sc_if.if_collisions++;	/* why not? */
}
910
/*
 * Reset a device and mark down.
 * Flush output queue and drop queue limit.
 */
void
dmcdown(struct dmc_softc *sc)
{
	struct ifxmt *ifxp;

	/* master clear stops the device immediately */
	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);

	/* release any mbufs still held for in-flight transmits */
	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
#ifdef notyet
		if (ifxp->ifw_xtofree) {
			(void) m_freem(ifxp->ifw_xtofree);
			ifxp->ifw_xtofree = 0;
		}
#endif
	}
	IF_PURGE(&sc->sc_if.if_snd);
}
933
/*
 * Watchdog timeout to see that transmitted packets don't
 * lose interrupts.  The device has to be online (the first
 * transmission may block until the other side comes up).
 */
void
dmctimeout(struct ifnet *ifp)
{
	struct dmc_softc *sc = ifp->if_softc;
	char buf1[64], buf2[64];

	if (sc->sc_flag & DMC_ONLINE) {
		/* log the CSR state before blowing the device away */
		bitmask_snprintf(DMC_RBYTE(DMC_BSEL0) & 0xff, DMC0BITS,
		    buf1, sizeof(buf1));
		bitmask_snprintf(DMC_RBYTE(DMC_BSEL2) & 0xff, DMC2BITS,
		    buf2, sizeof(buf2));
		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
		    sc->sc_dev.dv_xname, buf1, buf2);
		dmcrestart(sc);
	}
}
Cache object: 110c64b29b0a117e6b28b2a3a2a5cfc4
|