FreeBSD/Linux Kernel Cross Reference
sys/chips/lance.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993-1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: lance.c,v $
29 * Revision 2.31 93/11/17 16:11:41 dbg
30 * Removed lint.
31 * [93/09/27 dbg]
32 *
33 * Revision 2.30 93/05/30 21:07:14 rvb
34 * Sanity check in copy_to_lance was screwed by type promotions.
35 * [93/05/28 af]
36 *
37 * Revision 2.29 93/05/15 19:38:06 mrt
38 * machparam.h -> machspl.h
39 *
40 * Revision 2.28 93/05/10 20:08:02 rvb
41 * No more sys/types.h. Define what is exported and what not.
42 * [93/05/06 09:52:17 af]
43 *
44 * Revision 2.27 93/05/10 17:45:04 rvb
45 * Tell the lance to send the packet now instead of waiting
46 * until the next 1.6 ms poll interval expires.
47 * [from cmaeda.]
48 *
49 * Revision 2.26 93/03/26 17:58:11 mrt
50 * Removed all uses of minor().
51 * [93/03/17 af]
52 *
53 * Revision 2.25 93/03/09 10:51:59 danner
54 * Propagated protos of switch structure.
55 * [93/03/06 af]
56 * Removed dev_t, which is just a U*x compat type. Lint.
57 * Would be nice to be able to make this file readable again.
58 * Reset lo(n)g history, contributors so far:
59 * rvb, af, mja, rpd, jsb, danner, jfriedl, jeffreyh
60 * [93/03/05 af]
61 *
62 * 18-May-89 Robert Baron (rvb) at Carnegie-Mellon University
63 * Created.
64 */
65 /*
66 * File: lance.c
67 * Author: Robert V. Baron & Alessandro Forin
68 * Date: 5/90
69 *
70 * Driver for the DEC LANCE Ethernet Controller.
71 */
72
73 /*
74
75 Byte ordering issues.
76
The lance sees data naturally as half word (16 bit) quantities.
78 Bit 2 (BSWP) in control register 3 (CSR3) controls byte swapping.
79 To quote the spec:
80
	02	BSWP	BYTE SWAP allows the chip to
	operate in systems that consider bits (15:08) of data to be
	pointed to by an even address and bits (07:00) to be pointed
	to by an odd address.
85
86 When BSWP=1, the chip will swap the high and low bytes on DMA
87 data transfers between the silo and bus memory. Only data from
88 silo transfers is swapped; the Initialization Block data and
89 the Descriptor Ring entries are NOT swapped. (emphasis theirs)
90
91
92 So on systems with BYTE_MSF=1, the BSWP bit should be set. Note,
93 however, that all shorts in the descriptor ring and initialization
94 block need to be swapped. The BITFIELD macros in lance.h handle this
95 magic.
96
97 */
98
99 #include <ln.h>
100 #if NLN > 0
101 #include <platforms.h>
102
103 /*
104 * AMD Am7990 LANCE (Ethernet Interface)
105 */
106 #include <sys/ioctl.h>
107 #include <vm/vm_kern.h>
108
109 #include <machine/machspl.h> /* spl definitions */
110 #include <kern/time_out.h>
111 #include <sys/syslog.h>
112 #include <ipc/ipc_port.h>
113 #include <ipc/ipc_kmsg.h>
114
115 #include <device/device_types.h>
116 #include <device/errno.h>
117 #include <device/io_req.h>
118 #include <device/if_hdr.h>
119 #include <device/if_ether.h>
120 #include <device/net_status.h>
121 #include <device/net_io.h>
122
123 #ifdef FLAMINGO
124 #define se_reg_type unsigned int
125 #endif
126
127 #include <chips/lance.h>
128 #include <chips/busses.h>
129
130 #define private static
131 #define public
132
133 typedef struct se_softc *se_softc_t; /* move above prototypes */
134
135 void se_write_reg( /* forwards */
136 se_reg_t regptr,
137 int val,
138 int result,
139 char *regname);
140
141 void se_read(
142 se_softc_t sc,
143 volatile char *lnrbuf,
144 int len,
145 io_req_t loop_back);
146
147 void se_rint(
148 int unit);
149
150 void se_tint(
151 int unit);
152
153 private vm_offset_t se_Hmem_nogap(vm_offset_t);
154 private vm_offset_t se_Hmem_gap16(vm_offset_t);
155 private vm_offset_t se_malloc(
156 se_softc_t sc,
157 int size);
158
159
160 /* This config section should go into a separate file */
161
162 #ifdef LUNA88K
163 # include <luna88k/board.h>
164 # define MAPPED 1
165 #undef bcopy
166 extern void bcopy(), bzero();
167
168 #define wbflush()
169 #define Hmem(lna) (vm_offset_t)((lna) + sc->lnbuf)
170 #define Lmem(lna) (vm_offset_t)((lna) + sc->lnoffset)
171
172 #define SPACE (TRI_PORT_RAM_SPACE>>1)
173 private struct se_switch se_switch[] = {
174 { LANCE_ADDR - TRI_PORT_RAM, /* pointer */
175 SPACE /* host side */,
176 SPACE /* lance side */,
177 - TRI_PORT_RAM,
178 0, /* romstride */
179 0, /* ramstride */
180 SPACE,
181 /* desc_copyin */ bcopy,
182 /* desc_copyout */ bcopy,
183 /* data_copyin */ bcopy,
184 /* data_copyout */ bcopy,
185 /* bzero */ bzero,
186 /* mapaddr */ se_Hmem_nogap,
187 /* mapoffs */ se_Hmem_nogap
188 },
189 };
190
191 #endif
192
193 #ifdef DECSTATION
194 #include <mips/mips_cpu.h>
195 #include <mips/PMAX/pmad_aa.h>
196
197 #define MAPPED 1
198
199 /*
200 * The LANCE buffer memory as seen from the Pmax cpu is funny.
201 * It is viewed as short words (16bits), spaced at word (32bits)
202 * intervals. The same applies to the registers. From the LANCE
203 * point of view memory is instead contiguous.
204 * The ROM that contains the station address is in the space belonging
205 * to the clock/battery backup memory. This space is again 16 bits
206 * in a 32bit envelope. And the ether address is stored in the "high"
207 * byte of 6 consecutive quantities.
208 *
209 * But Pmaxen and 3maxen (and..) map lance space differently.
210 * This requires dynamic adaptation of the driver, which
211 * is done via the following switches.
212 * For convenience, the switch holds information about
213 * the location of the lance control registers as well.
214 * This could be either absolute (pmax) or relative to
215 * some register base (3max, turbochannel)
216 */
217 extern void copyin_gap16(), copyout_gap16(), bzero_gap16();
218 extern void bcopy(), bzero();
219 extern void copyin_gap32(), copyout_gap32();
220
221 private struct se_switch se_switch[] = {
222 /* pmax */
223 { 0x00000000, 0x01000000, 0x0, 0x05000000, 8, 16, 64*1024,
224 copyin_gap16, copyout_gap16, copyin_gap16, copyout_gap16,
225 bzero_gap16, se_Hmem_gap16, se_Hmem_gap16},
226 /* 3max */
227 { PMAD_OFFSET_LANCE, PMAD_OFFSET_RAM, PMAD_OFFSET_RAM, PMAD_OFFSET_ROM,
228 16, 0, PMAD_RAM_SIZE,
229 bcopy, bcopy, bcopy, bcopy, bzero, se_Hmem_nogap, se_Hmem_nogap},
230 /* 3min */
231 /* XXX re-use other 64k */
232 { 0/*later*/, 0/*later*/, 0x0, 0/*later*/, 0, 128, 64*1024,
233 copyin_gap16, copyout_gap16, copyin_gap32, copyout_gap32,
234 bzero_gap16, se_Hmem_gap16, se_Hmem_nogap},
235 };
236
237 /*
238 * "lna" is what se_malloc hands back. They are offsets using
239 * the sizing that the Lance would use. The Lance space is
240 * mapped somewhere in the I/O space, as indicated by the softc.
241 * Hence we have these two macros:
242 */
243 /* H & L are not hi and lo but
244 H = HOST == addresses for host to reference board memory
245 L = LOCAL == addresses on board
246 */
247 #define Hmem(lna) (vm_offset_t)((se_sw->mapaddr)(lna) + sc->lnbuf)
248 #define Lmem(lna) (vm_offset_t)((vm_offset_t)lna + sc->lnoffset)
249 #endif /*DECSTATION*/
250
251
252 #ifdef VAXSTATION
253 #include <vax/ka3100.h>
254
255 #define wbflush()
256
/* fixed-signature wrappers so the pvax se_switch function pointers all
   take (vm_offset_t, int) arguments */
void xzero(x, l) vm_offset_t x; int l; { blkclr(x, l); }
void xcopy(f, t, l) vm_offset_t f, t; int l; { bcopy(f, t, l); }
259
260 private struct se_switch se_switch[] = {
261 /* pvax sees contiguous bits in lower 16Meg of memory */
262 { 0, 0, 0, 0, 0, 0, 64*1024,
263 xcopy, xcopy, xcopy, xcopy, xzero, se_Hmem_nogap, se_Hmem_nogap},
264 };
265
266 /*
267 * "lna" is what se_malloc hands back. They are offsets using
268 * the sizing that the Lance would use. The Lance space is
269 * mapped somewhere in the I/O space, as indicated by the softc.
270 * Hence we have these two macros:
271 */
272 /* H & L are not hi and lo but
273 H = HOST == addresses for host to reference board memory
274 L = LOCAL == addresses on board
275 */
276 /*
277 * This does not deal with > 16 Meg physical memory, where
278 * Hmem != Lmem
279 */
280 #define Hmem(lna) (vm_offset_t)((lna) + sc->lnbuf)
281 #define Lmem(lna) (vm_offset_t)((lna) + sc->lnoffset)
282
283 #endif /*VAXSTATION*/
284
285
286 #ifdef FLAMINGO
287 #include <alpha/alpha_cpu.h>
288
289 /* XXX might be wrong, mostly stolen from kmin */
290 extern void copyin_gap16(), copyout_gap16(), bzero_gap16();
291 extern void copyin_gap32(), copyout_gap32();
292 extern void bcopy(), bzero();
293
294 private struct se_switch se_switch[] = {
295 /* XXX re-use other 64k */
296 { 0/*later*/, 0/*later*/, 0x0, 0/*later*/, 0, 128, 64*1024,
297 copyin_gap16, copyout_gap16, copyin_gap32, copyout_gap32,
298 bzero_gap16, se_Hmem_gap16, se_Hmem_nogap},
299 };
300
301 /*
302 * "lna" is what se_malloc hands back. They are offsets using
303 * the sizing that the Lance would use. The Lance space is
304 * mapped somewhere in the I/O space, as indicated by the softc.
305 * Hence we have these two macros:
306 */
307 /* H & L are not hi and lo but
308 H = HOST == addresses for host to reference board memory
309 L = LOCAL == addresses on board
310 */
311 #define Hmem(lna) (vm_offset_t)((se_sw->mapaddr)(lna) + sc->lnbuf)
312 #define Lmem(lna) (vm_offset_t)((vm_offset_t)lna + sc->lnoffset)
313 #endif /*FLAMINGO*/
314
315
316 /*
317 * Map a lance-space offset into an host-space one
318 */
/* identity mapping: host offset == lance offset */
private vm_offset_t se_Hmem_nogap( vm_offset_t lna) { return lna;}
/* 16-bits-in-a-32-bit-envelope mapping: host offset is twice the lance offset */
private vm_offset_t se_Hmem_gap16( vm_offset_t lna) { return lna << 1;}
321
322 /*
323 * Memory addresses for LANCE are 24 bits wide.
324 */
325 #define Addr_lo(y) ((unsigned short)((vm_offset_t)(y) & 0xffff))
326 #define Addr_hi(y) ((unsigned short)(((vm_offset_t)(y)>>16) & 0xff))
327
328 #define LN_MEMORY_SIZE (se_sw->ramsize)
329
/* XXX to accommodate heterogeneity this should be made per-drive */
331 /* XXX and then some more */
332
333 struct se_switch *se_sw = se_switch;
334
335 void set_se_switch(
336 int n)
337 {
338 se_sw = &se_switch[n];
339 }
340
341 #ifndef LUNA88K
/*
 * Patch switch entry "n" with the register/buffer/rom locations
 * discovered at configuration time, then make it the active switch.
 */
void setse_switch(
	int	n,		/* number */
	vm_offset_t r,		/* regspace */
	vm_offset_t b,		/* bufspace */
	vm_offset_t l,		/* lance bufspace */
	vm_offset_t o)		/* romspace */
{
	se_switch[n].regspace = r;
	se_switch[n].bufspace = b;
	se_switch[n].ln_bufspace = l;
	se_switch[n].romspace = o;

	/* make sure longword aligned (round up to an 8 byte boundary) */
	if (se_switch[n].bufspace & 0x7) {
		se_switch[n].bufspace = (se_switch[n].bufspace+0x7) & ~0x7;
	}

	set_se_switch(n);
}
361 #endif
362
363 /*
364 * Autoconf info
365 */
366
367 private vm_offset_t se_std[NLN] = { 0 };
368 private struct bus_device *se_info[NLN];
369 private boolean_t se_probe(
370 vm_offset_t reg,
371 struct bus_device *ui);
372 private void se_attach(
373 struct bus_device *ui);
374
375 struct bus_driver se_driver =
376 { se_probe, 0, se_attach, 0, se_std, "se", se_info, };
377
378 /*
379 * Externally visible functions
380 */
381 char *se_unprobed_addr = 0;
382 void se_intr( /* kernel */
383 int unit,
384 spl_t spllevel);
385
386 int se_open(), se_output(), se_get_status(), /* user */
387 se_set_status(), se_setinput(), se_restart();
388
389 /*
390 *
391 * Internal functions & definitions
392 *
393 */
394
395 private void se_init(
396 int unit);
397 private void init_lance_space(
398 se_softc_t sc);
399 private void se_desc_set_status(
400 se_desc_t lndesc,
401 int val);
402 private volatile long *se_desc_alloc(
403 se_softc_t sc,
404 se_desc_t lndesc); /* must be aligned! */
405 void se_start(
406 int unit);
407 private void copy_from_lance(
408 volatile unsigned char *rbuf,
409 unsigned int nbytes,
410 struct ether_header *hdr,
411 struct packet_header *pkt);
412 private int copy_to_lance(
413 io_req_t request,
414 volatile char *sbuf);
415
416 int se_verbose = 0; /* debug flag */
417
418 #define RLOG 4 /* 2**4 = 16 receive descriptors */
419 #define TLOG 4 /* 2**4 = 16 transmit descriptors */
420 #define NRCV (1<<RLOG) /* Receive descriptors */
421 #define NXMT (1<<TLOG) /* Transmit descriptors */
422
423 #define LN_BUFFER_SIZE (0x800-0x80)
424
425 /*
426 * Ethernet software status per interface.
427 *
428 * Each interface is referenced by a network interface structure,
429 * is_if, which contains the output queue for the interface, its address, ...
430 */
431 int se_loopback_hack = 1;
432
struct se_softc {
	struct	ifnet is_if;	/* generic interface header (must stay
				   first: the softc is cast to ifnet*) */
	unsigned char is_addr[6]; /* ethernet hardware address */
	unsigned short pad;
	se_reg_t lnregs;	/* Lance registers */
	vm_offset_t lnbuf;	/* Lance memory, Host offset */
	vm_offset_t lnoffset;	/* Lance memory, Lance offset */
	vm_offset_t lnrom;	/* station address ROM, host address */
	vm_offset_t lnsbrk;	/* Lance memory allocator */
	vm_offset_t lninit_block; /* Init block address */
	se_desc_t lnrring[NRCV]; /* Receive ring desc. */
	volatile long *lnrbuf[NRCV]; /* Receive buffers */
	se_desc_t lntring[NXMT]; /* Transmit ring desc. */
	volatile long *lntbuf[NXMT]; /* Transmit buffers */

	int	rcv_last;	/* Rcv buffer last read */

	io_req_t tpkt[NXMT+1];	/* Xmt pkt queue */
	int	xmt_count;	/* Xmt queue size */
	int	xmt_last;	/* Xmt queue head (insert) */
	int	xmt_complete;	/* Xmt queue tail (remove) */

	int	se_flags;	/* Flags for SIOCSIFFLAGS */
	int	counters[4];	/* error counters */
	/* convenience aliases for the individual error counters */
#define	bablcnt	counters[0]	/* transmitter timeouts (BABL) */
#define	misscnt	counters[1]	/* missed packets (MISS) */
#define	merrcnt	counters[2]	/* memory errors (MERR) */
#define	rstrtcnt counters[3]	/* chip restarts */
} se_softc_data[NLN];
462
463 se_softc_t se_softc[NLN]; /* quick access */
464
465 /*
466 * Probe the Lance to see if it's there
467 */
468 private int se_open_state = 0;
469
/*
 * Probe: see if a lance answers at "reg".  If so, bind it to its
 * softc, reset it, carve up the on-board buffer RAM and run the
 * chip's initialization sequence.
 * Returns 1 if the device is present and initialized, 0 otherwise.
 */
private boolean_t se_probe(
	vm_offset_t reg,
	register struct bus_device *ui)
{
	register se_softc_t sc;
	se_reg_t rdp, rap;
	int unit = ui->unit;

	/*
	 * See if the interface is there by reading the lance CSR. On pmaxen
	 * and 3maxen this is superfluous, but..
	 */
	rdp = (se_reg_t) (reg + se_sw->regspace);
#ifdef	DECSTATION
	if (check_memory(rdp, 0))
		return 0;
#endif	/*DECSTATION*/
#ifdef	MAPPED
	SE_probe(reg,ui);
#endif	/*MAPPED*/
	rap = rdp + 2;	/* XXX might not be true in the future XXX */
			/* rdp and rap are "shorts" on consecutive
			   "long" word boundaries */

	/*
	 * Bind this interface to the softc.
	 */
	sc = &se_softc_data[unit];
	se_softc[unit] = sc;
	sc->lnregs = (se_reg_t) (reg + se_sw->regspace);
	sc->lnbuf = (vm_offset_t) (reg + se_sw->bufspace);
	sc->lnoffset = (vm_offset_t) (se_sw->ln_bufspace);
	sc->lnrom = (vm_offset_t) (reg + se_sw->romspace);

	/*
	 * Reset the interface, and make sure we really do it! (the 3max
	 * seems quite stubborn about these registers)
	 */
	se_write_reg(rap, CSR0_SELECT, CSR0_SELECT, "RAP");
	se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");

	/*
	 * Allocate lance RAM buffer memory
	 */
	init_lance_space(sc);

	/*
	 * Initialize the chip
	 *
	 * NOTE: From now on we will only touch csr0
	 */
	if (se_ship_init_block(sc, unit))
		return 0;

	/*
	 * Tell the world we are alive and well
	 */
	se_open_state++;
	return 1;
}
530
/*
 * Point the chip at the init block (CSR1/CSR2), start the INIT
 * sequence and poll for completion (IDON in CSR0).
 * Returns 0 on success, 1 if the chip would not initialize.
 */
int se_ship_init_block(
	register se_softc_t sc,
	int unit)
{
	se_reg_t rdp = sc->lnregs;
	se_reg_t rap;
	register int i = 0;

	rap = rdp + 2;	/* XXX might not be true in the future XXX */

	/*
	 * Load LANCE control block.
	 */

#ifdef	LUNA88K
	/* turn on byte swap bit in csr3, set bcon bit - as in 2.5 */
	se_write_reg(rap, CSR3_SELECT, CSR3_SELECT, "RAP");
	se_write_reg(rdp, LN_CSR3_BSWP|LN_CSR3_BCON,
			LN_CSR3_BSWP|LN_CSR3_BCON, "csr3");
#endif

	/* CSR1 <- low 16 bits of the init block's lance-space address */
	se_write_reg(rap, CSR1_SELECT, CSR1_SELECT, "RAP");
	se_write_reg(rdp, Addr_lo(Lmem(sc->lninit_block)),
			Addr_lo(Lmem(sc->lninit_block)), "csr1");

	/* CSR2 <- high 8 bits of the 24-bit address */
	se_write_reg(rap, CSR2_SELECT, CSR2_SELECT, "RAP");
	se_write_reg(rdp, Addr_hi(Lmem(sc->lninit_block)),
			Addr_hi(Lmem(sc->lninit_block)), "csr2");

	/*
	 * Start the INIT sequence now
	 */
	se_write_reg(rap, CSR0_SELECT, CSR0_SELECT, "RAP");
	*rdp = (LN_CSR0_IDON | LN_CSR0_INIT);
	wbflush();

	/* give it plenty of time to settle */
	while (i++ < 10000) {
		delay(100);
		if ((*rdp & LN_CSR0_IDON) != 0)
			break;
	}
	/* make sure got out okay */
	if ((*rdp & LN_CSR0_IDON) == 0) {
		printf("se%d: cannot initialize\n", unit);
		if (*rdp & LN_CSR0_ERR)
			printf("se%d: initialization error, csr = %04x\n",
				unit, (*rdp & 0xffff));
		return 1;
	}
	/*
	 * Do not enable interrupts just yet.
	 */
	/* se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0"); */

	return 0;
}
588
/*
 * Write "val" to a lance register and keep re-writing until it
 * reads back as "result" (some buses/boards need repeated writes
 * before a register settles).  Gives up with a console message
 * after 10000 attempts.
 */
void
se_write_reg(
	register se_reg_t regptr,
	register int val,
	register int result,
	char *regname)
{
	register int i = 0;

	while ((unsigned short)(*regptr) != (unsigned short)result) {
		*regptr = (se_reg_type)val;
		wbflush();
		if (++i > 10000) {
			printf("se: %s did not settle (to x%x): x%x\n",
				regname, result, (unsigned short)(*regptr));
			return;
		}
		delay(100);
	}
}
609
610 unsigned short
611 se_read_reg(
612 register se_reg_t regptr)
613 {
614 return (unsigned short) (*regptr);
615 }
616
/*
 * Carve up the lance's on-board RAM: allocate the init block and
 * both descriptor rings (with their data buffers), build a local
 * copy of the init block, then copy it out to lance space.
 */
private void
init_lance_space(
	register se_softc_t sc)
{
	register int lptr;		/* Generic lance pointer */
	se_desc_t ringaddr;
	long *rom_eaddress = (long *) sc->lnrom;
	int i;
	struct se_init_block init_block;

	/*
	 * Allocate local RAM buffer memory for the init block,
	 * fill in our local copy then copyout.
	 */

	sc->lninit_block = se_malloc(sc, sizeof (struct se_init_block));

	/*
	 * Set values on stack, then copyout en-masse
	 */
	bzero(&init_block, sizeof(init_block));
	init_block.mode = 0;

	/* byte swapping between host and lance */
	/* (romstride selects, per machine, which byte of each ROM word
	   carries one octet of the station address) */

	init_block.phys_addr_low = ((rom_eaddress[0]>>se_sw->romstride)&0xff) |
		(((rom_eaddress[1]>>se_sw->romstride)&0xff) << 8);
	init_block.phys_addr_med = ((rom_eaddress[2]>>se_sw->romstride)&0xff) |
		(((rom_eaddress[3]>>se_sw->romstride)&0xff) << 8);
	init_block.phys_addr_high = ((rom_eaddress[4]>>se_sw->romstride)&0xff) |
		(((rom_eaddress[5]>>se_sw->romstride)&0xff) << 8);

	/*
	 * Allocate both descriptor rings at once.
	 * Note that the quadword alignment requirement is
	 * inherent in the way we perform allocation,
	 * but it does depend on the size of the init block.
	 */
	lptr = se_malloc(sc, sizeof (struct se_desc) * (NXMT + NRCV));

	/*
	 * Initialize the buffer descriptors
	 */
	init_block.recv_ring_pointer_lo = Addr_lo(Lmem(lptr));
	init_block.recv_ring_pointer_hi = Addr_hi(Lmem(lptr));
	init_block.recv_ring_len = RLOG;	/* log2 of ring size */

	for ( i = 0; i < NRCV ; i++, lptr += sizeof(struct se_desc)) {
		ringaddr = (se_desc_t)Hmem(lptr);
		sc->lnrring[i] = ringaddr;
		sc->lnrbuf[i] = se_desc_alloc (sc, ringaddr);
	}

	init_block.xmit_ring_pointer_lo = Addr_lo(Lmem(lptr));
	init_block.xmit_ring_pointer_hi = Addr_hi(Lmem(lptr));
	init_block.xmit_ring_len = TLOG;	/* log2 of ring size */

	for ( i = 0 ; i < NXMT ; i++, lptr += sizeof(struct se_desc)) {
		ringaddr = (se_desc_t)Hmem(lptr);
		sc->lntring[i] = ringaddr;
		sc->lntbuf[i] = se_desc_alloc (sc, ringaddr);
	}

	/*
	 * No logical address filtering
	 */
	init_block.logical_addr_filter0 = 0;
	init_block.logical_addr_filter1 = 0;
	init_block.logical_addr_filter2 = 0;
	init_block.logical_addr_filter3 = 0;

	/*
	 * Move init block into lance space
	 */
	(se_sw->desc_copyout)((vm_offset_t)&init_block, Hmem(sc->lninit_block), sizeof(init_block));
	wbflush();
}
694
695 /*
696 * Interface exists: make available by filling in network interface
697 * record. System will initialize the interface when it is ready
698 * to accept packets.
699 */
/*
 * Attach: read the station address out of the ROM, print it,
 * and fill in the generic network interface descriptor.
 */
private void
se_attach(
	register struct bus_device *ui)
{
	unsigned char *enaddr;
	struct ifnet *ifp;
	long *rom_eaddress;
	int unit = ui->unit;
	se_softc_t sc = se_softc[unit];

	rom_eaddress = (long *) sc->lnrom;

	/*
	 * Read the address from the prom and save it.
	 * (romstride selects which byte of each ROM word holds
	 * an octet of the address)
	 */
	enaddr = sc->is_addr;
	enaddr[0] = (unsigned char) ((rom_eaddress[0] >> se_sw->romstride) & 0xff);
	enaddr[1] = (unsigned char) ((rom_eaddress[1] >> se_sw->romstride) & 0xff);
	enaddr[2] = (unsigned char) ((rom_eaddress[2] >> se_sw->romstride) & 0xff);
	enaddr[3] = (unsigned char) ((rom_eaddress[3] >> se_sw->romstride) & 0xff);
	enaddr[4] = (unsigned char) ((rom_eaddress[4] >> se_sw->romstride) & 0xff);
	enaddr[5] = (unsigned char) ((rom_eaddress[5] >> se_sw->romstride) & 0xff);

	printf(": %x-%x-%x-%x-%x-%x",
		(rom_eaddress[0] >> se_sw->romstride) & 0xff,
		(rom_eaddress[1] >> se_sw->romstride) & 0xff,
		(rom_eaddress[2] >> se_sw->romstride) & 0xff,
		(rom_eaddress[3] >> se_sw->romstride) & 0xff,
		(rom_eaddress[4] >> se_sw->romstride) & 0xff,
		(rom_eaddress[5] >> se_sw->romstride) & 0xff);

	/*
	 * Initialize the standard interface descriptor
	 */
	ifp = &sc->is_if;
	ifp->if_unit = unit;
	ifp->if_header_size = sizeof(struct ether_header);
	ifp->if_header_format = HDR_ETHERNET;
	ifp->if_address_size = 6;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags |= IFF_BROADCAST;

	ifp->if_address = (char *) enaddr;

	if_init_queues(ifp);
#ifdef	MAPPED
	SE_attach(ui);
#endif	/*MAPPED*/

}
750
751 /*
752 * Use a different hardware address for interface
753 */
754 void
755 se_setaddr(
756 unsigned char eaddr[6],
757 int unit)
758 {
759 register se_softc_t sc = se_softc[unit];
760 struct se_init_block init_block;
761
762 /*
763 * Modify initialization block accordingly
764 */
765 (se_sw->desc_copyin) (Hmem(sc->lninit_block), (vm_offset_t)&init_block, sizeof(init_block));
766 bcopy(eaddr, &init_block.phys_addr_low, sizeof(*eaddr));
767 (se_sw->desc_copyout)((vm_offset_t)&init_block, Hmem(sc->lninit_block), sizeof(init_block));
768 /*
769 * Make a note of it
770 */
771 bcopy(eaddr, sc->is_addr, sizeof(*eaddr));
772
773 /*
774 * Restart the interface
775 */
776 se_restart(&sc->is_if);
777 se_init(unit);
778 }
779
780 /*
781 * Restart interface
782 *
783 * We use this internally on those errors that hang the chip,
784 * not sure yet what use the MI code will make of it.
785 *
786 * After stopping the chip and effectively turning off the interface
787 * we release all pending buffers and cause the chip to init
788 * itself. We do not enable interrupts here.
789 */
/*
 * Stop the chip, mark the interface down, complete (and drop) all
 * pending transmit requests, then re-run the chip's INIT sequence.
 * Interrupts are NOT re-enabled here.  Always returns 0.
 */
int
se_restart( register struct ifnet *ifp )
{
	register se_softc_t sc = se_softc[ifp->if_unit];
	se_reg_t rdp;
	register int i;

	rdp = sc->lnregs;

	/*
	 * stop the chip
	 */
	se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");

	/*
	 * stop network activity
	 */
	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);
		sc->se_flags &= ~(IFF_UP | IFF_RUNNING);
	}
	sc->rstrtcnt++;

	if (se_verbose)
		printf("se%d: %d restarts\n", ifp->if_unit, sc->rstrtcnt);

	/*
	 * free up any buffers currently in use
	 */
	for (i = 0; i < NXMT; i++)
		if (sc->tpkt[i]) {
			iodone(sc->tpkt[i]);
			sc->tpkt[i] = (io_req_t) 0;
		}
	/*
	 * INIT the chip again, no need to reload init block address.
	 */
	se_ship_init_block(sc, ifp->if_unit);

	return 0;
}
831
832 /*
833 * Initialize the interface.
834 */
private void
se_init( int unit )
{
	register se_softc_t sc = se_softc[unit];
	register se_desc_t *rp;
	register struct ifnet *ifp = &sc->is_if;
	se_reg_t rdp;
	short mode;
	spl_t s;
	int i;

	/* nothing to do if the interface is already up and running */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	rdp = sc->lnregs;

	/*
	 * Init the buffer descriptors and indexes for each of the rings.
	 */
	for (i = 0, rp = sc->lnrring; i < NRCV; i++, rp++)
		se_desc_set_status(*rp, LN_RSTATE_OWN);	/* hand to lance */

	for (i = 0, rp = sc->lntring; i < NXMT; i++, rp++)
		se_desc_set_status(*rp, 0);		/* keep for host */

	sc->xmt_count = sc->xmt_complete = sc->xmt_last = sc->rcv_last = 0;

	/*
	 * Deal with loopback mode operation
	 */
	s = splimp();

	(se_sw->desc_copyin) (Hmem(sc->lninit_block), (vm_offset_t)&mode, sizeof(mode));

	if (ifp->if_flags & IFF_LOOPBACK
	    && ((mode & LN_MODE_LOOP) == 0)) {
		/* if not already in loopback mode, do external loopback */
		mode &= ~LN_MODE_INTL;
		mode |= LN_MODE_LOOP;
		(se_sw->desc_copyout) ((vm_offset_t)&mode, Hmem(sc->lninit_block), sizeof(mode));
		/* restart picks up the new mode word; the recursive call
		   below then falls through this branch (LOOP now set) */
		se_restart(ifp);
		se_init(ifp->if_unit);
		splx(s);
		return;
	}

	ifp->if_flags |= (IFF_UP | IFF_RUNNING);
	sc->se_flags |= (IFF_UP | IFF_RUNNING);

	/*
	 * Start the Lance and enable interrupts
	 */
	*rdp = (LN_CSR0_STRT | LN_CSR0_INEA);
	wbflush();

	/*
	 * See if anything is already queued
	 */
	se_start(unit);
	splx(s);
}
896
897
898 /*
899 * Shut off the lance
900 */
901 void
902 se_stop(int unit)
903 {
904 se_reg_t rdp = se_softc[unit]->lnregs;
905
906 se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");
907 }
908
909
910 /*
911 * Open the device, declaring the interface up
912 * and enabling lance interrupts.
913 */
914 /*ARGSUSED*/
915 io_return_t
916 se_open(
917 int unit,
918 int flag)
919 {
920 register se_softc_t sc = se_softc[unit];
921
922 if (unit >= NLN)
923 return EINVAL;
924 if (!se_open_state)
925 return ENXIO;
926
927 sc->is_if.if_flags |= IFF_UP;
928 se_open_state++;
929 se_init(unit);
930 return 0;
931 }
932
933 #ifdef MAPPED
934 int se_use_mapped_interface[NLN];
935 #endif /*MAPPED*/
936
/*
 * Return the interface to normal (non-mapped) operation and
 * restart it.
 */
void
se_normal(int unit)
{
#ifdef	MAPPED
	se_use_mapped_interface[unit] = 0;
#endif	/*MAPPED*/
	if (se_softc[unit]) {
		/* cast is safe: is_if is the first member of the softc */
		se_restart((struct ifnet *)se_softc[unit]);
		se_init(unit);
	}
}
948
949 /*
950 * Ethernet interface interrupt routine
951 */
/*
 * Interrupt handler: process error conditions, acknowledge the
 * interrupt by writing the set-to-clear CSR0 bits back, then
 * service receive/transmit completions, looping while more
 * events are pending.
 */
void
se_intr(
	int unit,
	spl_t spllevel)
{
	register se_softc_t sc = se_softc[unit];
	se_reg_t rdp;
	register struct ifnet *ifp = &sc->is_if;
	register unsigned short csr;

#ifdef	MAPPED
	if (se_use_mapped_interface[unit])
	{
		SE_intr(unit,spllevel);
		return;
	}
#endif	/*MAPPED*/

	if (se_open_state < 2) {	/* Stray, or not open for business */
		rdp = (sc ? sc->lnregs : (se_reg_t)se_unprobed_addr);
		*rdp |= LN_CSR0_STOP;
		wbflush();
		return;
	}
	rdp = sc->lnregs;

	/*
	 * Read the CSR and process any error condition.
	 * Later on, restart the lance by writing back
	 * the CSR (for set-to-clear bits).
	 */
	csr = *rdp;		/* pick up the csr */

	/* drop spurious interrupts */
	if ((csr & LN_CSR0_INTR) == 0)
		return;

#ifdef	DECSTATION
	splx(spllevel);		/* drop priority now */
#endif	/*DECSTATION*/
again:
	/*
	 * Check for errors first
	 */
	if ( csr & LN_CSR0_ERR ) {
		if (csr & LN_CSR0_MISS) {
			/*
			 * Stop the chip to prevent a corrupt packet from
			 * being transmitted. There is a known problem with
			 * missed packet errors causing corrupted data to
			 * be transmitted to the same host as was just
			 * transmitted, with a valid crc appended to the
			 * packet. The only solution is to stop the chip,
			 * which will clear the Lance silo, thus preventing
			 * the corrupt data from being sent.
			 */
			se_write_reg(rdp, LN_CSR0_STOP, LN_CSR0_STOP, "csr0");

			sc->misscnt++;
			if (se_verbose) {
				/* count how many rcv descriptors each side
				   currently owns, for the diagnostic below */
				int me = 0, lance = 0, index;
				struct se_desc r;
				for (index = 0; index < NRCV; index++) {
					(se_sw->desc_copyin)(
						(vm_offset_t)sc->lnrring[index],
						(vm_offset_t)&r,
						sizeof(r));
					if (r.status & LN_RSTATE_OWN)
						lance++;
					else
						me++;
				}
				printf("se%d: missed packet (%d) csr = %x, Lance %x, me %x\n",
					unit, sc->misscnt, csr, lance, me);
			}
			se_restart(ifp);
			se_init(unit);
			return;
		}
		if (csr & LN_CSR0_BABL) {
			sc->bablcnt++;
			if (se_verbose)
				printf("se%d: xmt timeout (%d)\n",
					unit, sc->bablcnt);
		}
		if (csr & LN_CSR0_MERR) {
			sc->merrcnt++;
			printf("se%d: memory error (%d)\n",
				unit, sc->merrcnt);

			/* restart only if the error shut down rcv or xmt */
			if (((csr & LN_CSR0_RXON) == 0)
			    || ((csr & LN_CSR0_TXON) == 0)) {
				se_restart(ifp);
				se_init(unit);
				return;
			}
		}
	}

	/* ack the conditions we saw (set-to-clear) and keep ints enabled */
	*rdp = LN_CSR0_INEA | (csr & LN_CSR0_WTC);
	wbflush();

	if ( csr & LN_CSR0_RINT )
		se_rint( unit );

	if ( csr & LN_CSR0_TINT )
		se_tint( unit );

	/* more work arrived while we were busy? go round again */
	if ((csr = *rdp) & (LN_CSR0_RINT | LN_CSR0_TINT))
		goto again;
}
1063
1064 /*
1065 * Handle a transmitter complete interrupt.
1066 */
void
se_tint(int unit)
{
	register se_softc_t sc = se_softc[unit];
	register index;		/* implicitly int (old-style C) */
	register status;
	io_req_t request;
	struct se_desc r;

	/*
	 * Free up descriptors for all packets in queue for which
	 * transmission is complete. Start from queue tail, stop at first
	 * descriptor we do not OWN, or which is in an inconsistent state
	 * (lance still working).
	 */

	while ((sc->xmt_complete != sc->xmt_last) && (sc->xmt_count > 0)) {

		index = sc->xmt_complete;
		(se_sw->desc_copyin) ((vm_offset_t)sc->lntring[index],
			(vm_offset_t)&r, sizeof(r));
		status = r.status;

		/*
		 * Does lance still own it ?
		 */
		if (status & LN_TSTATE_OWN)
			break;

		/*
		 * Packet sent allright, release queue slot.
		 */
		request = sc->tpkt[index];
		sc->tpkt[index] = (io_req_t) 0;
		sc->xmt_complete = ++index & (NXMT - 1);
		--sc->xmt_count;

		sc->is_if.if_opackets++;
		if (status & (LN_TSTATE_DEF|LN_TSTATE_ONE|LN_TSTATE_MORE))
			sc->is_if.if_collisions++;

		/*
		 * Check for transmission errors.
		 */
		if (!se_loopback_hack && status & LN_TSTATE_ERR) {
			sc->is_if.if_oerrors++;
			if (se_verbose)
				printf("se%d: xmt error (x%x)\n", unit, r.status2);

			if (r.status2 & (LN_TSTATE2_RTRY|LN_TSTATE2_LCOL))
				sc->is_if.if_collisions++;

			/*
			 * Restart chip on errors that disable the
			 * transmitter.
			 */
			iodone(request);
			if (r.status2 & LN_TSTATE2_DISABLE) {
				register struct ifnet *ifp = &sc->is_if;
				se_restart(ifp);
				se_init(ifp->if_unit);
				return;
			}
		} else if (request) {
			/*
			 * If this was a broadcast packet loop it back.
			 * Signal successful transmission of the packet.
			 */
			register struct ether_header *eh;
			register int i;

			eh = (struct ether_header *) request->io_data;
			/* ether broadcast address is in the spec */
			for (i = 0; (i < 6) && (eh->ether_dhost[i] == 0xff); i++)
				;	/* nop */
			/* sending to ourselves makes sense sometimes */
			if (i != 6 && se_loopback_hack)
				for (i = 0;
				     (i < 6) && (eh->ether_dhost[i] == sc->is_addr[i]);
				     i++)
					;	/* nop */
			/* i == 6 --> destination was broadcast (or us):
			   hand the packet to the input side as well */
			if (i == 6)
				se_read(sc, 0, request->io_count, request);
			iodone(request);
		}
	}
	/*
	 * Dequeue next transmit request, if any.
	 */
	if (sc->xmt_count <= 0)
		se_start(unit);
}
1159
1160 /*
1161 * Handle a receiver complete interrupt.
1162 */
void
se_rint(int unit)
{
	register se_softc_t sc = se_softc[unit];
	register index, first, len;		/* implicit int, K&R style */
	unsigned char	status, status1;
	int		ring_cnt;
	struct se_desc	r;			/* host-side descriptor copy */

	/*
	 * Starting from where we left off, look around the receive ring and
	 * pass on all complete packets.
	 */

	for (;; sc->rcv_last = ++index & (NRCV - 1)) {

		/*
		 * Read in current descriptor
		 */
read_descriptor:
		(se_sw->desc_copyin) ((vm_offset_t)sc->lnrring[sc->rcv_last],
				      (vm_offset_t)&r, sizeof(r));
		status = r.status;
		/*
		 * If the lance still owns this slot, we have consumed
		 * everything that is complete.
		 */
		if (status & LN_RSTATE_OWN)
			break;
		first = index = sc->rcv_last;

		/*
		 * If not the start of a packet, error
		 */
		if (!(status & LN_RSTATE_STP)) {
			if (se_verbose)
				printf("se%d: Rring #%d, status=%x !STP\n",
					unit, index, status);
			break;
		}
		/*
		 * See if packet is chained (should not) by looking at
		 * the last descriptor (OWN clear and ENP set).
		 * Remember the status info in this last descriptor.
		 */
		ring_cnt = 1, status1 = status;
		while (((status1 & (LN_RSTATE_ERR | LN_RSTATE_OWN | LN_RSTATE_ENP)) == 0) &&
		       (ring_cnt++ <= NRCV)) {
			struct se_desc r1;
			index = (index + 1) & (NRCV - 1);
			(se_sw->desc_copyin) ((vm_offset_t)sc->lnrring[index],
					      (vm_offset_t)&r1, sizeof(r1));
			status1 = r1.status;
		}

		/*
		 * Chained packet (--> illegally sized!); re-init the
		 * descriptors involved and ignore this bogus packet.  I
		 * donno how, but it really happens that we get these
		 * monsters.
		 */
		if (ring_cnt > 1) {
			/*
			 * Return all descriptors to lance
			 */
			se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
			while (first != index) {
				first = (first + 1) & (NRCV - 1);
				se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
			}
			if ((status1 & LN_RSTATE_ERR) && se_verbose)
				printf("se%d: rcv error %x (chained)\n", unit, status1);
			continue;
		}

		/*
		 * Good packets must be owned by us and have the end of
		 * packet flag.  And nothing else.
		 */
		if ((status & ~LN_RSTATE_STP) == LN_RSTATE_ENP) {
			sc->is_if.if_ipackets++;

			if ((len = r.message_size) == 0)
				/* race seen on pmaxen: the lance
				 * has not updated the size yet ??
				 */
				goto read_descriptor;
			/*
			 * Drop trailing CRC bytes from len and ship packet
			 * up
			 */
			se_read(sc, (volatile char*)sc->lnrbuf[first], len-4,0);

			/*
			 * Return descriptor to lance, and move on to next
			 * packet
			 */
			r.status = LN_RSTATE_OWN;
			(se_sw->desc_copyout)((vm_offset_t)&r,
					      (vm_offset_t)sc->lnrring[first],
					      sizeof(r));
			continue;
		}
		/*
		 * Not a good packet, see what is wrong
		 */
		if (status & LN_RSTATE_ERR) {
			sc->is_if.if_ierrors++;

			if (se_verbose)
				printf("se%d: rcv error (x%x)\n", unit, status);

			/*
			 * Return descriptor to lance
			 */
			se_desc_set_status(sc->lnrring[first], LN_RSTATE_OWN);
		} else {
			/*
			 * Race condition viz lance.  Wait for the next
			 * interrupt.
			 */
			return;
		}
	}
}
1284
1285 /*
1286 * Output routine.
1287 * Call common function for wiring memory,
1288 * come back later (to se_start) to get
1289 * things going.
1290 */
1291 io_return_t
1292 se_output(
1293 int dev,
1294 io_req_t ior)
1295 {
1296 return net_write(&se_softc[dev]->is_if, se_start, ior);
1297 }
1298
1299 /*
1300 * Start output on interface.
1301 *
1302 */
void
se_start(int unit)
{
	register se_softc_t sc = se_softc[unit];
	io_req_t	request;
	struct se_desc	r;		/* host-side descriptor copy */
	int		tlen;		/* bytes actually handed to the chip */
	spl_t	s;
	register int	index;

	s = splimp();

	/*
	 * Fill free transmit-ring slots from the send queue, always
	 * leaving one slot unused so the ring never appears full.
	 */
	for (index = sc->xmt_last;
	     sc->xmt_count < (NXMT - 1);
	     sc->xmt_last = index = (index + 1) & (NXMT - 1)) {
		/*
		 * Dequeue the next transmit request, if any.
		 */
		IF_DEQUEUE(&sc->is_if.if_snd, request);
		if (request == 0) {
			/*
			 * Tell the lance to send the packet now
			 * instead of waiting until the next 1.6 ms
			 * poll interval expires.
			 */
			*sc->lnregs = LN_CSR0_TDMD | LN_CSR0_INEA;
			splx(s);
			return;	/* Nothing on the queue */
		}

		/*
		 * Keep request around until transmission complete
		 */
		sc->tpkt[index] = request;
		tlen = copy_to_lance(request,
				(volatile char *)sc->lntbuf[index]);

		/*
		 * Give away buffer.  Must copyin/out, set len,
		 * and set the OWN flag.  We do not do chaining.
		 * buffer_size is the two's complement of the byte count;
		 * the 0xf000 forces the high bits the chip expects set
		 * (NOTE(review): presumably per the Am7990 spec — confirm).
		 */
		(se_sw->desc_copyin)((vm_offset_t)sc->lntring[index],
				     (vm_offset_t)&r, sizeof(r));
		r.buffer_size = -(tlen) | 0xf000;
		r.status = (LN_TSTATE_OWN | LN_TSTATE_STP | LN_TSTATE_ENP);
		(se_sw->desc_copyout)((vm_offset_t)&r,
				      (vm_offset_t)sc->lntring[index],
				      sizeof(r));
		wbflush();	/* make sure the descriptor hits memory */

		sc->xmt_count++;
	}
	/*
	 * Since we actually have queued new packets, tell
	 * the chip to rescan the descriptors _now_.
	 * It is quite unlikely that the ring be filled,
	 * but if it is .. the more reason to do it!
	 */
	*sc->lnregs = LN_CSR0_TDMD | LN_CSR0_INEA;
	splx(s);
}
1364
1365
1366 /*
1367 * Pull a packet off the interface and
1368 * hand it up to the higher levels.
1369 *
1370 * Simulate broadcast packets in software.
1371 */
void
se_read(
	register se_softc_t sc,
	volatile char	*lnrbuf,	/* packet in lance memory, or unused */
	int		len,		/* packet length incl. ether header */
	io_req_t	loop_back)	/* non-zero: copy from this request
					   instead of from lance memory */
{
	register struct ifnet *ifp = &sc->is_if;
	register ipc_kmsg_t new_kmsg;
	char *hdr, *pkt;

	if (len <= sizeof(struct ether_header))
		return;	/* sanity */

	/*
	 * Get a new kmsg to put data into.
	 */
	new_kmsg = net_kmsg_get();
	if (new_kmsg == IKM_NULL) {
		/*
		 * No room, drop the packet
		 */
		ifp->if_rcvdrops++;
		return;
	}

	hdr = net_kmsg(new_kmsg)->header;
	pkt = net_kmsg(new_kmsg)->packet;

	/* Offset of the packet body past the 'fake' packet_header.
	 * (NOTE(review): OFF1 is defined but not used here.) */
#define OFF0 (sizeof(struct ether_header) - sizeof(struct packet_header))
#define OFF1 (OFF0 & ~3)
	if (loop_back) {
		/* Software loopback: source the bytes from the original
		 * transmit request rather than from lance memory. */
		bcopy(loop_back->io_data, hdr, sizeof(struct ether_header));
		bcopy(loop_back->io_data + OFF0,
		      pkt, len - OFF0);
	} else
		copy_from_lance(lnrbuf, len, (struct ether_header*)hdr,
				(struct packet_header*)pkt);

	/*
	 * Set up the 'fake' header with length.  Type has been left
	 * in the correct place.
	 */
	len = len - OFF0;
	((struct packet_header *)pkt)->length = len;

	/*
	 * Hand the packet to the network module.
	 */
	net_packet(ifp, new_kmsg, len, ethernet_priority(new_kmsg));
}
1423
1424
1425 /*
1426 * Get a packet out of Lance memory and into main memory.
1427 */
/*
 * Get a packet out of Lance memory and into main memory.
 * Splits the frame: the ethernet header goes to HDR, the payload is
 * copied just past the packet_header at PKT, and the ether_type is
 * replicated into pkt->type.
 */
private void
copy_from_lance(
	register volatile unsigned char	*rbuf,	/* source, in lance space */
	register unsigned int	nbytes,		/* total frame length */
	struct ether_header	*hdr,		/* OUT: ethernet header */
	struct packet_header	*pkt)		/* OUT: payload follows this */
{
	/*
	 * Read in ethernet header
	 */
	(se_sw->data_copyin) ((vm_offset_t)rbuf, (vm_offset_t)hdr, sizeof(struct ether_header));

	/* Advance past the header; mapoffs translates the byte offset
	 * into this chip's buffer addressing scheme. */
	nbytes -= sizeof(struct ether_header);
	rbuf += (se_sw->mapoffs) (sizeof(struct ether_header));

	pkt->type = (unsigned short) hdr->ether_type;

	/* Payload lands immediately after the packet_header. */
	(se_sw->data_copyin) ((vm_offset_t)rbuf, (vm_offset_t)(pkt + 1), nbytes);
}
1447
1448
1449 /*
1450 * Move a packet into Lance space
1451 */
1452 private int
1453 copy_to_lance(
1454 register io_req_t request,
1455 volatile char *sbuf)
1456 {
1457 register unsigned short *dp;
1458 register int len;
1459
1460 dp = (unsigned short *) request->io_data;
1461 len = request->io_count;
1462
1463 if (len > (int)(ETHERMTU + sizeof(struct ether_header))) {
1464 printf("se: truncating HUGE packet\n");
1465 len = ETHERMTU + sizeof(struct ether_header);
1466 }
1467
1468 (se_sw->data_copyout) ((vm_offset_t)dp, (vm_offset_t)sbuf, len);
1469
1470 if (len < LN_MINBUF_NOCH)
1471 /*
1472 * The lance needs at least this much data in a packet. Who
1473 * cares if I send some garbage that was left in the lance
1474 * buffer ? If one can spoof packets then one can spoof
1475 * packets!
1476 */
1477 len = LN_MINBUF_NOCH;
1478 return len;
1479 }
1480
1481 /*
1482 * Reset a descriptor's flags.
1483 * Optionally give the descriptor to the lance
1484 */
1485 private void
1486 se_desc_set_status (
1487 register se_desc_t lndesc,
1488 int val)
1489 {
1490 struct se_desc desc;
1491
1492 (se_sw->desc_copyin) ((vm_offset_t)lndesc, (vm_offset_t)&desc, sizeof(desc));
1493 desc.desc4.bits = 0;
1494 desc.status = val;
1495 (se_sw->desc_copyout) ((vm_offset_t)&desc, (vm_offset_t)lndesc, sizeof(desc));
1496 wbflush();
1497 }
1498
1499 /*
1500 * Set/Get status functions
1501 */
1502 int
1503 se_get_status(
1504 int dev,
1505 dev_flavor_t flavor,
1506 dev_status_t status, /* pointer to OUT array */
1507 natural_t *status_count) /* out */
1508 {
1509 return net_getstat(&se_softc[dev]->is_if,
1510 flavor, status, status_count);
1511 }
1512
1513 int
1514 se_set_status(
1515 int unit,
1516 dev_flavor_t flavor,
1517 dev_status_t status,
1518 natural_t status_count)
1519 {
1520 register se_softc_t sc;
1521
1522 sc = se_softc[unit];
1523
1524
1525 switch (flavor) {
1526
1527 case NET_STATUS:
1528 break;
1529
1530 case NET_ADDRESS: {
1531
1532 register union ether_cvt {
1533 unsigned char addr[6];
1534 int lwd[2];
1535 } *ec = (union ether_cvt *) status;
1536
1537 if (status_count < sizeof(*ec) / sizeof(int))
1538 return (D_INVALID_SIZE);
1539
1540 ec->lwd[0] = ntohl(ec->lwd[0]);
1541 ec->lwd[1] = ntohl(ec->lwd[1]);
1542
1543 se_setaddr(ec->addr, unit);
1544
1545 break;
1546 }
1547
1548 default:
1549 return D_INVALID_OPERATION;
1550 }
1551
1552 return D_SUCCESS;
1553 }
1554
1555
1556 /*
1557 * Install new filter.
1558 * Nothing special needs to be done here.
1559 */
1560 io_return_t
1561 se_setinput(
1562 int dev,
1563 ipc_port_t receive_port,
1564 int priority,
1565 filter_t *filter,
1566 natural_t filter_count)
1567 {
1568 return net_set_filter(&se_softc[dev]->is_if,
1569 receive_port, priority,
1570 filter, filter_count);
1571 }
1572
1573 /*
1574 * Allocate and initialize a ring descriptor.
1575 * Allocates a buffer from the lance memory and writes a descriptor
1576 * for that buffer to the host virtual address LNDESC.
1577 */
/*
 * Allocate and initialize a ring descriptor.
 * Allocates a buffer from the lance memory and writes a descriptor
 * for that buffer to the host virtual address LNDESC.
 * Returns the host virtual address of the buffer itself.
 */
private volatile long
*se_desc_alloc (
	register se_softc_t	sc,
	register se_desc_t	lndesc)
{
	register vm_offset_t	dp;	/* data pointer */
	struct se_desc		desc;	/* host-side descriptor image */

	/*
	 * Allocate buffer in lance space
	 */
	dp = se_malloc(sc, LN_BUFFER_SIZE);

	/*
	 * Build a descriptor pointing to it.
	 * Lmem/Hmem translate the se_malloc offset to chip-visible and
	 * host-visible addresses respectively; buffer_size is kept as
	 * the two's complement of the size, as the chip expects.
	 */
	desc.addr_low = Addr_lo(Lmem(dp));
	desc.addr_hi = Addr_hi(Lmem(dp));
	desc.status = 0;
	desc.buffer_size = -LN_BUFFER_SIZE;
	desc.desc4.bits = 0;

	/*
	 * Copy the descriptor to lance space
	 */
	(se_sw->desc_copyout) ((vm_offset_t)&desc, (vm_offset_t)lndesc, sizeof(desc));
	wbflush();

	return (volatile long *) Hmem(dp);
}
1608
1609 /*
1610 * Allocate a chunk of lance RAM buffer. Since we never
1611 * give lance RAM buffer memory back, we'll just step up the
1612 * byte-count on a per-unit basis.
1613 *
1614 * The return value is an index into the lance memory, which can be
1615 * passed with Hmem() and Lmem() to get the host and chip virtual addresses.
1616 */
1617 private vm_offset_t
1618 se_malloc(
1619 se_softc_t sc,
1620 int size)
1621 {
1622 register vm_offset_t ret;
1623
1624 /*
1625 * On first call, zero lance memory
1626 */
1627 if (sc->lnsbrk == 0)
1628 (se_sw->bzero) (Hmem(0), LN_MEMORY_SIZE);
1629
1630 /*
1631 * Start out on the first double longword boundary
1632 * (this accomodates some machines, with minimal loss)
1633 */
1634 if (sc->lnsbrk & 0xf)
1635 sc->lnsbrk = (sc->lnsbrk + 0x10) & ~0xf;
1636
1637 ret = sc->lnsbrk;
1638 sc->lnsbrk += size;
1639
1640 if (sc->lnsbrk > LN_MEMORY_SIZE)
1641 panic("se_malloc");
1642
1643 return ret;
1644 }
1645
1646 #endif /* NLN > 0 */
Cache object: a93aff47ca84947d9c65356af45db329
|