FreeBSD/Linux Kernel Cross Reference
sys/sqtsec/if_se.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993, 1991 Carnegie Mellon University
4 * Copyright (c) 1991 Sequent Computer Systems
5 * All Rights Reserved.
6 *
7 * Permission to use, copy, modify and distribute this software and its
8 * documentation is hereby granted, provided that both the copyright
9 * notice and this permission notice appear in all copies of the
10 * software, derivative works or modified versions, and any portions
11 * thereof, and that both notices appear in supporting documentation.
12 *
13 * CARNEGIE MELLON AND SEQUENT COMPUTER SYSTEMS ALLOW FREE USE OF
14 * THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON AND
15 * SEQUENT COMPUTER SYSTEMS DISCLAIM ANY LIABILITY OF ANY KIND FOR
16 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie Mellon
26 * the rights to redistribute these changes.
27 */
28
29 /*
30 * HISTORY
31 * $Log: if_se.c,v $
32 * Revision 2.5 93/11/17 18:47:22 dbg
33 * Removed more lint.
34 * [93/06/28 dbg]
35 *
36 * Revision 2.4 93/01/14 17:56:21 danner
37 * Ansified preprocessor comments, fixed a static/extern problem
38 * with se_output.
39 * [93/01/14 danner]
40 *
41 * Revision 2.3 91/07/31 18:06:35 dbg
42 * Changed copyright.
43 * [91/07/31 dbg]
44 *
45 * Revision 2.2 91/05/08 13:05:43 dbg
46 * Changed net_filter to net_packet.
47 *
48 * MACH_KERNEL conversion.
49 * Added volatile declarations.
50 * [91/03/22 dbg]
51 *
52 */
53
54 #undef RAW_ETHER
55 /* #define RAW_ETHER */ /* Not in MACH */
56
57 #undef PROMISCUOUS /* UNDO promiscuous kernel */
58 /* #define PROMISCUOUS */ /* promiscuous kernel */ /* Not in MACH*/
59
60 #ifndef lint
61 static char rcsid[] = "$Header: if_se.c,v 2.5 93/11/17 18:47:22 dbg Exp $";
62 #endif
63
64 /*
65 * SCSI/Ether Ethernet Communications Controller interface
66 *
67 * ETHER OUTPUT NOTES:
68 * -------------------
69 * Only one output request is active at a time. A simple array
70 * of iats holds the addresses of the mbuf data that get written.
71 * We copy transmits into a single buffer because the higher-level
72 * network code can generate mbufs too small for the DMA's to handle
73 * (the firmware doesn't have enough time to turn around and reload).
74 *
75 * As a matter of convention, all SEC Ether ioctls are done with the
76 * output device and output locks.
77 *
78 * ETHER INPUT NOTES:
79 * ------------------
80 * Device programs for Ether read contain a pointer to an iat
81 * and the number of data blocks in that iat. The iat dp_data
82 * fields give the physical addresses of the m_data field of
83 * a parallel array of mbufs. Ether packets read from the net
84 * are placed into these data blocks (and hence right into the
85 * mbufs).
86 *
87 * Each controller has a circular queue of pointers to mbufs and
88 * a circular queue of iats that are continually filled by the
89 * SEC firmware with input packets.
90 * Our job is to replace the used mbufs as quickly as possible
91 * at interrupt time, and refill the iats. We then add another
92 * device program (or two if we wrap around the end of the iat
93 * ring) for ether input.
94 *
95 * Important hints:
96 * - There is an iat queue and an mbuf pointer queue for each
97 * controller.
98 * - The iat queue and the mbuf queue have the same number
99 * of elements.
100 * - Except when refilling the read programs (at interrupt time),
101 * the heads of the iat queue and the mbuf queue should be the same.
102 * Even here, they should only be different during the actual
103 * refilling of the iat and mbuf queues.
104 * - All hell breaks loose if we run out of input programs
105 * to replace the iats. We can't sleep and wait for more at
106 * interrupt level.
107 * - Overspecifying the size of the Ether read request and
108 * done queues in the binary config file is a very good idea.
109 * - Refilling the queues after reading short packets will cause
110 * each packet to have a single new device program added to the
111 * Ether read device request queue.
112 * - No attempt is made to optimize these programs, as there is
113 * no synchronization with the SEC firmware: I can't ask him
114 * to stop for a second while I increase the number of iats in
115 * that last device program.
116 *
117 *
118 *
119 * TMP AND LOCKING NOTES:
120 * ----------------------
121 *
122 * There is very little locking or synchronization needed at this
123 * level of the software. Most of it really goes on above when necessary.
124 *
125 * In general, we try to lock only the portions of the controller state
126 * that we have to. When changing "important" information (like fields
127	 * in the arp and/or ifnet structures), we lock everything.
128	 *
129	 * To lock everything, it is safe to lock structures from the inside out.
130 * That is, lock either the input or output segment of the controller
131 * state, then lock the common structure. With the macros defined
132 * below, the order OS_LOCK, then SS_LOCK should be safe.
133 * See how se_init does locking for an example.
134 */
135
136 /*
137 * Revision 1.2 89/08/16 15:22:05 root
138 * balance -> sqt
139 *
140 * Revision 1.1 89/07/05 13:18:31 kak
141 * Initial revision
142 *
143 */
144
145 #ifdef MACH_KERNEL
146 #include <device/device_types.h>
147 #include <device/io_req.h>
148 #include <device/net_io.h>
149
150 #include <sqt/vm_defs.h>
151 #include <sqt/intctl.h>
152 #include <sqt/ioconf.h>
153 #include <sqt/cfg.h>
154 #include <sqt/slic.h>
155 #include <sqt/mutex.h>
156
157 #include <sqtsec/sec.h>
158 #include <sqtsec/if_se.h>
159
160 /*
161 * Convert to Mach style assert
162 */
163 #define ASSERT(C,S) assert(C)
164
165 #include <kern/assert.h>
166
167 #else /* MACH_KERNEL */
168 #include "sys/param.h"
169 #include "sys/systm.h"
170 #include "sys/mbuf.h"
171 #include "sys/buf.h"
172 #include "sys/protosw.h"
173 #include "sys/socket.h"
174 #include "sys/socketvar.h"
175 #include "sys/ioctl.h"
176 #include "sys/errno.h"
177 #include "sys/vm.h"
178 #include "sys/conf.h"
179
180 #include "net/if.h"
181 #include "net/netisr.h"
182 #include "net/route.h"
183
184 #include "netinet/in.h"
185 #include "netinet/in_systm.h"
186 #include "netinet/in_var.h" /* MACH/4.3 */
187 #include "netinet/ip.h"
188 #include "netinet/ip_var.h"
189 #include "netinet/if_ether.h"
190
191 #include "sqt/pte.h"
192 #include "sqt/intctl.h"
193 #include "sqt/ioconf.h"
194 #include "sqt/cfg.h"
195 #include "sqt/slic.h"
196
197 #include "sqt/mutex.h"
198
199 #include "sqtsec/sec.h"
200
201 #include "sqtif/if_se.h"
202
203 #ifdef PROMISCUOUS
204 #include "net/promisc.h"
205 #endif /* PROMISCUOUS */
206
207 #define KVIRTTOPHYS(addr) \
208 (PTETOPHYS(Sysmap[btop(addr)]) + ((int)(addr) & (NBPG-1)))
209
210 #ifdef MACH
211 /*
212 * Driver not yet converted to MACH/4.3 names.
213 */
214
215 #define ETHERPUP_PUPTYPE ETHERTYPE_PUP
216 #define ETHERPUP_IPTYPE ETHERTYPE_IP
217 #define ETHERPUP_ARPTYPE ETHERTYPE_ARP
218 #define ETHERPUP_TRAIL ETHERTYPE_TRAIL
219 #define ETHERPUP_NTRAILER ETHERTYPE_NTRAILER
220
221 /*
222 * Convert to Mach style assert
223 */
224 #define ASSERT(C,S) assert(C)
225
226 #include "kern/assert.h"
227
228 #endif /* MACH */
229 #endif /* MACH_KERNEL */
230
231 /*
232 * All procedures are referenced either through the se_driver structure,
233 * or via the procedure handles in the ifnet structure.
234 * Hence, everything but the se_driver structure should be able to be static.
235 */
236
237 static int se_probe(), se_boot(), se_intr(), se_watch();
238 static int se_init(), se_ioctl(), se_reset();
239 #ifndef MACH_KERNEL
240 static int se_output();
241 #endif
242
243 #ifndef PROMISCUOUS
244
245 static struct mbuf *se_reorder_trailer_packet();
246
247 #else
248
249 /* N.B. this routine is not static so promiscq handler can call it */
250
251 struct mbuf *se_reorder_trailer_packet();
252
253 #endif /* PROMISCUOUS */
254
255 int se_handle_read(), se_add_read_progs();
256 int se_start(), se_set_addr();
257 int se_set_modes();
258
259 #ifdef MACH_KERNEL
260 void se_start_u(int unit); /* takes unit number */
261 #endif /* MACH_KERNEL */
262
/* Driver switch handed to the SEC autoconfig code (see se_probe/se_boot/se_intr). */
struct sec_driver se_driver = {
	/* name	chan	flags		probe		boot	intr */
	"se",	1,	SED_TYPICAL,	se_probe,	se_boot, se_intr
};
267
268 /*
269 * SCSI-command template for Ether write.
270 * These are placed in the write device programs,
271 * and altered by the se_start routine just before
272 * we write the packet.
273 */
274
275 u_char se_scsi_cmd[10] = { SCSI_ETHER_WRITE, SCSI_ETHER_STATION };
276
277 #ifdef RAW_ETHER
278
279 #include "../net/raw_cb.h"
280
281 #endif /* RAW_ETHER */
282
283 /*
284 * sec_init_iatq()
285 * initialize a ring of iat entries.
286 *
287 * If locking is needed, it is presumed to be done elsewhere.
288 */
289
290 static void
291 sec_init_iatq(iq, count)
292 register struct sec_iatq *iq;
293 unsigned count;
294 {
295 iq->iq_iats = (struct sec_iat *)
296 calloc((int)(count*sizeof(struct sec_iat)));
297 iq->iq_size = count;
298 iq->iq_head = 0;
299 }
300
301 #ifndef MACH_KERNEL
302 /*
303 * sec_spray_mbuf_iatq - spray an mbuf chain into a queue of iats.
304 *
305 * The head of the iat queue is adjusted here.
306 * It is illegal to wrap around the IAT ring.
307 *
308 * Returns pointer to first IAT if it worked.
309 * Returns 0 otherwise.
310 */
311
312 static struct sec_iat *
313 sec_spray_mbuf_iatq(m, iq)
314 register struct mbuf *m;
315 register struct sec_iatq *iq;
316 {
317 register struct sec_iat *iat;
318 int i, n;
319
320 n = mbuf_chain_length(m);
321 ASSERT(n > 0, "sec_spray_iatq: n <= 0");
322 ASSERT(n <= iq->iq_size, "sec_spray_iatq: n > size");
323 if (n > (iq->iq_size - iq->iq_head))
324 return((struct sec_iat *)0);
325
326 for (i = 0; i < n; ++i, m = m->m_next) {
327 int k = (iq->iq_head + i) % iq->iq_size;
328
329 ASSERT(m != (struct mbuf *)0, "sec_spray_iatq: m == 0");
330 iat = &iq->iq_iats[k];
331 iat->iat_data = (u_char *)KVIRTTOPHYS(mtod(m, int));
332 iat->iat_count = m->m_len;
333 }
334
335 ASSERT(m == (struct mbuf *)0, "sec_spray_iatq: m != 0");
336 iat = &iq->iq_iats[iq->iq_head];
337 iq->iq_head = (iq->iq_head + n) % iq->iq_size;
338 return(iat);
339 }
340 #endif /* MACH_KERNEL */
341
/*
 * sec_start_prog - start a program on a SCSI/Ether device.
 *
 * Stores the instruction in the channel's CIB, clears the status
 * word (addressed physically in the CIB, so we map it back to a
 * kernel-virtual pointer first), and pokes the controller with a
 * SLIC interrupt.  Then busy-waits for the firmware to raise
 * SINST_INSDONE and returns the remaining status bits.
 *
 * 'cmd' and 'splok' are undeclared K&R parameters, so both default
 * to int.  When 'splok' is nonzero the CIB setup and SLIC poke are
 * done at splhi so no interrupt can slip in between them; the
 * busy-wait itself runs at the caller's ipl.
 */

static int
sec_start_prog(cmd, cib, slic, bin, vector, splok)
	struct sec_cib *cib;
	u_char slic, bin, vector;
{
	register volatile int *stat
		= PHYSTOKV(cib->cib_status, volatile int *);
	spl_t sipl;

	if (splok)
		sipl = splhi();

	cib->cib_inst = cmd;
	*stat = 0;
	mIntr(slic, bin, vector);
	if (splok)
		splx(sipl);

	/* spin until the firmware acknowledges the instruction */
	while ((*stat & SINST_INSDONE) == 0)
		continue;
	return(*stat & ~SINST_INSDONE);
}
368
369 #ifdef MACH_KERNEL
370 /*
371 * Fill in the fields of a net-kmsg pointer queue.
372 */
373 void
374 sec_init_msgq(
375 register struct sec_msgq *mq,
376 unsigned int size)
377 {
378 mq->mq_msgs =
379 (ipc_kmsg_t *) calloc((int) size * sizeof(ipc_kmsg_t));
380 mq->mq_size = size;
381 mq->mq_head = 0;
382 }
383
384 #else /* MACH_KERNEL */
385 /*
386 * Fill in the fields of an mbuf pointer queue.
387 *
388 * We can't fill in the mbuf pointers yet, as it is too soon
389 * to allocate mbufs yet.
390 */
391
392 static
393 sec_init_mbufq(mq, size)
394 register struct sec_mbufq *mq;
395 unsigned size;
396 {
397 mq->mq_mbufs =
398 (struct mbuf **) calloc((int)(size*sizeof(struct mbuf *)));
399 mq->mq_size = size;
400 mq->mq_head = 0;
401 }
402
403 /*
404 * sec_spray_mbuf_mbufq - spray an mbuf chain into a queue of mbuf pointers.
405 *
406 * The head of the mbuf queue is adjusted here.
407 * It is illegal to wrap around the mbuf pointer ring.
408 *
409 * This work is done in parallel with the iat queue.
410 *
411 * Returns pointer to first mbuf pointer if it worked.
412 * Returns 0 otherwise.
413 */
414
415 static struct mbuf **
416 sec_spray_mbuf_mbufq(m, mq)
417 register struct mbuf *m;
418 register struct sec_mbufq *mq;
419 {
420 register struct mbuf **mbufp;
421 register int i, n;
422
423 n = mbuf_chain_length(m);
424 ASSERT(n > 0, "sec_spray_mbufq: n <= 0");
425 ASSERT(n <= mq->mq_size, "sec_spray_mbufq: n > size");
426 if (n > (mq->mq_size - mq->mq_head))
427 return((struct mbuf **)0);
428
429 for (i = 0; i < n; ++i, m = m->m_next) {
430 ASSERT(m != (struct mbuf *)0, "sec_spray_mbufq: m == 0");
431 mbufp = &mq->mq_mbufs[(mq->mq_head + i) % mq->mq_size];
432 *mbufp = m;
433 }
434 ASSERT(m == (struct mbuf *)0, "sec_spray_mbufq: m != 0");
435 mbufp = &mq->mq_mbufs[mq->mq_head];
436 mq->mq_head = (mq->mq_head + n) % mq->mq_size;
437 return(mbufp);
438 }
439
/*
 * sec_chain_mbufs - chain 'n' mbufs from the mbuf q and return the head.
 *
 * 'n' and 'length' are undeclared K&R parameters, so both default
 * to int: 'n' is the number of ring slots to chain and 'length' is
 * the total byte count to spread over them.
 *
 * We don't touch the head pointer of the queue.
 * We set the length correctly for each mbuf: every mbuf in the
 * chain gets MLEN bytes except the last, which takes whatever of
 * 'length' is left over.
 */

static struct mbuf *
sec_chain_mbufs(mq, n, length)
	register struct sec_mbufq *mq;
{
	register struct mbuf *m;
	register int k;

	ASSERT(n > 0, "sec_chain: n <= 0");
	ASSERT(n <= mq->mq_size, "sec_chain: n > size");

	/*
	 * build the chain from the back to the front.
	 * 'k' always refers to the entry cyclically after
	 * the one we want to chain next.
	 */

	m = 0;
	k = n + mq->mq_head;
	if (k >= mq->mq_size) k -= mq->mq_size;
	ASSERT(k >= 0, "sec_chain: k < 0");
	ASSERT(k < mq->mq_size, "sec_chain: k >= size");

	do {
		--k;
		if (k < 0) k = mq->mq_size - 1;	/* cyclic step backwards */
		mq->mq_mbufs[k]->m_next = m;
		m = mq->mq_mbufs[k];
		if (m->m_next == (struct mbuf *)0) {

			/*
			 * "last" mbuf.  Adjust its length to the
			 * remainder after the full-MLEN mbufs.
			 */

			m->m_len = MLEN - (n*MLEN - length);

		} else {
			m->m_len = MLEN;
		}
	} while (k != mq->mq_head);

	ASSERT(mbuf_chain_length(m) == n, "sec_chain: length");
	ASSERT(mbuf_chain_size(m) == length, "sec_chain: size");

	return(m);
}
492
493 /*
494 * mbuf_chain_size - Determine the number of data bytes in a chain of mbufs.
495 */
496
497 static int
498 mbuf_chain_size(m)
499 register struct mbuf *m;
500 {
501 register int count;
502
503 for (count = 0; m != (struct mbuf *)0; m = m->m_next)
504 count += m->m_len;
505 return(count);
506 }
507
508 /*
509 * mbuf_chain_length - Determine the number of mbufs in a chain.
510 */
511
512 static int
513 mbuf_chain_length(m)
514 register struct mbuf *m;
515 {
516 register int count;
517
518 for (count = 0; m != (struct mbuf *)0; m = m->m_next)
519 ++count;
520 return(count);
521 }
522
523 #ifdef MACH
/*
 * m_getm() -- get multiple mbuf's.
 *
 * This interface is used in DYNIX to use only one "gate" round-trip
 * to allocate a set of mbuf's.  This implementation is simplistic
 * mono-processor version.
 *
 * 'canwait' and 'type' are undeclared K&R parameters (default int)
 * passed straight through to MGET.  The chain is built by pushing
 * each new mbuf on the front, so it comes back in reverse
 * allocation order -- callers only care about getting 'n' mbufs.
 *
 * Returns the head of the new chain, or NULL (after freeing any
 * partially-built chain) if an allocation fails.
 */

/*ARGSUSED*/
struct mbuf *
m_getm(canwait, type, n)	/* get n mbufs */
	register int n;
{
	register struct mbuf *m;
	register struct mbuf *msave = NULL;

	while (n-- > 0) {
		MGET(m, canwait, type);
		if (m == NULL) {
			/* give back whatever we managed to get */
			m_freem(msave);
			return((struct mbuf *) NULL);
		}
		m->m_next = msave;
		msave = m;
	}

	return(msave);
}
552 #endif /* MACH */
553 #endif /* MACH_KERNEL */
554
555 #ifdef MACH_KERNEL
556
557 /* lock the controller state */
558 #define SS_LOCK(softp) (p_lock(&(softp)->ss_lock, SPLIMP))
559 #define SS_UNLOCK(softp, sipl) (v_lock(&(softp)->ss_lock, sipl))
560
561 /* lock the output state */
562 #define OS_LOCK(softp) (p_lock(&(softp)->os_lock, SPLIMP))
563 #define OS_UNLOCK(softp, sipl) (v_lock(&(softp)->os_lock, sipl))
564
565 #else /* MACH_KERNEL */
566 #ifndef MACH
567
568 /* lock the controller state */
569 #define SS_LOCK(softp) (p_lock(&(softp)->ss_lock, SPLIMP))
570 #define SS_UNLOCK(softp, sipl) (v_lock(&(softp)->ss_lock, sipl))
571
572 /* lock the output state */
573 #define OS_LOCK(softp) (p_lock(&(softp)->os_lock, SPLIMP))
574 #define OS_UNLOCK(softp, sipl) (v_lock(&(softp)->os_lock, sipl))
575
576 #else
577 /*
578 * For now, MACH is "mono-processor" for all network code.
579 */
580
581 /* lock the controller state */
582 #define SS_LOCK(softp) splimp()
583 #define SS_UNLOCK(softp, sipl) splx(sipl)
584
585 /* lock the output state */
586 #define OS_LOCK(softp) splimp()
587 #define OS_UNLOCK(softp, sipl) splx(sipl)
588
589 #endif /* MACH */
590 #endif /* MACH_KERNEL */
591
592 int se_max_unit = -1; /* largest index of active ether controller */
593 u_char se_base_vec; /* base interrupt vector */
594 struct se_state *se_state; /* pointer to array of soft states */
595
596 /*
597 * Probe an SEC for existence of Ether controller.
598 *
599 * There's some debate about what this means: presently
600 * if the controller is there, so is the Ether part.
601 * This is expected to be changed in the future,
602 * when the world of depopulated boards arrives.
603 * So let's look at the diagnostics flags, and make
604 * the decision based on that.
605 */
606
607 static
608 se_probe(probe)
609 struct sec_probe *probe;
610 {
611 if (probe->secp_desc->sec_diag_flags & CFG_S_ETHER)
612 return(0);
613 return(1);
614 }
615
/*
 * se_boot_one()
 *	boot procedure for a single device.
 *
 * Allocate the non-mbuf data structures for the device.
 * We shouldn't really talk to the device now either.
 *
 * For both ether read and ether write, the request and done queues
 * were allocated by autoconfig code.  We record handles to these
 * queues and fill in the actual device programs.
 *
 * The done queues should not need anything done to them, as they
 * never need programs of their own.
 *
 * The status pointers for each cib are set to point to local data
 * in the state structures.
 *
 * Iat queues are also allocated, but can't be filled in yet
 * (no mbufs to allocate yet).  For input, the parallel array
 * of mbuf pointers is allocated as well.
 *
 * No locking needs to be done here, as we are still running config
 * code single-processor.
 *
 * Called once per SEC device channel; sd->sd_chan says whether this
 * call describes the Ether read or the Ether write side of the
 * controller.  The controller-common setup is done only on the
 * first call (ss_initted guards it).
 */

static void
se_boot_one(softp, sd)
	register struct se_state *softp;
	register struct sec_dev *sd;
{
	int i;

	/*
	 * Controller info: Can do this with
	 * either the input device or output device.
	 * Either way, we just do it once.
	 */

	if (!softp->ss_initted) {
		register struct ifnet *ifp;
		register struct sockaddr_in *sin;

#ifdef	MACH_KERNEL
		ifp = &softp->ss_if;
		ifp->if_unit = softp-se_state;	/* unit = index into state array */
		ifp->if_header_size = sizeof(struct ether_header);
		ifp->if_header_format = HDR_ETHERNET;
		ifp->if_address_size = 6;
#else	/* MACH_KERNEL */
		ifp = &softp->ss_arp.ac_if;
		ifp->if_unit = softp-se_state;
		ifp->if_name = se_driver.sed_name;
#endif	/* MACH_KERNEL */
		ifp->if_mtu = se_mtu;
#ifndef	MACH
		init_lock(&ifp->if_snd.ifq_lock, G_IFNET);
#endif	/* MACH */
#ifdef	MACH_KERNEL
#else	/* MACH_KERNEL */
#ifndef	MACH
		sin = (struct sockaddr_in *)&ifp->if_addr;
		sin->sin_family = AF_INET;
		sin->sin_addr = arpmyaddr((struct arpcom *)0);
#endif	/* MACH */
		ifp->if_init = se_init;
		ifp->if_output = se_output;
		ifp->if_ioctl = se_ioctl;
		ifp->if_reset = se_reset;
#endif	/* MACH_KERNEL */
		init_lock(&softp->ss_lock, se_gate);
		bzero((caddr_t)&softp->ss_sum, sizeof(softp->ss_sum));
		softp->ss_scan_int = se_watch_interval;
		/* copy the station address out of the board description */
#ifdef	MACH_KERNEL
		ifp->if_address = (char *)softp->ss_addr;
		bcopy((char *)sd->sd_desc->sec_ether_addr,
		      (char *)softp->ss_addr, 6);
#else	/* MACH_KERNEL */
		bcopy((caddr_t)sd->sd_desc->sec_ether_addr,
		      (caddr_t)softp->ss_arp.ac_enaddr, 6);
#endif	/* MACH_KERNEL */
		softp->ss_slic = sd->sd_desc->sec_slicaddr;
		softp->ss_bin = se_bin;
		softp->ss_ether_flags = SETHER_S_AND_B;
		softp->ss_alive = 1;
		softp->ss_initted = 1;
		softp->ss_init_called = 0;
#ifdef	MACH_KERNEL
		if_init_queues(ifp);
#else	/* MACH_KERNEL */
		if_attach(ifp);
#endif	/* MACH_KERNEL */
#ifndef	MACH
		pciattach(ifp, softp->ss_arp.ac_enaddr);
#endif	/* MACH */
		/*
		 * NOTE(review): looks like the SEC can only address
		 * driver state below 4Mb -- confirm against hardware
		 * documentation.
		 */
		if ((int)(&softp->os_gmode + 1) > 4*1024*1024) {
			printf("%s%d: data structures above 4Mb!\n",
				se_driver.sed_name, softp-se_state);
			printf("   Ethernet function is unpredictable.\n");
		}
	}
	if (sd->sd_chan == SDEV_ETHERREAD) {
		/* Ether input side: record queue handles, build rings. */
		init_lock(&softp->is_lock, se_gate);
		softp->is_cib = sd->sd_cib;
		softp->is_cib->cib_status =
			KVTOPHYS(&softp->is_status, int *);
		softp->is_reqq.sq_progq = sd->sd_requestq;
		softp->is_reqq.sq_size = sd->sd_req_size;
		softp->is_doneq.sq_progq = sd->sd_doneq;
		softp->is_doneq.sq_size = sd->sd_doneq_size;
		SEC_fill_progq(softp->is_reqq.sq_progq,
			       (int)softp->is_reqq.sq_size,
			       (int)sizeof(struct sec_edev_prog));
		/*
		 * NOTE(review): three request-queue slots are held back
		 * from the iat/msg/mbuf rings -- presumably headroom for
		 * in-flight read programs; confirm against the SEC
		 * firmware interface.
		 */
		sec_init_iatq(&softp->is_iatq, softp->is_reqq.sq_size-3);
#ifdef	MACH_KERNEL
		sec_init_msgq(&softp->is_msgq, softp->is_reqq.sq_size-3);
#else	/* MACH_KERNEL */
		sec_init_mbufq(&softp->is_mbufq, softp->is_reqq.sq_size-3);
#endif	/* MACH_KERNEL */
		softp->is_status = 0;
		softp->is_initted = 1;
	} else if (sd->sd_chan == SDEV_ETHERWRITE) {
		long cur_brk;

		/* Ether output side: one copy buffer, prebuilt SCSI cmds. */
		init_lock(&softp->os_lock, se_gate);
		softp->os_cib = sd->sd_cib;
		softp->os_cib->cib_status =
			KVTOPHYS(&softp->os_status, int *);
		softp->os_status = 0;
		softp->os_reqq.sq_progq = sd->sd_requestq;
		softp->os_reqq.sq_size = sd->sd_req_size;
		softp->os_doneq.sq_progq = sd->sd_doneq;
		softp->os_doneq.sq_size = sd->sd_doneq_size;
		SEC_fill_progq(softp->os_reqq.sq_progq,
			       (int)softp->os_reqq.sq_size,
			       (int)sizeof(struct sec_dev_prog));
		/* seed every write program with the SCSI command template */
		for (i = 0; i < softp->os_reqq.sq_size; ++i) {
			register struct sec_dev_prog *dp;

			dp =
			  PHYSTOKV(softp->os_reqq.sq_progq->pq_un.pq_progs[i],
				   struct sec_dev_prog *);
			bcopy((caddr_t)se_scsi_cmd,
			      (caddr_t)dp->dp_cmd, sizeof se_scsi_cmd);
			dp->dp_cmd_len = sizeof se_scsi_cmd;
		}

		/*
		 * transmit buffer can't cross 64K boundary
		 */

		/* NOTE(review): calloc(0) appears to return the current
		 * allocation break without allocating -- confirm against
		 * the kernel allocator. */
		cur_brk = (long)calloc(0);
		if ((cur_brk & 0xFFFF0000)
		    != ((cur_brk+OS_BUF_SIZE) & 0xFFFF0000)) {
			callocrnd((int)0x10000);
		}
		softp->os_buf = (u_char *)calloc(OS_BUF_SIZE);
		sec_init_iatq(&softp->os_iatq, (unsigned)se_write_iats);
#ifdef	MACH_KERNEL
		softp->os_pending = (io_req_t) 0;
#else	/* MACH_KERNEL */
		softp->os_pending = (struct mbuf *)0;
#endif	/* MACH_KERNEL */
		softp->os_initted = 1;
		softp->os_active = 0;
	} else {
		printf("%s%d: invalid device chan %d (0x%x) in boot routine\n",
			se_driver.sed_name, softp-se_state,
			sd->sd_chan, sd->sd_chan);
		panic("se_bootone");
	}
}
787
/*
 * se_boot - allocate data structures, etc at beginning of time.
 *
 * Called with an array of configured devices and the number
 * of ether devices ('ndevs' is an undeclared K&R parameter and so
 * defaults to int).
 * We allocate the necessary soft descriptions, and fill them
 * in with info from the devs[] array.
 * Due to the dual-device-channel nature of the SEC description,
 * each entry in the devs[] array describes either the Ether input
 * device or the Ether output device (hence unit = i/2 below).
 * We combine this into a single device state per controller.
 *
 * The program queues are allocated by the routine that calls us,
 * but the device programs themselves are not allocated til now.
 *
 * Work related to allocating mbufs is done later at se_init time.
 */

static
se_boot(ndevs, devs)
	struct sec_dev devs[];
{
	register int i;

	/*
	 * First, allocate soft descriptions.
	 *
	 * NOTE(review): with two devs[] entries per controller the
	 * largest unit used below is (ndevs-1)/2, so for even ndevs
	 * se_max_unit overstates it by one and one extra (always
	 * !ss_alive) state slot is allocated -- apparently harmless
	 * headroom; confirm.
	 */

	se_max_unit = ndevs/2;
	se_state = (struct se_state *)calloc((se_max_unit+1)
					     * sizeof(struct se_state));
	se_base_vec = devs[0].sd_vector;	/* vectors are consecutive from here */

	/*
	 * Now, boot each configured device.
	 */

	for (i = 0; i < ndevs; ++i) {
		register struct sec_dev *devp;
		register int unit;

		devp = &devs[i];
		if (devp->sd_alive == 0)
			continue;
		unit = i/2;	/* read/write channel pair share a unit */
		se_boot_one(&se_state[unit], devp);
	}
}
836
/*
 * Initialization of interface; clear recorded pending
 * operations, and reinitialize SCSI/Ether usage.
 *
 * Takes the output-side lock and then the controller lock, per the
 * OS_LOCK-then-SS_LOCK ordering described in the header comment.
 * Safe to call repeatedly: once the interface is running (or init
 * has already been called) we skip straight to the "justarp" tail.
 */

static
se_init(unit)
	int unit;
{
	/*
	 * NOTE(review): softp is aimed at se_state[unit] before the
	 * bounds check below -- only the address is computed, no
	 * dereference happens until after the check.
	 */
	register struct se_state *softp = &se_state[unit];
	register struct ifnet *ifp;
	register int i;
#ifdef	MACH_KERNEL
#else	/* MACH_KERNEL */
	struct mbuf *m;
	struct sockaddr_in *sin;
#endif	/* MACH_KERNEL */
	spl_t sipl;

	if (unit < 0 || unit > se_max_unit) {
		printf("%s%d: invalid unit in init\n",
			se_driver.sed_name, unit);
		return;
	}
	sipl = OS_LOCK(softp);
	(void) SS_LOCK(softp);

	if (!softp->ss_alive || !softp->ss_initted || !softp->is_initted
	    || !softp->os_initted)
		goto ret;

#ifdef	MACH_KERNEL
	ifp = &softp->ss_if;
#else	/* MACH_KERNEL */
	ifp = &softp->ss_arp.ac_if;
#ifndef	MACH
	sin = (struct sockaddr_in *)&ifp->if_addr;
	if (sin->sin_addr.s_addr == 0)
		goto ret;		/* address still unknown */
#else
	/* not yet, if address still unknown */
	if (ifp->if_addrlist == (struct ifaddr *)0)
		goto ret;
#endif	/* MACH */
#endif	/* MACH_KERNEL */

	if (ifp->if_flags & IFF_RUNNING)
		goto justarp;

	if (softp->ss_init_called)
		goto justarp;

	/* arm the periodic watchdog scan */
#ifdef	MACH_KERNEL
	timeout(se_watch, (char *)0, softp->ss_scan_int);
#else	/* MACH_KERNEL */
	ifp->if_watchdog = se_watch;
	ifp->if_timer = softp->ss_scan_int;
#endif	/* MACH_KERNEL */

#ifdef	MACH_KERNEL
	/*
	 * Set the Ether modes before we add input programs,
	 * as the firmware will need to know the size of the
	 * input packets before we do the SINST_STARTIO.
	 */

	se_set_modes(softp);

	/*
	 * Allocate some net_kmsgs for input packets and fill in
	 * the iats.
	 */
	for (i = 0; i < softp->is_msgq.mq_size; ++i) {
		register ipc_kmsg_t nk;

		nk = net_kmsg_alloc();

		se_add_read_progs(softp, nk);
	}
#else	/* MACH_KERNEL */
	/*
	 * Allocate the mbufs parallel to the iat queue for input packets
	 * and fill in the iats.
	 * We allocate the mbufs one at a time because expansion happens
	 * slowly (a page cluster at a time) and we ask for a lot
	 * of mbufs at once.
	 */

	m = (struct mbuf *)0;
	for (i = 0; i < softp->is_mbufq.mq_size; ++i) {
		register struct mbuf *newm = m_getm(M_DONTWAIT, MT_DATA, 1);

		if (newm == (struct mbuf *)0) {
			printf("%s%d: can't allocate %d mbufs!\n",
				se_driver.sed_name, softp-se_state,
				softp->is_mbufq.mq_size);
			m_freem(m);	/* free the partial chain */
			goto ret;
		}
		newm->m_next = m;
		m = newm;
	}

	/*
	 * Set the Ether modes before we add input programs,
	 * as the firmware will need to know the size of the
	 * input packets before we do the SINST_STARTIO.
	 */

	se_set_modes(softp);
	se_add_read_progs(softp, m);
#endif	/* MACH_KERNEL */

	if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
			   softp->ss_bin, SDEV_ETHERREAD, 1)
	    != SEC_ERR_NONE) {
		printf("%s%d: can't initialize.\n",
			se_driver.sed_name, softp-se_state);
		goto ret;
	}

	/*
	 * Shouldn't have to restart output to the device,
	 * as nothing was reset.
	 */

#ifndef	MACH
	ifp->if_flags |= IFF_UP|IFF_RUNNING;
#else
	ifp->if_flags |= IFF_RUNNING;
#endif	/* MACH */
	softp->ss_init_called = 1;

justarp:
	/*
	 * NOTE(review): under plain MACH (not MACH_KERNEL) neither
	 * branch below unlocks, so this path returns without
	 * SS_UNLOCK/OS_UNLOCK (i.e. without splx) -- confirm this is
	 * intentional for the mono-processor MACH configuration.
	 */
#ifdef	MACH_KERNEL
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
#else	/* MACH_KERNEL */
#ifndef	MACH
	se_set_modes(softp);
	if_rtinit(ifp, RTF_UP);
	arpattach(&softp->ss_arp);
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
	arpwhohas(&softp->ss_arp, &sin->sin_addr);
#endif	/* MACH */
#endif	/* MACH_KERNEL */
	return;
ret:
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
}
989
990 #ifdef MACH_KERNEL
991 io_return_t
992 se_open(
993 int unit,
994 int flag)
995 {
996 if (unit < 0 || unit > se_max_unit)
997 return (D_NO_SUCH_DEVICE);
998
999 se_state[unit].ss_if.if_flags |= IFF_UP;
1000 se_init(unit); /* XXX should return status */
1001 return D_SUCCESS;
1002 }
1003
1004 io_return_t
1005 se_setinput(
1006 int unit,
1007 mach_port_t receive_port,
1008 int priority,
1009 filter_t filter[],
1010 natural_t filter_count)
1011 {
1012 if (unit < 0 || unit > se_max_unit)
1013 return D_NO_SUCH_DEVICE;
1014
1015 return net_set_filter(&se_state[unit].ss_if,
1016 receive_port, priority,
1017 filter, filter_count);
1018 }
1019
1020
1021 #endif /* MACH_KERNEL */
1022 /*
1023 * Ethernet interface interrupt routine.
1024 * Could be output or input interrupt.
1025 * We determine the source and call the appropriate routines
1026 * to decode the done queue programs.
1027 */
1028
1029 static
1030 se_intr(vector)
1031 int vector;
1032 {
1033 int unit = (vector - se_base_vec)/2;
1034 int is_read = (vector - 2*unit) == 0;
1035 register struct se_state *softp = &se_state[unit];
1036 register struct sec_pq *sq;
1037 spl_t sipl;
1038
1039 if (unit < 0 || unit > se_max_unit) {
1040 printf("%s%d: invalid interrupt vector %d\n",
1041 se_driver.sed_name, unit, vector);
1042 return;
1043 }
1044
1045 if (is_read) { /* Receiver interrupt. */
1046 register struct sec_eprogq *epq;
1047 #ifdef DEBUG
1048 if (se_ibug)
1049 printf("R%d ", unit);
1050 #endif /* DEBUG */
1051 ASSERT(softp->ss_alive, "se_intr: alive");
1052 ASSERT(softp->ss_init_called, "se_intr: initted");
1053 sq = &softp->is_doneq;
1054 epq = (struct sec_eprogq *)sq->sq_progq;
1055 ASSERT(epq->epq_tail < sq->sq_size, "se_intr: tail");
1056 ASSERT(epq->epq_head < sq->sq_size, "se_intr: head");
1057
1058 /*
1059 * Lock the input state before we test for work.
1060 * This keeps other processors out of the way once
1061 * we commit to entering the loop and doing work.
1062 * There is a race here between the decision to
1063 * leave the loop and the v_lock that can cause an
1064 * interrupt from the SCSI/Ether controller to be
1065 * missed, but we say this is acceptable, as the
1066 * net should always be busy, and we will eventually
1067 * see the packet on the next interrupt.
1068 */
1069
1070 sipl = cp_lock(&softp->is_lock, SPLIMP);
1071 if (sipl == CPLOCKFAIL)
1072 return;
1073 while (epq->epq_tail != epq->epq_head) { /* work to do */
1074 se_handle_read(softp, &epq->epq_status[epq->epq_tail]);
1075 epq->epq_tail = (epq->epq_tail + 1) % sq->sq_size;
1076 }
1077 v_lock(&softp->is_lock, sipl);
1078
1079 } else { /* Transmitter interrupt. */
1080
1081 register struct sec_progq *pq;
1082 register struct sec_dev_prog *dp;
1083 #ifdef DEBUG
1084 if (se_obug)
1085 printf("X%d ", unit);
1086 #endif
1087 /*
1088 * Since there is only one device program active
1089 * at a time on ether output, it makes more sense to
1090 * spin on the output side lock rather than conditionally
1091 * lock an interrupt lock here.
1092 * When we switch to multiple active device outputs
1093 * per SCSI/Ether, this decision should be reversed,
1094 * as there is the potential for idling an unbounded
1095 * number of processors while Ether transmit interrupts
1096 * occur.
1097 */
1098
1099 sipl = OS_LOCK(softp);
1100 if (!softp->ss_alive || !softp->ss_init_called) {
1101 OS_UNLOCK(softp, sipl);
1102 return;
1103 }
1104 sq = &softp->os_doneq;
1105 pq = sq->sq_progq;
1106
1107 ASSERT(pq->pq_tail < sq->sq_size, "se_intr: tail 2");
1108 ASSERT(pq->pq_head < sq->sq_size, "se_intr: head 2");
1109 if (!softp->os_active) { /* spurious interrupt */
1110 OS_UNLOCK(softp, sipl);
1111 return;
1112 }
1113 while (pq->pq_tail != pq->pq_head) {
1114 dp = PHYSTOKV(pq->pq_un.pq_progs[pq->pq_tail],
1115 struct sec_dev_prog *);
1116 if (dp->dp_status1 != 0) {
1117 int status = sec_start_prog(SINST_RESTARTIO,
1118 softp->os_cib,
1119 softp->ss_slic,
1120 softp->ss_bin,
1121 SDEV_ETHERWRITE, 1);
1122 if (status != SEC_ERR_NONE
1123 && status != SEC_ERR_NO_MORE_IO) {
1124 printf("%s%d: se_intr: status 0x%x\n",
1125 se_driver.sed_name,
1126 softp-se_state,
1127 softp->os_status);
1128 }
1129 }
1130 pq->pq_tail = (pq->pq_tail + 1) % sq->sq_size;
1131 softp->os_active = 0;
1132 ASSERT(softp->os_pending != (struct mbuf *)0,
1133 "se_intr: os_pending");
1134 #ifdef PROMISCUOUS
1135 if (promiscon) {
1136 struct mbuf * xm;
1137 struct promiscif * xpm;
1138
1139 /*
1140 * manage monitor receipt of transmitted packets.
1141 */
1142
1143 xm = m_getm(M_DONTWAIT, MT_DATA, 1);
1144 (void) IF_LOCK(&promiscq);
1145 if (!xm || IF_QFULL(&promiscq)) {
1146 IF_DROP(&promiscq);
1147 IF_UNLOCK(&promiscq, SPLIMP);
1148 m_freem(softp->os_pending);
1149 if(xm) (void) m_free(xm);
1150 }else{
1151 xm->m_next = softp->os_pending;
1152 xm->m_len = sizeof(struct promiscif);
1153 xpm = mtod(xm, struct promiscif *);
1154 xpm->promiscif_ifnet = (caddr_t) softp;
1155 xpm->promiscif_flag = PROMISC_XMIT;
1156
1157 IF_ENQUEUE(&promiscq, xm);
1158 if (!promiscq.ifq_busy) {
1159 schednetisr(NETISR_PROMISC);
1160 }
1161 IF_UNLOCK(&promiscq, SPLIMP);
1162 }
1163
1164 } else /* not monitoring */
1165
1166 #endif /* PROMISCUOUS */
1167 #ifdef MACH_KERNEL
1168 iodone(softp->os_pending);
1169 softp->os_pending = 0;
1170 #else
1171 m_freem(softp->os_pending);
1172 softp->os_pending = (struct mbuf *)0;
1173 #endif /* MACH_KERNEL */
1174 /* Should only be one program in the queue */
1175 ASSERT(pq->pq_tail == pq->pq_head,"se_intr: head/tail");
1176 }
1177 OS_UNLOCK(softp, sipl);
1178 se_start(softp);
1179 }
1180 }
1181
1182 #ifdef MACH_KERNEL
1183
1184 /*
1185 * We set the offset for receives so that the data portion of a packet
1186 * lands at sizeof(struct packet_header) into the data portion
1187 * of a network message. That leaves the Ethernet type word at
1188 * the 'type' field of the packet_header.
1189 */
1190 #define ETHER_HDR_OFF \
1191 (sizeof(struct packet_header) - sizeof(struct ether_header))
1192
1193 se_handle_read(softp, statp)
1194 register struct se_state *softp;
1195 struct sec_ether_status *statp;
1196 {
1197 struct sec_msgq *mq = &softp->is_msgq;
1198 ipc_kmsg_t old_kmsg, new_kmsg;
1199 struct ifnet *ifp = &softp->ss_if;
1200 int len;
1201 char *old_addr;
1202
1203 old_kmsg = mq->mq_msgs[mq->mq_head];
1204 old_addr = &net_kmsg(old_kmsg)->packet[ETHER_HDR_OFF];
1205
1206 if (KVTOPHYS(old_addr, u_char *) != statp->es_data) {
1207 struct sec_iatq *iq = &softp->is_iatq;
1208 struct sec_iat *iat = &iq->iq_iats[iq->iq_head];
1209 register int i;
1210
1211 printf("%s%d: botch: statp 0x%x from mq 0x%x; es_data 0x%x\n",
1212 se_driver.sed_name, softp-se_state, statp, mq,
1213 statp->es_data);
1214 printf("mq->mq_head %d KVTOPHYS(old_kmsg) 0x%x\n",
1215 mq->mq_head, KVTOPHYS(old_kmsg, int));
1216 printf("iatq 0x%x iq->iq_head %d iat 0x%x addr 0x%x count %d\n",
1217 iq, iq->iq_head, iat, iat->iat_data, iat->iat_count);
1218 for (i = 0; i < 500000; ++i) {
1219 if (KVTOPHYS(old_kmsg, int) == (int)statp->es_data)
1220 break;
1221 }
1222 }
1223
1224 len = (int)statp->es_count;
1225
1226 #ifdef DEBUG
1227 if (se_ibug) {
1228 printf("got %d ", statp->es_count);
1229 }
1230 #endif
1231
1232 softp->ss_if.if_ipackets++;
1233
1234 new_kmsg = net_kmsg_get();
1235 if (new_kmsg == 0) {
1236 /*
1237 * Cannot allocate replacement message.
1238 * Use the old one again.
1239 */
1240 softp->ss_if.if_rcvdrops++;
1241
1242 se_add_read_progs(softp, old_kmsg);
1243 if (sec_start_prog(SINST_STARTIO, softp->is_cib,
1244 softp->ss_slic, softp->ss_bin,
1245 SDEV_ETHERREAD, 1)
1246 != SEC_ERR_NONE) {
1247 printf("%s%d: se_handle_read: status 0x%x\n",
1248 se_driver.sed_name, softp-se_state,
1249 softp->is_status);
1250 }
1251 #ifdef DEBUG
1252 if (se_ibug > 1)
1253 printf("lose%d ", n);
1254 #endif
1255 return;
1256 }
1257
1258 /*
1259 * Replace the kmsg in the queue.
1260 */
1261
1262 se_add_read_progs(softp, new_kmsg);
1263
1264 if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
1265 softp->ss_bin, SDEV_ETHERREAD, 1)
1266 != SEC_ERR_NONE) {
1267 printf("%s%d: se_handle_read: status 0x%x\n",
1268 se_driver.sed_name, softp-se_state, softp->is_status);
1269 }
1270 #ifdef DEBUG
1271 if (se_ibug > 1)
1272 printf("repl%d ", n);
1273 #endif
1274
1275 /*
1276 * Fill in the missing fields of the old kmsg.
1277 */
1278 {
1279 register struct ether_header *eh;
1280 register struct packet_header *ph;
1281
1282 eh = (struct ether_header *) &net_kmsg(old_kmsg)->header[0];
1283 ph = (struct packet_header *) &net_kmsg(old_kmsg)->packet[0];
1284
1285 /*
1286 * Copy the Ethernet header from where it was received.
1287 */
1288 *eh = *(struct ether_header *)old_addr;
1289
1290 /*
1291 * Set up the type and length fields in the packet header.
1292 */
1293 ph->type = eh->ether_type;
1294 ph->length = len - sizeof(struct ether_header)
1295 + sizeof(struct packet_header);
1296
1297 /*
1298 * Hand the packet to the network module.
1299 */
1300 net_packet(&softp->ss_if, old_kmsg, ph->length,
1301 ethernet_priority(old_kmsg));
1302 }
1303 #ifdef DEBUG
1304 if (se_ibug) printf("\n");
1305 #endif
1306 }
1307
1308 /*
1309 * se_add_read_progs - Add a read program to the request queue.
1310 *
1311 * We replace the net_kmsgs in the kmsg queue 'mq' here.
1312 * 'm' is a net_kmsg.
1313 */
1314 se_add_read_progs(softp, m)
1315 register struct se_state *softp;
1316 register ipc_kmsg_t m;
1317 {
1318 struct sec_msgq *mq;
1319 struct sec_iatq *iq;
1320 struct sec_pq *sq;
1321 struct sec_progq *pq;
1322 struct sec_edev_prog *dp;
1323 struct sec_iat *iat;
1324
1325 mq = &softp->is_msgq;
1326 iq = &softp->is_iatq;
1327 sq = &softp->is_reqq;
1328 pq = sq->sq_progq;
1329
1330 /* We do the entire msg in one piece. */
1331
1332 assert(iq->iq_size - iq->iq_head >= 1);
1333 assert(mq->mq_size - mq->mq_head >= 1);
1334
1335 /*
1336 * Point iats at the net_kmsg.
1337 * It is illegal to wrap around the ring.
1338 */
1339 iat = &iq->iq_iats[iq->iq_head];
1340
1341 iat->iat_data = KVTOPHYS(&net_kmsg(m)->packet[ETHER_HDR_OFF],
1342 u_char *);
1343 iat->iat_count = sizeof(struct ether_header) + ETHERMTU;
1344
1345 iat = &iq->iq_iats[iq->iq_head];
1346 iq->iq_head = (iq->iq_head + 1) % iq->iq_size;
1347
1348 mq->mq_msgs[mq->mq_head] = m;
1349 mq->mq_head = (mq->mq_head + 1) % mq->mq_size;
1350
1351 /*
1352 * Add the device program.
1353 */
1354 assert((pq->pq_head + 1) % sq->sq_size != pq->pq_tail);
1355 dp = PHYSTOKV(pq->pq_un.pq_eprogs[pq->pq_head],
1356 struct sec_edev_prog *);
1357 assert(dp != 0);
1358 dp->edp_iat_count = 1;
1359 dp->edp_iat = SEC_IATIFY(KVTOPHYS(iat, vm_offset_t));
1360 pq->pq_head = (pq->pq_head + 1) % sq->sq_size;
1361
1362 }
1363
1364 #else /* MACH_KERNEL */
1365 /*
1366 * Handle read interrupt requests.
1367 * This includes recognizing trailer protocol,
1368 * and passing up to the higher level software.
1369 *
1370 * This is called with the input state locked.
1371 */
1372
1373 extern struct custom_client custom_clients[];
1374
static
se_handle_read(softp, statp)
	register struct se_state *softp;
	struct sec_ether_status *statp;
{
	struct sec_mbufq *mq = &softp->is_mbufq;
	struct mbuf *m, *mnew;
#ifdef PROMISCUOUS
	struct mbuf *mpromisc;
	struct promiscif * mp;
#endif
	int len, n, int_to_sched;
	struct ether_header *hp;
	struct ifqueue *inq;
	spl_t sipl;
	struct ifnet *ifp = &softp->ss_arp.ac_if;
#ifdef RAW_ETHER
	struct raw_header * rh;
	struct mbuf * mrh;
#endif
	int trailer = 0;		/* nonzero once a trailer packet is reordered */
	int ci;

	/*
	 * Sanity check: the controller's data pointer should match the
	 * mbuf at the head of our circular mbuf queue.  If not, report
	 * the botch and spin briefly (debugging aid; no side effects).
	 */
	m = mq->mq_mbufs[mq->mq_head];
	if ((u_char *)KVIRTTOPHYS(mtod(m, int)) != statp->es_data) {
		struct sec_iatq *iq = &softp->is_iatq;
		struct sec_iat *iat = &iq->iq_iats[iq->iq_head];
		register int i;

		printf("%s%d: botch: statp 0x%x from mq 0x%x; es_data 0x%x\n",
			se_driver.sed_name, softp-se_state, statp, mq,
			statp->es_data);
		printf("mq->mq_head %d KVIRTTOPHYS(m) 0x%x\n",
			mq->mq_head, KVIRTTOPHYS(mtod(m, int)));
		printf("iatq 0x%x iq->iq_head %d iat 0x%x addr 0x%x count %d\n",
			iq, iq->iq_head, iat, iat->iat_data, iat->iat_count);
		for (i = 0; i < 500000; ++i) {
			if (KVIRTTOPHYS(mtod(m, int)) == (int)statp->es_data)
				break;
		}
	}

	/*
	 * Chain the n head mbufs of the queue into one packet chain.
	 */
	n = howmany(statp->es_count, MLEN);
	m = sec_chain_mbufs(mq, n, (int)statp->es_count);

#ifdef DEBUG
	if (se_ibug) {
		printf("got%d ", statp->es_count);
	}
#endif

	softp->ss_arp.ac_if.if_ipackets++;

#ifdef PROMISCUOUS

	/*
	 * get an mbuf for passing softp to promiscintr.
	 */

	mnew = m_getm(M_DONTWAIT, MT_DATA, n+1);
#else
	mnew = m_getm(M_DONTWAIT, MT_DATA, n);

#endif /* PROMISCUOUS */

	if (mnew == 0) {

		/*
		 * Can't allocate replacement mbufs.
		 * Go ahead and use the old ones again.
		 * (The received packet is dropped.)
		 */

		se_add_read_progs(softp, m);
		if (sec_start_prog(SINST_STARTIO, softp->is_cib,
				   softp->ss_slic, softp->ss_bin,
				   SDEV_ETHERREAD, 1)
		    != SEC_ERR_NONE) {
			printf("%s%d: se_handle_read: status 0x%x\n",
				se_driver.sed_name, softp-se_state,
				softp->is_status);
		}
#ifdef DEBUG
		if (se_ibug > 1)
			printf("lose%d ", n);
#endif
		softp->ss_arp.ac_if.if_ierrors++;
		return;
	}

	/*
	 * Replace the mbufs in the circular mbuf queue.
	 */

#ifdef PROMISCUOUS

	/*
	 * strip off promiscif mbuf (the extra one allocated above)
	 */

	mpromisc = mnew;
	mnew = mnew->m_next;
	mpromisc->m_next = m;
	mpromisc->m_len = sizeof(struct promiscif);

#endif

	se_add_read_progs(softp, mnew);
	if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
			   softp->ss_bin, SDEV_ETHERREAD, 1)
	    != SEC_ERR_NONE) {
		printf("%s%d: se_handle_read: status 0x%x\n",
			se_driver.sed_name, softp-se_state, softp->is_status);
	}
#ifdef DEBUG
	if (se_ibug > 1)
		printf("repl%d ", n);
#endif

	/*
	 * m now contains a packet from the interface.
	 * Check for trailer protocol.
	 */

	hp = mtod(m, struct ether_header *);
	len = statp->es_count;
	ASSERT(len == mbuf_chain_size(m), "se_handle_read: size");
	len -= sizeof(struct ether_header);
	if (len < ETHERMIN) {
		/* runt packet: count the error and drop it */

#ifdef DEBUG
		if (se_ibug)
			printf("packet returned of size %d\n", len);
#endif
		++softp->ss_arp.ac_if.if_ierrors;

#ifdef PROMISCUOUS
		m_freem(mpromisc);
#else
		m_freem(m);
#endif

		return;
	}

#ifdef PROMISCUOUS
	if (promiscon) {	/* promiscon => give promiscintr the packet */
		inq = &promiscq;
		int_to_sched = NETISR_PROMISC;
		mp = mtod(mpromisc, struct promiscif *);
		mp->promiscif_ifnet = (caddr_t)softp;
		mp->promiscif_flag = PROMISC_RCVD;
		sipl = IF_LOCK(inq);
		if (IF_QFULL(inq)) {
			IF_DROP(inq);
			IF_UNLOCK(inq, sipl);
			m_freem(mpromisc);
			return;
		}
		IF_ENQUEUE(inq, mpromisc);
		if (!inq->ifq_busy) {
			schednetisr(int_to_sched);
		}
		IF_UNLOCK(inq, sipl);
		return;
	}

#endif /* PROMISCUOUS */

	/*
	 * check for SETHER_PROMISCUOUS but !promiscon:
	 * hardware is in promiscuous mode but nobody is monitoring,
	 * so discard packets not addressed to us or broadcast.
	 */

	if (softp->ss_ether_flags == SETHER_PROMISCUOUS) {
		if (bcmp((char *)etherbroadcastaddr,
			 (char *)hp->ether_dhost, 6) != 0
		    && bcmp((char *)softp->ss_arp.ac_enaddr,
			    (char *)hp->ether_dhost, 6) != 0) {

#ifdef PROMISCUOUS
			m_freem(mpromisc);
#else
			m_freem(m);	/* throw promiscuous packets away. */
#endif
			return;
		}
	}

#ifdef PROMISCUOUS

	(void) m_free(mpromisc); /* promiscoff, no need for promiscif buffer */

#endif

	/* strip the Ethernet header; hp still points at it */
	m->m_off += sizeof(struct ether_header);
	m->m_len -= sizeof(struct ether_header);
	hp->ether_type = ntohs((u_short)hp->ether_type);
	if (hp->ether_type >= ETHERPUP_TRAIL
	    && hp->ether_type < ETHERPUP_TRAIL + ETHERPUP_NTRAILER) {
		mnew = se_reorder_trailer_packet(hp, m);
		if (mnew == (struct mbuf *)0) {
			m_freem(m);
			return;
		}
		m = mnew;
		trailer++;
	}
#ifdef MACH
	/*
	 * 4.3 wants the interface pointer inserted in front of the
	 * data -- do this.  For now, just tack on an mbuf to front
	 * of the mbuf chain (this works for both trailers and non-trailer
	 * packets).  This can likely be done more efficiently.
	 *
	 * See IF_ADJ()/IF_DEQUEUEIF().
	 */
	MGET(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == (struct mbuf *) NULL) {
		m_freem(m);
		return;
	}
	mnew->m_len = sizeof (struct ifnet *);
	*(mtod(mnew, struct ifnet **)) = ifp;
	mnew->m_next = m;
	m = mnew;
#endif /* MACH */

#ifdef DEBUG
	if (se_ibug)
		printf("type0x%x ", hp->ether_type);
#endif

	/*
	 * Dispatch on packet type: pick an input queue + netisr,
	 * or hand off directly (ARP, PCI, custom clients).
	 */
	switch (hp->ether_type) {
#ifdef INET
	case ETHERPUP_IPTYPE:
#ifdef DEBUG
		if (se_ibug) {
			printf("ip ");
			if (hp->ether_dhost[0] & 0x01)
				printf("broad ");
			else printf("station ");
		}
#endif /* DEBUG */
		int_to_sched = NETISR_IP;
		inq = &ipintrq;
		break;

	case ETHERPUP_ARPTYPE:
#ifdef DEBUG
		if (se_ibug)
			printf("arp\n");
#endif /* DEBUG */
		arpinput(&softp->ss_arp, m);
		return;
#endif /* INET */

#ifndef MACH
	case PCI_TYPE:
		pcirint(hp, m);
		return;
#endif /* MACH */

	default:

#ifndef MACH
		/* do not queue reordered trailer to rawif or custom */

		if(trailer) {
			m_freem(m);
			return;
		}

		/* allow for custom ether_read device drivers */

		for(ci = 0; ci < 4; ci++) {
			if(custom_clients[ci].custom_devno
			   && custom_clients[ci].custom_type == hp->ether_type)
			{

				ASSERT(cdevsw[major(custom_clients[ci].custom_devno)].d_read,
					"no custom_client cdevsw.d_read!");

				(*cdevsw[major(custom_clients[ci].custom_devno)].d_read)
					(hp, m, ifp);

				custom_clients[ci].custom_count++;
				return;
			}
		}

#ifdef RAW_ETHER

		/*
		 * reput the ether header into the lead data buffer
		 * *and* copy a Unix4.2 raw_header for compatibility
		 */

		m->m_off -= sizeof(struct ether_header);
		m->m_len += sizeof(struct ether_header);
		int_to_sched = NETISR_RAW;
		inq = &rawif.if_snd;
		mrh = m_getclrm(M_DONTWAIT, MT_DATA, 1);
		if(mrh == (struct mbuf *) NULL) {
			m_freem(m);
			return;
		}

		/*
		 * link the raw_header into the ether packet for 4.2
		 * compatibility (?)
		 *
		 * set up raw header, using type as sa_data for bind.
		 * raw_input() could do this if static struct set up.
		 * - for now assign AF_UNSPEC for protocol
		 */

		mrh->m_next = m;
		m = mrh;
		rh = mtod(mrh, struct raw_header*);
		rh->raw_proto.sp_family = AF_RAWE;
		rh->raw_proto.sp_protocol = AF_UNSPEC;

		/*
		 * copy AF_RAWE and ether_type in for dst addr
		 */

		rh->raw_dst.sa_family = AF_RAWE;
		bcopy((caddr_t)&hp->ether_type,
			(caddr_t)rh->raw_dst.sa_data, 2);
		bcopy((caddr_t)&hp->ether_type,
			(caddr_t)rh->raw_src.sa_data, 2);

		/*
		 * put type back into net order
		 */

		hp->ether_type = htons(hp->ether_type);

		/*
		 * copy AF_RAWE and if_unit # in for src addr
		 */

		rh->raw_src.sa_family = AF_RAWE;
		bcopy((caddr_t)&ifp->if_unit,
			(caddr_t)&rh->raw_src.sa_data[2], sizeof(short));

#else /* not RAW_ETHER */

		m_freem(m);
		return;

#endif /* RAW_ETHER */
#else /* MACH */
		m_freem(m);
		return;
#endif /* MACH */

	} /* end switch */

	/*
	 * Enqueue on the chosen protocol input queue and schedule
	 * the matching software interrupt; drop if the queue is full.
	 */
#ifndef MACH
	sipl = IF_LOCK(inq);
#else
	sipl = splimp();
#endif /* MACH */
	if (IF_QFULL(inq)) {
		IF_DROP(inq);
#ifndef MACH
		IF_UNLOCK(inq, sipl);
#else
		splx(sipl);
#endif /* MACH */
		m_freem(m);
		return;
	}
	IF_ENQUEUE(inq, m);
#ifndef MACH
	if (!inq->ifq_busy) {
		schednetisr(int_to_sched);
	}
	IF_UNLOCK(inq, sipl);
#else
	schednetisr(int_to_sched);
	splx(sipl);
#endif /* MACH */
#ifdef DEBUG
	if (se_ibug) printf("\n");
#endif /* DEBUG */
}
1762
1763
1764
1765 /*
1766 * se_reorder_trailer_packet - return a real mbuf chain after noticing trailer
1767 * protocol is being used.
1768 *
1769 * Return the new chain, and modify the header to reflect the real type.
1770 * Return a null mbuf if we couldn't do it.
1771 * It is the responsibility of the caller to free the original, if necessary.
1772 */
1773
/*
 * Trailer-protocol header as found at the trailer offset of a
 * received packet: the real packet type and the byte count of
 * the header-plus-data that follows it (both in network order).
 */
struct trailer {
	u_short	tl_type;
	u_short	tl_count;
};
1778
#ifdef PROMISCUOUS

/* N.B. this routine is not static so promiscq handler can call it */

struct mbuf *se_reorder_trailer_packet(hp, m)

#else

static struct mbuf *se_reorder_trailer_packet(hp, m)

#endif /* PROMISCUOUS */

	struct ether_header *hp;
	register struct mbuf *m;
{
	register struct mbuf *mnew, *split;
	int trail_off;
	struct trailer *trailerp;

	/*
	 * find mbuf where we have to split things.
	 * The trailer offset is encoded in the Ethernet type:
	 * (type - ETHERPUP_TRAIL) * 512 bytes into the data.
	 */

	trail_off = (hp->ether_type - ETHERPUP_TRAIL)*512;
	if (trail_off != 512 && trail_off != 1024) {
		printf("%s: ignore trailer with type 0x%x\n",
			se_driver.sed_name, hp->ether_type);
		return((struct mbuf *)0);
	}
	split = m;
	for (; split != 0 && split->m_len <= trail_off; split = split->m_next)
		trail_off -= split->m_len;
#ifdef DEBUG
	if (se_ibug > 1)
		printf("split 0x%x trail_off %d ", split, trail_off);
#endif /* DEBUG */

	if (split == (struct mbuf *)0)
		return((struct mbuf *)0);

	/*
	 * trail_off has the index into 'split' of the trailer.
	 * Lots of potential boundary conditions here that should
	 * be checked, but since we know the size of data blocks
	 * in trailer-protocol packets == 512 or 1024 and MLEN == 112,
	 * we are guaranteed that the trailer header is completely
	 * embedded in a single mbuf.
	 */

	/* pointer computed first but not dereferenced until after the
	 * bounds check below */
	trailerp = (struct trailer *)(mtod(split, int) + trail_off);
	if (trail_off + sizeof(struct trailer) > split->m_len)
		return((struct mbuf *)0);

	MGET(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == 0)
		return((struct mbuf *)0);

	/*
	 * Know where to split, and have place for start of header.
	 * Build real header by copying and chaining.
	 */

	mnew->m_off = MMINOFF;
	mnew->m_len = split->m_len - trail_off - sizeof(struct trailer);
	mnew->m_next = split->m_next;
	bcopy((caddr_t)(trailerp+1), mtod(mnew, caddr_t), (u_int)mnew->m_len);
	hp->ether_type = ntohs(trailerp->tl_type);	/* real type, host order */

#ifdef DEBUG
	if (se_ibug > 1)
		printf("new len %d tlr len %d trtype0x%x ",
			mnew->m_len, ntohs(trailerp->tl_count), hp->ether_type);
#endif /* DEBUG */

	/* truncate the original chain at the trailer boundary */
	split->m_len = trail_off;
	split->m_next = 0;

#ifdef DEBUG
	if (ntohs(trailerp->tl_count) !=
	    sizeof(struct trailer) + mbuf_chain_size(mnew)) {
		printf("%s: odd trailer count %d, expected %d+%d\n",
			se_driver.sed_name, ntohs(trailerp->tl_count),
			sizeof(struct trailer), mbuf_chain_size(mnew));
	}
#endif /* DEBUG */

	/* new chain: real header first, then the original data */
	mnew->m_next = m;
	return(mnew);
}
1868
1869
1870 /*
1871 * se_add_read_progs - Add one or two read programs to the request queue.
1872 *
1873 * We replace the mbufs in the mbuf queue 'mq' here. 'm' is an
1874 * mbuf chain of the appropriate length.
1875 */
1876
static
se_add_read_progs(softp, m)
	register struct se_state *softp;
	register struct mbuf *m;
{
	struct sec_mbufq *mq;
	struct sec_iatq *iq;
	struct sec_pq *sq;
	struct sec_progq *pq;
	struct sec_edev_prog *dp;
	int n;		/* mbufs remaining to be placed */

	mq = &softp->is_mbufq;
	iq = &softp->is_iatq;
	n = mbuf_chain_length(m);
	sq = &softp->is_reqq;
	pq = sq->sq_progq;

	/*
	 * Each iteration takes as many mbufs as fit before the iat
	 * ring wraps (nnow) and issues one device program for them;
	 * a chain that would wrap the ring is split into two programs.
	 */
	while (n > 0) {
		int nnow = MIN(n, iq->iq_size - iq->iq_head);
		struct sec_iat *iat;
		struct mbuf *mnext;

#ifdef DEBUG
		if (se_ibug > 1)
			printf("add%d ", nnow);
#endif /* DEBUG */
		ASSERT(nnow >= 1, "se_add_read: nnow 1");
		ASSERT(nnow == MIN(n, mq->mq_size - mq->mq_head),
			"se_add_read: nnow 2");
		if (nnow != n) {
			/* cut the chain after the first nnow mbufs */
			register struct mbuf *mprev;
			int i;

			for (mprev = m, i = 0; i < nnow-1; ++i, mprev = mprev->m_next)
				continue;
			mnext = mprev->m_next;
			mprev->m_next = (struct mbuf *)0;
			ASSERT(mbuf_chain_length(m) == nnow, "se_add_read: length");
			ASSERT(mbuf_chain_length(mnext) == n - nnow,
				"se_add_read: length 2");
		} else {
			mnext = (struct mbuf *)0;
		}

		/*
		 * m now has chain to spray into iats.
		 * mnext has the rest of the chain.
		 */

		iat = sec_spray_mbuf_iatq(m, iq);
		if (iat == 0)
			panic("se_add_read_progs");

		(void) sec_spray_mbuf_mbufq(m, mq);

		/*
		 * add the device program.
		 */

		ASSERT((pq->pq_head + 1) % sq->sq_size != pq->pq_tail,
			"se_add_read: bad head");
		dp = pq->pq_un.pq_eprogs[pq->pq_head];
		ASSERT(dp != (struct sec_edev_prog *)0, "se_add_read: dp 0");
		dp->edp_iat_count = nnow;
		dp->edp_iat = SEC_IATIFY(iat);
		pq->pq_head = (pq->pq_head + 1) % sq->sq_size;

		n -= nnow;
		m = mnext;
	}
}
1949 #endif /* MACH_KERNEL */
1950 /*
1951 * Ethernet output routine.
1952 * Encapsulate a packet of type family for the local net.
1953 * If this packet is a broadcast packet or is destined for
1954 * ourselves, we pass a copy of it through the loopback
1955 * interface, as the SEEQ chip is not capable of hearing
1956 * its own transmissions.
1957 */
1958 #ifdef MACH_KERNEL
/*
 * se_output - device-write entry point (MACH_KERNEL).
 *
 * Hands the I/O request to the generic network write routine,
 * passing se_start_u as the routine used to kick transmission.
 */
io_return_t
se_output(
	int	unit,
	io_req_t ior)
{
	register struct se_state *softp = &se_state[unit];

	return net_write(&softp->ss_if, se_start_u, ior);
}
1968
/*
 * se_start_u - unit-number wrapper around se_start; used as the
 * start routine handed to net_write() above.
 */
void se_start_u(
	int	unit)
{
	se_start(&se_state[unit]);
}
1974
1975 #else /* MACH_KERNEL */
static
se_output(ifp, m, dest)
	struct ifnet *ifp;
	register struct mbuf *m;
	struct sockaddr *dest;
{
	register struct se_state *softp = &se_state[ifp->if_unit];
	u_char ether_dest[6];
	register struct ether_header *header;
	int type;
	spl_t sipl;
	extern struct ifnet loif;
#ifdef MACH
	int usetrailers;
#endif /* MACH */

#ifdef MACH
	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
		m_freem(m);
		return(ENETDOWN);
	}
#endif /* MACH */

#ifdef DEBUG
	if (se_obug) {
		printf("O%d ", ifp->if_unit);
		if (se_obug > 1) {
			printf("se_output called with...\n");
			dump_mbuf_chain(m);
		}
	}
#endif /* DEBUG */
	/*
	 * Resolve the destination to an Ethernet address and pick
	 * the Ethernet type, possibly rewriting the chain into
	 * trailer-protocol form.
	 */
	switch (dest->sa_family) {
#ifdef INET
	case AF_INET: {
		struct in_addr inet_dest;
		register struct mbuf *m0 = m;
		int off;
		struct trailer *tl;

		inet_dest = ((struct sockaddr_in *)dest)->sin_addr;
#ifndef MACH
		if (!arpresolve(&softp->ss_arp, m, &inet_dest, ether_dest))
			return(0);	/* Not yet resolved */
#else
		/*
		 * New parameter, tells if should use trailers.
		 * Need the parameter, but can ignore the result.
		 */
		if (!arpresolve(&softp->ss_arp, m, &inet_dest, ether_dest, &usetrailers))
			return(0);	/* Not yet resolved */
#endif /* MACH */
		/* broadcast: loop a copy back, since the chip cannot
		 * hear its own transmissions */
		if (in_lnaof(inet_dest) == INADDR_ANY) {
			struct mbuf *copy = (struct mbuf *)0;

			copy = m_copy(m, 0, (int)M_COPYALL);
			if (copy != (struct mbuf *)0)
				(void) looutput(&loif, copy, dest);
		}

		/* Generate trailer protocol? */

		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0
		    && off > 0 && (off & 0x1FF) == 0
		    && m->m_off >= MMINOFF + sizeof(struct trailer)) {
			type = ETHERPUP_TRAIL + (off >> 9);
			m->m_off -= sizeof(struct trailer);
			m->m_len += sizeof(struct trailer);
			tl = mtod(m, struct trailer *);
			tl->tl_type = htons((u_short)ETHERPUP_IPTYPE);
			tl->tl_count = htons((u_short)m->m_len);

			/*
			 * Move first packet (control information)
			 * to end of chain.
			 */

			while (m0->m_next)
				m0 = m0->m_next;
			m0->m_next = m;
			m0 = m->m_next;
			m->m_next = (struct mbuf *)0;
			m = m0;
		} else {
			type = ETHERPUP_IPTYPE;
		}
	}
	break;
#endif /* INET */

	case AF_UNSPEC:
		/* caller supplied a ready-made Ethernet header */
		header = (struct ether_header *)dest->sa_data;
		bcopy((caddr_t)header->ether_dhost, (caddr_t)ether_dest,
			sizeof(ether_dest));
		type = header->ether_type;
		break;

	default:
		printf("%s%d: can't handle address family %d\n",
			se_driver.sed_name, softp-se_state, dest->sa_family);
		m_freem(m);
		return(EAFNOSUPPORT);
	}

	/*
	 * Add the local header.
	 * Always add a new mbuf for the header so that we can compress
	 * short mbufs at the front easily.
	 */
	{
		register struct mbuf *m0;

		MGET(m0, M_DONTWAIT, MT_HEADER);
		if (m0 == (struct mbuf *)0) {
			m_freem(m);
			return(ENOBUFS);
		}
		m0->m_next = m;
		m0->m_off = MMINOFF;
		m0->m_len = sizeof(struct ether_header);
		m = m0;
	}

	header = mtod(m, struct ether_header *);
	header->ether_type = htons((u_short)type);
	bcopy((caddr_t)ether_dest, (caddr_t)header->ether_dhost,
		sizeof(ether_dest));
	sipl = OS_LOCK(softp);
	bcopy((caddr_t)softp->ss_arp.ac_enaddr,
		(caddr_t)header->ether_shost, 6);

	/*
	 * The SCSI/Ether interface is not very good at handling short
	 * output packets, so try to condense the first few mbufs
	 * together.
	 * Note that we are guaranteed that m->m_off == MMINOFF, as
	 * we just placed the 14-byte Ethernet header there.
	 */

	while (m->m_next && (m->m_len + m->m_next->m_len) <= MLEN) {
		register struct mbuf *mn = m->m_next;

		bcopy(mtod(mn, caddr_t), (caddr_t)(mtod(m, int)+m->m_len), (u_int)mn->m_len);
		m->m_len += mn->m_len;
		ASSERT(m->m_len <= MLEN, "se_output: MLEN");
		m->m_next = mn->m_next;
		mn->m_next = (struct mbuf *)0;
		m_freem(mn);
	}

	/*
	 * Queue message on interface, and start output if interface not active.
	 */

#ifndef MACH
	(void) IF_LOCK(&ifp->if_snd);
#endif /* MACH */
	if (IF_QFULL(&ifp->if_snd)) {
#ifdef DEBUG
		if (se_obug)
			printf("dropo\n");
#endif /* DEBUG */
		IF_DROP(&ifp->if_snd);
#ifndef MACH
		IF_UNLOCK(&ifp->if_snd, SPLIMP);
#endif /* MACH */
		OS_UNLOCK(softp, sipl);
		m_freem(m);
		return(ENETDOWN);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
#ifndef MACH
	IF_UNLOCK(&ifp->if_snd, SPLIMP);
#endif /* MACH */
	OS_UNLOCK(softp, sipl);
	se_start(softp);
	return(0);
}
2155 #endif /* MACH_KERNEL */
2156
2157
2158 /*
2159 * se_start - start output on the interface.
2160 *
2161 * First we make sure it is idle and that there is work to do.
2162 *
2163 * We spray the mbuf into the output iat queue,
2164 * build the device program and start the program running.
2165 *
2166 * EMERGENCY FIX: Since someone is putting tiny mbufs in the
2167 * middle of the mbuf chain, we must copy mbufs into an output
2168 * buffer until we understand the problem better.
2169 */
2170
se_start(softp)
	register struct se_state *softp;
{
	spl_t sipl;

	/* guard against a stale/garbage softp (unit out of range) */
	if (softp-se_state > se_max_unit)
		return;
#ifdef DEBUG
	if (se_obug)
		printf("S%d ", softp-se_state);
#endif /* DEBUG */
	sipl = OS_LOCK(softp);
	if (softp->os_active) {
		/* a transmit is already in flight; nothing to do */
#ifdef DEBUG
		if (se_obug)
			printf("active ");
#endif /* DEBUG */
		goto ret;
	}

	/*
	 * Device not busy.  Is there something in the queue?
	 */

	for (;;) {
#ifdef MACH_KERNEL
		struct ifqueue *ifq = &softp->ss_if.if_snd;
		register io_req_t m;
#else /* MACH_KERNEL */
		struct ifqueue *ifq = &softp->ss_arp.ac_if.if_snd;
		register struct mbuf *m, *n;
#endif /* MACH_KERNEL */
		register struct sec_pq *sq = &softp->os_reqq;
		register struct sec_progq *pq = sq->sq_progq;
		register struct sec_dev_prog *dp;
		u_char *cp;
		struct ether_header *header;
		int packetsize, padcount;

		dp = PHYSTOKV(pq->pq_un.pq_progs[pq->pq_head],
			      struct sec_dev_prog *);
#ifndef MACH
		(void) IF_LOCK(ifq);
#endif /* MACH */
		IF_DEQUEUE(ifq, m);
#ifndef MACH
		IF_UNLOCK(ifq, SPLIMP);
#endif /* MACH */
#ifdef MACH_KERNEL
		if (m == 0)
#else /* MACH_KERNEL */
		if (m == (struct mbuf *)0)
#endif /* MACH_KERNEL */
			break;

		/*
		 * m is a nonempty chain of mbufs
		 * corresponding to a packet.
		 * Flush the iat queue to empty, and
		 * place the mbufs there.
		 */

		ASSERT(sq->sq_size != 0, "se_start: size");
		ASSERT(pq->pq_head < sq->sq_size, "se_start: head");
		ASSERT(pq->pq_tail < sq->sq_size, "se_start: tail");
		ASSERT((pq->pq_head + 1) % sq->sq_size != pq->pq_tail,
			"se_start: head+1");
		ASSERT(pq->pq_tail == pq->pq_head, "se_start: head/tail");

#ifdef MACH_KERNEL
		softp->ss_if.if_opackets++;
#else /* MACH_KERNEL */
		softp->ss_arp.ac_if.if_opackets++;
#endif /* MACH_KERNEL */
		softp->os_active = 1;
		softp->os_pending = m;		/* freed/iodone'd at interrupt */
#ifdef MACH_KERNEL
		packetsize = m->io_count;
#else /* MACH_KERNEL */
		packetsize = mbuf_chain_size(m);
#endif /* MACH_KERNEL */
		/* pad short packets out to the Ethernet minimum */
		padcount = ETHERMIN - (packetsize - sizeof(struct ether_header));
		if (padcount > 0)
			packetsize += padcount;
		/*
		 * EMERGENCY FIX (see block comment above): copy the whole
		 * packet into the contiguous output buffer os_buf.
		 */
#ifdef MACH_KERNEL
		bcopy(m->io_data, softp->os_buf, m->io_count);
		cp = softp->os_buf + m->io_count;
#else /* MACH_KERNEL */
		for (cp = softp->os_buf, n = m; n != 0; n = n->m_next) {
			bcopy(mtod(n, caddr_t), (caddr_t)cp, (u_int)n->m_len);
			cp += n->m_len;
		}
#endif /* MACH_KERNEL */
		ASSERT(cp >= softp->os_buf, "se_start: cp < os_buf");
		ASSERT(cp <= softp->os_buf + OS_BUF_SIZE,
			"se_start: cp > os_buf");
		dp->dp_un.dp_data = KVTOPHYS(softp->os_buf, unsigned char *);
		dp->dp_data_len = packetsize;
		dp->dp_cmd_len = 0;
		dp->dp_next = (struct sec_dev_prog *)0;
#ifdef DEBUG
		if (se_obug > 1) {
			printf("se_start: starting...");
			dump_bytes((char *) softp->os_buf, packetsize);
		}
#endif /* DEBUG */

		/*
		 * If the packet is a multicast or broadcast
		 * packet, place an indicator in the dp_cmd[]
		 * so that the firmware knows to turn off the
		 * receiver.  The SCSI/Ether firmware can't look
		 * at the packet itself, as the mbuf might not
		 * be within its 4MB window.
		 */

		dp->dp_cmd[0] = SCSI_ETHER_WRITE;
#ifdef MACH_KERNEL
		header = (struct ether_header *)m->io_data;
#else /* MACH_KERNEL */
		header = mtod(m, struct ether_header *);
#endif /* MACH_KERNEL */

		if ((header->ether_dhost[0] & 0x01)
#ifdef PROMISCUOUS
		    || (softp->ss_ether_flags == SETHER_PROMISCUOUS)
#endif /* PROMISCUOUS */
		   )
		{
			dp->dp_cmd[1] = SCSI_ETHER_MULTICAST;
		} else {
			dp->dp_cmd[1] = SCSI_ETHER_STATION;
		}

#ifdef DEBUG
		if (se_obug)
			printf("sio ");
#endif /* DEBUG */
		pq->pq_head = (pq->pq_head + 1) % sq->sq_size;
		if (sec_start_prog(SINST_STARTIO, softp->os_cib,
				   softp->ss_slic, softp->ss_bin,
				   SDEV_ETHERWRITE, 1)
		    != SEC_ERR_NONE) {
			printf("%s%d: se_start: status 0x%x\n",
				se_driver.sed_name, softp-se_state,
				softp->os_status);
		}
		/* only one device program may be active at a time */
		break;
	}
    ret:

#ifdef DEBUG
	if (se_obug)
		printf("\n");
#endif /* DEBUG */
	OS_UNLOCK(softp, sipl);
}
2328
2329 #ifndef MACH
2330 /*
2331 * se_ioctl
2332 */
2333
2334 static
2335 se_ioctl(ifp, cmd, data)
2336 register struct ifnet *ifp;
2337 int cmd;
2338 caddr_t data;
2339 {
2340 register struct ifreq *ifr = (struct ifreq *)data;
2341 register struct se_state *softp = &se_state[ifp->if_unit];
2342 spl_t sipl;
2343
2344 switch (cmd) {
2345 case SIOCSIFADDR:
2346 sipl = SS_LOCK(softp);
2347 if (ifp->if_flags & IFF_RUNNING)
2348 if_rtinit(ifp, -1);
2349
2350 se_set_addr(ifp, (struct sockaddr_in *)&ifr->ifr_addr);
2351 SS_UNLOCK(softp, sipl);
2352 se_init(ifp->if_unit);
2353 return(0);
2354
2355 default:
2356
2357 #ifdef PROMISCUOUS
2358 if(promiscdev)
2359 return((*cdevsw[major(promiscdev)].d_ioctl)(ifp, cmd, data));
2360 else
2361 return(EINVAL);
2362 #else
2363 return(EINVAL);
2364 #endif /* PROMISCUOUS */
2365 }
2366 }
2367
2368 #else
2369 /*
2370 * MACH/4.3 changed this a bunch.
2371 */
2372 #ifdef MACH_KERNEL
2373 io_return_t
2374 se_getstat(
2375 int unit,
2376 int flavor,
2377 dev_status_t status, /* pointer to OUT array */
2378 natural_t *count) /* out */
2379 {
2380 register struct se_state *softp = &se_state[unit];
2381
2382 return net_getstat(&softp->ss_if, flavor, status, count);
2383 }
2384
/*
 * se_setstat - change network status for a unit.
 *
 * Supported flavors:
 *   NET_STATUS  - update the receive-mode flags (multicast and/or
 *                 promiscuous); any change forces the new modes onto
 *                 the board so they take effect immediately.
 *   NET_ADDRESS - install a new ethernet station address, supplied
 *                 as two network-byte-order longwords, and push the
 *                 modes to the board.
 * Anything else is rejected with D_INVALID_OPERATION.
 */
io_return_t
se_setstat(
	int		unit,
	int		flavor,
	dev_status_t	status,
	natural_t	count)
{
	register struct se_state *softp = &se_state[unit];

	switch (flavor) {
	    case NET_STATUS:
	    {
		/*
		 * All we can change are flags, and not many of those.
		 */
		register struct net_status *ns = (struct net_status *)status;
		int mode = 0;

		/*
		 * NOTE(review): the NET_ADDRESS case below returns
		 * D_INVALID_SIZE for the analogous short-count check --
		 * confirm which error code is intended here.
		 */
		if (count < NET_STATUS_COUNT)
		    return D_INVALID_OPERATION;

		/*
		 * XXX This cannot be right-
		 * the multicast and promiscuous flags
		 * seem to be mutually exclusive!
		 */
		if (ns->flags & IFF_ALLMULTI)
		    mode |= SETHER_MULTICAST;
		if (ns->flags & IFF_PROMISC)
		    mode |= SETHER_PROMISCUOUS;

		/*
		 * Force a complete reset if the receive mode changes
		 * so that these take effect immediately.
		 */
		if (softp->ss_ether_flags != mode) {
		    softp->ss_ether_flags = mode;
		    se_set_modes(softp);
		}
		break;
	    }
	    case NET_ADDRESS:
	    {
		/* 6-byte ethernet address overlaid on two ints */
		register union ether_cvt {
		    char addr[6];
		    int lwd[2];
		} *ec = (union ether_cvt *)status;

		if (count < sizeof(*ec)/sizeof(int))
		    return (D_INVALID_SIZE);
		/* caller supplies the address in network byte order;
		   convert in place before copying it out */
		ec->lwd[0] = ntohl(ec->lwd[0]);
		ec->lwd[1] = ntohl(ec->lwd[1]);

		bcopy((char *)ec->addr, (char *)softp->ss_addr, 6);
		se_set_modes(softp);
		break;
	    }

	    default:
		return D_INVALID_OPERATION;
	}
	return D_SUCCESS;

}
2449 #else /* MACH_KERNEL */
2450 /*
2451 * se_ioctl
2452 */
2453
2454 static
2455 se_ioctl(ifp, cmd, data)
2456 register struct ifnet *ifp;
2457 int cmd;
2458 caddr_t data;
2459 {
2460 register struct ifaddr *ifa = (struct ifaddr *)data;
2461 int s = splimp(), error = 0;
2462
2463 switch (cmd) {
2464
2465 case SIOCSIFADDR:
2466 ifp->if_flags |= IFF_UP;
2467 se_init(ifp->if_unit);
2468
2469 switch (ifa->ifa_addr.sa_family) {
2470 #ifdef INET
2471 case AF_INET:
2472 ((struct arpcom *)ifp)->ac_ipaddr =
2473 IA_SIN(ifa)->sin_addr;
2474 arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
2475 break;
2476 #endif
2477 #ifdef NS
2478 ERROR -- this case not really here yet
2479 case AF_NS:
2480 {
2481 register struct ns_addr *ina = &(IA_SNS(ifa)->sns_addr);
2482
2483 if (ns_nullhost(*ina))
2484 ina->x_host = *(union ns_host *)(ds->ds_addr);
2485 else
2486 se_setaddr(ina->x_host.c_host,ifp->if_unit);
2487 break;
2488 }
2489
2490 #endif
2491 default:
2492 break;
2493 }
2494 break;
2495
2496 default:
2497 error = EINVAL;
2498 }
2499 splx(s);
2500 return (error);
2501 }
2502 #endif /* MACH_KERNEL */
2503 #endif /* MACH */
2504
2505 #ifndef MACH
2506 static
2507 se_set_addr(ifp, sin)
2508 register struct ifnet *ifp;
2509 register struct sockaddr_in *sin;
2510 {
2511 ifp->if_addr = *(struct sockaddr *)sin;
2512 ifp->if_net = in_netof(sin->sin_addr);
2513 ifp->if_host[0] = in_lnaof(sin->sin_addr);
2514 sin = (struct sockaddr_in *)&ifp->if_broadaddr;
2515 sin->sin_family = AF_INET;
2516 sin->sin_addr = if_makeaddr(ifp->if_net, INADDR_ANY);
2517 ifp->if_flags |= IFF_BROADCAST;
2518 }
2519 #endif /* MACH */
2520
2521 /*
2522 * se_watch - watchdog routine, request statistics from board.
2523 *
2524 * The cib's status pointer must have an address that is physical == virtual,
2525 * and must reside within the SEC's 4MB window.
2526 */
2527
2528 static
2529 se_watch(unit)
2530 int unit;
2531 {
2532 register struct se_state *softp = &se_state[unit];
2533 register struct sec_cib *cib;
2534 #ifdef MACH_KERNEL
2535 struct ifnet *ifp = &softp->ss_if;
2536 #else /* MACH_KERNEL */
2537 struct ifnet *ifp = &softp->ss_arp.ac_if;
2538 #endif /* MACH_KERNEL */
2539 volatile int *saved_status;
2540 spl_t sipl;
2541
2542 if (unit < 0 || unit > se_max_unit)
2543 return;
2544
2545 sipl = OS_LOCK(softp);
2546 cib = softp->os_cib;
2547 saved_status = cib->cib_status;
2548 cib->cib_status = KVTOPHYS(&softp->os_gmode, int *);
2549
2550 if (sec_start_prog(SINST_GETMODE, cib, softp->ss_slic,
2551 softp->ss_bin, SDEV_ETHERWRITE, 1)
2552 != SEC_ERR_NONE) {
2553 printf("%s%d: se_watch: status 0x%x\n",
2554 se_driver.sed_name, softp-se_state,
2555 softp->os_gmode.gm_status);
2556 }
2557
2558 cib->cib_status = saved_status;
2559
2560 (void) SS_LOCK(softp);
2561
2562 #define INCR(field1, field2) \
2563 softp->ss_sum.field1 += softp->os_gmode.gm_un.gm_ether.field2
2564
2565 INCR(ec_rx_ovfl, egm_rx_ovfl);
2566 INCR(ec_rx_crc, egm_rx_crc);
2567 INCR(ec_rx_dribbles, egm_rx_dribbles);
2568 INCR(ec_rx_short, egm_rx_short);
2569 INCR(ec_rx_good, egm_rx_good);
2570
2571 INCR(ec_tx_unfl, egm_tx_unfl);
2572 INCR(ec_tx_coll, egm_tx_coll);
2573 INCR(ec_tx_16xcoll, egm_tx_16x_coll);
2574 INCR(ec_tx_good, egm_tx_good);
2575 #undef INCR
2576
2577 #ifdef MACH_KERNEL
2578 softp->ss_if.if_ierrors +=
2579 #else /* MACH_KERNEL */
2580 softp->ss_arp.ac_if.if_ierrors +=
2581 #endif /* MACH_KERNEL */
2582 softp->os_gmode.gm_un.gm_ether.egm_rx_ovfl
2583 + softp->os_gmode.gm_un.gm_ether.egm_rx_crc
2584 + softp->os_gmode.gm_un.gm_ether.egm_rx_dribbles;
2585 #ifdef MACH_KERNEL
2586 softp->ss_if.if_oerrors +=
2587 #else /* MACH_KERNEL */
2588 softp->ss_arp.ac_if.if_oerrors +=
2589 #endif /* MACH_KERNEL */
2590 softp->os_gmode.gm_un.gm_ether.egm_tx_unfl;
2591
2592 #ifdef MACH_KERNEL
2593 softp->ss_if.if_collisions = softp->ss_sum.ec_tx_coll;
2594 #else /* MACH_KERNEL */
2595 softp->ss_arp.ac_if.if_collisions = softp->ss_sum.ec_tx_coll;
2596 #endif /* MACH_KERNEL */
2597
2598 #ifdef MACH_KERNEL
2599 #else /* MACH_KERNEL */
2600 ifp->if_timer = softp->ss_scan_int;
2601 #endif /* MACH_KERNEL */
2602 SS_UNLOCK(softp, SPLIMP);
2603 OS_UNLOCK(softp, sipl);
2604
2605 #ifdef MACH_KERNEL
2606 timeout(se_watch, (char *)0, softp->ss_scan_int);
2607 #endif /* MACH_KERNEL */
2608
2609 }
2610
2611
2612
2613 /*
2614 * reset: not necessary on sequent hardware.
2615 */
2616
2617 static
2618 se_reset()
2619 {
2620 panic("se_reset");
2621 }
2622
2623
2624
2625 /*
2626 * se_set_modes - set the Ethernet modes based upon the soft state.
2627 *
2628 * Called with all pieces of the state locked.
2629 *
2630 * When we do the SINST_SETMODE, we use the get_mode structure
2631 * in the output state. This is fair as everyone else is locked
2632 * out and the first part of the get_mode structure is a set_mode
2633 * piece.
2634 */
2635
2636 se_set_modes(softp)
2637 register struct se_state *softp;
2638 {
2639 register volatile struct sec_ether_smodes *esm;
2640 register struct sec_cib *cib = softp->os_cib;
2641 volatile int *saved_status = cib->cib_status;
2642
2643 cib->cib_status = KVTOPHYS(&softp->os_gmode, volatile int *);
2644
2645 esm = &softp->os_gmode.gm_un.gm_ether.egm_sm;
2646 #ifdef MACH_KERNEL
2647 bcopy((caddr_t)softp->ss_addr, (caddr_t)esm->esm_addr, 6);
2648 #else MACH_KERNEL
2649 bcopy((caddr_t)softp->ss_arp.ac_enaddr, (caddr_t)esm->esm_addr, 6);
2650 #endif MACH_KERNEL
2651 esm->esm_flags = softp->ss_ether_flags;
2652 #ifdef MACH_KERNEL
2653 esm->esm_size = sizeof(struct ether_header) + ETHERMTU;
2654 /* we receive entire packet at once */
2655 #else MACH_KERNEL
2656 esm->esm_size = MLEN;
2657 #endif MACH_KERNEL
2658
2659 if (sec_start_prog(SINST_SETMODE, cib, softp->ss_slic,
2660 softp->ss_bin, SDEV_ETHERWRITE, 1)
2661 != SEC_ERR_NONE) {
2662 printf("%s%d: se_set_mode: status 0x%x\n",
2663 se_driver.sed_name, softp-se_state,
2664 softp->os_gmode.gm_status);
2665 }
2666 cib->cib_status = saved_status;
2667 }
2668
2669 #ifdef DEBUG
2670 static char hex[] = "0123456789abcdef";
2671
2672 dump_mbuf_chain(m)
2673 register struct mbuf *m;
2674 {
2675 register int mcnt;
2676
2677 for (mcnt = 0; m != NULL; mcnt++, m = m->m_next) {
2678 printf("mbuf[%d]:", mcnt);
2679 dump_bytes(mtod(m, char *), m->m_len);
2680 }
2681 }
2682
2683 dump_bytes(cp, len)
2684 register char *cp;
2685 register int len;
2686 {
2687 register int cnt;
2688
2689 for (cnt = 0; cnt < len; cnt++, cp++) {
2690 if ((cnt % 20) == 0)
2691 printf("\n\t");
2692 printf(" %c%c", hex[((int)(*cp) >> 4) & 0xf], hex[(*cp) & 0xf]);
2693 }
2694 printf("\n");
2695 }
2696 #endif DEBUG
Cache object: f690966304f369d189c87cb8f307a016
|