FreeBSD/Linux Kernel Cross Reference
sys/sqtsec/if_se.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993, 1991 Carnegie Mellon University
4 * Copyright (c) 1991 Sequent Computer Systems
5 * All Rights Reserved.
6 *
7 * Permission to use, copy, modify and distribute this software and its
8 * documentation is hereby granted, provided that both the copyright
9 * notice and this permission notice appear in all copies of the
10 * software, derivative works or modified versions, and any portions
11 * thereof, and that both notices appear in supporting documentation.
12 *
13 * CARNEGIE MELLON AND SEQUENT COMPUTER SYSTEMS ALLOW FREE USE OF
14 * THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON AND
15 * SEQUENT COMPUTER SYSTEMS DISCLAIM ANY LIABILITY OF ANY KIND FOR
16 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie Mellon
26 * the rights to redistribute these changes.
27 */
28
29 /*
30 * HISTORY
31 * $Log: if_se.c,v $
32 * Revision 2.4 93/01/14 17:56:21 danner
33 * Ansified preprocessor comments, fixed a static/extern problem
34 * with se_output.
35 * [93/01/14 danner]
36 *
37 * Revision 2.3 91/07/31 18:06:35 dbg
38 * Changed copyright.
39 * [91/07/31 dbg]
40 *
41 * Revision 2.2 91/05/08 13:05:43 dbg
42 * Changed net_filter to net_packet.
43 *
44 * MACH_KERNEL conversion.
45 * Added volatile declarations.
46 * [91/03/22 dbg]
47 *
48 */
49
50 #undef RAW_ETHER
51 /* #define RAW_ETHER */ /* Not in MACH */
52
53 #undef PROMISCUOUS /* UNDO promiscuous kernel */
54 /* #define PROMISCUOUS */ /* promiscuous kernel */ /* Not in MACH*/
55
56 #ifndef lint
57 static char rcsid[] = "$Header: if_se.c,v 2.4 93/01/14 17:56:21 danner Exp $";
58 #endif
59
60 /*
61 * SCSI/Ether Ethernet Communications Controller interface
62 *
63 * ETHER OUTPUT NOTES:
64 * -------------------
65 * Only one output request is active at a time. A simple array
66 * of iats holds the addresses of the mbuf data that get written.
67 * We copy transmits into a single buffer because the higher-level
68 * network code can generate mbufs too small for the DMA's to handle
69 * (the firmware doesn't have enough time to turn around and reload).
70 *
71 * As a matter of convention, all SEC Ether ioctls are done with the
72 * output device and output locks.
73 *
74 * ETHER INPUT NOTES:
75 * ------------------
76 * Device programs for Ether read contain a pointer to an iat
77 * and the number of data blocks in that iat. The iat dp_data
78 * fields give the physical addresses of the m_data field of
79 * a parallel array of mbufs. Ether packets read from the net
80 * are placed into these data blocks (and hence right into the
81 * mbufs).
82 *
83 * Each controller has a circular queue of pointers to mbufs and
84 * a circular queue of iats that are continually filled by the
85 * SEC firmware with input packets.
86 * Our job is to replace the used mbufs as quickly as possible
87 * at interrupt time, and refill the iats. We then add another
88 * device program (or two if we wrap around the end of the iat
89 * ring) for ether input.
90 *
91 * Important hints:
92 * - There is an iat queue and an mbuf pointer queue for each
93 * controller.
94 * - The iat queue and the mbuf queue have the same number
95 * of elements.
96 * - Except when refilling the read programs (at interrupt time),
97 * the heads of the iat queue and the mbuf queue should be the same.
98 * Even here, they should only be different during the actual
99 * refilling of the iat and mbuf queues.
100 * - All hell breaks loose if we run out of input programs
101 * to replace the iats. We can't sleep and wait for more at
102 * interrupt level.
103 * - Overspecifying the size of the Ether read request and
104 * done queues in the binary config file is a very good idea.
105 * - Refilling the queues after reading short packets will cause
106 * each packet to have a single new device program added to the
107 * Ether read device request queue.
108 * - No attempt is made to optimize these programs, as there is
109 * no synchronization with the SEC firmware: I can't ask him
110 * to stop for a second while I increase the number of iats in
111 * that last device program.
112 *
113 *
114 *
115 * TMP AND LOCKING NOTES:
116 * ----------------------
117 *
118 * There is very little locking or synchronization needed at this
119 * level of the software. Most of it really goes on above when necessary.
120 *
121 * In general, we try to lock only the portions of the controller state
122 * that we have to. When changing "important" information (like fields
123 * int the arp and/or ifnet structures), we lock everything.
124 *
125 * To lock everything, it is safe to lock structure from the inside out.
126 * That is, lock either the input or output segment of the controller
127 * state, then lock the common structure. With the macros defined
128 * below, the order OS_LOCK, then SS_LOCK should be safe.
129 * See how se_init does locking for an example.
130 */
131
132 /*
133 * Revision 1.2 89/08/16 15:22:05 root
134 * balance -> sqt
135 *
136 * Revision 1.1 89/07/05 13:18:31 kak
137 * Initial revision
138 *
139 */
140
141 #ifdef MACH_KERNEL
142 #include <device/device_types.h>
143 #include <device/io_req.h>
144 #include <device/net_io.h>
145
146 #include <sqt/vm_defs.h>
147 #include <sqt/intctl.h>
148 #include <sqt/ioconf.h>
149 #include <sqt/cfg.h>
150 #include <sqt/slic.h>
151 #include <sqt/mutex.h>
152
153 #include <sqtsec/sec.h>
154 #include <sqtsec/if_se.h>
155
156 /*
157 * Convert to Mach style assert
158 */
159 #define ASSERT(C,S) assert(C)
160
161 #include <kern/assert.h>
162
163 #else MACH_KERNEL
164 #include "sys/param.h"
165 #include "sys/systm.h"
166 #include "sys/mbuf.h"
167 #include "sys/buf.h"
168 #include "sys/protosw.h"
169 #include "sys/socket.h"
170 #include "sys/socketvar.h"
171 #include "sys/ioctl.h"
172 #include "sys/errno.h"
173 #include "sys/vm.h"
174 #include "sys/conf.h"
175
176 #include "net/if.h"
177 #include "net/netisr.h"
178 #include "net/route.h"
179
180 #include "netinet/in.h"
181 #include "netinet/in_systm.h"
182 #include "netinet/in_var.h" /* MACH/4.3 */
183 #include "netinet/ip.h"
184 #include "netinet/ip_var.h"
185 #include "netinet/if_ether.h"
186
187 #include "sqt/pte.h"
188 #include "sqt/intctl.h"
189 #include "sqt/ioconf.h"
190 #include "sqt/cfg.h"
191 #include "sqt/slic.h"
192
193 #include "sqt/mutex.h"
194
195 #include "sqtsec/sec.h"
196
197 #include "sqtif/if_se.h"
198
199 #ifdef PROMISCUOUS
200 #include "net/promisc.h"
201 #endif PROMISCUOUS
202
203 #define KVIRTTOPHYS(addr) \
204 (PTETOPHYS(Sysmap[btop(addr)]) + ((int)(addr) & (NBPG-1)))
205
206 #ifdef MACH
207 /*
208 * Driver not yet converted to MACH/4.3 names.
209 */
210
211 #define ETHERPUP_PUPTYPE ETHERTYPE_PUP
212 #define ETHERPUP_IPTYPE ETHERTYPE_IP
213 #define ETHERPUP_ARPTYPE ETHERTYPE_ARP
214 #define ETHERPUP_TRAIL ETHERTYPE_TRAIL
215 #define ETHERPUP_NTRAILER ETHERTYPE_NTRAILER
216
217 /*
218 * Convert to Mach style assert
219 */
220 #define ASSERT(C,S) assert(C)
221
222 #include "kern/assert.h"
223
224 #endif /* MACH */
225 #endif /* MACH_KERNEL */
226
227 /*
228 * All procedures are referenced either through the se_driver structure,
229 * or via the procedure handles in the ifnet structure.
230 * Hence, everything but the se_driver structure should be able to be static.
231 */
232
233 static int se_probe(), se_boot(), se_intr(), se_watch();
234 static int se_init(), se_ioctl(), se_reset();
235 #ifndef MACH_KERNEL
236 static int se_output();
237 #endif
238
239 #ifndef PROMISCUOUS
240
241 static struct mbuf *se_reorder_trailer_packet();
242
243 #else
244
245 /* N.B. this routine is not static so promiscq handler can call it */
246
247 struct mbuf *se_reorder_trailer_packet();
248
249 #endif /* PROMISCUOUS */
250
251 int se_handle_read(), se_add_read_progs();
252 int se_start(), se_set_addr();
253 int se_set_modes();
254
255 #ifdef MACH_KERNEL
256 int se_start_u(); /* takes unit number */
257 #endif /* MACH_KERNEL */
258
/*
 * Autoconfiguration hook: this is the one deliberately external symbol;
 * the SEC config code finds the probe/boot/intr entry points through it.
 */
struct sec_driver se_driver = {
	/* name	chan	flags		probe		boot	intr */
	"se",	1,	SED_TYPICAL,	se_probe,	se_boot,	se_intr
};
263
264 /*
265 * SCSI-command template for Ether write.
266 * These are placed in the write device programs,
267 * and altered by the se_start routine just before
268 * we write the packet.
269 */
270
271 u_char se_scsi_cmd[10] = { SCSI_ETHER_WRITE, SCSI_ETHER_STATION };
272
273 #ifdef RAW_ETHER
274
275 #include "../net/raw_cb.h"
276
277 #endif /* RAW_ETHER */
278
279 /*
280 * sec_init_iatq()
281 * initialize a ring of iat entries.
282 *
283 * If locking is needed, it is presumed to be done elsewhere.
284 */
285
286 static void
287 sec_init_iatq(iq, count)
288 register struct sec_iatq *iq;
289 unsigned count;
290 {
291 iq->iq_iats = (struct sec_iat *)
292 calloc((int)(count*sizeof(struct sec_iat)));
293 iq->iq_size = count;
294 iq->iq_head = 0;
295 }
296
297 #ifndef MACH_KERNEL
298 /*
299 * sec_spray_mbuf_iatq - spray an mbuf chain into a queue of iats.
300 *
301 * The head of the iat queue is adjusted here.
302 * It is illegal to wrap around the IAT ring.
303 *
304 * Returns pointer to first IAT if it worked.
305 * Returns 0 otherwise.
306 */
307
308 static struct sec_iat *
309 sec_spray_mbuf_iatq(m, iq)
310 register struct mbuf *m;
311 register struct sec_iatq *iq;
312 {
313 register struct sec_iat *iat;
314 int i, n;
315
316 n = mbuf_chain_length(m);
317 ASSERT(n > 0, "sec_spray_iatq: n <= 0");
318 ASSERT(n <= iq->iq_size, "sec_spray_iatq: n > size");
319 if (n > (iq->iq_size - iq->iq_head))
320 return((struct sec_iat *)0);
321
322 for (i = 0; i < n; ++i, m = m->m_next) {
323 int k = (iq->iq_head + i) % iq->iq_size;
324
325 ASSERT(m != (struct mbuf *)0, "sec_spray_iatq: m == 0");
326 iat = &iq->iq_iats[k];
327 iat->iat_data = (u_char *)KVIRTTOPHYS(mtod(m, int));
328 iat->iat_count = m->m_len;
329 }
330
331 ASSERT(m == (struct mbuf *)0, "sec_spray_iatq: m != 0");
332 iat = &iq->iq_iats[iq->iq_head];
333 iq->iq_head = (iq->iq_head + n) % iq->iq_size;
334 return(iat);
335 }
336 #endif /* MACH_KERNEL */
337
338 /*
339 * sec_start_prog - start a program on a SCSI/Ether device.
340 */
341
/*
 * Issue instruction 'cmd' to the SEC via the device's cib, then spin
 * until the firmware posts completion in the status word.
 *
 * K&R note: 'cmd' and 'splok' have no declarations below and so default
 * to int.  When 'splok' is nonzero the instruction post + doorbell are
 * done at splhi so the firmware sees a consistent cib.
 *
 * Returns the status word with the done bit stripped.
 * NOTE(review): the completion poll has no timeout — a wedged controller
 * hangs the caller here.
 */
static int
sec_start_prog(cmd, cib, slic, bin, vector, splok)
	struct sec_cib *cib;
	u_char slic, bin, vector;
{
	/* cib_status holds a physical address; map it back to a kernel
	 * virtual pointer so we can poll it.  Must be volatile: the
	 * firmware writes it behind the compiler's back. */
	register volatile int *stat
		= PHYSTOKV(cib->cib_status, volatile int *);
	spl_t sipl;

	if (splok)
		sipl = splhi();

	cib->cib_inst = cmd;
	*stat = 0;
	/* ring the controller's doorbell interrupt */
	mIntr(slic, bin, vector);
	if (splok)
		splx(sipl);

	/* busy-wait for the firmware to mark the instruction done */
	while ((*stat & SINST_INSDONE) == 0)
		continue;
	return(*stat & ~SINST_INSDONE);
}
364
365 #ifdef MACH_KERNEL
366 /*
367 * Fill in the fields of a net-kmsg pointer queue.
368 */
369 void
370 sec_init_msgq(mq, size)
371 register struct sec_msgq *mq;
372 unsigned int size;
373 {
374 mq->mq_msgs =
375 (ipc_kmsg_t *) calloc((int) size * sizeof(ipc_kmsg_t));
376 mq->mq_size = size;
377 mq->mq_head = 0;
378 }
379
380 #else MACH_KERNEL
381 /*
382 * Fill in the fields of an mbuf pointer queue.
383 *
384 * We can't fill in the mbuf pointers yet, as it is too soon
385 * to allocate mbufs yet.
386 */
387
388 static
389 sec_init_mbufq(mq, size)
390 register struct sec_mbufq *mq;
391 unsigned size;
392 {
393 mq->mq_mbufs =
394 (struct mbuf **) calloc((int)(size*sizeof(struct mbuf *)));
395 mq->mq_size = size;
396 mq->mq_head = 0;
397 }
398
399 /*
400 * sec_spray_mbuf_mbufq - spray an mbuf chain into a queue of mbuf pointers.
401 *
402 * The head of the mbuf queue is adjusted here.
403 * It is illegal to wrap around the mbuf pointer ring.
404 *
405 * This work is done in parallel with the iat queue.
406 *
407 * Returns pointer to first mbuf pointer if it worked.
408 * Returns 0 otherwise.
409 */
410
411 static struct mbuf **
412 sec_spray_mbuf_mbufq(m, mq)
413 register struct mbuf *m;
414 register struct sec_mbufq *mq;
415 {
416 register struct mbuf **mbufp;
417 register int i, n;
418
419 n = mbuf_chain_length(m);
420 ASSERT(n > 0, "sec_spray_mbufq: n <= 0");
421 ASSERT(n <= mq->mq_size, "sec_spray_mbufq: n > size");
422 if (n > (mq->mq_size - mq->mq_head))
423 return((struct mbuf **)0);
424
425 for (i = 0; i < n; ++i, m = m->m_next) {
426 ASSERT(m != (struct mbuf *)0, "sec_spray_mbufq: m == 0");
427 mbufp = &mq->mq_mbufs[(mq->mq_head + i) % mq->mq_size];
428 *mbufp = m;
429 }
430 ASSERT(m == (struct mbuf *)0, "sec_spray_mbufq: m != 0");
431 mbufp = &mq->mq_mbufs[mq->mq_head];
432 mq->mq_head = (mq->mq_head + n) % mq->mq_size;
433 return(mbufp);
434 }
435
436 /*
437 * sec_chain_mbufs - chain 'n' mbufs from the mbuf q and return the head.
438 *
439 * We don't touch the head pointer of the queue.
440 * We set the length correctly for each mbuf.
441 */
442
/*
 * Chain 'n' mbufs starting at the queue head and return the head mbuf.
 * The queue's head pointer is NOT advanced; the caller does that.
 *
 * K&R note: 'n' (mbuf count) and 'length' (total packet bytes) have no
 * declarations below and so default to int.
 *
 * Every mbuf gets m_len = MLEN except the last, which gets the residue
 * (length - (n-1)*MLEN, written as MLEN - (n*MLEN - length)).
 */
static struct mbuf *
sec_chain_mbufs(mq, n, length)
	register struct sec_mbufq *mq;
{
	register struct mbuf *m;
	register int k;

	ASSERT(n > 0, "sec_chain: n <= 0");
	ASSERT(n <= mq->mq_size, "sec_chain: n > size");

	/*
	 * build the chain from the back to the front.
	 * 'k' always refers to the entry cyclically after
	 * the one we want to chain next.
	 */

	m = 0;
	k = n + mq->mq_head;
	if (k >= mq->mq_size) k -= mq->mq_size;	/* cyclic wrap, at most once */
	ASSERT(k >= 0, "sec_chain: k < 0");
	ASSERT(k < mq->mq_size, "sec_chain: k >= size");

	do {
		--k;
		if (k < 0) k = mq->mq_size - 1;	/* wrap backwards */
		mq->mq_mbufs[k]->m_next = m;	/* link in front of chain so far */
		m = mq->mq_mbufs[k];
		if (m->m_next == (struct mbuf *)0) {

			/*
			 * "last" mbuf.  Adjust it's length.
			 */

			m->m_len = MLEN - (n*MLEN - length);

		} else {
			m->m_len = MLEN;
		}
	} while (k != mq->mq_head);

	/* Sanity: the chain we built matches the requested count/bytes. */
	ASSERT(mbuf_chain_length(m) == n, "sec_chain: length");
	ASSERT(mbuf_chain_size(m) == length, "sec_chain: size");

	return(m);
}
488
489 /*
490 * mbuf_chain_size - Determine the number of data bytes in a chain of mbufs.
491 */
492
493 static int
494 mbuf_chain_size(m)
495 register struct mbuf *m;
496 {
497 register int count;
498
499 for (count = 0; m != (struct mbuf *)0; m = m->m_next)
500 count += m->m_len;
501 return(count);
502 }
503
504 /*
505 * mbuf_chain_length - Determine the number of mbufs in a chain.
506 */
507
508 static int
509 mbuf_chain_length(m)
510 register struct mbuf *m;
511 {
512 register int count;
513
514 for (count = 0; m != (struct mbuf *)0; m = m->m_next)
515 ++count;
516 return(count);
517 }
518
519 #ifdef MACH
520 /*
521 * m_getm() -- get multiple mbuf's.
522 *
523 * This interface is used in DYNIX to use only one "gate" round-trip
524 * to allocate a set of mbuf's. This implementation is simplistic
525 * mono-processor version.
526 */
527
528 /*ARGSUSED*/
529 struct mbuf *
530 m_getm(canwait, type, n) /* get n mbufs */
531 register int n;
532 {
533 register struct mbuf *m;
534 register struct mbuf *msave = NULL;
535
536 while (n-- > 0) {
537 MGET(m, canwait, type);
538 if (m == NULL) {
539 m_freem(msave);
540 return((struct mbuf *) NULL);
541 }
542 m->m_next = msave;
543 msave = m;
544 }
545
546 return(msave);
547 }
548 #endif /* MACH */
549 #endif /* MACH_KERNEL */
550
551 #ifdef MACH_KERNEL
552
553 /* lock the controller state */
554 #define SS_LOCK(softp) (p_lock(&(softp)->ss_lock, SPLIMP))
555 #define SS_UNLOCK(softp, sipl) (v_lock(&(softp)->ss_lock, sipl))
556
557 /* lock the output state */
558 #define OS_LOCK(softp) (p_lock(&(softp)->os_lock, SPLIMP))
559 #define OS_UNLOCK(softp, sipl) (v_lock(&(softp)->os_lock, sipl))
560
561 #else MACH_KERNEL
562 #ifndef MACH
563
564 /* lock the controller state */
565 #define SS_LOCK(softp) (p_lock(&(softp)->ss_lock, SPLIMP))
566 #define SS_UNLOCK(softp, sipl) (v_lock(&(softp)->ss_lock, sipl))
567
568 /* lock the output state */
569 #define OS_LOCK(softp) (p_lock(&(softp)->os_lock, SPLIMP))
570 #define OS_UNLOCK(softp, sipl) (v_lock(&(softp)->os_lock, sipl))
571
572 #else
573 /*
574 * For now, MACH is "mono-processor" for all network code.
575 */
576
577 /* lock the controller state */
578 #define SS_LOCK(softp) splimp()
579 #define SS_UNLOCK(softp, sipl) splx(sipl)
580
581 /* lock the output state */
582 #define OS_LOCK(softp) splimp()
583 #define OS_UNLOCK(softp, sipl) splx(sipl)
584
585 #endif /* MACH */
586 #endif /* MACH_KERNEL */
587
/* Per-driver globals; se_state[] is allocated (se_max_unit+1 entries)
 * and se_base_vec recorded in se_boot(). */
int	se_max_unit = -1;	/* largest index of active ether controller */
u_char	se_base_vec;		/* base interrupt vector */
struct	se_state *se_state;	/* pointer to array of soft states */
591
592 /*
593 * Probe an SEC for existence of Ether controller.
594 *
595 * There's some debate about what this means: presently
596 * if the controller is there, so is the Ether part.
597 * This is expected to be changed in the future,
598 * when the world of depopulated boards arrives.
599 * So let's look at the diagnostics flags, and make
600 * the decision based on that.
601 */
602
603 static
604 se_probe(probe)
605 struct sec_probe *probe;
606 {
607 if (probe->secp_desc->sec_diag_flags & CFG_S_ETHER)
608 return(0);
609 return(1);
610 }
611
612 /*
613 * se_boot_one()
614 * boot procedure for a single device.
615 *
616 * Allocate the non-mbuf data structures for the device.
617 * We shouldn't really talk to the device now either.
618 *
619 * For both ether read and ether write, the request and done queues
620 * were allocated by autoconfig code. We record handles to these
621 * queues and fill in the actual device programs.
622 *
623 * The done queues should not need anything done to them, as they
624 * never need programs of their own.
625 *
626 * The status pointers for each cib are set to point to local data
627 * in the state structures.
628 *
629 * Iat queues are also allocated, but can't be filled in yet
630 * (no mbufs to allocate yet). For input, the parallel array
631 * of mbuf pointers is allocated as well.
632 *
633 * No locking needs to be done here, as we are still running config
634 * code single-processor.
635 */
636
static void
se_boot_one(softp, sd)
	register struct se_state *softp;
	register struct sec_dev *sd;
{
	int i;

	/*
	 * Controller info: Can do this with
	 * either the input device or output device.
	 * Either way, we just do it once.
	 */

	if (!softp->ss_initted) {
		register struct ifnet *ifp;
		register struct sockaddr_in *sin;

		/* One-time per-controller setup: ifnet fields, locks,
		 * station address copied from the config descriptor. */
#ifdef	MACH_KERNEL
		ifp = &softp->ss_if;
		ifp->if_unit = softp-se_state;	/* pointer arith = unit no. */
		ifp->if_header_size = sizeof(struct ether_header);
		ifp->if_header_format = HDR_ETHERNET;
		ifp->if_address_size = 6;
#else	MACH_KERNEL
		ifp = &softp->ss_arp.ac_if;
		ifp->if_unit = softp-se_state;
		ifp->if_name = se_driver.sed_name;
#endif	/* MACH_KERNEL */
		ifp->if_mtu = se_mtu;
#ifndef	MACH
		init_lock(&ifp->if_snd.ifq_lock, G_IFNET);
#endif	/* MACH */
#ifdef	MACH_KERNEL
#else	/* MACH_KERNEL */
#ifndef	MACH
		sin = (struct sockaddr_in *)&ifp->if_addr;
		sin->sin_family = AF_INET;
		sin->sin_addr = arpmyaddr((struct arpcom *)0);
#endif	/* MACH */
		ifp->if_init = se_init;
		ifp->if_output = se_output;
		ifp->if_ioctl = se_ioctl;
		ifp->if_reset = se_reset;
#endif	/* MACH_KERNEL */
		init_lock(&softp->ss_lock, se_gate);
		bzero((caddr_t)&softp->ss_sum, sizeof(softp->ss_sum));
		softp->ss_scan_int = se_watch_interval;
#ifdef	MACH_KERNEL
		ifp->if_address = (char *)softp->ss_addr;
		bcopy((char *)sd->sd_desc->sec_ether_addr,
			(char *)softp->ss_addr, 6);
#else	/* MACH_KERNEL */
		bcopy((caddr_t)sd->sd_desc->sec_ether_addr,
			(caddr_t)softp->ss_arp.ac_enaddr, 6);
#endif	/* MACH_KERNEL */
		softp->ss_slic = sd->sd_desc->sec_slicaddr;
		softp->ss_bin = se_bin;
		softp->ss_ether_flags = SETHER_S_AND_B;
		softp->ss_alive = 1;
		softp->ss_initted = 1;
		softp->ss_init_called = 0;
#ifdef	MACH_KERNEL
		if_init_queues(ifp);
#else	/* MACH_KERNEL */
		if_attach(ifp);
#endif	/* MACH_KERNEL */
#ifndef	MACH
		pciattach(ifp, softp->ss_arp.ac_enaddr);
#endif	/* MACH */
		/* NOTE(review): appears to check that the soft state sits
		 * below 4Mb — presumably a controller DMA addressing
		 * limit; confirm against SEC hardware docs. */
		if ((int)(&softp->os_gmode + 1) > 4*1024*1024) {
			printf("%s%d: data structures above 4Mb!\n",
				se_driver.sed_name, softp-se_state);
			printf(" Ethernet function is unpredictable.\n");
		}
	}
	if (sd->sd_chan == SDEV_ETHERREAD) {
		/* Input channel: record request/done queue handles and
		 * allocate the IAT ring plus the parallel mbuf/msg ring.
		 * Rings are sized 3 less than the request queue so we
		 * never run the request queue completely full. */
		init_lock(&softp->is_lock, se_gate);
		softp->is_cib = sd->sd_cib;
		softp->is_cib->cib_status =
			KVTOPHYS(&softp->is_status, int *);
		softp->is_reqq.sq_progq = sd->sd_requestq;
		softp->is_reqq.sq_size = sd->sd_req_size;
		softp->is_doneq.sq_progq = sd->sd_doneq;
		softp->is_doneq.sq_size = sd->sd_doneq_size;
		SEC_fill_progq(softp->is_reqq.sq_progq,
			(int)softp->is_reqq.sq_size,
			(int)sizeof(struct sec_edev_prog));
		sec_init_iatq(&softp->is_iatq, softp->is_reqq.sq_size-3);
#ifdef	MACH_KERNEL
		sec_init_msgq(&softp->is_msgq, softp->is_reqq.sq_size-3);
#else	/* MACH_KERNEL */
		sec_init_mbufq(&softp->is_mbufq, softp->is_reqq.sq_size-3);
#endif	/* MACH_KERNEL */
		softp->is_status = 0;
		softp->is_initted = 1;
	} else if (sd->sd_chan == SDEV_ETHERWRITE) {
		long cur_brk;

		/* Output channel: record queue handles and pre-load the
		 * SCSI write command template into every device program. */
		init_lock(&softp->os_lock, se_gate);
		softp->os_cib = sd->sd_cib;
		softp->os_cib->cib_status =
			KVTOPHYS(&softp->os_status, int *);
		softp->os_status = 0;
		softp->os_reqq.sq_progq = sd->sd_requestq;
		softp->os_reqq.sq_size = sd->sd_req_size;
		softp->os_doneq.sq_progq = sd->sd_doneq;
		softp->os_doneq.sq_size = sd->sd_doneq_size;
		SEC_fill_progq(softp->os_reqq.sq_progq,
			(int)softp->os_reqq.sq_size,
			(int)sizeof(struct sec_dev_prog));
		for (i = 0; i < softp->os_reqq.sq_size; ++i) {
			register struct sec_dev_prog *dp;

			dp =
			    PHYSTOKV(softp->os_reqq.sq_progq->pq_un.pq_progs[i],
				struct sec_dev_prog *);
			bcopy((caddr_t)se_scsi_cmd,
				(caddr_t)dp->dp_cmd, sizeof se_scsi_cmd);
			dp->dp_cmd_len = sizeof se_scsi_cmd;
		}

		/*
		 * transmit buffer can't cross 64K boundary
		 */

		/* calloc(0) returns the current break; if the buffer
		 * would straddle a 64K line, round the allocator up. */
		cur_brk = (long)calloc(0);
		if ((cur_brk & 0xFFFF0000)
		    != ((cur_brk+OS_BUF_SIZE) & 0xFFFF0000)) {
			callocrnd((int)0x10000);
		}
		softp->os_buf = (u_char *)calloc(OS_BUF_SIZE);
		sec_init_iatq(&softp->os_iatq, (unsigned)se_write_iats);
#ifdef	MACH_KERNEL
		softp->os_pending = (io_req_t) 0;
#else	/* MACH_KERNEL */
		softp->os_pending = (struct mbuf *)0;
#endif	/* MACH_KERNEL */
		softp->os_initted = 1;
		softp->os_active = 0;
	} else {
		printf("%s%d: invalid device chan %d (0x%x) in boot routine\n",
			se_driver.sed_name, softp-se_state,
			sd->sd_chan, sd->sd_chan);
		panic("se_bootone");
	}
}
783
784 /*
785 * se_boot - allocate data structures, etc at beginning of time.
786 *
787 * Called with an array of configured devices and the number
788 * of ether devices.
789 * We allocate the necessary soft descriptions, and fill them
790 * in with info from the devs[] array.
791 * Due to the dual-device-channel nature of the SEC description,
792 * each entry in the devs[] array describes either the Ether input
793 * device or the Ether output device. We combine this into a
794 * single device state per controller.
795 *
796 * The program queues are allocated by the routine that calls us,
797 * but the device programs themselves are not allocated til now.
798 *
799 * Work related to allocating mbufs is done later at se_init time.
800 */
801
/*
 * K&R note: 'ndevs' has no declaration below and defaults to int.
 * Each controller contributes two entries to devs[] (read channel,
 * write channel), so unit = i/2.
 */
static
se_boot(ndevs, devs)
	struct sec_dev devs[];
{
	register int i;

	/*
	 * First, allocate soft descriptions.
	 */

	/* NOTE(review): with 2 entries per controller the largest unit
	 * is (ndevs-1)/2; ndevs/2 over-allocates one state for even
	 * ndevs.  Harmless: the extra entry stays !ss_alive. */
	se_max_unit = ndevs/2;
	se_state = (struct se_state *)calloc((se_max_unit+1)
					* sizeof(struct se_state));
	se_base_vec = devs[0].sd_vector;

	/*
	 * Now, boot each configured device.
	 */

	for (i = 0; i < ndevs; ++i) {
		register struct sec_dev *devp;
		register int unit;

		devp = &devs[i];
		if (devp->sd_alive == 0)
			continue;
		unit = i/2;	/* two channels per controller */
		se_boot_one(&se_state[unit], devp);
	}
}
832
833 /*
834 * Initialization of interface; clear recorded pending
835 * operations, and reinitialize SCSI/Ether usage.
836 */
837
static
se_init(unit)
	int unit;
{
	register struct se_state *softp = &se_state[unit];
	register struct ifnet *ifp;
	register int i;
#ifdef	MACH_KERNEL
#else	/* MACH_KERNEL */
	struct mbuf *m;
	struct sockaddr_in *sin;
#endif	/* MACH_KERNEL */
	spl_t sipl;

	if (unit < 0 || unit > se_max_unit) {
		printf("%s%d: invalid unit in init\n",
			se_driver.sed_name, unit);
		return;
	}
	/* Lock output side, then controller state — the documented
	 * safe ordering (see the locking notes at the top of the file). */
	sipl = OS_LOCK(softp);
	(void) SS_LOCK(softp);

	if (!softp->ss_alive || !softp->ss_initted || !softp->is_initted
	    || !softp->os_initted)
		goto ret;

#ifdef	MACH_KERNEL
	ifp = &softp->ss_if;
#else	MACH_KERNEL
	ifp = &softp->ss_arp.ac_if;
#ifndef	MACH
	sin = (struct sockaddr_in *)&ifp->if_addr;
	if (sin->sin_addr.s_addr == 0)
		goto ret;	/* address still unknown */
#else
	/* not yet, if address still unknown */
	if (ifp->if_addrlist == (struct ifaddr *)0)
		goto ret;
#endif	/* MACH */
#endif	/* MACH_KERNEL */

	/* Already running, or init already done: skip device setup. */
	if (ifp->if_flags & IFF_RUNNING)
		goto justarp;

	if (softp->ss_init_called)
		goto justarp;

#ifdef	MACH_KERNEL
	timeout(se_watch, (char *)0, softp->ss_scan_int);
#else	/* MACH_KERNEL */
	ifp->if_watchdog = se_watch;
	ifp->if_timer = softp->ss_scan_int;
#endif	/* MACH_KERNEL */

#ifdef	MACH_KERNEL
	/*
	 * Set the Ether modes before we add input programs,
	 * as the firmware will need to know the size of the
	 * input packets before we do the SINST_STARTIO.
	 */

	se_set_modes(softp);

	/*
	 * Allocate some net_kmsgs for input packets and fill in
	 * the iats.
	 */
	for (i = 0; i < softp->is_msgq.mq_size; ++i) {
		register ipc_kmsg_t nk;

		nk = net_kmsg_alloc();

		se_add_read_progs(softp, nk);
	}
#else	MACH_KERNEL
	/*
	 * Allocate the mbufs parallel to the iat queue for input packets
	 * and fill in the iats.
	 * We allocate the mbufs one at a time because expansion happens
	 * slowly (a page cluster at a time) and we ask for a lot
	 * of mbufs at once.
	 */

	m = (struct mbuf *)0;
	for (i = 0; i < softp->is_mbufq.mq_size; ++i) {
		register struct mbuf *newm = m_getm(M_DONTWAIT, MT_DATA, 1);

		if (newm == (struct mbuf *)0) {
			printf("%s%d: can't allocate %d mbufs!\n",
				se_driver.sed_name, softp-se_state,
				softp->is_mbufq.mq_size);
			m_freem(m);
			goto ret;
		}
		newm->m_next = m;
		m = newm;
	}

	/*
	 * Set the Ether modes before we add input programs,
	 * as the firmware will need to know the size of the
	 * input packets before we do the SINST_STARTIO.
	 */

	se_set_modes(softp);
	se_add_read_progs(softp, m);
#endif	/* MACH_KERNEL */

	/* Kick the firmware to start consuming the read programs. */
	if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
			softp->ss_bin, SDEV_ETHERREAD, 1)
	    != SEC_ERR_NONE) {
		printf("%s%d: can't initialize.\n",
			se_driver.sed_name, softp-se_state);
		goto ret;
	}

	/*
	 * Shouldn't have to restart output to the device,
	 * as nothing was reset.
	 */

#ifndef	MACH
	ifp->if_flags |= IFF_UP|IFF_RUNNING;
#else
	ifp->if_flags |= IFF_RUNNING;
#endif	/* MACH */
	softp->ss_init_called = 1;

justarp:
#ifdef	MACH_KERNEL
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
#else	MACH_KERNEL
#ifndef	MACH
	se_set_modes(softp);
	if_rtinit(ifp, RTF_UP);
	arpattach(&softp->ss_arp);
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
	arpwhohas(&softp->ss_arp, &sin->sin_addr);
#endif	/* MACH */
#endif	/* MACH_KERNEL */
	/* NOTE(review): in the !MACH_KERNEL && MACH configuration the
	 * justarp path falls through to this return with neither
	 * SS_UNLOCK nor OS_UNLOCK executed (both compile to splx under
	 * MACH), leaving the spl raised — looks like a leak; confirm
	 * against the MACH build's lock macros. */
	return;
ret:
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);
}
985
986 #ifdef MACH_KERNEL
987 se_open(unit, flag)
988 int unit;
989 int flag;
990 {
991 if (unit < 0 || unit > se_max_unit)
992 return (D_NO_SUCH_DEVICE);
993
994 se_state[unit].ss_if.if_flags |= IFF_UP;
995 se_init(unit); /* XXX should return status */
996 return (D_SUCCESS);
997 }
998
999 se_setinput(unit, receive_port, priority, filter, filter_count)
1000 int unit;
1001 mach_port_t receive_port;
1002 int priority;
1003 filter_t filter[];
1004 unsigned int filter_count;
1005 {
1006 if (unit < 0 || unit > se_max_unit)
1007 return (D_NO_SUCH_DEVICE);
1008
1009 return (net_set_filter(&se_state[unit].ss_if,
1010 receive_port, priority,
1011 filter, filter_count));
1012 }
1013
1014
1015 #endif /* MACH_KERNEL */
1016 /*
1017 * Ethernet interface interrupt routine.
1018 * Could be output or input interrupt.
1019 * We determine the source and call the appropriate routines
1020 * to decode the done queue programs.
1021 */
1022
1023 static
1024 se_intr(vector)
1025 int vector;
1026 {
1027 int unit = (vector - se_base_vec)/2;
1028 int is_read = (vector - 2*unit) == 0;
1029 register struct se_state *softp = &se_state[unit];
1030 register struct sec_pq *sq;
1031 spl_t sipl;
1032
1033 if (unit < 0 || unit > se_max_unit) {
1034 printf("%s%d: invalid interrupt vector %d\n",
1035 se_driver.sed_name, unit, vector);
1036 return;
1037 }
1038
1039 if (is_read) { /* Receiver interrupt. */
1040 register struct sec_eprogq *epq;
1041 #ifdef DEBUG
1042 if (se_ibug)
1043 printf("R%d ", unit);
1044 #endif /* DEBUG */
1045 ASSERT(softp->ss_alive, "se_intr: alive");
1046 ASSERT(softp->ss_init_called, "se_intr: initted");
1047 sq = &softp->is_doneq;
1048 epq = (struct sec_eprogq *)sq->sq_progq;
1049 ASSERT(epq->epq_tail < sq->sq_size, "se_intr: tail");
1050 ASSERT(epq->epq_head < sq->sq_size, "se_intr: head");
1051
1052 /*
1053 * Lock the input state before we test for work.
1054 * This keeps other processors out of the way once
1055 * we commit to entering the loop and doing work.
1056 * There is a race here between the decision to
1057 * leave the loop and the v_lock that can cause an
1058 * interrupt from the SCSI/Ether controller to be
1059 * missed, but we say this is acceptable, as the
1060 * net should always be busy, and we will eventually
1061 * see the packet on the next interrupt.
1062 */
1063
1064 sipl = cp_lock(&softp->is_lock, SPLIMP);
1065 if (sipl == CPLOCKFAIL)
1066 return;
1067 while (epq->epq_tail != epq->epq_head) { /* work to do */
1068 se_handle_read(softp, &epq->epq_status[epq->epq_tail]);
1069 epq->epq_tail = (epq->epq_tail + 1) % sq->sq_size;
1070 }
1071 v_lock(&softp->is_lock, sipl);
1072
1073 } else { /* Transmitter interrupt. */
1074
1075 register struct sec_progq *pq;
1076 register struct sec_dev_prog *dp;
1077 #ifdef DEBUG
1078 if (se_obug)
1079 printf("X%d ", unit);
1080 #endif
1081 /*
1082 * Since there is only one device program active
1083 * at a time on ether output, it makes more sense to
1084 * spin on the output side lock rather than conditionally
1085 * lock an interrupt lock here.
1086 * When we switch to multiple active device outputs
1087 * per SCSI/Ether, this decision should be reversed,
1088 * as there is the potential for idling an unbounded
1089 * number of processors while Ether transmit interrupts
1090 * occur.
1091 */
1092
1093 sipl = OS_LOCK(softp);
1094 if (!softp->ss_alive || !softp->ss_init_called) {
1095 OS_UNLOCK(softp, sipl);
1096 return;
1097 }
1098 sq = &softp->os_doneq;
1099 pq = sq->sq_progq;
1100
1101 ASSERT(pq->pq_tail < sq->sq_size, "se_intr: tail 2");
1102 ASSERT(pq->pq_head < sq->sq_size, "se_intr: head 2");
1103 if (!softp->os_active) { /* spurious interrupt */
1104 OS_UNLOCK(softp, sipl);
1105 return;
1106 }
1107 while (pq->pq_tail != pq->pq_head) {
1108 dp = PHYSTOKV(pq->pq_un.pq_progs[pq->pq_tail],
1109 struct sec_dev_prog *);
1110 if (dp->dp_status1 != 0) {
1111 int status = sec_start_prog(SINST_RESTARTIO,
1112 softp->os_cib,
1113 softp->ss_slic,
1114 softp->ss_bin,
1115 SDEV_ETHERWRITE, 1);
1116 if (status != SEC_ERR_NONE
1117 && status != SEC_ERR_NO_MORE_IO) {
1118 printf("%s%d: se_intr: status 0x%x\n",
1119 se_driver.sed_name,
1120 softp-se_state,
1121 softp->os_status);
1122 }
1123 }
1124 pq->pq_tail = (pq->pq_tail + 1) % sq->sq_size;
1125 softp->os_active = 0;
1126 ASSERT(softp->os_pending != (struct mbuf *)0,
1127 "se_intr: os_pending");
1128 #ifdef PROMISCUOUS
1129 if (promiscon) {
1130 struct mbuf * xm;
1131 struct promiscif * xpm;
1132
1133 /*
1134 * manage monitor receipt of transmitted packets.
1135 */
1136
1137 xm = m_getm(M_DONTWAIT, MT_DATA, 1);
1138 (void) IF_LOCK(&promiscq);
1139 if (!xm || IF_QFULL(&promiscq)) {
1140 IF_DROP(&promiscq);
1141 IF_UNLOCK(&promiscq, SPLIMP);
1142 m_freem(softp->os_pending);
1143 if(xm) (void) m_free(xm);
1144 }else{
1145 xm->m_next = softp->os_pending;
1146 xm->m_len = sizeof(struct promiscif);
1147 xpm = mtod(xm, struct promiscif *);
1148 xpm->promiscif_ifnet = (caddr_t) softp;
1149 xpm->promiscif_flag = PROMISC_XMIT;
1150
1151 IF_ENQUEUE(&promiscq, xm);
1152 if (!promiscq.ifq_busy) {
1153 schednetisr(NETISR_PROMISC);
1154 }
1155 IF_UNLOCK(&promiscq, SPLIMP);
1156 }
1157
1158 } else /* not monitoring */
1159
1160 #endif /* PROMISCUOUS */
1161 #ifdef MACH_KERNEL
1162 iodone(softp->os_pending);
1163 softp->os_pending = 0;
1164 #else
1165 m_freem(softp->os_pending);
1166 softp->os_pending = (struct mbuf *)0;
1167 #endif /* MACH_KERNEL */
1168 /* Should only be one program in the queue */
1169 ASSERT(pq->pq_tail == pq->pq_head,"se_intr: head/tail");
1170 }
1171 OS_UNLOCK(softp, sipl);
1172 se_start(softp);
1173 }
1174 }
1175
1176 #ifdef MACH_KERNEL
1177
1178 /*
1179 * We set the offset for receives so that the data portion of a packet
1180 * lands at sizeof(struct packet_header) into the data portion
1181 * of a network message. That leaves the Ethernet type word at
1182 * the 'type' field of the packet_header.
1183 */
1184 #define ETHER_HDR_OFF \
1185 (sizeof(struct packet_header) - sizeof(struct ether_header))
1186
1187 se_handle_read(softp, statp)
1188 register struct se_state *softp;
1189 struct sec_ether_status *statp;
1190 {
1191 struct sec_msgq *mq = &softp->is_msgq;
1192 ipc_kmsg_t old_kmsg, new_kmsg;
1193 struct ifnet *ifp = &softp->ss_if;
1194 int len;
1195 char *old_addr;
1196
1197 old_kmsg = mq->mq_msgs[mq->mq_head];
1198 old_addr = &net_kmsg(old_kmsg)->packet[ETHER_HDR_OFF];
1199
1200 if (KVTOPHYS(old_addr, u_char *) != statp->es_data) {
1201 struct sec_iatq *iq = &softp->is_iatq;
1202 struct sec_iat *iat = &iq->iq_iats[iq->iq_head];
1203 register int i;
1204
1205 printf("%s%d: botch: statp 0x%x from mq 0x%x; es_data 0x%x\n",
1206 se_driver.sed_name, softp-se_state, statp, mq,
1207 statp->es_data);
1208 printf("mq->mq_head %d KVTOPHYS(old_kmsg) 0x%x\n",
1209 mq->mq_head, KVTOPHYS(old_kmsg, int));
1210 printf("iatq 0x%x iq->iq_head %d iat 0x%x addr 0x%x count %d\n",
1211 iq, iq->iq_head, iat, iat->iat_data, iat->iat_count);
1212 for (i = 0; i < 500000; ++i) {
1213 if (KVTOPHYS(old_kmsg, int) == (int)statp->es_data)
1214 break;
1215 }
1216 }
1217
1218 len = (int)statp->es_count;
1219
1220 #ifdef DEBUG
1221 if (se_ibug) {
1222 printf("got %d ", statp->es_count);
1223 }
1224 #endif
1225
1226 softp->ss_if.if_ipackets++;
1227
1228 new_kmsg = net_kmsg_get();
1229 if (new_kmsg == 0) {
1230 /*
1231 * Cannot allocate replacement message.
1232 * Use the old one again.
1233 */
1234 softp->ss_if.if_rcvdrops++;
1235
1236 se_add_read_progs(softp, old_kmsg);
1237 if (sec_start_prog(SINST_STARTIO, softp->is_cib,
1238 softp->ss_slic, softp->ss_bin,
1239 SDEV_ETHERREAD, 1)
1240 != SEC_ERR_NONE) {
1241 printf("%s%d: se_handle_read: status 0x%x\n",
1242 se_driver.sed_name, softp-se_state,
1243 softp->is_status);
1244 }
1245 #ifdef DEBUG
1246 if (se_ibug > 1)
1247 printf("lose%d ", n);
1248 #endif
1249 return;
1250 }
1251
1252 /*
1253 * Replace the kmsg in the queue.
1254 */
1255
1256 se_add_read_progs(softp, new_kmsg);
1257
1258 if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
1259 softp->ss_bin, SDEV_ETHERREAD, 1)
1260 != SEC_ERR_NONE) {
1261 printf("%s%d: se_handle_read: status 0x%x\n",
1262 se_driver.sed_name, softp-se_state, softp->is_status);
1263 }
1264 #ifdef DEBUG
1265 if (se_ibug > 1)
1266 printf("repl%d ", n);
1267 #endif
1268
1269 /*
1270 * Fill in the missing fields of the old kmsg.
1271 */
1272 {
1273 register struct ether_header *eh;
1274 register struct packet_header *ph;
1275
1276 eh = (struct ether_header *) &net_kmsg(old_kmsg)->header[0];
1277 ph = (struct packet_header *) &net_kmsg(old_kmsg)->packet[0];
1278
1279 /*
1280 * Copy the Ethernet header from where it was received.
1281 */
1282 *eh = *(struct ether_header *)old_addr;
1283
1284 /*
1285 * Set up the type and length fields in the packet header.
1286 */
1287 ph->type = eh->ether_type;
1288 ph->length = len - sizeof(struct ether_header)
1289 + sizeof(struct packet_header);
1290
1291 /*
1292 * Hand the packet to the network module.
1293 */
1294 net_packet(&softp->ss_if, old_kmsg, ph->length,
1295 ethernet_priority(old_kmsg));
1296 }
1297 #ifdef DEBUG
1298 if (se_ibug) printf("\n");
1299 #endif
1300 }
1301
1302 /*
1303 * se_add_read_progs - Add a read program to the request queue.
1304 *
1305 * We replace the net_kmsgs in the kmsg queue 'mq' here.
1306 * 'm' is a net_kmsg.
1307 */
1308 se_add_read_progs(softp, m)
1309 register struct se_state *softp;
1310 register ipc_kmsg_t m;
1311 {
1312 struct sec_msgq *mq;
1313 struct sec_iatq *iq;
1314 struct sec_pq *sq;
1315 struct sec_progq *pq;
1316 struct sec_edev_prog *dp;
1317 struct sec_iat *iat;
1318
1319 mq = &softp->is_msgq;
1320 iq = &softp->is_iatq;
1321 sq = &softp->is_reqq;
1322 pq = sq->sq_progq;
1323
1324 /* We do the entire msg in one piece. */
1325
1326 assert(iq->iq_size - iq->iq_head >= 1);
1327 assert(mq->mq_size - mq->mq_head >= 1);
1328
1329 /*
1330 * Point iats at the net_kmsg.
1331 * It is illegal to wrap around the ring.
1332 */
1333 iat = &iq->iq_iats[iq->iq_head];
1334
1335 iat->iat_data = KVTOPHYS(&net_kmsg(m)->packet[ETHER_HDR_OFF],
1336 u_char *);
1337 iat->iat_count = sizeof(struct ether_header) + ETHERMTU;
1338
1339 iat = &iq->iq_iats[iq->iq_head];
1340 iq->iq_head = (iq->iq_head + 1) % iq->iq_size;
1341
1342 mq->mq_msgs[mq->mq_head] = m;
1343 mq->mq_head = (mq->mq_head + 1) % mq->mq_size;
1344
1345 /*
1346 * Add the device program.
1347 */
1348 assert((pq->pq_head + 1) % sq->sq_size != pq->pq_tail);
1349 dp = PHYSTOKV(pq->pq_un.pq_eprogs[pq->pq_head],
1350 struct sec_edev_prog *);
1351 assert(dp != 0);
1352 dp->edp_iat_count = 1;
1353 dp->edp_iat = SEC_IATIFY(KVTOPHYS(iat, vm_offset_t));
1354 pq->pq_head = (pq->pq_head + 1) % sq->sq_size;
1355
1356 }
1357
1358 #else MACH_KERNEL
1359 /*
1360 * Handle read interrupt requests.
1361 * This includes recognizing trailer protocol,
1362 * and passing up to the higher level software.
1363 *
1364 * This is called with the input state locked.
1365 */
1366
1367 extern struct custom_client custom_clients[];
1368
/*
 * se_handle_read - process one received Ethernet packet (non-MACH_KERNEL).
 *
 * Called from the receive-interrupt path with the input state locked.
 * Replaces the packet's mbufs in the hardware ring and restarts the
 * read program, recognizes trailer protocol, optionally hands packets
 * to the promiscuous-mode queue, then dispatches by Ethernet type to
 * IP, ARP, PCI, custom clients, or the raw interface.
 */
static
se_handle_read(softp, statp)
	register struct se_state *softp;
	struct sec_ether_status *statp;
{
	struct sec_mbufq *mq = &softp->is_mbufq;
	struct mbuf *m, *mnew;
#ifdef PROMISCUOUS
	struct mbuf *mpromisc;
	struct promiscif * mp;
#endif
	int len, n, int_to_sched;
	struct ether_header *hp;
	struct ifqueue *inq;
	spl_t sipl;
	struct ifnet *ifp = &softp->ss_arp.ac_if;
#ifdef RAW_ETHER
	struct raw_header * rh;
	struct mbuf * mrh;
#endif
	int trailer = 0;
	int ci;

	/*
	 * Sanity check: the buffer the controller reports should be the
	 * one at the head of our mbuf queue.  If not, log the state and
	 * spin briefly in case the ring is merely lagging the status.
	 */
	m = mq->mq_mbufs[mq->mq_head];
	if ((u_char *)KVIRTTOPHYS(mtod(m, int)) != statp->es_data) {
		struct sec_iatq *iq = &softp->is_iatq;
		struct sec_iat *iat = &iq->iq_iats[iq->iq_head];
		register int i;

		printf("%s%d: botch: statp 0x%x from mq 0x%x; es_data 0x%x\n",
			se_driver.sed_name, softp-se_state, statp, mq,
			statp->es_data);
		printf("mq->mq_head %d KVIRTTOPHYS(m) 0x%x\n",
			mq->mq_head, KVIRTTOPHYS(mtod(m, int)));
		printf("iatq 0x%x iq->iq_head %d iat 0x%x addr 0x%x count %d\n",
			iq, iq->iq_head, iat, iat->iat_data, iat->iat_count);
		for (i = 0; i < 500000; ++i) {
			if (KVIRTTOPHYS(mtod(m, int)) == (int)statp->es_data)
				break;
		}
	}

	/* Collect the mbufs holding this packet into a single chain. */
	n = howmany(statp->es_count, MLEN);
	m = sec_chain_mbufs(mq, n, (int)statp->es_count);

#ifdef DEBUG
	if (se_ibug) {
		printf("got%d ", statp->es_count);
	}
#endif

	softp->ss_arp.ac_if.if_ipackets++;

#ifdef PROMISCUOUS

	/*
	 * get an mbuf for passing softp to promiscintr.
	 */

	mnew = m_getm(M_DONTWAIT, MT_DATA, n+1);
#else
	mnew = m_getm(M_DONTWAIT, MT_DATA, n);

#endif /* PROMISCUOUS */

	if (mnew == 0) {

		/*
		 * Can't allocate replacement mbufs.
		 * Go ahead and use the old ones again.
		 */

		se_add_read_progs(softp, m);
		if (sec_start_prog(SINST_STARTIO, softp->is_cib,
				   softp->ss_slic, softp->ss_bin,
				   SDEV_ETHERREAD, 1)
		    != SEC_ERR_NONE) {
			printf("%s%d: se_handle_read: status 0x%x\n",
				se_driver.sed_name, softp-se_state,
				softp->is_status);
		}
#ifdef DEBUG
		if (se_ibug > 1)
			printf("lose%d ", n);
#endif
		softp->ss_arp.ac_if.if_ierrors++;
		return;
	}

	/*
	 * Replace the mbufs in the circular mbuf queue.
	 */

#ifdef PROMISCUOUS

	/*
	 * strip off promiscif mbuf
	 */

	mpromisc = mnew;
	mnew = mnew->m_next;
	mpromisc->m_next = m;
	mpromisc->m_len = sizeof(struct promiscif);

#endif

	se_add_read_progs(softp, mnew);
	if (sec_start_prog(SINST_STARTIO, softp->is_cib, softp->ss_slic,
			   softp->ss_bin, SDEV_ETHERREAD, 1)
	    != SEC_ERR_NONE) {
		printf("%s%d: se_handle_read: status 0x%x\n",
			se_driver.sed_name, softp-se_state, softp->is_status);
	}
#ifdef DEBUG
	if (se_ibug > 1)
		printf("repl%d ", n);
#endif

	/*
	 * m now contains a packet from the interface.
	 * Check for trailer protocol.
	 */

	hp = mtod(m, struct ether_header *);
	len = statp->es_count;
	ASSERT(len == mbuf_chain_size(m), "se_handle_read: size");
	len -= sizeof(struct ether_header);
	if (len < ETHERMIN) {

#ifdef DEBUG
		if (se_ibug)
			printf("packet returned of size %d\n", len);
#endif
		++softp->ss_arp.ac_if.if_ierrors;

#ifdef PROMISCUOUS
		m_freem(mpromisc);
#else
		m_freem(m);
#endif

		return;
	}

#ifdef PROMISCUOUS
	if (promiscon) {	/* promiscon => give promiscintr the packet */
		inq = &promiscq;
		int_to_sched = NETISR_PROMISC;
		mp = mtod(mpromisc, struct promiscif *);
		mp->promiscif_ifnet = (caddr_t)softp;
		mp->promiscif_flag = PROMISC_RCVD;
		sipl = IF_LOCK(inq);
		if (IF_QFULL(inq)) {
			IF_DROP(inq);
			IF_UNLOCK(inq, sipl);
			m_freem(mpromisc);
			return;
		}
		IF_ENQUEUE(inq, mpromisc);
		if (!inq->ifq_busy) {
			schednetisr(int_to_sched);
		}
		IF_UNLOCK(inq, sipl);
		return;
	}

#endif /* PROMISCUOUS */

	/*
	 * check for SETHER_PROMISCUOUS but !promiscon
	 */

	if (softp->ss_ether_flags == SETHER_PROMISCUOUS) {
		if (bcmp((char *)etherbroadcastaddr,
			 (char *)hp->ether_dhost, 6) != 0
		    && bcmp((char *)softp->ss_arp.ac_enaddr,
			    (char *)hp->ether_dhost, 6) != 0) {

#ifdef PROMISCUOUS
			m_freem(mpromisc);
#else
			m_freem(m);	/* throw promiscuous packets away. */
#endif
			return;
		}
	}

#ifdef PROMISCUOUS

	(void) m_free(mpromisc); /* promiscoff, no need for promiscif buffer */

#endif

	/* Strip the Ethernet header; type goes to host order for dispatch. */
	m->m_off += sizeof(struct ether_header);
	m->m_len -= sizeof(struct ether_header);
	hp->ether_type = ntohs((u_short)hp->ether_type);
	if (hp->ether_type >= ETHERPUP_TRAIL
	    && hp->ether_type < ETHERPUP_TRAIL + ETHERPUP_NTRAILER) {
		mnew = se_reorder_trailer_packet(hp, m);
		if (mnew == (struct mbuf *)0) {
			m_freem(m);
			return;
		}
		m = mnew;
		trailer++;
	}
#ifdef MACH
	/*
	 * 4.3 wants the interface pointer inserted in front of the
	 * data -- do this.  For now, just tack on an mbuf to front
	 * of the mbuf chain (this works for both trailers and non-trailer
	 * packets).  This can likely be done more efficiently.
	 *
	 * See IF_ADJ()/IF_DEQUEUEIF().
	 */
	MGET(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == (struct mbuf *) NULL) {
		m_freem(m);
		return;
	}
	mnew->m_len = sizeof (struct ifnet *);
	*(mtod(mnew, struct ifnet **)) = ifp;
	mnew->m_next = m;
	m = mnew;
#endif /* MACH */

#ifdef DEBUG
	if (se_ibug)
		printf("type0x%x ", hp->ether_type);
#endif

	switch (hp->ether_type) {
#ifdef INET
	case ETHERPUP_IPTYPE:
#ifdef DEBUG
		if (se_ibug) {
			printf("ip ");
			if (hp->ether_dhost[0] & 0x01)
				printf("broad ");
			else printf("station ");
		}
#endif /* DEBUG */
		int_to_sched = NETISR_IP;
		inq = &ipintrq;
		break;

	case ETHERPUP_ARPTYPE:
#ifdef DEBUG
		if (se_ibug)
			printf("arp\n");
#endif /* DEBUG */
		arpinput(&softp->ss_arp, m);
		return;
#endif /* INET */

#ifndef MACH
	case PCI_TYPE:
		pcirint(hp, m);
		return;
#endif MACH

	default:

#ifndef MACH
		/* do not queue reordered trailer to rawif or custom */

		if(trailer) {
			m_freem(m);
			return;
		}

		/* allow for custom ether_read device drivers */

		for(ci = 0; ci < 4; ci++) {
			if(custom_clients[ci].custom_devno
			&& custom_clients[ci].custom_type == hp->ether_type)
			{

	ASSERT(cdevsw[major(custom_clients[ci].custom_devno)].d_read,
		"no custom_client cdevsw.d_read!");

	(*cdevsw[major(custom_clients[ci].custom_devno)].d_read)
		(hp, m, ifp);

				custom_clients[ci].custom_count++;
				return;
			}
		}

#ifdef RAW_ETHER

		/*
		 * reput the ether header into the lead data buffer
		 * *and* copy a Unix4.2 raw_header for compatibility
		 */

		m->m_off -= sizeof(struct ether_header);
		m->m_len += sizeof(struct ether_header);
		int_to_sched = NETISR_RAW;
		inq = &rawif.if_snd;
		mrh = m_getclrm(M_DONTWAIT, MT_DATA, 1);
		if(mrh == (struct mbuf *) NULL) {
			m_freem(m);
			return;
		}

		/*
		 * link the raw_header into the ether packet for 4.2
		 * compatibility (?)
		 *
		 * set up raw header, using type as sa_data for bind.
		 * raw_input() could do this if static struct set up.
		 * - for now assign AF_UNSPEC for protocol
		 */

		mrh->m_next = m;
		m = mrh;
		rh = mtod(mrh, struct raw_header*);
		rh->raw_proto.sp_family = AF_RAWE;
		rh->raw_proto.sp_protocol = AF_UNSPEC;

		/*
		 * copy AF_RAWE and ether_type in for dst addr
		 */

		rh->raw_dst.sa_family = AF_RAWE;
		bcopy((caddr_t)&hp->ether_type,
		      (caddr_t)rh->raw_dst.sa_data, 2);
		bcopy((caddr_t)&hp->ether_type,
		      (caddr_t)rh->raw_src.sa_data, 2);

		/*
		 * put type back into net order
		 */

		hp->ether_type = htons(hp->ether_type);

		/*
		 * copy AF_RAWE and if_unit # in for src addr
		 */

		rh->raw_src.sa_family = AF_RAWE;
		bcopy((caddr_t)&ifp->if_unit,
		      (caddr_t)&rh->raw_src.sa_data[2], sizeof(short));

#else /* not RAW_ETHER */

		m_freem(m);
		return;

#endif RAW_ETHER
#else MACH
		m_freem(m);
		return;
#endif MACH

	} /* end switch */

	/* Enqueue on the chosen protocol input queue and kick the netisr. */
#ifndef MACH
	sipl = IF_LOCK(inq);
#else
	sipl = splimp();
#endif MACH
	if (IF_QFULL(inq)) {
		IF_DROP(inq);
#ifndef MACH
		IF_UNLOCK(inq, sipl);
#else
		splx(sipl);
#endif MACH
		m_freem(m);
		return;
	}
	IF_ENQUEUE(inq, m);
#ifndef MACH
	if (!inq->ifq_busy) {
		schednetisr(int_to_sched);
	}
	IF_UNLOCK(inq, sipl);
#else
	schednetisr(int_to_sched);
	splx(sipl);
#endif MACH
#ifdef DEBUG
	if (se_ibug) printf("\n");
#endif DEBUG
}
1756
1757
1758
1759 /*
1760 * se_reorder_trailer_packet - return a real mbuf chain after noticing trailer
1761 * protocol is being used.
1762 *
1763 * Return the new chain, and modify the header to reflect the real type.
1764 * Return a null mbuf if we couldn't do it.
1765 * It is the responsibility of the caller to free the original, if necessary.
1766 */
1767
/*
 * Trailer-protocol trailer: the packet's real Ethernet type and the
 * byte count of the trailer plus following header data, both in
 * network byte order.
 */
struct trailer {
	u_short	tl_type;
	u_short	tl_count;
};
1772
#ifdef PROMISCUOUS

/* N.B. this routine is not static so promiscq handler can call it */

struct mbuf *se_reorder_trailer_packet(hp, m)

#else

static struct mbuf *se_reorder_trailer_packet(hp, m)

#endif PROMISCUOUS

	struct ether_header *hp;
	register struct mbuf *m;
{
	register struct mbuf *mnew, *split;
	int trail_off;
	struct trailer *trailerp;

	/*
	 * find mbuf where we have to split things.
	 */

	/* Trailer types encode the data length in 512-byte units. */
	trail_off = (hp->ether_type - ETHERPUP_TRAIL)*512;
	if (trail_off != 512 && trail_off != 1024) {
		printf("%s: ignore trailer with type 0x%x\n",
			se_driver.sed_name, hp->ether_type);
		return((struct mbuf *)0);
	}
	split = m;
	for (; split != 0 && split->m_len <= trail_off; split = split->m_next)
		trail_off -= split->m_len;
#ifdef DEBUG
	if (se_ibug > 1)
		printf("split 0x%x trail_off %d ", split, trail_off);
#endif DEBUG

	if (split == (struct mbuf *)0)
		return((struct mbuf *)0);

	/*
	 * trail_off has the index into 'split' of the trailer.
	 * Lots of potential boundary conditions here that should
	 * be checked, but since we know the size of data blocks
	 * in trailer-protocol packets == 512 or 1024 and MLEN == 112,
	 * we are guaranteed that the trailer header is completely
	 * embedded in a single mbuf.
	 */

	trailerp = (struct trailer *)(mtod(split, int) + trail_off);
	if (trail_off + sizeof(struct trailer) > split->m_len)
		return((struct mbuf *)0);

	MGET(mnew, M_DONTWAIT, MT_DATA);
	if (mnew == 0)
		return((struct mbuf *)0);

	/*
	 * Know where to split, and have place for start of header.
	 * Build real header by copying and chaining.
	 */

	mnew->m_off = MMINOFF;
	mnew->m_len = split->m_len - trail_off - sizeof(struct trailer);
	mnew->m_next = split->m_next;
	bcopy((caddr_t)(trailerp+1), mtod(mnew, caddr_t), (u_int)mnew->m_len);
	hp->ether_type = ntohs(trailerp->tl_type);

#ifdef DEBUG
	if (se_ibug > 1)
		printf("new len %d tlr len %d trtype0x%x ",
			mnew->m_len, ntohs(trailerp->tl_count), hp->ether_type);
#endif DEBUG

	split->m_len = trail_off;
	split->m_next = 0;

#ifdef DEBUG
	if (ntohs(trailerp->tl_count) !=
	    sizeof(struct trailer) + mbuf_chain_size(mnew)) {
		printf("%s: odd trailer count %d, expected %d+%d\n",
			se_driver.sed_name, ntohs(trailerp->tl_count),
			sizeof(struct trailer), mbuf_chain_size(mnew));
	}
#endif DEBUG

	/*
	 * NOTE(review): this assignment overwrites the split->m_next
	 * link installed into mnew above -- harmless only if the moved
	 * header always fits entirely within 'split' (i.e. split was the
	 * last mbuf).  TODO confirm against trailer-protocol senders.
	 */
	mnew->m_next = m;
	return(mnew);
}
1862
1863
1864 /*
1865 * se_add_read_progs - Add one or two read programs to the request queue.
1866 *
1867 * We replace the mbufs in the mbuf queue 'mq' here. 'm' is an
1868 * mbuf chain of the appropriate length.

/*
 * se_add_read_progs - queue one or two read device programs for the
 * mbuf chain 'm' (non-MACH_KERNEL version); see block comment above.
 */
static
se_add_read_progs(softp, m)
	register struct se_state *softp;
	register struct mbuf *m;
{
	struct sec_mbufq *mq;
	struct sec_iatq *iq;
	struct sec_pq *sq;
	struct sec_progq *pq;
	struct sec_edev_prog *dp;
	int n;

	mq = &softp->is_mbufq;
	iq = &softp->is_iatq;
	n = mbuf_chain_length(m);
	sq = &softp->is_reqq;
	pq = sq->sq_progq;

	/*
	 * The iat ring may not wrap within a single device program, so
	 * split the chain at the end of the ring and emit one program
	 * per contiguous run of iats.
	 */
	while (n > 0) {
		int nnow = MIN(n, iq->iq_size - iq->iq_head);
		struct sec_iat *iat;
		struct mbuf *mnext;

#ifdef DEBUG
		if (se_ibug > 1)
			printf("add%d ", nnow);
#endif DEBUG
		ASSERT(nnow >= 1, "se_add_read: nnow 1");
		ASSERT(nnow == MIN(n, mq->mq_size - mq->mq_head),
		       "se_add_read: nnow 2");
		if (nnow != n) {
			register struct mbuf *mprev;
			int i;

			/* Detach the first nnow mbufs from the chain. */
			for (mprev = m, i = 0; i < nnow-1; ++i, mprev = mprev->m_next)
				continue;
			mnext = mprev->m_next;
			mprev->m_next = (struct mbuf *)0;
			ASSERT(mbuf_chain_length(m) == nnow, "se_add_read: length");
			ASSERT(mbuf_chain_length(mnext) == n - nnow,
			       "se_add_read: length 2");
		} else {
			mnext = (struct mbuf *)0;
		}

		/*
		 * m now has chain to spray into iats.
		 * mnext has the rest of the chain.
		 */

		iat = sec_spray_mbuf_iatq(m, iq);
		if (iat == 0)
			panic("se_add_read_progs");

		(void) sec_spray_mbuf_mbufq(m, mq);

		/*
		 * add the device program.
		 */

		ASSERT((pq->pq_head + 1) % sq->sq_size != pq->pq_tail,
		       "se_add_read: bad head");
		dp = pq->pq_un.pq_eprogs[pq->pq_head];
		ASSERT(dp != (struct sec_edev_prog *)0, "se_add_read: dp 0");
		dp->edp_iat_count = nnow;
		dp->edp_iat = SEC_IATIFY(iat);
		pq->pq_head = (pq->pq_head + 1) % sq->sq_size;

		n -= nnow;
		m = mnext;
	}
}
1943 #endif MACH_KERNEL
1944 /*
1945 * Ethernet output routine.
1946 * Encapsulate a packet of type family for the local net.
1947 * If this packet is a broadcast packet or is destined for
1948 * ourselves, we pass a copy of it through the loopback
1949 * interface, as the SEEQ chip is not capable of hearing
1950 * its own transmissions.
1951 */
1952 #ifdef MACH_KERNEL
1953 se_output(dev, ior)
1954 dev_t dev;
1955 io_req_t ior;
1956 {
1957 register int unit = minor(dev);
1958 register struct se_state *softp = &se_state[unit];
1959
1960 return (net_write(&softp->ss_if, se_start_u, ior));
1961 }
1962
1963 se_start_u(unit)
1964 int unit;
1965 {
1966 se_start(&se_state[unit]);
1967 }
1968
1969 #else MACH_KERNEL
/*
 * se_output - ifnet output routine (non-MACH_KERNEL).
 *
 * 'ifp'  - interface the packet goes out on.
 * 'm'    - mbuf chain holding the packet (consumed on all paths).
 * 'dest' - destination address; AF_INET and AF_UNSPEC are supported.
 *
 * Returns 0 or a Unix errno.
 */
static
se_output(ifp, m, dest)
	struct ifnet *ifp;
	register struct mbuf *m;
	struct sockaddr *dest;
{
	register struct se_state *softp = &se_state[ifp->if_unit];
	u_char ether_dest[6];
	register struct ether_header *header;
	int type;
	spl_t sipl;
	extern struct ifnet loif;
#ifdef MACH
	int usetrailers;
#endif MACH

#ifdef MACH
	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) {
		m_freem(m);
		return(ENETDOWN);
	}
#endif MACH

#ifdef DEBUG
	if (se_obug) {
		printf("O%d ", ifp->if_unit);
		if (se_obug > 1) {
			printf("se_output called with...\n");
			dump_mbuf_chain(m);
		}
	}
#endif DEBUG
	switch (dest->sa_family) {
#ifdef INET
	case AF_INET: {
		struct in_addr inet_dest;
		register struct mbuf *m0 = m;
		int off;
		struct trailer *tl;

		inet_dest = ((struct sockaddr_in *)dest)->sin_addr;
#ifndef MACH
		if (!arpresolve(&softp->ss_arp, m, &inet_dest, ether_dest))
			return(0);	/* Not yet resolved */
#else
		/*
		 * New parameter, tells if should use trailers.
		 * Need the paramater, but can ignore the result.
		 */
		if (!arpresolve(&softp->ss_arp, m, &inet_dest, ether_dest, &usetrailers))
			return(0);	/* Not yet resolved */
#endif MACH
		/*
		 * Broadcast: loop a copy back through loif, since the
		 * SEEQ chip cannot hear its own transmissions.
		 */
		if (in_lnaof(inet_dest) == INADDR_ANY) {
			struct mbuf *copy = (struct mbuf *)0;

			copy = m_copy(m, 0, (int)M_COPYALL);
			if (copy != (struct mbuf *)0)
				(void) looutput(&loif, copy, dest);
		}

		/* Generate trailer protocol? */

		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0
		    && off > 0 && (off & 0x1FF) == 0
		    && m->m_off >= MMINOFF + sizeof(struct trailer)) {
			type = ETHERPUP_TRAIL + (off >> 9);
			m->m_off -= sizeof(struct trailer);
			m->m_len += sizeof(struct trailer);
			tl = mtod(m, struct trailer *);
			tl->tl_type = htons((u_short)ETHERPUP_IPTYPE);
			tl->tl_count = htons((u_short)m->m_len);

			/*
			 * Move first packet (control information)
			 * to end of chain.
			 */

			while (m0->m_next)
				m0 = m0->m_next;
			m0->m_next = m;
			m0 = m->m_next;
			m->m_next = (struct mbuf *)0;
			m = m0;
		} else {
			type = ETHERPUP_IPTYPE;
		}
	}
		break;
#endif INET

	case AF_UNSPEC:
		/* Caller supplied a raw Ethernet header in sa_data. */
		header = (struct ether_header *)dest->sa_data;
		bcopy((caddr_t)header->ether_dhost, (caddr_t)ether_dest,
		      sizeof(ether_dest));
		type = header->ether_type;
		break;

	default:
		printf("%s%d: can't handle address family %d\n",
			se_driver.sed_name, softp-se_state, dest->sa_family);
		m_freem(m);
		return(EAFNOSUPPORT);
	}

	/*
	 * Add the local header.
	 * Always add a new mbuf for the header so that we can compress
	 * short mbufs at the front easily.
	 */
	{
		register struct mbuf *m0;

		MGET(m0, M_DONTWAIT, MT_HEADER);
		if (m0 == (struct mbuf *)0) {
			m_freem(m);
			return(ENOBUFS);
		}
		m0->m_next = m;
		m0->m_off = MMINOFF;
		m0->m_len = sizeof(struct ether_header);
		m = m0;
	}

	header = mtod(m, struct ether_header *);
	header->ether_type = htons((u_short)type);
	bcopy((caddr_t)ether_dest, (caddr_t)header->ether_dhost,
	      sizeof(ether_dest));
	sipl = OS_LOCK(softp);
	bcopy((caddr_t)softp->ss_arp.ac_enaddr,
	      (caddr_t)header->ether_shost, 6);

	/*
	 * The SCSI/Ether interface is not very good at handling short
	 * output packets, so try to condense the first few mbufs
	 * together.
	 * Note that we are guaranteed that m->m_off == MMINOFF, as
	 * we just placed the 14-byte Ethernet header there.
	 */

	while (m->m_next && (m->m_len + m->m_next->m_len) <= MLEN) {
		register struct mbuf *mn = m->m_next;

		bcopy(mtod(mn, caddr_t), (caddr_t)(mtod(m, int)+m->m_len), (u_int)mn->m_len);
		m->m_len += mn->m_len;
		ASSERT(m->m_len <= MLEN, "se_output: MLEN");
		m->m_next = mn->m_next;
		mn->m_next = (struct mbuf *)0;
		m_freem(mn);
	}

	/*
	 * Queue message on interface, and start output if interface not active.
	 */

#ifndef MACH
	(void) IF_LOCK(&ifp->if_snd);
#endif MACH
	if (IF_QFULL(&ifp->if_snd)) {
#ifdef DEBUG
		if (se_obug)
			printf("dropo\n");
#endif DEBUG
		IF_DROP(&ifp->if_snd);
#ifndef MACH
		IF_UNLOCK(&ifp->if_snd, SPLIMP);
#endif MACH
		OS_UNLOCK(softp, sipl);
		m_freem(m);
		return(ENETDOWN);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
#ifndef MACH
	IF_UNLOCK(&ifp->if_snd, SPLIMP);
#endif MACH
	OS_UNLOCK(softp, sipl);
	se_start(softp);
	return(0);
}
2149 #endif MACH_KERNEL
2150
2151
2152 /*
2153 * se_start - start output on the interface.
2154 *
2155 * First we make sure it is idle and that there is work to do.
2156 *
2157 * We spray the mbuf into the output iat queue,
2158 * build the device program and start the program running.
2159 *
2160 * EMERGENCY FIX: Since someone is putting tiny mbufs in the
2161 * middle of the mbuf chain, we must copy mbufs into an output
2162 * buffer until we understand the problem better.
2163 */
2164
/*
 * se_start - start output on the interface (see block comment above).
 * May be called from process or interrupt level; a no-op if the device
 * is already transmitting or the send queue is empty.
 */
se_start(softp)
	register struct se_state *softp;
{
	spl_t sipl;

	/* Ignore calls for nonexistent units. */
	if (softp-se_state > se_max_unit)
		return;
#ifdef DEBUG
	if (se_obug)
		printf("S%d ", softp-se_state);
#endif DEBUG
	sipl = OS_LOCK(softp);
	if (softp->os_active) {
#ifdef DEBUG
		if (se_obug)
			printf("active ");
#endif DEBUG
		goto ret;
	}

	/*
	 * Device not busy.  Is there something in the queue?
	 */

	for (;;) {
#ifdef MACH_KERNEL
		struct ifqueue *ifq = &softp->ss_if.if_snd;
		register io_req_t m;
#else MACH_KERNEL
		struct ifqueue *ifq = &softp->ss_arp.ac_if.if_snd;
		register struct mbuf *m, *n;
#endif MACH_KERNEL
		register struct sec_pq *sq = &softp->os_reqq;
		register struct sec_progq *pq = sq->sq_progq;
		register struct sec_dev_prog *dp;
		u_char *cp;
		struct ether_header *header;
		int packetsize, padcount;

		dp = PHYSTOKV(pq->pq_un.pq_progs[pq->pq_head],
			      struct sec_dev_prog *);
#ifndef MACH
		(void) IF_LOCK(ifq);
#endif MACH
		IF_DEQUEUE(ifq, m);
#ifndef MACH
		IF_UNLOCK(ifq, SPLIMP);
#endif MACH
#ifdef MACH_KERNEL
		if (m == 0)
#else MACH_KERNEL
		if (m == (struct mbuf *)0)
#endif MACH_KERNEL
			break;

		/*
		 * m is a nonempty chain of mbufs
		 * corresponding to a packet.
		 * Flush the iat queue to empty, and
		 * place the mbufs there.
		 */

		ASSERT(sq->sq_size != 0, "se_start: size");
		ASSERT(pq->pq_head < sq->sq_size, "se_start: head");
		ASSERT(pq->pq_tail < sq->sq_size, "se_start: tail");
		ASSERT((pq->pq_head + 1) % sq->sq_size != pq->pq_tail,
		       "se_start: head+1");
		ASSERT(pq->pq_tail == pq->pq_head, "se_start: head/tail");

#ifdef MACH_KERNEL
		softp->ss_if.if_opackets++;
#else MACH_KERNEL
		softp->ss_arp.ac_if.if_opackets++;
#endif MACH_KERNEL
		softp->os_active = 1;
		softp->os_pending = m;
#ifdef MACH_KERNEL
		packetsize = m->io_count;
#else MACH_KERNEL
		packetsize = mbuf_chain_size(m);
#endif MACH_KERNEL
		/* Pad short packets up to the Ethernet minimum. */
		padcount = ETHERMIN - (packetsize - sizeof(struct ether_header));
		if (padcount > 0)
			packetsize += padcount;
#ifdef MACH_KERNEL
		bcopy(m->io_data, softp->os_buf, m->io_count);
		cp = softp->os_buf + m->io_count;
#else MACH_KERNEL
		/* EMERGENCY FIX (see block comment): copy the chain
		 * into the contiguous staging buffer os_buf. */
		for (cp = softp->os_buf, n = m; n != 0; n = n->m_next) {
			bcopy(mtod(n, caddr_t), (caddr_t)cp, (u_int)n->m_len);
			cp += n->m_len;
		}
#endif MACH_KERNEL
		ASSERT(cp >= softp->os_buf, "se_start: cp < os_buf");
		ASSERT(cp <= softp->os_buf + OS_BUF_SIZE,
		       "se_start: cp > os_buf");
		dp->dp_un.dp_data = KVTOPHYS(softp->os_buf, unsigned char *);
		dp->dp_data_len = packetsize;
		dp->dp_cmd_len = 0;
		dp->dp_next = (struct sec_dev_prog *)0;
#ifdef DEBUG
		if (se_obug > 1) {
			printf("se_start: starting...");
			dump_bytes((char *) softp->os_buf, packetsize);
		}
#endif DEBUG

		/*
		 * If the packet is a multicast or broadcast
		 * packet, place an indicator in the dp_cmd[]
		 * so that the firmware knows to turn off the
		 * receiver.  The SCSI/Ether firmware can't look
		 * at the packet itself, as the mbuf might not
		 * be within its 4MB window.
		 */

		dp->dp_cmd[0] = SCSI_ETHER_WRITE;
#ifdef MACH_KERNEL
		header = (struct ether_header *)m->io_data;
#else MACH_KERNEL
		header = mtod(m, struct ether_header *);
#endif MACH_KERNEL

		if ((header->ether_dhost[0] & 0x01)
#ifdef PROMISCUOUS
		    || (softp->ss_ether_flags == SETHER_PROMISCUOUS)
#endif PROMISCUOUS
		   )
		{
			dp->dp_cmd[1] = SCSI_ETHER_MULTICAST;
		} else {
			dp->dp_cmd[1] = SCSI_ETHER_STATION;
		}

#ifdef DEBUG
		if (se_obug)
			printf("sio ");
#endif DEBUG
		pq->pq_head = (pq->pq_head + 1) % sq->sq_size;
		if (sec_start_prog(SINST_STARTIO, softp->os_cib,
				   softp->ss_slic, softp->ss_bin,
				   SDEV_ETHERWRITE, 1)
		    != SEC_ERR_NONE) {
			printf("%s%d: se_start: status 0x%x\n",
				se_driver.sed_name, softp-se_state,
				softp->os_status);
		}
		break;
	}
    ret:

#ifdef DEBUG
	if (se_obug)
		printf("\n");
#endif DEBUG
	OS_UNLOCK(softp, sipl);
}
2322
2323 #ifndef MACH
2324 /*
2325 * se_ioctl
2326 */
2327
static
se_ioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int	cmd;
	caddr_t	data;
{
	register struct ifreq *ifr = (struct ifreq *)data;
	register struct se_state *softp = &se_state[ifp->if_unit];
	spl_t	sipl;

	switch (cmd) {
	case SIOCSIFADDR:
		/*
		 * Set the interface's Internet address.  Tear down any
		 * existing route first (if the interface was already
		 * running), install the new address under the soft-state
		 * lock, then (re)initialize the hardware.
		 */
		sipl = SS_LOCK(softp);
		if (ifp->if_flags & IFF_RUNNING)
			if_rtinit(ifp, -1);

		se_set_addr(ifp, (struct sockaddr_in *)&ifr->ifr_addr);
		SS_UNLOCK(softp, sipl);
		se_init(ifp->if_unit);
		return(0);

	default:

#ifdef	PROMISCUOUS
		/*
		 * In a promiscuous kernel, unknown ioctls are handed to
		 * the promiscuous device driver, if one is configured.
		 */
		if(promiscdev)
			return((*cdevsw[major(promiscdev)].d_ioctl)(ifp, cmd, data));
		else
			return(EINVAL);
#else
		return(EINVAL);
#endif	PROMISCUOUS
	}
}
2361
2362 #else
2363 /*
2364 * MACH/4.3 changed this a bunch.
2365 */
2366 #ifdef MACH_KERNEL
2367 se_getstat(dev, flavor, status, count)
2368 dev_t dev;
2369 int flavor;
2370 dev_status_t status; /* pointer to OUT array */
2371 unsigned int count; /* out */
2372 {
2373 register int unit = minor(dev);
2374 register struct se_state *softp = &se_state[unit];
2375
2376 return (net_getstat(&softp->ss_if, flavor, status, count));
2377 }
2378
/*
 * se_setstat - set device status (Mach device interface).
 *
 * Supports two flavors:
 *   NET_STATUS  - update the receive-mode flags (multicast/promiscuous)
 *   NET_ADDRESS - set a new Ethernet station address
 * Returns D_SUCCESS, or a D_INVALID_* code on bad flavor/size.
 */
se_setstat(dev, flavor, status, count)
	dev_t	dev;
	int	flavor;
	dev_status_t	status;
	unsigned int	count;
{
	register int	unit = minor(dev);
	register struct se_state *softp = &se_state[unit];

	switch (flavor) {
	case NET_STATUS:
	    {
		/*
		 * All we can change are flags, and not many of those.
		 */
		register struct net_status *ns = (struct net_status *)status;
		int	mode = 0;

		if (count < NET_STATUS_COUNT)
		    return (D_INVALID_OPERATION);

		/*
		 * XXX This cannot be right-
		 * the multicast and promiscuous flags
		 * seem to be mutually exclusive!
		 */
		if (ns->flags & IFF_ALLMULTI)
		    mode |= SETHER_MULTICAST;
		if (ns->flags & IFF_PROMISC)
		    mode |= SETHER_PROMISCUOUS;

		/*
		 * Force a complete reset if the receive mode changes
		 * so that these take effect immediately.
		 */
		if (softp->ss_ether_flags != mode) {
		    softp->ss_ether_flags = mode;
		    se_set_modes(softp);
		}
		break;
	    }
	case NET_ADDRESS:
	    {
		/*
		 * New 6-byte station address arrives as two 32-bit
		 * words in network byte order.
		 */
		register union ether_cvt {
		    char	addr[6];
		    int		lwd[2];
		} *ec = (union ether_cvt *)status;

		if (count < sizeof(*ec)/sizeof(int))
		    return (D_INVALID_SIZE);
		/*
		 * NOTE(review): these conversions are done in place,
		 * modifying the caller-supplied status buffer.
		 */
		ec->lwd[0] = ntohl(ec->lwd[0]);
		ec->lwd[1] = ntohl(ec->lwd[1]);

		bcopy((char *)ec->addr, (char *)softp->ss_addr, 6);
		/* Push the new address down to the firmware. */
		se_set_modes(softp);
		break;
	    }

	default:
	    return (D_INVALID_OPERATION);
	}
	return (D_SUCCESS);

}
2443 #else MACH_KERNEL
2444 /*
2445 * se_ioctl
2446 */
2447
static
se_ioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int	cmd;
	caddr_t	data;
{
	register struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splimp(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		/*
		 * 4.3BSD-style address assignment: mark the interface
		 * up, initialize the hardware, then do per-family setup.
		 */
		ifp->if_flags |= IFF_UP;
		se_init(ifp->if_unit);

		switch (ifa->ifa_addr.sa_family) {
#ifdef	INET
		case AF_INET:
			/*
			 * Record our IP address and ARP for it ourselves
			 * (presumably to announce the address on the
			 * wire - verify against arpwhohas semantics).
			 */
			((struct arpcom *)ifp)->ac_ipaddr =
				IA_SIN(ifa)->sin_addr;
			arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
			break;
#endif
#ifdef	NS
		/*
		 * The bare ERROR line below is deliberate: it forces a
		 * compile failure if this driver is ever built with NS,
		 * since the XNS support was never finished.
		 */
		ERROR -- this case not really here yet
		case AF_NS:
		    {
			register struct ns_addr *ina = &(IA_SNS(ifa)->sns_addr);

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)(ds->ds_addr);
			else
				se_setaddr(ina->x_host.c_host,ifp->if_unit);
			break;
		    }

#endif
		default:
			break;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}
2496 #endif MACH_KERNEL
2497 #endif MACH
2498
2499 #ifndef MACH
2500 static
2501 se_set_addr(ifp, sin)
2502 register struct ifnet *ifp;
2503 register struct sockaddr_in *sin;
2504 {
2505 ifp->if_addr = *(struct sockaddr *)sin;
2506 ifp->if_net = in_netof(sin->sin_addr);
2507 ifp->if_host[0] = in_lnaof(sin->sin_addr);
2508 sin = (struct sockaddr_in *)&ifp->if_broadaddr;
2509 sin->sin_family = AF_INET;
2510 sin->sin_addr = if_makeaddr(ifp->if_net, INADDR_ANY);
2511 ifp->if_flags |= IFF_BROADCAST;
2512 }
2513 #endif MACH
2514
2515 /*
2516 * se_watch - watchdog routine, request statistics from board.
2517 *
2518 * The cib's status pointer must have an address that is physical == virtual,
2519 * and must reside within the SEC's 4MB window.
2520 */
2521
static
se_watch(unit)
	int	unit;
{
	register struct se_state *softp = &se_state[unit];
	register struct sec_cib *cib;
#ifdef	MACH_KERNEL
	struct ifnet *ifp = &softp->ss_if;	/* unused under MACH_KERNEL */
#else	MACH_KERNEL
	struct ifnet *ifp = &softp->ss_arp.ac_if;
#endif	MACH_KERNEL
	volatile int *saved_status;
	spl_t	sipl;

	/* Ignore bogus unit numbers (e.g. from a stale timeout). */
	if (unit < 0 || unit > se_max_unit)
		return;

	/*
	 * Temporarily point the cib's status pointer at our get-mode
	 * area so the GETMODE firmware program deposits the statistics
	 * there; the original pointer is restored afterwards.
	 */
	sipl = OS_LOCK(softp);
	cib = softp->os_cib;
	saved_status = cib->cib_status;
	cib->cib_status = KVTOPHYS(&softp->os_gmode, int *);

	if (sec_start_prog(SINST_GETMODE, cib, softp->ss_slic,
			   softp->ss_bin, SDEV_ETHERWRITE, 1)
		!= SEC_ERR_NONE) {
		/* softp-se_state is the unit number of this interface */
		printf("%s%d: se_watch: status 0x%x\n",
			se_driver.sed_name, softp-se_state,
			softp->os_gmode.gm_status);
	}

	cib->cib_status = saved_status;

	/* Lock order: output-state (OS) lock first, then soft-state (SS). */
	(void) SS_LOCK(softp);

	/* Accumulate a firmware counter into the running summary. */
#define INCR(field1, field2) \
	softp->ss_sum.field1 += softp->os_gmode.gm_un.gm_ether.field2

	INCR(ec_rx_ovfl, egm_rx_ovfl);
	INCR(ec_rx_crc, egm_rx_crc);
	INCR(ec_rx_dribbles, egm_rx_dribbles);
	INCR(ec_rx_short, egm_rx_short);
	INCR(ec_rx_good, egm_rx_good);

	INCR(ec_tx_unfl, egm_tx_unfl);
	INCR(ec_tx_coll, egm_tx_coll);
	INCR(ec_tx_16xcoll, egm_tx_16x_coll);
	INCR(ec_tx_good, egm_tx_good);
#undef	INCR

	/* Fold the per-interval firmware counts into the ifnet totals. */
#ifdef	MACH_KERNEL
	softp->ss_if.if_ierrors +=
#else	MACH_KERNEL
	softp->ss_arp.ac_if.if_ierrors +=
#endif	MACH_KERNEL
		softp->os_gmode.gm_un.gm_ether.egm_rx_ovfl
		+ softp->os_gmode.gm_un.gm_ether.egm_rx_crc
		+ softp->os_gmode.gm_un.gm_ether.egm_rx_dribbles;
#ifdef	MACH_KERNEL
	softp->ss_if.if_oerrors +=
#else	MACH_KERNEL
	softp->ss_arp.ac_if.if_oerrors +=
#endif	MACH_KERNEL
		softp->os_gmode.gm_un.gm_ether.egm_tx_unfl;

#ifdef	MACH_KERNEL
	softp->ss_if.if_collisions = softp->ss_sum.ec_tx_coll;
#else	MACH_KERNEL
	softp->ss_arp.ac_if.if_collisions = softp->ss_sum.ec_tx_coll;
#endif	MACH_KERNEL

	/* Rearm: via if_timer (BSD watchdog) or timeout() (MACH_KERNEL). */
#ifdef	MACH_KERNEL
#else	MACH_KERNEL
	ifp->if_timer = softp->ss_scan_int;
#endif	MACH_KERNEL
	SS_UNLOCK(softp, SPLIMP);
	OS_UNLOCK(softp, sipl);

#ifdef	MACH_KERNEL
	/*
	 * NOTE(review): the timeout argument is (char *)0, so the
	 * rescheduled call always runs with unit 0.  Harmless on a
	 * single-unit configuration, but looks wrong for multiple
	 * units - confirm against the timeout() callback convention.
	 */
	timeout(se_watch, (char *)0, softp->ss_scan_int);
#endif	MACH_KERNEL

}
2604
2605
2606
2607 /*
2608 * reset: not necessary on sequent hardware.
2609 */
2610
static
se_reset()
{
	/* Should never be reached: the SEC hardware needs no reset. */
	panic("se_reset");
}
2616
2617
2618
2619 /*
2620 * se_set_modes - set the Ethernet modes based upon the soft state.
2621 *
2622 * Called with all pieces of the state locked.
2623 *
2624 * When we do the SINST_SETMODE, we use the get_mode structure
2625 * in the output state. This is fair as everyone else is locked
2626 * out and the first part of the get_mode structure is a set_mode
2627 * piece.
2628 */
2629
se_set_modes(softp)
	register struct se_state *softp;
{
	register volatile struct sec_ether_smodes *esm;
	register struct sec_cib *cib = softp->os_cib;
	volatile int *saved_status = cib->cib_status;

	/*
	 * Point the cib's status at our get-mode area for the duration
	 * of the SETMODE program; restored before returning.
	 */
	cib->cib_status = KVTOPHYS(&softp->os_gmode, volatile int *);

	/* Fill in the set-mode piece: station address, flags, max size. */
	esm = &softp->os_gmode.gm_un.gm_ether.egm_sm;
#ifdef	MACH_KERNEL
	bcopy((caddr_t)softp->ss_addr, (caddr_t)esm->esm_addr, 6);
#else	MACH_KERNEL
	bcopy((caddr_t)softp->ss_arp.ac_enaddr, (caddr_t)esm->esm_addr, 6);
#endif	MACH_KERNEL
	esm->esm_flags = softp->ss_ether_flags;
#ifdef	MACH_KERNEL
	esm->esm_size = sizeof(struct ether_header) + ETHERMTU;
					/* we receive entire packet at once */
#else	MACH_KERNEL
	esm->esm_size = MLEN;
#endif	MACH_KERNEL

	if (sec_start_prog(SINST_SETMODE, cib, softp->ss_slic,
			   softp->ss_bin, SDEV_ETHERWRITE, 1)
		!= SEC_ERR_NONE) {
		/* softp-se_state is the unit number of this interface */
		printf("%s%d: se_set_mode: status 0x%x\n",
			se_driver.sed_name, softp-se_state,
			softp->os_gmode.gm_status);
	}
	cib->cib_status = saved_status;
}
2662
2663 #ifdef DEBUG
/* Nibble-to-ASCII lookup table used by dump_bytes(). */
static char hex[] = "0123456789abcdef";
2665
2666 dump_mbuf_chain(m)
2667 register struct mbuf *m;
2668 {
2669 register int mcnt;
2670
2671 for (mcnt = 0; m != NULL; mcnt++, m = m->m_next) {
2672 printf("mbuf[%d]:", mcnt);
2673 dump_bytes(mtod(m, char *), m->m_len);
2674 }
2675 }
2676
2677 dump_bytes(cp, len)
2678 register char *cp;
2679 register int len;
2680 {
2681 register int cnt;
2682
2683 for (cnt = 0; cnt < len; cnt++, cp++) {
2684 if ((cnt % 20) == 0)
2685 printf("\n\t");
2686 printf(" %c%c", hex[((int)(*cp) >> 4) & 0xf], hex[(*cp) & 0xf]);
2687 }
2688 printf("\n");
2689 }
2690 #endif DEBUG
Cache object: 35d04c238c0594c69d3557855955fbea
|