FreeBSD/Linux Kernel Cross Reference
sys/dev/i2o/iopl.c
1 /* $NetBSD: iopl.c,v 1.11 2002/10/02 16:33:51 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This is an untested driver for I2O LAN interfaces. It has at least these
41 * issues:
42 *
43 * - Will leak rx/tx descriptors & mbufs on transport failure.
44 * - Doesn't handle token-ring, but that's not a big deal.
45 * - Interrupts run at IPL_BIO.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: iopl.c,v 1.11 2002/10/02 16:33:51 thorpej Exp $");
50
51 #include "opt_i2o.h"
52 #include "opt_inet.h"
53 #include "opt_ns.h"
54 #include "bpfilter.h"
55
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/kernel.h>
59 #include <sys/device.h>
60 #include <sys/endian.h>
61 #include <sys/proc.h>
62 #include <sys/callout.h>
63 #include <sys/socket.h>
64 #include <sys/malloc.h>
65 #include <sys/sockio.h>
66 #include <sys/mbuf.h>
67
68 #include <machine/bus.h>
69
70 #include <uvm/uvm_extern.h>
71
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 #include <net/if_ether.h>
76 #include <net/if_fddi.h>
77 #include <net/if_token.h>
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81
82 #ifdef NS
83 #include <netns/ns.h>
84 #include <netns/ns_if.h>
85 #endif
86
87 #ifdef INET
88 #include <netinet/in.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_inarp.h>
93 #endif
94
95 #include <dev/i2o/i2o.h>
96 #include <dev/i2o/iopio.h>
97 #include <dev/i2o/iopvar.h>
98 #include <dev/i2o/ioplvar.h>
99
100 static void iopl_attach(struct device *, struct device *, void *);
101 static int iopl_match(struct device *, struct cfdata *, void *);
102
103 static void iopl_error(struct iopl_softc *, u_int);
104 static void iopl_getpg(struct iopl_softc *, int);
105 static void iopl_intr_pg(struct device *, struct iop_msg *, void *);
106 static void iopl_intr_evt(struct device *, struct iop_msg *, void *);
107 static void iopl_intr_null(struct device *, struct iop_msg *, void *);
108 static void iopl_intr_rx(struct device *, struct iop_msg *, void *);
109 static void iopl_intr_tx(struct device *, struct iop_msg *, void *);
110 static void iopl_tick(void *);
111 static void iopl_tick_sched(struct iopl_softc *);
112
113 static int iopl_filter_ether(struct iopl_softc *);
114 static int iopl_filter_generic(struct iopl_softc *, u_int64_t *);
115
116 static int iopl_rx_alloc(struct iopl_softc *, int);
117 static void iopl_rx_free(struct iopl_softc *);
118 static void iopl_rx_post(struct iopl_softc *);
119 static int iopl_tx_alloc(struct iopl_softc *, int);
120 static void iopl_tx_free(struct iopl_softc *);
121
122 static int iopl_ifmedia_change(struct ifnet *);
123 static void iopl_ifmedia_status(struct ifnet *, struct ifmediareq *);
124
125 static void iopl_munge_ether(struct mbuf *, u_int8_t *);
126 static void iopl_munge_fddi(struct mbuf *, u_int8_t *);
127
128 static int iopl_init(struct ifnet *);
129 static int iopl_ioctl(struct ifnet *, u_long, caddr_t);
130 static void iopl_start(struct ifnet *);
131 static void iopl_stop(struct ifnet *, int);
132
133 CFATTACH_DECL(iopl, sizeof(struct iopl_softc),
134 iopl_match, iopl_attach, NULL, NULL);
135
136 #ifdef I2OVERBOSE
137 static const char * const iopl_errors[] = {
138 "success",
139 "device failure",
140 "destination not found",
141 "transmit error",
142 "transmit aborted",
143 "receive error",
144 "receive aborted",
145 "DMA error",
146 "bad packet detected",
147 "out of memory",
148 "bucket overrun",
149 "IOP internal error",
150 "cancelled",
151 "invalid transaction context",
152 "destination address detected",
153 "destination address omitted",
154 "partial packet returned",
155 "temporarily suspended",
156 };
157 #endif /* I2OVERBOSE */
158
159 static const struct iopl_media iopl_ether_media[] = {
160 { I2O_LAN_CONNECTION_100BASEVG_ETHERNET, IFM_100_VG },
161 { I2O_LAN_CONNECTION_100BASEVG_TOKEN_RING, IFM_100_VG },
162 { I2O_LAN_CONNECTION_ETHERNET_AUI, IFM_10_5 },
163 { I2O_LAN_CONNECTION_ETHERNET_10BASE5, IFM_10_5 },
164 { I2O_LAN_CONNECTION_ETHERNET_10BASE2, IFM_10_2 },
165 { I2O_LAN_CONNECTION_ETHERNET_10BASET, IFM_10_T },
166 { I2O_LAN_CONNECTION_ETHERNET_10BASEFL, IFM_10_FL },
167 { I2O_LAN_CONNECTION_ETHERNET_100BASETX, IFM_100_TX },
168 { I2O_LAN_CONNECTION_ETHERNET_100BASEFX, IFM_100_FX },
169 { I2O_LAN_CONNECTION_ETHERNET_100BASET4, IFM_100_T4 },
170 { I2O_LAN_CONNECTION_ETHERNET_1000BASESX, IFM_1000_SX },
171 { I2O_LAN_CONNECTION_ETHERNET_1000BASELX, IFM_1000_LX },
172 { I2O_LAN_CONNECTION_ETHERNET_1000BASECX, IFM_1000_CX },
173 { I2O_LAN_CONNECTION_ETHERNET_1000BASET, IFM_1000_T },
174 { I2O_LAN_CONNECTION_DEFAULT, IFM_10_T }
175 };
176
177 static const struct iopl_media iopl_fddi_media[] = {
178 { I2O_LAN_CONNECTION_FDDI_125MBIT, IFM_FDDI_SMF },
179 { I2O_LAN_CONNECTION_DEFAULT, IFM_FDDI_SMF },
180 };
181
182 /*
183 * Match a supported device.
184 */
185 static int
186 iopl_match(struct device *parent, struct cfdata *match, void *aux)
187 {
188
189 return (((struct iop_attach_args *)aux)->ia_class == I2O_CLASS_LAN);
190 }
191
192 /*
193 * Attach a supported device.
194 */
195 static void
196 iopl_attach(struct device *parent, struct device *self, void *aux)
197 {
198 struct iop_attach_args *ia;
199 struct iopl_softc *sc;
200 struct iop_softc *iop;
201 struct ifnet *ifp;
202 int rv, iff, ifcap, orphanlimit, maxpktsize;
203 struct {
204 struct i2o_param_op_results pr;
205 struct i2o_param_read_results prr;
206 union {
207 struct i2o_param_lan_device_info ldi;
208 struct i2o_param_lan_transmit_info ti;
209 struct i2o_param_lan_receive_info ri;
210 struct i2o_param_lan_operation lo;
211 struct i2o_param_lan_batch_control bc;
212 struct i2o_param_lan_mac_address lma;
213 } p;
214 } __attribute__ ((__packed__)) param;
215 const char *typestr, *addrstr;
216 char wwn[20];
217 u_int8_t hwaddr[8];
218 u_int tmp;
219 u_int32_t tmp1, tmp2, tmp3;
220
221 sc = (struct iopl_softc *)self;
222 iop = (struct iop_softc *)parent;
223 ia = (struct iop_attach_args *)aux;
224 ifp = &sc->sc_if.sci_if;
225 sc->sc_tid = ia->ia_tid;
226 sc->sc_dmat = iop->sc_dmat;
227
228 /* Say what the device is. */
229 printf(": LAN interface");
230 iop_print_ident(iop, ia->ia_tid);
231 printf("\n");
232
233 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_DEVICE_INFO,
234 	    &param, sizeof(param), NULL);
235 if (rv != 0)
236 return;
237
238 sc->sc_ms_pg = -1;
239
240 switch (sc->sc_mtype = le16toh(param.p.ldi.lantype)) {
241 case I2O_LAN_TYPE_ETHERNET:
242 typestr = "Ethernet";
243 addrstr = ether_sprintf(param.p.ldi.hwaddr);
244 sc->sc_ms_pg = I2O_PARAM_LAN_802_3_STATS;
245 sc->sc_rx_prepad = 2;
246 sc->sc_munge = iopl_munge_ether;
247 orphanlimit = sizeof(struct ether_header);
248 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
249 break;
250
251 case I2O_LAN_TYPE_100BASEVG:
252 typestr = "100VG-AnyLAN";
253 addrstr = ether_sprintf(param.p.ldi.hwaddr);
254 sc->sc_ms_pg = I2O_PARAM_LAN_802_3_STATS;
255 sc->sc_rx_prepad = 2;
256 sc->sc_munge = iopl_munge_ether;
257 orphanlimit = sizeof(struct ether_header);
258 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
259 break;
260
261 case I2O_LAN_TYPE_FDDI:
262 typestr = "FDDI";
263 addrstr = fddi_sprintf(param.p.ldi.hwaddr);
264 sc->sc_ms_pg = I2O_PARAM_LAN_FDDI_STATS;
265 sc->sc_rx_prepad = 0;
266 sc->sc_munge = iopl_munge_fddi;
267 orphanlimit = sizeof(struct fddi_header);
268 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
269 break;
270
271 case I2O_LAN_TYPE_TOKEN_RING:
272 typestr = "token ring";
273 addrstr = token_sprintf(param.p.ldi.hwaddr);
274 iff = IFF_BROADCAST | IFF_MULTICAST;
275 break;
276
277 case I2O_LAN_TYPE_FIBRECHANNEL:
278 typestr = "fibre channel";
279 addrstr = wwn;
280 sprintf(wwn, "%08x%08x",
281 ((u_int32_t *)param.p.ldi.hwaddr)[0],
282 ((u_int32_t *)param.p.ldi.hwaddr)[1]);
283 iff = IFF_BROADCAST | IFF_MULTICAST;
284 break;
285
286 default:
287 typestr = "unknown medium";
288 addrstr = "unknown";
289 break;
290 }
291
292 memcpy(hwaddr, param.p.ldi.hwaddr, sizeof(hwaddr));
293 printf("%s: %s, address %s, %d Mb/s maximum\n", self->dv_xname,
294 typestr, addrstr,
295 	    (int)(le64toh(param.p.ldi.maxrxbps) / (1000 * 1000)));
296 maxpktsize = le32toh(param.p.ldi.maxpktsize);
297
298 if (sc->sc_ms_pg == -1) {
299 printf("%s: medium not supported\n", self->dv_xname);
300 return;
301 }
302
303 /*
304 * Register our initiators.
305 */
306 sc->sc_ii_pg.ii_dv = self;
307 sc->sc_ii_pg.ii_intr = iopl_intr_pg;
308 sc->sc_ii_pg.ii_flags = 0;
309 sc->sc_ii_pg.ii_tid = ia->ia_tid;
310 iop_initiator_register(iop, &sc->sc_ii_pg);
311
312 sc->sc_ii_evt.ii_dv = self;
313 sc->sc_ii_evt.ii_intr = iopl_intr_evt;
314 sc->sc_ii_evt.ii_flags = II_NOTCTX | II_UTILITY;
315 sc->sc_ii_evt.ii_tid = ia->ia_tid;
316 iop_initiator_register(iop, &sc->sc_ii_evt);
317
318 sc->sc_ii_null.ii_dv = self;
319 sc->sc_ii_null.ii_intr = iopl_intr_null;
320 sc->sc_ii_null.ii_flags = II_NOTCTX | II_UTILITY;
321 sc->sc_ii_null.ii_tid = ia->ia_tid;
322 	iop_initiator_register(iop, &sc->sc_ii_null);
323
324 sc->sc_ii_rx.ii_dv = self;
325 sc->sc_ii_rx.ii_intr = iopl_intr_rx;
326 sc->sc_ii_rx.ii_flags = II_NOTCTX | II_UTILITY;
327 sc->sc_ii_rx.ii_tid = ia->ia_tid;
328 iop_initiator_register(iop, &sc->sc_ii_rx);
329
330 sc->sc_ii_tx.ii_dv = self;
331 sc->sc_ii_tx.ii_intr = iopl_intr_tx;
332 sc->sc_ii_tx.ii_flags = II_NOTCTX | II_UTILITY;
333 sc->sc_ii_tx.ii_tid = ia->ia_tid;
334 iop_initiator_register(iop, &sc->sc_ii_tx);
335
336 /*
337 	 * Determine some of the capabilities of the interface - in
338 * particular, the maximum number of segments per S/G list, and how
339 * much buffer context we'll need to transmit frames (some adapters
340 * may need the destination address in the buffer context).
341 */
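	/*
	 * Concretely (see iopl_start() below): with a 96-bit buffer
	 * context, each transmit SGL entry begins { flags | size,
	 * ident, DA bytes 0-3, DA bytes 4-5 | pad } before the page
	 * addresses, versus just { flags | size, ident } for the
	 * 32-bit form - hence the 1 + 1 + 2 and 1 + 1 word overheads
	 * computed below.
	 */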
342 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_TRANSMIT_INFO,
343 	    &param, sizeof(param), NULL);
344 	if (rv != 0)
345 		return;
346
347 tmp = le32toh(param.p.ti.txmodes);
348
349 if ((param.p.ti.txmodes & I2O_LAN_MODES_NO_DA_IN_SGL) == 0)
350 sc->sc_tx_ohead = 1 + 1 + 2;
351 else
352 sc->sc_tx_ohead = 1 + 1;
353
354 ifcap = 0;
355
356 if (((le32toh(iop->sc_status.segnumber) >> 12) & 15) ==
357 I2O_VERSION_20) {
358 if ((tmp & I2O_LAN_MODES_IPV4_CHECKSUM) != 0)
359 ifcap |= IFCAP_CSUM_IPv4;
360 if ((tmp & I2O_LAN_MODES_TCP_CHECKSUM) != 0)
361 ifcap |= IFCAP_CSUM_TCPv4;
362 if ((tmp & I2O_LAN_MODES_UDP_CHECKSUM) != 0)
363 ifcap |= IFCAP_CSUM_UDPv4;
364 #ifdef notyet
365 if ((tmp & I2O_LAN_MODES_ICMP_CHECKSUM) != 0)
366 ifcap |= IFCAP_CSUM_ICMP;
367 #endif
368 }
369
370 sc->sc_tx_maxsegs =
371 min(le32toh(param.p.ti.maxpktsg), IOPL_MAX_SEGS);
372 sc->sc_tx_maxout = le32toh(param.p.ti.maxpktsout);
373 sc->sc_tx_maxreq = le32toh(param.p.ti.maxpktsreq);
374
375 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_RECEIVE_INFO,
376 	    &param, sizeof(param), NULL);
377 if (rv != 0)
378 return;
379
380 sc->sc_rx_maxbkt = le32toh(param.p.ri.maxbuckets);
381
382 #ifdef I2ODEBUG
383 if (sc->sc_tx_maxsegs == 0)
384 panic("%s: sc->sc_tx_maxsegs == 0", self->dv_xname);
385 if (sc->sc_tx_maxout == 0)
386 		panic("%s: sc->sc_tx_maxout == 0", self->dv_xname);
387 if (sc->sc_tx_maxreq == 0)
388 		panic("%s: sc->sc_tx_maxreq == 0", self->dv_xname);
389 if (sc->sc_rx_maxbkt == 0)
390 panic("%s: sc->sc_rx_maxbkt == 0", self->dv_xname);
391 #endif
392
393 /*
394 * Set the pre-padding and "orphan" limits. This is to ensure that
395 * for received packets, the L3 payload will be aligned on a 32-bit
396 * boundary, and the L2 header won't be split between buckets.
397 *
398 * While here, enable error reporting for transmits. We're not
399 * interested in most errors (e.g. excessive collisions), but others
400 * are of more concern.
401 */
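	/*
	 * For Ethernet, e.g., the 2 byte pre-pad plus the 14 byte
	 * header leaves the IP header 32-bit aligned, while an orphan
	 * limit of sizeof(struct ether_header) keeps the header itself
	 * within the first bucket.
	 */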
402 tmp1 = htole32(sc->sc_rx_prepad);
403 tmp2 = htole32(orphanlimit);
404 tmp3 = htole32(1); /* XXX */
405
406 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
407 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_OPERATION_pktprepad))
408 return;
409 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
410 &tmp2, sizeof(tmp2), I2O_PARAM_LAN_OPERATION_pktorphanlimit))
411 return;
412 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
413 &tmp3, sizeof(tmp3), I2O_PARAM_LAN_OPERATION_userflags))
414 return;
415
416 /*
417 * Set the batching parameters.
418 */
419 #if IOPL_BATCHING_ENABLED
420 /* Select automatic batching, and specify the maximum packet count. */
421 tmp1 = htole32(0);
422 tmp2 = htole32(IOPL_MAX_BATCH);
423 tmp3 = htole32(IOPL_MAX_BATCH);
424 #else
425 /* Force batching off. */
426 tmp1 = htole32(1); /* XXX */
427 tmp2 = htole32(1);
428 tmp3 = htole32(1);
429 #endif
430 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
431 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_BATCH_CONTROL_batchflags))
432 return;
433 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
434 &tmp2, sizeof(tmp2), I2O_PARAM_LAN_BATCH_CONTROL_maxrxbatchcount))
435 return;
436 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
437 &tmp3, sizeof(tmp3), I2O_PARAM_LAN_BATCH_CONTROL_maxtxbatchcount))
438 return;
439
440 /*
441 * Get multicast parameters.
442 */
443 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_MAC_ADDRESS,
444 	    &param, sizeof(param), NULL);
445 if (rv != 0)
446 return;
447
448 sc->sc_mcast_max = le32toh(param.p.lma.maxmcastaddr);
449 sc->sc_mcast_max = min(IOPL_MAX_MULTI, sc->sc_mcast_max);
450
451 /*
452 * Allocate transmit and receive descriptors.
453 */
454 if (iopl_tx_alloc(sc, IOPL_DESCRIPTORS)) {
455 printf("%s: unable to allocate transmit descriptors\n",
456 sc->sc_dv.dv_xname);
457 return;
458 }
459 if (iopl_rx_alloc(sc, IOPL_DESCRIPTORS)) {
460 printf("%s: unable to allocate receive descriptors\n",
461 sc->sc_dv.dv_xname);
462 return;
463 }
464
465 /*
466 * Claim the device so that we don't get any nasty surprises. Allow
467 * failure.
468 */
469 iop_util_claim(iop, &sc->sc_ii_evt, 0,
470 I2O_UTIL_CLAIM_NO_PEER_SERVICE |
471 I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
472 I2O_UTIL_CLAIM_PRIMARY_USER);
473
474 /*
475 * Attach the interface.
476 */
477 memcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
478 ifp->if_softc = sc;
479 ifp->if_flags = iff;
480 ifp->if_capabilities = ifcap;
481 ifp->if_ioctl = iopl_ioctl;
482 ifp->if_start = iopl_start;
483 ifp->if_stop = iopl_stop;
484 ifp->if_init = iopl_init;
485 IFQ_SET_READY(&ifp->if_snd);
486
487 if_attach(ifp);
488
489 switch (sc->sc_mtype) {
490 case I2O_LAN_TYPE_ETHERNET:
491 case I2O_LAN_TYPE_100BASEVG:
492 /* Can we handle 802.1Q encapsulated frames? */
493 if (maxpktsize >= ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
494 sc->sc_if.sci_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
495
496 ether_ifattach(ifp, (u_char *)hwaddr);
497 break;
498
499 case I2O_LAN_TYPE_FDDI:
500 fddi_ifattach(ifp, (u_char *)hwaddr);
501 break;
502 }
503
504 ifmedia_init(&sc->sc_ifmedia, 0, iopl_ifmedia_change,
505 iopl_ifmedia_status);
506 }
507
508 /*
509 * Allocate the specified number of TX descriptors.
510 */
511 static int
512 iopl_tx_alloc(struct iopl_softc *sc, int count)
513 {
514 struct iopl_tx *tx;
515 int i, size, rv;
516
517 if (count > sc->sc_tx_maxout)
518 count = sc->sc_tx_maxout;
519
520 #ifdef I2ODEBUG
521 printf("%s: %d TX descriptors\n", sc->sc_dv.dv_xname, count);
522 #endif
523
524 size = count * sizeof(*tx);
525 	sc->sc_tx = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tx == NULL)
		return (ENOMEM);
526
527 for (i = 0, tx = sc->sc_tx; i < count; i++, tx++) {
528 rv = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
529 sc->sc_tx_maxsegs, MCLBYTES, 0,
530 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
531 &tx->tx_dmamap);
532 if (rv != 0) {
533 iopl_tx_free(sc);
534 return (rv);
535 }
536
537 tx->tx_ident = i;
538 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
539 sc->sc_tx_freecnt++;
540 }
541
542 return (0);
543 }
544
545 /*
546 * Free all TX descriptors.
547 */
548 static void
549 iopl_tx_free(struct iopl_softc *sc)
550 {
551 struct iopl_tx *tx;
552
553 while ((tx = SLIST_FIRST(&sc->sc_tx_free)) != NULL) {
554 SLIST_REMOVE_HEAD(&sc->sc_tx_free, tx_chain);
555 bus_dmamap_destroy(sc->sc_dmat, tx->tx_dmamap);
556 }
557
558 free(sc->sc_tx, M_DEVBUF);
559 sc->sc_tx = NULL;
560 sc->sc_tx_freecnt = 0;
561 }
562
563 /*
564 * Allocate the specified number of RX buckets and descriptors.
565 */
566 static int
567 iopl_rx_alloc(struct iopl_softc *sc, int count)
568 {
569 struct iopl_rx *rx;
570 struct mbuf *m;
571 int i, size, rv, state;
572
573 if (count > sc->sc_rx_maxbkt)
574 count = sc->sc_rx_maxbkt;
575
576 #ifdef I2ODEBUG
577 printf("%s: %d RX descriptors\n", sc->sc_dv.dv_xname, count);
578 #endif
579
580 size = count * sizeof(*rx);
581 	sc->sc_rx = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_rx == NULL)
		return (ENOBUFS);
582
583 for (i = 0, rx = sc->sc_rx; i < count; i++, rx++) {
584 state = 0;
585
586 MGETHDR(m, M_DONTWAIT, MT_DATA);
587 if (m == NULL) {
588 rv = ENOBUFS;
589 goto bad;
590 }
591
592 state++;
593
594 MCLGET(m, M_DONTWAIT);
595 if ((m->m_flags & M_EXT) == 0) {
596 			/* Don't free m here; "bad" does so while state > 0. */
597 rv = ENOBUFS;
598 goto bad;
599 }
600
601 rv = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
602 sc->sc_tx_maxsegs, PAGE_SIZE, 0,
603 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &rx->rx_dmamap);
604 if (rv != 0)
605 goto bad;
606
607 state++;
608
609 rv = bus_dmamap_load_mbuf(sc->sc_dmat, rx->rx_dmamap, m,
610 BUS_DMA_READ | BUS_DMA_NOWAIT);
611 if (rv != 0)
612 goto bad;
613
614 		rx->rx_ident = i;
		rx->rx_mbuf = m;
615 SLIST_INSERT_HEAD(&sc->sc_rx_free, rx, rx_chain);
616 sc->sc_rx_freecnt++;
617 }
618 	return (0);
619 bad:
620 if (state > 1)
621 bus_dmamap_destroy(sc->sc_dmat, rx->rx_dmamap);
622 if (state > 0)
623 m_freem(m);
624
625 iopl_rx_free(sc);
626 return (rv);
627 }
628
629 /*
630 * Free all RX buckets and descriptors.
631 */
632 static void
633 iopl_rx_free(struct iopl_softc *sc)
634 {
635 struct iopl_rx *rx;
636
637 while ((rx = SLIST_FIRST(&sc->sc_rx_free)) != NULL) {
638 SLIST_REMOVE_HEAD(&sc->sc_rx_free, rx_chain);
639 bus_dmamap_destroy(sc->sc_dmat, rx->rx_dmamap);
640 m_freem(rx->rx_mbuf);
641 }
642
643 free(sc->sc_rx, M_DEVBUF);
644 sc->sc_rx = NULL;
645 sc->sc_rx_freecnt = 0;
646 }
647
648 /*
649 * Post all free RX buckets to the device.
650 */
651 static void
652 iopl_rx_post(struct iopl_softc *sc)
653 {
654 struct i2o_lan_receive_post *mf;
655 struct iopl_rx *rx;
656 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sp, *p, *ep, *lp;
657 bus_dmamap_t dm;
658 bus_dma_segment_t *ds;
659 bus_addr_t saddr, eaddr;
660 u_int i, slen, tlen;
661
662 mf = (struct i2o_lan_receive_post *)mb;
663 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_RECEIVE_POST);
664 mf->msgictx = sc->sc_ii_rx.ii_ictx;
665
666 ep = mb + (sizeof(mb) >> 2);
667 sp = (u_int32_t *)(mf + 1);
668
669 while (sc->sc_rx_freecnt != 0) {
670 mf->msgflags = I2O_MSGFLAGS(i2o_lan_receive_post);
671 mf->bktcnt = 0;
672 p = sp;
673
674 /*
675 * Remove RX descriptors from the list, sync their DMA maps,
676 * and add their buckets to the scatter/gather list for
677 * posting.
678 */
679 for (;;) {
680 rx = SLIST_FIRST(&sc->sc_rx_free);
681 SLIST_REMOVE_HEAD(&sc->sc_rx_free, rx_chain);
682 dm = rx->rx_dmamap;
683
684 bus_dmamap_sync(sc->sc_dmat, dm, 0, dm->dm_mapsize,
685 BUS_DMASYNC_PREREAD);
686
687 lp = p;
688 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
689 I2O_SGL_END_BUFFER | I2O_SGL_BC_32BIT;
690 *p++ = rx->rx_ident;
691
692 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--) {
693 slen = ds->ds_len;
694 saddr = ds->ds_addr;
695 ds++;
696
697 /*
698 * XXX This should be done with a bus_space
699 * flag.
700 */
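				/*
				 * E.g. with a 4 KB page size, a 6000 byte
				 * segment at 0x10f00 is emitted as the page
				 * fragments 0x10f00 (256 bytes), 0x11000
				 * (4096) and 0x12000 (1648).
				 */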
701 while (slen > 0) {
702 eaddr = (saddr + PAGE_SIZE) &
703 ~(PAGE_SIZE - 1);
704 tlen = min(eaddr - saddr, slen);
705 slen -= tlen;
706 					*p++ = htole32(saddr);
707 saddr = eaddr;
708 }
709 }
710
711 			if (--sc->sc_rx_freecnt <= 0)
712 				break;
713 			if (p + 2 + sc->sc_tx_maxsegs >= ep)
714 				break;
715 }
716
717 /*
718 * Terminate the scatter/gather list and fix up the message
719 * frame size and free RX descriptor count.
720 */
721 *lp |= I2O_SGL_END;
722 mb[0] += ((p - sp) << 16);
723
724 /*
725 * Finally, post the message frame to the device.
726 */
727 iop_post((struct iop_softc *)sc->sc_dv.dv_parent, mb);
728 }
729 }
730
731 /*
732 * Handle completion of periodic parameter group retrievals.
733 */
734 static void
735 iopl_intr_pg(struct device *dv, struct iop_msg *im, void *reply)
736 {
737 struct i2o_param_lan_stats *ls;
738 struct i2o_param_lan_802_3_stats *les;
739 struct i2o_param_lan_media_operation *lmo;
740 struct iopl_softc *sc;
741 struct iop_softc *iop;
742 struct ifnet *ifp;
743 struct i2o_reply *rb;
744 int pg;
745
746 rb = (struct i2o_reply *)reply;
747 sc = (struct iopl_softc *)dv;
748 iop = (struct iop_softc *)dv->dv_parent;
749 ifp = &sc->sc_if.sci_if;
750
751 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
752 iopl_tick_sched(sc);
753 return;
754 }
755
756 iop_msg_unmap(iop, im);
757 pg = le16toh(((struct iop_pgop *)im->im_dvcontext)->oat.group);
758 free(im->im_dvcontext, M_DEVBUF);
759 iop_msg_free(iop, im);
760
761 switch (pg) {
762 case I2O_PARAM_LAN_MEDIA_OPERATION:
763 lmo = &sc->sc_pb.p.lmo;
764
765 sc->sc_curmbps =
766 (int)(le64toh(lmo->currxbps) / (1000 * 1000));
767 sc->sc_conntype = le32toh(lmo->connectiontype);
768
769 if (lmo->linkstatus) {
770 /* Necessary only for initialisation. */
771 sc->sc_flags |= IOPL_LINK;
772 }
773
774 /* Chain the next retrieval. */
775 sc->sc_next_pg = I2O_PARAM_LAN_STATS;
776 break;
777
778 case I2O_PARAM_LAN_STATS:
779 ls = &sc->sc_pb.p.ls;
780
781 /* XXX Not all of these stats may be supported. */
782 ifp->if_ipackets = le64toh(ls->ipackets);
783 ifp->if_opackets = le64toh(ls->opackets);
784 ifp->if_ierrors = le64toh(ls->ierrors);
785 ifp->if_oerrors = le64toh(ls->oerrors);
786
787 /* Chain the next retrieval. */
788 sc->sc_next_pg = sc->sc_ms_pg;
789 break;
790
791 case I2O_PARAM_LAN_802_3_STATS:
792 les = &sc->sc_pb.p.les;
793
794 /*
795 * This isn't particularly meaningful: the sum of the number
796 		 * of packets that encountered a single collision and the
797 * number of packets that encountered multiple collisions.
798 *
799 * XXX Not all of these stats may be supported.
800 */
801 ifp->if_collisions = le64toh(les->onecollision) +
802 le64toh(les->manycollisions);
803
804 sc->sc_next_pg = -1;
805 break;
806
807 case I2O_PARAM_LAN_FDDI_STATS:
808 sc->sc_next_pg = -1;
809 break;
810 }
811
812 iopl_tick_sched(sc);
813 }
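
/*
 * The retrievals above cycle MEDIA_OPERATION -> STATS -> medium-specific
 * statistics -> idle; iopl_tick_sched() restarts the cycle, beginning
 * with MEDIA_OPERATION again after a media change event.
 */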
814
815 /*
816 * Handle an event signalled by the interface.
817 */
818 static void
819 iopl_intr_evt(struct device *dv, struct iop_msg *im, void *reply)
820 {
821 struct i2o_util_event_register_reply *rb;
822 struct iopl_softc *sc;
823 u_int event;
824
825 rb = (struct i2o_util_event_register_reply *)reply;
826
827 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
828 return;
829
830 sc = (struct iopl_softc *)dv;
831 event = le32toh(rb->event);
832
833 switch (event) {
834 case I2O_EVENT_LAN_MEDIA_CHANGE:
835 sc->sc_flags |= IOPL_MEDIA_CHANGE;
836 break;
837 case I2O_EVENT_LAN_LINK_UP:
838 sc->sc_flags |= IOPL_LINK;
839 break;
840 case I2O_EVENT_LAN_LINK_DOWN:
841 sc->sc_flags &= ~IOPL_LINK;
842 break;
843 default:
844 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
845 break;
846 }
847 }
848
849 /*
850 * Bit-bucket initiator: ignore interrupts signaled by the interface.
851 */
852 static void
853 iopl_intr_null(struct device *dv, struct iop_msg *im, void *reply)
854 {
855
856 }
857
858 /*
859 * Handle a receive interrupt.
860 */
861 static void
862 iopl_intr_rx(struct device *dv, struct iop_msg *im, void *reply)
863 {
864 struct i2o_lan_receive_reply *rb;
865 struct iopl_softc *sc;
866 struct iopl_rx *rx;
867 struct ifnet *ifp;
868 struct mbuf *m, *m0;
869 u_int32_t *p;
870 int off, err, flg, first, lastpkt, lastbkt, rv;
871 int len, i, pkt, pktlen[IOPL_MAX_BATCH], csumflgs[IOPL_MAX_BATCH];
872 struct mbuf *head[IOPL_MAX_BATCH], *tail[IOPL_MAX_BATCH];
873
874 rb = (struct i2o_lan_receive_reply *)reply;
875 sc = (struct iopl_softc *)dv;
876 ifp = &sc->sc_if.sci_if;
877 p = (u_int32_t *)(rb + 1);
878
879 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
880 /* XXX We leak if we get here. */
881 return;
882 }
883
884 memset(head, 0, sizeof(head));
885 memset(pktlen, 0, sizeof(pktlen));
886 memset(csumflgs, 0, sizeof(csumflgs));
887
888 /*
889 * Scan through the transaction reply list. The TRL takes this
890 * form:
891 *
892 * 32-bits Bucket context
893 * 32-bits 1st packet offset (high 8-bits are control flags)
894 * 32-bits 1st packet length (high 8-bits are error status)
895 * 32-bits 2nd packet offset
896 * 32-bits 2nd packet length
897 * ...
898 * 32-bits Nth packet offset
899 * 32-bits Nth packet length
900 * ...
901 * 32-bits Bucket context
902 * 32-bits 1st packet offset
903 * 32-bits 1st packet length
904 * ...
905 */
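	/*
	 * For example, a reply covering two buckets, the first holding
	 * one complete packet and the second two, would carry an
	 * eight-word TRL: { ctx0, off, len, ctx1, off, len, off, len },
	 * with the control flags and error status packed into the high
	 * bytes of the offset and length words as described above.
	 */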
906 for (lastbkt = 0; !lastbkt;) {
907 /*
908 * Return the RX descriptor for this bucket back to the free
909 * list.
910 */
911 rx = &sc->sc_rx[*p++];
912 SLIST_INSERT_HEAD(&sc->sc_rx_free, rx, rx_chain);
913 sc->sc_rx_freecnt++;
914
915 /*
916 * Sync the bucket's DMA map.
917 */
918 bus_dmamap_sync(sc->sc_dmat, rx->rx_dmamap, 0,
919 rx->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
920
921 /*
922 * If this is a valid receive, go through the PDB entries
923 * and re-assemble all the packet fragments that we find.
924 * Otherwise, just free up the buckets that we had posted -
925 * we have probably received this reply because the
926 * interface has been reset or suspended.
927 */
928 if ((rb->trlflags & I2O_LAN_RECEIVE_REPLY_PDB) == 0) {
929 lastbkt = (--rb->trlcount == 0);
930 continue;
931 }
932
933 m = rx->rx_mbuf;
934
935 for (lastpkt = 0, first = 1, pkt = 0; !lastpkt; pkt++) {
936 off = p[0] & 0x00ffffff;
937 len = p[1] & 0x00ffffff;
938 flg = p[0] >> 24;
939 err = p[1] >> 24;
940 p += 2;
941
942 #ifdef I2ODEBUG
943 if (pkt >= IOPL_MAX_BATCH)
944 panic("iopl_intr_rx: too many packets");
945 #endif
946 /*
947 * Break out at the right spot later on if this is
948 * the last packet in this bucket, or the last
949 * bucket.
950 */
951 if ((flg & 0x40) == 0x40) /* XXX */
952 lastpkt = 1;
953 if ((flg & 0xc8) == 0xc0) /* XXX */
954 lastbkt = 1;
955
956 /*
957 * Skip dummy PDB entries.
958 */
959 if ((flg & 0x07) == 0x02) /* XXX */
960 continue;
961
962 /*
963 * If the packet was received with errors, then
964 * arrange to dump it. We allow bad L3 and L4
965 * checksums through for accounting purposes.
966 */
967 if (pktlen[pkt] == -1)
968 continue;
969 if ((off & 0x03) == 0x01) { /* XXX */
970 pktlen[pkt] = -1;
971 continue;
972 }
973 if ((err & I2O_LAN_PDB_ERROR_CKSUM_MASK) != 0) {
974 if ((err & I2O_LAN_PDB_ERROR_L3_CKSUM_BAD) != 0)
975 csumflgs[pkt] |= M_CSUM_IPv4_BAD;
976 if ((err & I2O_LAN_PDB_ERROR_L4_CKSUM_BAD) != 0)
977 csumflgs[pkt] |= M_CSUM_TCP_UDP_BAD;
978 err &= ~I2O_LAN_PDB_ERROR_CKSUM_MASK;
979 }
980 if (err != I2O_LAN_PDB_ERROR_NONE) {
981 pktlen[pkt] = -1;
982 continue;
983 }
984
985 if (len <= (MHLEN - sc->sc_rx_prepad)) {
986 /*
987 * The fragment is small enough to fit in a
988 * single header mbuf - allocate one and
989 * copy the data into it. This greatly
990 * reduces memory consumption when we
991 * receive lots of small packets.
992 */
993 				MGETHDR(m0, M_DONTWAIT, MT_DATA);
994 				if (m0 == NULL) {
995 					ifp->if_ierrors++;
996 					pktlen[pkt] = -1;
997 					continue;
998 				}
999 				m0->m_data += sc->sc_rx_prepad;
1000 				m_copydata(m, off, len, mtod(m0, caddr_t));
1001 				off = 0;
1002 } else if (!first) {
1003 /*
1004 * The bucket contains multiple fragments
1005 * (each from a different packet). Allocate
1006 * an mbuf header and add a reference to the
1007 * storage from the bucket's mbuf.
1008 */
1009 				m0 = m_copym(m, off, len, M_DONTWAIT);
				if (m0 == NULL) {
					pktlen[pkt] = -1;
					continue;
				}
1010 				off = 0;
1011 } else {
1012 /*
1013 * This is the first "large" packet in the
1014 * bucket. Allocate replacement mbuf
1015 * storage. If we fail, drop the packet and
1016 * continue.
1017 */
1018 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1019 if (m0 == NULL) {
1020 pktlen[pkt] = -1;
1021 continue;
1022 }
1023
1024 MCLGET(m0, M_DONTWAIT);
1025 if ((m0->m_flags & M_EXT) == 0) {
1026 pktlen[pkt] = -1;
1027 m_freem(m0);
1028 continue;
1029 }
1030
1031 /*
1032 * If we can't load the new mbuf, then drop
1033 * the bucket from the RX list. XXX Ouch.
1034 */
1035 bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
1036 rv = bus_dmamap_load_mbuf(sc->sc_dmat,
1037 rx->rx_dmamap, m0,
1038 BUS_DMA_READ | BUS_DMA_NOWAIT);
1039 if (rv != 0) {
1040 printf("%s: unable to load mbuf (%d),"
1041 " discarding bucket\n",
1042 sc->sc_dv.dv_xname, rv);
1043 SLIST_REMOVE_HEAD(&sc->sc_rx_free,
1044 rx_chain);
1045 sc->sc_rx_freecnt--;
1046 }
1047
1048 rx->rx_mbuf = m0;
1049 m0 = m;
1050 first = 0;
1051 }
1052
1053 /*
1054 * Fix up the mbuf header, and append the mbuf to
1055 * the chain for this packet.
1056 */
1057 m0->m_len = len;
1058 m0->m_data += off;
1059 if (head[pkt] != NULL)
1060 tail[pkt]->m_next = m0;
1061 else
1062 head[pkt] = m0;
1063 tail[pkt] = m0;
1064 pktlen[pkt] += len;
1065 }
1066 }
1067
1068 /*
1069 * Pass each received packet on.
1070 */
1071 for (i = 0; i < IOPL_MAX_BATCH; i++) {
1072 if ((m = head[i]) == NULL)
1073 continue;
1074
1075 /*
1076 * If the packet was received with errors, we dump it here.
1077 */
1078 if ((len = pktlen[i]) < 0) {
1079 m_freem(m);
1080 continue;
1081 }
1082
1083 /*
1084 * Otherwise, fix up the header, feed a copy to BPF, and
1085 * then pass it on up.
1086 */
1087 m->m_flags |= M_HASFCS;
1088 m->m_pkthdr.rcvif = ifp;
1089 m->m_pkthdr.len = len;
1090 		m->m_pkthdr.csum_flags = csumflgs[i] | sc->sc_rx_csumflgs;
1091
1092 #if NBPFILTER > 0
1093 if (ifp->if_bpf)
1094 bpf_mtap(ifp->if_bpf, m);
1095 #endif /* NBPFILTER > 0 */
1096
1097 (*ifp->if_input)(ifp, m);
1098 }
1099
1100 /*
1101 * Re-post the buckets back to the interface, and try to send more
1102 * packets.
1103 */
1104 iopl_rx_post(sc);
1105 iopl_start(&sc->sc_if.sci_if);
1106 }
1107
1108 /*
1109 * Handle a transmit interrupt.
1110 */
1111 static void
1112 iopl_intr_tx(struct device *dv, struct iop_msg *im, void *reply)
1113 {
1114 struct i2o_lan_send_reply *rb;
1115 struct iopl_softc *sc;
1116 struct iopl_tx *tx;
1117 struct ifnet *ifp;
1118 int i, bktcnt;
1119
1120 sc = (struct iopl_softc *)dv;
1121 rb = (struct i2o_lan_send_reply *)reply;
1122 	ifp = &sc->sc_if.sci_if;
1123 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1124 /* XXX We leak if we get here. */
1125 return;
1126 }
1127
1128 if (rb->reqstatus != I2O_STATUS_SUCCESS)
1129 iopl_error(sc, le16toh(rb->detail));
1130
1131 /*
1132 * For each packet that has been transmitted, unload the DMA map,
1133 * free the source mbuf, and then release the transmit descriptor
1134 * back to the pool.
1135 */
1136 bktcnt = (le32toh(rb->msgflags) >> 16) - (sizeof(*rb) >> 2);
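	/*
	 * The high 16 bits of msgflags hold the reply frame size in
	 * 32-bit words; subtracting the fixed reply header leaves the
	 * number of transaction contexts, apparently less one (assuming
	 * sizeof(*rb) already counts a single tctx slot, which would
	 * explain the inclusive loop bound below).
	 */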
1137
1138 for (i = 0; i <= bktcnt; i++) {
1139 tx = &sc->sc_tx[rb->tctx[i]];
1140
1141 bus_dmamap_sync(sc->sc_dmat, tx->tx_dmamap, 0,
1142 tx->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1143 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
1144
1145 m_freem(tx->tx_mbuf);
1146
1147 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
1148 sc->sc_tx_freecnt++;
1149 }
1150
1151 /*
1152 * Try to send more packets.
1153 */
1154 ifp->if_flags &= ~IFF_OACTIVE;
1155 iopl_start(&sc->sc_if.sci_if);
1156 }
1157
1158 /*
1159 * Describe an error code returned by the adapter.
1160 */
1161 static void
1162 iopl_error(struct iopl_softc *sc, u_int dsc)
1163 {
1164 #ifdef I2OVERBOSE
1165 const char *errstr;
1166 #endif
1167
1168 switch (dsc) {
1169 case I2O_LAN_DSC_RECEIVE_ERROR:
1170 case I2O_LAN_DSC_RECEIVE_ABORTED:
1171 case I2O_LAN_DSC_TRANSMIT_ERROR:
1172 case I2O_LAN_DSC_TRANSMIT_ABORTED:
1173 case I2O_LAN_DSC_TEMP_SUSPENDED_STATE: /* ??? */
1174 break;
1175
1176 default:
1177 #ifdef I2OVERBOSE
1178 		if (dsc >= sizeof(iopl_errors) / sizeof(iopl_errors[0]))
1179 errstr = "<unknown>";
1180 else
1181 errstr = iopl_errors[dsc];
1182 printf("%s: error 0x%04x: %s\n", sc->sc_dv.dv_xname, dsc,
1183 errstr);
1184 #else
1185 printf("%s: error 0x%04x\n", sc->sc_dv.dv_xname, dsc);
1186 #endif
1187 break;
1188 }
1189 }
1190
1191 /*
1192 * Retrieve the next scheduled parameter group from the interface. Called
1193 * periodically.
1194 */
1195 static void
1196 iopl_tick(void *cookie)
1197 {
1198 struct iopl_softc *sc;
1199
1200 sc = cookie;
1201
1202 iopl_getpg(sc, sc->sc_next_pg);
1203 }
1204
1205 /*
1206 * Schedule the next PG retrieval.
1207 */
1208 static void
1209 iopl_tick_sched(struct iopl_softc *sc)
1210 {
1211 int s;
1212
1213 if (sc->sc_next_pg == -1) {
1214 s = splbio();
1215 if ((sc->sc_flags & IOPL_MEDIA_CHANGE) != 0) {
1216 sc->sc_next_pg = I2O_PARAM_LAN_MEDIA_OPERATION;
1217 sc->sc_flags &= ~IOPL_MEDIA_CHANGE;
1218 } else
1219 sc->sc_next_pg = I2O_PARAM_LAN_STATS;
1220 splx(s);
1221 }
1222
1223 callout_reset(&sc->sc_pg_callout, hz / IOPL_TICK_HZ, iopl_tick, sc);
1224 }
1225
1226 /*
1227 * Request the specified parameter group from the interface, to be delivered
1228 * to the PG initiator.
1229 */
1230 static void
1231 iopl_getpg(struct iopl_softc *sc, int pg)
1232 {
1233
1234 iop_field_get_all((struct iop_softc *)sc->sc_dv.dv_parent, sc->sc_tid,
1235 pg, &sc->sc_pb, sizeof(sc->sc_pb), &sc->sc_ii_pg);
1236 }
1237
1238 /*
1239 * Report on current media status.
1240 */
1241 static void
1242 iopl_ifmedia_status(struct ifnet *ifp, struct ifmediareq *req)
1243 {
1244 const struct iopl_media *ilm;
1245 struct iopl_softc *sc;
1246 int s, conntype;
1247
1248 sc = ifp->if_softc;
1249
1250 s = splbio();
1251 conntype = sc->sc_conntype;
1252 splx(s);
1253
1254 req->ifm_status = IFM_AVALID;
1255 if ((sc->sc_flags & IOPL_LINK) != 0)
1256 req->ifm_status |= IFM_ACTIVE;
1257
1258 switch (sc->sc_mtype) {
1259 case I2O_LAN_TYPE_100BASEVG:
1260 case I2O_LAN_TYPE_ETHERNET:
1261 ilm = iopl_ether_media;
1262 req->ifm_active = IFM_ETHER;
1263 break;
1264
1265 case I2O_LAN_TYPE_FDDI:
1266 ilm = iopl_fddi_media;
1267 req->ifm_active = IFM_FDDI;
1268 break;
1269 }
1270
1271 for (; ilm->ilm_i2o != I2O_LAN_CONNECTION_DEFAULT; ilm++)
1272 if (ilm->ilm_i2o == conntype)
1273 break;
1274 req->ifm_active |= ilm->ilm_ifmedia;
1275
1276 if (ilm->ilm_i2o == I2O_LAN_CONNECTION_DEFAULT)
1277 printf("%s: unknown connection type 0x%08x; defaulting\n",
1278 sc->sc_dv.dv_xname, conntype);
1279 }
1280
1281 /*
1282 * Change media parameters.
1283 */
1284 static int
1285 iopl_ifmedia_change(struct ifnet *ifp)
1286 {
1287 struct iop_softc *iop;
1288 struct iopl_softc *sc;
1289 const struct iopl_media *ilm;
1290 u_int subtype;
1291 u_int32_t ciontype;
1292 u_int8_t fdx;
1293
1294 sc = ifp->if_softc;
1295 iop = (struct iop_softc *)sc->sc_dv.dv_parent;
1296
1297 subtype = IFM_SUBTYPE(sc->sc_ifmedia.ifm_cur->ifm_media);
1298 if (subtype == IFM_AUTO)
1299 ciontype = I2O_LAN_CONNECTION_DEFAULT;
1300 else {
1301 switch (sc->sc_mtype) {
1302 case I2O_LAN_TYPE_100BASEVG:
1303 case I2O_LAN_TYPE_ETHERNET:
1304 ilm = iopl_ether_media;
1305 break;
1306
1307 case I2O_LAN_TYPE_FDDI:
1308 ilm = iopl_fddi_media;
1309 break;
1310 }
1311
1312 for (; ilm->ilm_i2o != I2O_LAN_CONNECTION_DEFAULT; ilm++)
1313 if (ilm->ilm_ifmedia == subtype)
1314 break;
1315 if (ilm->ilm_i2o == I2O_LAN_CONNECTION_DEFAULT)
1316 return (EINVAL);
1317 		ciontype = htole32(ilm->ilm_i2o);
1318 }
1319
1320 if ((sc->sc_ifmedia.ifm_cur->ifm_media & IFM_FDX) != 0)
1321 fdx = 1;
1322 else if ((sc->sc_ifmedia.ifm_cur->ifm_media & IFM_HDX) != 0)
1323 fdx = 0;
1324 else {
1325 /*
1326 * XXX Not defined as auto-detect, but as "default".
1327 */
1328 fdx = 0xff;
1329 }
1330
1331 /*
1332 * XXX Can we set all these independently? Will omitting the
1333 * connector type screw us up?
1334 */
1335 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1336 &ciontype, sizeof(ciontype),
1337 I2O_PARAM_LAN_MEDIA_OPERATION_connectiontarget);
1338 #if 0
1339 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1340 &certype, sizeof(certype),
1341 I2O_PARAM_LAN_MEDIA_OPERATION_connectertarget);
1342 #endif
1343 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1344 &fdx, sizeof(fdx),
1345 I2O_PARAM_LAN_MEDIA_OPERATION_duplextarget);
1346
1347 ifp->if_baudrate = ifmedia_baudrate(sc->sc_ifmedia.ifm_cur->ifm_media);
1348 return (0);
1349 }
1350
1351 /*
1352 * Initialize the interface.
1353 */
1354 static int
1355 iopl_init(struct ifnet *ifp)
1356 {
1357 struct i2o_lan_reset mf;
1358 struct iopl_softc *sc;
1359 struct iop_softc *iop;
1360 int rv, s, flg;
1361 u_int8_t hwaddr[8];
1362 u_int32_t txmode, rxmode;
1363
1364 sc = ifp->if_softc;
1365 iop = (struct iop_softc *)sc->sc_dv.dv_parent;
1366
1367 s = splbio();
1368 flg = sc->sc_flags;
1369 splx(s);
1370
1371 if ((flg & IOPL_INITTED) == 0) {
1372 /*
1373 * Reset the interface hardware.
1374 */
1375 mf.msgflags = I2O_MSGFLAGS(i2o_lan_reset);
1376 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_RESET);
1377 mf.msgictx = sc->sc_ii_null.ii_ictx;
1378 mf.reserved = 0;
1379 mf.resrcflags = 0;
1380 iop_post(iop, (u_int32_t *)&mf);
1381 DELAY(5000);
1382
1383 /*
1384 * Register to receive events from the device.
1385 */
1386 if (iop_util_eventreg(iop, &sc->sc_ii_evt, 0xffffffff))
1387 printf("%s: unable to register for events\n",
1388 sc->sc_dv.dv_xname);
1389
1390 /*
1391 * Trigger periodic parameter group retrievals.
1392 */
1393 s = splbio();
1394 sc->sc_flags |= (IOPL_MEDIA_CHANGE | IOPL_INITTED);
1395 splx(s);
1396
1397 callout_init(&sc->sc_pg_callout);
1398
1399 sc->sc_next_pg = -1;
1400 iopl_tick_sched(sc);
1401 }
1402
1403 /*
1404 * Enable or disable hardware checksumming.
1405 */
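	/*
	 * The TCW bits ask the IOP to insert checksums on transmit,
	 * while sc_rx_csumflgs is ORed into each received packet's
	 * csum_flags; iopl_intr_rx() adds the matching _BAD bits when a
	 * PDB entry reports a checksum error.
	 */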
1406 s = splbio();
1407 #ifdef IOPL_ENABLE_BATCHING
1408 sc->sc_tx_tcw = I2O_LAN_TCW_REPLY_BATCH;
1409 #else
1410 sc->sc_tx_tcw = I2O_LAN_TCW_REPLY_IMMEDIATELY;
1411 #endif
1412 sc->sc_rx_csumflgs = 0;
1413 rxmode = 0;
1414 txmode = 0;
1415
1416 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) != 0) {
1417 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_NETWORK;
1418 sc->sc_rx_csumflgs |= M_CSUM_IPv4;
1419 txmode |= I2O_LAN_MODES_IPV4_CHECKSUM;
1420 rxmode |= I2O_LAN_MODES_IPV4_CHECKSUM;
1421 }
1422
1423 if ((ifp->if_capenable & IFCAP_CSUM_TCPv4) != 0) {
1424 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_TRANSPORT;
1425 sc->sc_rx_csumflgs |= M_CSUM_TCPv4;
1426 txmode |= I2O_LAN_MODES_TCP_CHECKSUM;
1427 rxmode |= I2O_LAN_MODES_TCP_CHECKSUM;
1428 }
1429
1430 if ((ifp->if_capenable & IFCAP_CSUM_UDPv4) != 0) {
1431 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_TRANSPORT;
1432 sc->sc_rx_csumflgs |= M_CSUM_UDPv4;
1433 txmode |= I2O_LAN_MODES_UDP_CHECKSUM;
1434 		rxmode |= I2O_LAN_MODES_UDP_CHECKSUM;
1435 }
1436
1437 splx(s);
1438
1439 	/* We always want a copy of the FCS on received frames. */
1440 rxmode |= I2O_LAN_MODES_FCS_RECEPTION;
1441 rxmode = htole32(rxmode);
1442 txmode = htole32(txmode);
1443
1444 rv = iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_OPERATION,
1445 &txmode, sizeof(txmode), I2O_PARAM_LAN_OPERATION_txmodesenable);
1446 if (rv == 0)
1447 rv = iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_OPERATION,
1448 		    &rxmode, sizeof(rxmode),
1449 I2O_PARAM_LAN_OPERATION_rxmodesenable);
1450 if (rv != 0)
1451 return (rv);
1452
1453 /*
1454 * Try to set the active MAC address.
1455 */
1456 memset(hwaddr, 0, sizeof(hwaddr));
1457 memcpy(hwaddr, LLADDR(ifp->if_sadl), ifp->if_addrlen);
1458 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MAC_ADDRESS,
1459 hwaddr, sizeof(hwaddr), I2O_PARAM_LAN_MAC_ADDRESS_localaddr);
1460
1461 ifp->if_flags = (ifp->if_flags | IFF_RUNNING) & ~IFF_OACTIVE;
1462
1463 /*
1464 * Program the receive filter.
1465 */
1466 switch (sc->sc_mtype) {
1467 case I2O_LAN_TYPE_ETHERNET:
1468 case I2O_LAN_TYPE_100BASEVG:
1469 case I2O_LAN_TYPE_FDDI:
1470 iopl_filter_ether(sc);
1471 break;
1472 }
1473
1474 /*
1475 * Post any free receive buckets to the interface.
1476 */
1477 s = splbio();
1478 iopl_rx_post(sc);
1479 splx(s);
1480 return (0);
1481 }
1482
1483 /*
1484 * Stop the interface.
1485 */
1486 static void
1487 iopl_stop(struct ifnet *ifp, int disable)
1488 {
1489 struct i2o_lan_suspend mf;
1490 struct iopl_softc *sc;
1491 struct iop_softc *iop;
1492 int flg, s;
1493
1494 sc = ifp->if_softc;
1495 	iop = (struct iop_softc *)sc->sc_dv.dv_parent;
1496
1497 s = splbio();
1498 flg = sc->sc_flags;
1499 splx(s);
1500
1501 if ((flg & IOPL_INITTED) != 0) {
1502 /*
1503 * Block reception of events from the device.
1504 */
1505 if (iop_util_eventreg(iop, &sc->sc_ii_evt, 0))
1506 			printf("%s: unable to block events\n",
1507 sc->sc_dv.dv_xname);
1508
1509 /*
1510 		 * Stop parameter group retrieval.
1511 */
1512 callout_stop(&sc->sc_pg_callout);
1513
1514 s = splbio();
1515 sc->sc_flags &= ~IOPL_INITTED;
1516 splx(s);
1517 }
1518
1519 /*
1520 * If requested, suspend the interface.
1521 */
1522 if (disable) {
1523 mf.msgflags = I2O_MSGFLAGS(i2o_lan_suspend);
1524 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_SUSPEND);
1525 mf.msgictx = sc->sc_ii_null.ii_ictx;
1526 mf.reserved = 0;
1527 mf.resrcflags = I2O_LAN_RESRC_RETURN_BUCKETS |
1528 I2O_LAN_RESRC_RETURN_XMITS;
1529 iop_post(iop, (u_int32_t *)&mf);
1530 }
1531
1532 ifp->if_timer = 0;
1533 ifp->if_flags &= ~IFF_RUNNING;
1534 }
1535
1536 /*
1537 * Start output on the interface.
1538 */
1539 static void
1540 iopl_start(struct ifnet *ifp)
1541 {
1542 struct iopl_softc *sc;
1543 struct iop_softc *iop;
1544 struct i2o_lan_packet_send *mf;
1545 struct iopl_tx *tx;
1546 struct mbuf *m;
1547 bus_dmamap_t dm;
1548 bus_dma_segment_t *ds;
1549 bus_addr_t saddr, eaddr;
1550 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *p, *lp;
1551 u_int rv, i, slen, tlen, size;
1552 int frameleft, nxmits;
1553 SLIST_HEAD(,iopl_tx) pending;
1554
1555 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1556 return;
1557
1558 sc = (struct iopl_softc *)ifp->if_softc;
1559 iop = (struct iop_softc *)sc->sc_dv.dv_parent;
1560 mf = (struct i2o_lan_packet_send *)mb;
1561 frameleft = -1;
1562 nxmits = 0;
1563 SLIST_INIT(&pending);
1564
1565 /*
1566 * Set static fields in the message frame header.
1567 */
1568 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_PACKET_SEND);
1569 	mf->msgictx = sc->sc_ii_tx.ii_ictx;
1570 mf->tcw = sc->sc_tx_tcw;
1571
1572 for (;;) {
1573 /*
1574 * Grab a packet to send and a transmit descriptor for it.
1575 * If we don't get both, then bail out.
1576 */
1577 if ((tx = SLIST_FIRST(&sc->sc_tx_free)) == NULL) {
1578 ifp->if_flags |= IFF_OACTIVE;
1579 break;
1580 }
1581 IFQ_DEQUEUE(&ifp->if_snd, m);
1582 if (m == NULL)
1583 break;
1584
1585 /*
1586 * Load the mbuf into the descriptor's DMA map. If we fail,
1587 * drop the packet on the floor and get out.
1588 */
1589 dm = tx->tx_dmamap;
1590 rv = bus_dmamap_load_mbuf(sc->sc_dmat, dm, m,
1591 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1592 		if (rv != 0) {
1593 printf("%s: unable to load TX buffer; error = %d\n",
1594 sc->sc_dv.dv_xname, rv);
1595 m_freem(m);
1596 break;
1597 }
1598 bus_dmamap_sync(sc->sc_dmat, dm, 0, dm->dm_mapsize,
1599 BUS_DMASYNC_PREWRITE);
1600
1601 /*
1602 * Now that the transmit descriptor has resources allocated
1603 * to it, remove it from the free list and add it to the
1604 * pending list.
1605 */
1606 SLIST_REMOVE_HEAD(&sc->sc_tx_free, tx_chain);
1607 SLIST_INSERT_HEAD(&pending, tx, tx_chain);
1608 		sc->sc_tx_freecnt--;
		tx->tx_mbuf = m;
1609
1610 /*
1611 * Determine whether we can cram this transmit into an
1612 * existing message frame (if any), or whether we need to
1613 * send a new one.
1614 */
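		/*
		 * "size" is the worst-case footprint of this packet's
		 * SGL in 32-bit words (buffer context overhead plus one
		 * word per page fragment); UINT_MAX forces a new frame.
		 */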
1615 #if IOPL_BATCHING_ENABLED
1616 if (nxmits >= sc->sc_tx_maxreq)
1617 size = UINT_MAX;
1618 else
1619 size = sc->sc_tx_ohead + sc->sc_tx_maxsegs;
1620 #else
1621 size = UINT_MAX;
1622 #endif
1623
1624 		if (frameleft < 0 || size > (u_int)frameleft) {
1625 if (frameleft >= 0) {
1626 /*
1627 * We have an old message frame to flush.
1628 * Clear the pending list if we send it
1629 * successfully.
1630 */
1631 				*lp |= I2O_SGL_END;
				mb[0] += ((p - (u_int32_t *)(mf + 1)) << 16);
1632 				if (iop_post(iop, mb) == 0)
1633 					SLIST_INIT(&pending);
1634 }
1635
1636 /*
1637 * Prepare a new message frame.
1638 */
1639 mf->msgflags = I2O_MSGFLAGS(i2o_lan_packet_send);
1640 p = (u_int32_t *)(mf + 1);
1641 frameleft = (sizeof(mb) - sizeof(*mf)) >> 2;
1642 nxmits = 0;
1643 }
1644
1645 /*
1646 * Fill the scatter/gather list. The interface may have
1647 * requested that the destination address be passed as part
1648 * of the buffer context.
1649 */
1650 lp = p;
1651
1652 if (sc->sc_tx_ohead > 2) {
1653 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
1654 I2O_SGL_BC_96BIT | I2O_SGL_END_BUFFER;
1655 *p++ = tx->tx_ident;
1656 (*sc->sc_munge)(m, (u_int8_t *)p);
1657 p += 2;
1658 } else {
1659 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
1660 I2O_SGL_BC_32BIT | I2O_SGL_END_BUFFER;
1661 *p++ = tx->tx_ident;
1662 }
1663
1664 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1665 slen = ds->ds_len;
1666 saddr = ds->ds_addr;
1667
1668 /* XXX This should be done with a bus_space flag. */
1669 while (slen > 0) {
1670 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1671 tlen = min(eaddr - saddr, slen);
1672 slen -= tlen;
1673 				*p++ = htole32(saddr);
1674 saddr = eaddr;
1675 }
1676 }
1677
1678 frameleft -= (p - lp);
1679 nxmits++;
1680
1681 #if NBPFILTER > 0
1682 /*
1683 * If BPF is enabled on this interface, feed it a copy of
1684 * the packet.
1685 */
1686 if (ifp->if_bpf)
1687 bpf_mtap(ifp->if_bpf, m);
1688 #endif
1689 }
1690
1691 /*
1692 * Flush any waiting message frame. If it's sent successfully, then
1693 * return straight away.
1694 */
1695 if (frameleft >= 0) {
1696 		*lp |= I2O_SGL_END;
		mb[0] += ((p - (u_int32_t *)(mf + 1)) << 16);
1697 if (iop_post(iop, mb) == 0)
1698 return;
1699 }
1700
1701 /*
1702 * Free resources for transmits that failed.
1703 */
1704 while ((tx = SLIST_FIRST(&pending)) != NULL) {
1705 SLIST_REMOVE_HEAD(&pending, tx_chain);
1706 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
1707 sc->sc_tx_freecnt++;
1708 bus_dmamap_sync(sc->sc_dmat, tx->tx_dmamap, 0,
1709 tx->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1710 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
1711 m_freem(tx->tx_mbuf);
1712 }
1713 }
1714
1715 /*
1716 * Munge an Ethernet address into buffer context.
1717 */
1718 static void
1719 iopl_munge_ether(struct mbuf *m, u_int8_t *dp)
1720 {
1721 struct ether_header *eh;
1722 u_int8_t *sp;
1723 int i;
1724
1725 eh = mtod(m, struct ether_header *);
1726 sp = (u_int8_t *)eh->ether_dhost;
1727 for (i = ETHER_ADDR_LEN; i > 0; i--)
1728 *dp++ = *sp++;
1729 *dp++ = 0;
1730 *dp++ = 0;
1731 }
1732
1733 /*
1734 * Munge an FDDI address into buffer context.
1735 */
1736 static void
1737 iopl_munge_fddi(struct mbuf *m, u_int8_t *dp)
1738 {
1739 struct fddi_header *fh;
1740 u_int8_t *sp;
1741 int i;
1742
1743 fh = mtod(m, struct fddi_header *);
1744 sp = (u_int8_t *)fh->fddi_dhost;
1745 for (i = 6; i > 0; i--)
1746 *dp++ = *sp++;
1747 *dp++ = 0;
1748 *dp++ = 0;
1749 }
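
/*
 * Both munge routines emit the 6 byte destination address padded to 8
 * bytes - the two extra words of the 96-bit buffer context built in
 * iopl_start() when sc_tx_ohead > 2.
 */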
1750
1751 /*
1752 * Program the receive filter for an Ethernet interface.
1753 */
1754 static int
1755 iopl_filter_ether(struct iopl_softc *sc)
1756 {
1757 struct ifnet *ifp;
1758 struct ethercom *ec;
1759 struct ether_multi *enm;
1760 u_int64_t *tbl;
1761 int i, rv, size;
1762 struct ether_multistep step;
1763
1764 ec = &sc->sc_if.sci_ec;
1765 ifp = &ec->ec_if;
1766
1767 /*
1768 * If there are more multicast addresses than will fit into the
1769 * filter table, or we fail to allocate memory for the table, then
1770 * enable reception of all multicast packets.
1771 */
1772 if (ec->ec_multicnt > sc->sc_mcast_max)
1773 goto allmulti;
1774
1775 size = sizeof(*tbl) * sc->sc_mcast_max;
1776 if ((tbl = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
1777 goto allmulti;
1778
1779 ETHER_FIRST_MULTI(step, ec, enm)
1780 for (i = 0; enm != NULL; i++) {
1781 /*
1782 * For the moment, if a range of multicast addresses was
1783 * specified, then just accept all multicast packets.
1784 */
1785 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1786 free(tbl, M_DEVBUF);
1787 goto allmulti;
1788 }
1789
1790 /*
1791 * Add the address to the table.
1792 */
1793 memset(&tbl[i], 0, sizeof(tbl[i]));
1794 memcpy(&tbl[i], enm->enm_addrlo, ETHER_ADDR_LEN);
1795
1796 ETHER_NEXT_MULTI(step, enm);
1797 }
1798
1799 sc->sc_mcast_cnt = i;
1800 ifp->if_flags &= ~IFF_ALLMULTI;
1801 rv = iopl_filter_generic(sc, tbl);
1802 free(tbl, M_DEVBUF);
1803 	return (rv);
1804
1805 allmulti:
1806 sc->sc_mcast_cnt = 0;
1807 ifp->if_flags |= IFF_ALLMULTI;
1808 return (iopl_filter_generic(sc, NULL));
1809 }
1810
1811 /*
1812 * Generic receive filter programming.
1813 */
1814 static int
1815 iopl_filter_generic(struct iopl_softc *sc, u_int64_t *tbl)
1816 {
1817 struct iop_softc *iop;
1818 struct ifnet *ifp;
1819 int i, rv;
1820 u_int32_t tmp1;
1821
1822 ifp = &sc->sc_if.sci_if;
1823 iop = (struct iop_softc *)sc->sc_dv.dv_parent;
1824
1825 /*
1826 * Clear out the existing multicast table and set in the new one, if
1827 * any.
1828 */
1829 if (sc->sc_mcast_max != 0) {
1830 iop_table_clear(iop, sc->sc_tid,
1831 I2O_PARAM_LAN_MCAST_MAC_ADDRESS);
1832
1833 for (i = 0; i < sc->sc_mcast_cnt; i++) {
1834 rv = iop_table_add_row(iop, sc->sc_tid,
1835 I2O_PARAM_LAN_MCAST_MAC_ADDRESS,
1836 &tbl[i], sizeof(tbl[i]), i);
1837 if (rv != 0) {
1838 ifp->if_flags |= IFF_ALLMULTI;
1839 break;
1840 }
1841 }
1842 }
1843
1844 /*
1845 * Set the filter mask.
1846 */
1847 if ((ifp->if_flags & IFF_PROMISC) != 0)
1848 tmp1 = I2O_LAN_FILTERMASK_PROMISC_ENABLE;
1849 else {
1850 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1851 tmp1 = I2O_LAN_FILTERMASK_PROMISC_MCAST_ENABLE;
1852 else
1853 tmp1 = 0;
1854
1855 if ((ifp->if_flags & IFF_BROADCAST) == 0)
1856 tmp1 |= I2O_LAN_FILTERMASK_BROADCAST_DISABLE;
1857 }
1858 tmp1 = htole32(tmp1);
1859
1860 return (iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MAC_ADDRESS,
1861 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_MAC_ADDRESS_filtermask));
1862 }
1863
1864 /*
1865 * Handle control operations.
1866 */
1867 static int
1868 iopl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1869 {
1870 struct iopl_softc *sc;
1871 struct ifaddr *ifa;
1872 struct ifreq *ifr;
1873 int s, rv;
1874 #ifdef NS
1875 struct ns_addr *ina;
1876 #endif
1877
1878 ifr = (struct ifreq *)data;
1879 sc = ifp->if_softc;
1880 s = splnet();
1881 rv = 0;
1882
1883 switch (cmd) {
1884 case SIOCSIFMEDIA:
1885 case SIOCGIFMEDIA:
1886 rv = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
1887 goto out;
1888 }
1889
1890 switch (sc->sc_mtype) {
1891 case I2O_LAN_TYPE_ETHERNET:
1892 case I2O_LAN_TYPE_100BASEVG:
1893 rv = ether_ioctl(ifp, cmd, data);
1894 if (rv == ENETRESET) {
1895 /*
1896 * Flags and/or multicast list has changed; need to
1897 * set the hardware filter accordingly.
1898 */
1899 rv = iopl_filter_ether(sc);
1900 }
1901 break;
1902
1903 case I2O_LAN_TYPE_FDDI:
1904 /*
1905 * XXX This should be shared.
1906 */
1907 switch (cmd) {
1908 case SIOCSIFADDR:
1909 ifa = (struct ifaddr *)data;
1910 ifp->if_flags |= IFF_UP;
1911
1912 switch (ifa->ifa_addr->sa_family) {
1913 #if defined(INET)
1914 case AF_INET:
1915 iopl_init(ifp);
1916 arp_ifinit(ifp, ifa);
1917 break;
1918 #endif /* INET */
1919
1920 #if defined(NS)
1921 case AF_NS:
1922 ina = &(IA_SNS(ifa)->sns_addr);
1923 if (ns_nullhost(*ina))
1924 ina->x_host = *(union ns_host *)
1925 LLADDR(ifp->if_sadl);
1926 else {
1927 ifp->if_flags &= ~IFF_RUNNING;
1928 memcpy(LLADDR(ifp->if_sadl),
1929 ina->x_host.c_host, 6);
1930 }
1931 iopl_init(ifp);
1932 break;
1933 #endif /* NS */
1934 default:
1935 iopl_init(ifp);
1936 break;
1937 }
1938 break;
1939
1940 case SIOCGIFADDR:
1941 ifr = (struct ifreq *)data;
1942 memcpy(((struct sockaddr *)&ifr->ifr_data)->sa_data,
1943 LLADDR(ifp->if_sadl), 6);
1944 break;
1945
1946 case SIOCSIFFLAGS:
1947 iopl_init(ifp);
1948 break;
1949
1950 case SIOCADDMULTI:
1951 case SIOCDELMULTI:
1952 ifr = (struct ifreq *)data;
1953 if (cmd == SIOCADDMULTI)
1954 rv = ether_addmulti(ifr, &sc->sc_if.sci_ec);
1955 else
1956 rv = ether_delmulti(ifr, &sc->sc_if.sci_ec);
1957 if (rv == ENETRESET &&
1958 (ifp->if_flags & IFF_RUNNING) != 0)
1959 rv = iopl_filter_ether(sc);
1960 break;
1961
1962 case SIOCSIFMTU:
1963 ifr = (struct ifreq *)data;
1964 if (ifr->ifr_mtu > FDDIMTU) {
1965 rv = EINVAL;
1966 break;
1967 }
1968 ifp->if_mtu = ifr->ifr_mtu;
1969 break;
1970
1971 default:
1972 rv = ENOTTY;
1973 break;
1974 }
1975 }
1976
1977 out:
1978 splx(s);
1979 return (rv);
1980 }