FreeBSD/Linux Kernel Cross Reference
sys/dev/i2o/iopl.c
1 /* $NetBSD: iopl.c,v 1.22 2006/09/07 02:40:32 dogcow Exp $ */
2
3 /*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This is an untested driver for I2O LAN interfaces. It has at least these
41 * issues:
42 *
43 * - Will leak rx/tx descriptors & mbufs on transport failure.
44 * - Doesn't handle token-ring, but that's not a big deal.
45 * - Interrupts run at IPL_BIO.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: iopl.c,v 1.22 2006/09/07 02:40:32 dogcow Exp $");
50
51 #include "opt_i2o.h"
52 #include "opt_inet.h"
53 #include "bpfilter.h"
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/kernel.h>
58 #include <sys/device.h>
59 #include <sys/endian.h>
60 #include <sys/proc.h>
61 #include <sys/callout.h>
62 #include <sys/socket.h>
63 #include <sys/malloc.h>
64 #include <sys/sockio.h>
65 #include <sys/mbuf.h>
66
67 #include <machine/bus.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <net/if.h>
72 #include <net/if_dl.h>
73 #include <net/if_media.h>
74 #include <net/if_ether.h>
75 #include <net/if_fddi.h>
76 #include <net/if_token.h>
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80
81
82 #ifdef INET
83 #include <netinet/in.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/in_var.h>
86 #include <netinet/ip.h>
87 #include <netinet/if_inarp.h>
88 #endif
89
90 #include <dev/i2o/i2o.h>
91 #include <dev/i2o/iopio.h>
92 #include <dev/i2o/iopvar.h>
93 #include <dev/i2o/ioplvar.h>
94
95 static void iopl_attach(struct device *, struct device *, void *);
96 static int iopl_match(struct device *, struct cfdata *, void *);
97
98 static void iopl_error(struct iopl_softc *, u_int);
99 static void iopl_getpg(struct iopl_softc *, int);
100 static void iopl_intr_pg(struct device *, struct iop_msg *, void *);
101 static void iopl_intr_evt(struct device *, struct iop_msg *, void *);
102 static void iopl_intr_null(struct device *, struct iop_msg *, void *);
103 static void iopl_intr_rx(struct device *, struct iop_msg *, void *);
104 static void iopl_intr_tx(struct device *, struct iop_msg *, void *);
105 static void iopl_tick(void *);
106 static void iopl_tick_sched(struct iopl_softc *);
107
108 static int iopl_filter_ether(struct iopl_softc *);
109 static int iopl_filter_generic(struct iopl_softc *, u_int64_t *);
110
111 static int iopl_rx_alloc(struct iopl_softc *, int);
112 static void iopl_rx_free(struct iopl_softc *);
113 static void iopl_rx_post(struct iopl_softc *);
114 static int iopl_tx_alloc(struct iopl_softc *, int);
115 static void iopl_tx_free(struct iopl_softc *);
116
117 static int iopl_ifmedia_change(struct ifnet *);
118 static void iopl_ifmedia_status(struct ifnet *, struct ifmediareq *);
119
120 static void iopl_munge_ether(struct mbuf *, u_int8_t *);
121 static void iopl_munge_fddi(struct mbuf *, u_int8_t *);
122
123 static int iopl_init(struct ifnet *);
124 static int iopl_ioctl(struct ifnet *, u_long, caddr_t);
125 static void iopl_start(struct ifnet *);
126 static void iopl_stop(struct ifnet *, int);
127
128 CFATTACH_DECL(iopl, sizeof(struct iopl_softc),
129 iopl_match, iopl_attach, NULL, NULL);
130
131 #ifdef I2OVERBOSE
132 static const char * const iopl_errors[] = {
133 "success",
134 "device failure",
135 "destination not found",
136 "transmit error",
137 "transmit aborted",
138 "receive error",
139 "receive aborted",
140 "DMA error",
141 "bad packet detected",
142 "out of memory",
143 "bucket overrun",
144 "IOP internal error",
145 "cancelled",
146 "invalid transaction context",
147 "destination address detected",
148 "destination address omitted",
149 "partial packet returned",
150 "temporarily suspended",
151 };
152 #endif /* I2OVERBOSE */
153
154 static const struct iopl_media iopl_ether_media[] = {
155 { I2O_LAN_CONNECTION_100BASEVG_ETHERNET, IFM_100_VG },
156 { I2O_LAN_CONNECTION_100BASEVG_TOKEN_RING, IFM_100_VG },
157 { I2O_LAN_CONNECTION_ETHERNET_AUI, IFM_10_5 },
158 { I2O_LAN_CONNECTION_ETHERNET_10BASE5, IFM_10_5 },
159 { I2O_LAN_CONNECTION_ETHERNET_10BASE2, IFM_10_2 },
160 { I2O_LAN_CONNECTION_ETHERNET_10BASET, IFM_10_T },
161 { I2O_LAN_CONNECTION_ETHERNET_10BASEFL, IFM_10_FL },
162 { I2O_LAN_CONNECTION_ETHERNET_100BASETX, IFM_100_TX },
163 { I2O_LAN_CONNECTION_ETHERNET_100BASEFX, IFM_100_FX },
164 { I2O_LAN_CONNECTION_ETHERNET_100BASET4, IFM_100_T4 },
165 { I2O_LAN_CONNECTION_ETHERNET_1000BASESX, IFM_1000_SX },
166 { I2O_LAN_CONNECTION_ETHERNET_1000BASELX, IFM_1000_LX },
167 { I2O_LAN_CONNECTION_ETHERNET_1000BASECX, IFM_1000_CX },
168 { I2O_LAN_CONNECTION_ETHERNET_1000BASET, IFM_1000_T },
169 { I2O_LAN_CONNECTION_DEFAULT, IFM_10_T }
170 };
171
172 static const struct iopl_media iopl_fddi_media[] = {
173 { I2O_LAN_CONNECTION_FDDI_125MBIT, IFM_FDDI_SMF },
174 { I2O_LAN_CONNECTION_DEFAULT, IFM_FDDI_SMF },
175 };
176
177 /*
178 * Match a supported device.
179 */
180 static int
181 iopl_match(struct device *parent, struct cfdata *match, void *aux)
182 {
183
184 return (((struct iop_attach_args *)aux)->ia_class == I2O_CLASS_LAN);
185 }
186
187 /*
188 * Attach a supported device.
189 */
190 static void
191 iopl_attach(struct device *parent, struct device *self, void *aux)
192 {
193 struct iop_attach_args *ia;
194 struct iopl_softc *sc;
195 struct iop_softc *iop;
196 struct ifnet *ifp;
197 int rv, iff, ifcap, orphanlimit, maxpktsize;
198 struct {
199 struct i2o_param_op_results pr;
200 struct i2o_param_read_results prr;
201 union {
202 struct i2o_param_lan_device_info ldi;
203 struct i2o_param_lan_transmit_info ti;
204 struct i2o_param_lan_receive_info ri;
205 struct i2o_param_lan_operation lo;
206 struct i2o_param_lan_batch_control bc;
207 struct i2o_param_lan_mac_address lma;
208 } p;
209 } __attribute__ ((__packed__)) param;
210 const char *typestr, *addrstr;
211 char wwn[20];
212 u_int8_t hwaddr[8];
213 u_int tmp;
214 u_int32_t tmp1, tmp2, tmp3;
215
216 sc = device_private(self);
217 iop = (struct iop_softc *)parent;
218 ia = (struct iop_attach_args *)aux;
219 ifp = &sc->sc_if.sci_if;
220 sc->sc_tid = ia->ia_tid;
221 sc->sc_dmat = iop->sc_dmat;
222
223 /* Say what the device is. */
224 printf(": LAN interface");
225 iop_print_ident(iop, ia->ia_tid);
226 printf("\n");
227
228 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_DEVICE_INFO,
 229 	    &param, sizeof(param), NULL);
230 if (rv != 0)
231 return;
232
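	/* Assume no medium-specific statistics PG until the type is known. */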
233 sc->sc_ms_pg = -1;
234
235 switch (sc->sc_mtype = le16toh(param.p.ldi.lantype)) {
236 case I2O_LAN_TYPE_ETHERNET:
237 typestr = "Ethernet";
238 addrstr = ether_sprintf(param.p.ldi.hwaddr);
239 sc->sc_ms_pg = I2O_PARAM_LAN_802_3_STATS;
240 sc->sc_rx_prepad = 2;
241 sc->sc_munge = iopl_munge_ether;
242 orphanlimit = sizeof(struct ether_header);
243 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
244 break;
245
246 case I2O_LAN_TYPE_100BASEVG:
247 typestr = "100VG-AnyLAN";
248 addrstr = ether_sprintf(param.p.ldi.hwaddr);
249 sc->sc_ms_pg = I2O_PARAM_LAN_802_3_STATS;
250 sc->sc_rx_prepad = 2;
251 sc->sc_munge = iopl_munge_ether;
252 orphanlimit = sizeof(struct ether_header);
253 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
254 break;
255
256 case I2O_LAN_TYPE_FDDI:
257 typestr = "FDDI";
258 addrstr = fddi_sprintf(param.p.ldi.hwaddr);
259 sc->sc_ms_pg = I2O_PARAM_LAN_FDDI_STATS;
260 sc->sc_rx_prepad = 0;
261 sc->sc_munge = iopl_munge_fddi;
262 orphanlimit = sizeof(struct fddi_header);
263 iff = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
264 break;
265
266 case I2O_LAN_TYPE_TOKEN_RING:
267 typestr = "token ring";
268 addrstr = token_sprintf(param.p.ldi.hwaddr);
269 iff = IFF_BROADCAST | IFF_MULTICAST;
270 break;
271
272 case I2O_LAN_TYPE_FIBRECHANNEL:
273 typestr = "fibre channel";
274 addrstr = wwn;
275 snprintf(wwn, sizeof(wwn), "%08x%08x",
276 ((u_int32_t *)param.p.ldi.hwaddr)[0],
277 ((u_int32_t *)param.p.ldi.hwaddr)[1]);
278 iff = IFF_BROADCAST | IFF_MULTICAST;
279 break;
280
281 default:
282 typestr = "unknown medium";
283 addrstr = "unknown";
284 break;
285 }
286
287 memcpy(hwaddr, param.p.ldi.hwaddr, sizeof(hwaddr));
288 printf("%s: %s, address %s, %d Mb/s maximum\n", self->dv_xname,
289 typestr, addrstr,
 290 	    (int)(le64toh(param.p.ldi.maxrxbps) / (1000 * 1000)));
291 maxpktsize = le32toh(param.p.ldi.maxpktsize);
292
293 if (sc->sc_ms_pg == -1) {
294 printf("%s: medium not supported\n", self->dv_xname);
295 return;
296 }
297
298 /*
299 * Register our initiators.
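	 * The PG initiator receives parameter group retrievals, the event
	 * initiator receives event notifications, the null initiator
	 * swallows replies that we ignore, and the RX and TX initiators
	 * handle receive and transmit completions.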
300 */
301 sc->sc_ii_pg.ii_dv = self;
302 sc->sc_ii_pg.ii_intr = iopl_intr_pg;
303 sc->sc_ii_pg.ii_flags = 0;
304 sc->sc_ii_pg.ii_tid = ia->ia_tid;
305 iop_initiator_register(iop, &sc->sc_ii_pg);
306
307 sc->sc_ii_evt.ii_dv = self;
308 sc->sc_ii_evt.ii_intr = iopl_intr_evt;
309 sc->sc_ii_evt.ii_flags = II_NOTCTX | II_UTILITY;
310 sc->sc_ii_evt.ii_tid = ia->ia_tid;
311 iop_initiator_register(iop, &sc->sc_ii_evt);
312
313 sc->sc_ii_null.ii_dv = self;
314 sc->sc_ii_null.ii_intr = iopl_intr_null;
315 sc->sc_ii_null.ii_flags = II_NOTCTX | II_UTILITY;
316 sc->sc_ii_null.ii_tid = ia->ia_tid;
 317 	iop_initiator_register(iop, &sc->sc_ii_null);
318
319 sc->sc_ii_rx.ii_dv = self;
320 sc->sc_ii_rx.ii_intr = iopl_intr_rx;
321 sc->sc_ii_rx.ii_flags = II_NOTCTX | II_UTILITY;
322 sc->sc_ii_rx.ii_tid = ia->ia_tid;
323 iop_initiator_register(iop, &sc->sc_ii_rx);
324
325 sc->sc_ii_tx.ii_dv = self;
326 sc->sc_ii_tx.ii_intr = iopl_intr_tx;
327 sc->sc_ii_tx.ii_flags = II_NOTCTX | II_UTILITY;
328 sc->sc_ii_tx.ii_tid = ia->ia_tid;
329 iop_initiator_register(iop, &sc->sc_ii_tx);
330
331 /*
332 * Determine some of the capabilities of the interface - in
333 * particular, the maximum number of segments per S/G list, and how
334 * much buffer context we'll need to transmit frames (some adapters
335 * may need the destination address in the buffer context).
336 */
337 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_TRANSMIT_INFO,
 338 	    &param, sizeof(param), NULL);
339 if (rv != 0)
340 return;
341
342 tmp = le32toh(param.p.ti.txmodes);
343
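	/*
	 * Each transmit S/G list needs one word of flags/count and one
	 * word of buffer context; interfaces that want the destination
	 * address passed in the SGL need two further words per packet.
	 */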
 344 	if ((tmp & I2O_LAN_MODES_NO_DA_IN_SGL) == 0)
345 sc->sc_tx_ohead = 1 + 1 + 2;
346 else
347 sc->sc_tx_ohead = 1 + 1;
348
349 ifcap = 0;
350
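	/*
	 * The checksum-offload mode flags are meaningful only on I2O 2.0
	 * IOPs, so advertise the capability only for that version.
	 */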
351 if (((le32toh(iop->sc_status.segnumber) >> 12) & 15) ==
352 I2O_VERSION_20) {
353 if ((tmp & I2O_LAN_MODES_IPV4_CHECKSUM) != 0)
354 ifcap |= IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_IPv4_Rx;
355 if ((tmp & I2O_LAN_MODES_TCP_CHECKSUM) != 0)
356 ifcap |= IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_TCPv4_Rx;
357 if ((tmp & I2O_LAN_MODES_UDP_CHECKSUM) != 0)
358 ifcap |= IFCAP_CSUM_UDPv4_Tx|IFCAP_CSUM_UDPv4_Rx;
359 #ifdef notyet
360 if ((tmp & I2O_LAN_MODES_ICMP_CHECKSUM) != 0)
361 ifcap |= IFCAP_CSUM_ICMP;
362 #endif
363 }
364
365 sc->sc_tx_maxsegs =
366 min(le32toh(param.p.ti.maxpktsg), IOPL_MAX_SEGS);
367 sc->sc_tx_maxout = le32toh(param.p.ti.maxpktsout);
368 sc->sc_tx_maxreq = le32toh(param.p.ti.maxpktsreq);
369
370 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_RECEIVE_INFO,
 371 	    &param, sizeof(param), NULL);
372 if (rv != 0)
373 return;
374
375 sc->sc_rx_maxbkt = le32toh(param.p.ri.maxbuckets);
376
377 #ifdef I2ODEBUG
378 if (sc->sc_tx_maxsegs == 0)
379 panic("%s: sc->sc_tx_maxsegs == 0", self->dv_xname);
380 if (sc->sc_tx_maxout == 0)
 381 		panic("%s: sc->sc_tx_maxout == 0", self->dv_xname);
 382 	if (sc->sc_tx_maxreq == 0)
 383 		panic("%s: sc->sc_tx_maxreq == 0", self->dv_xname);
384 if (sc->sc_rx_maxbkt == 0)
385 panic("%s: sc->sc_rx_maxbkt == 0", self->dv_xname);
386 #endif
387
388 /*
389 * Set the pre-padding and "orphan" limits. This is to ensure that
390 * for received packets, the L3 payload will be aligned on a 32-bit
391 * boundary, and the L2 header won't be split between buckets.
392 *
393 * While here, enable error reporting for transmits. We're not
394 * interested in most errors (e.g. excessive collisions), but others
395 * are of more concern.
396 */
397 tmp1 = htole32(sc->sc_rx_prepad);
398 tmp2 = htole32(orphanlimit);
399 tmp3 = htole32(1); /* XXX */
400
401 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
402 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_OPERATION_pktprepad))
403 return;
404 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
405 &tmp2, sizeof(tmp2), I2O_PARAM_LAN_OPERATION_pktorphanlimit))
406 return;
407 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_OPERATION,
408 &tmp3, sizeof(tmp3), I2O_PARAM_LAN_OPERATION_userflags))
409 return;
410
411 /*
412 * Set the batching parameters.
413 */
414 #if IOPL_BATCHING_ENABLED
415 /* Select automatic batching, and specify the maximum packet count. */
416 tmp1 = htole32(0);
417 tmp2 = htole32(IOPL_MAX_BATCH);
418 tmp3 = htole32(IOPL_MAX_BATCH);
419 #else
420 /* Force batching off. */
421 tmp1 = htole32(1); /* XXX */
422 tmp2 = htole32(1);
423 tmp3 = htole32(1);
424 #endif
425 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
426 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_BATCH_CONTROL_batchflags))
427 return;
428 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
429 &tmp2, sizeof(tmp2), I2O_PARAM_LAN_BATCH_CONTROL_maxrxbatchcount))
430 return;
431 if (iop_field_set(iop, ia->ia_tid, I2O_PARAM_LAN_BATCH_CONTROL,
432 &tmp3, sizeof(tmp3), I2O_PARAM_LAN_BATCH_CONTROL_maxtxbatchcount))
433 return;
434
435 /*
436 * Get multicast parameters.
437 */
438 rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_LAN_MAC_ADDRESS,
 439 	    &param, sizeof(param), NULL);
440 if (rv != 0)
441 return;
442
443 sc->sc_mcast_max = le32toh(param.p.lma.maxmcastaddr);
444 sc->sc_mcast_max = min(IOPL_MAX_MULTI, sc->sc_mcast_max);
445
446 /*
447 * Allocate transmit and receive descriptors.
448 */
449 if (iopl_tx_alloc(sc, IOPL_DESCRIPTORS)) {
450 printf("%s: unable to allocate transmit descriptors\n",
451 sc->sc_dv.dv_xname);
452 return;
453 }
454 if (iopl_rx_alloc(sc, IOPL_DESCRIPTORS)) {
455 printf("%s: unable to allocate receive descriptors\n",
456 sc->sc_dv.dv_xname);
457 return;
458 }
459
460 /*
461 * Claim the device so that we don't get any nasty surprises. Allow
462 * failure.
463 */
464 iop_util_claim(iop, &sc->sc_ii_evt, 0,
465 I2O_UTIL_CLAIM_NO_PEER_SERVICE |
466 I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
467 I2O_UTIL_CLAIM_PRIMARY_USER);
468
469 /*
470 * Attach the interface.
471 */
472 memcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
473 ifp->if_softc = sc;
474 ifp->if_flags = iff;
475 ifp->if_capabilities = ifcap;
476 ifp->if_ioctl = iopl_ioctl;
477 ifp->if_start = iopl_start;
478 ifp->if_stop = iopl_stop;
479 ifp->if_init = iopl_init;
480 IFQ_SET_READY(&ifp->if_snd);
481
482 if_attach(ifp);
483
484 switch (sc->sc_mtype) {
485 case I2O_LAN_TYPE_ETHERNET:
486 case I2O_LAN_TYPE_100BASEVG:
487 /* Can we handle 802.1Q encapsulated frames? */
488 if (maxpktsize >= ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
489 sc->sc_if.sci_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
490
491 ether_ifattach(ifp, (u_char *)hwaddr);
492 break;
493
494 case I2O_LAN_TYPE_FDDI:
495 fddi_ifattach(ifp, (u_char *)hwaddr);
496 break;
497 }
498
499 ifmedia_init(&sc->sc_ifmedia, 0, iopl_ifmedia_change,
500 iopl_ifmedia_status);
501 }
502
503 /*
504 * Allocate the specified number of TX descriptors.
505 */
506 static int
507 iopl_tx_alloc(struct iopl_softc *sc, int count)
508 {
509 struct iopl_tx *tx;
510 int i, size, rv;
511
512 if (count > sc->sc_tx_maxout)
513 count = sc->sc_tx_maxout;
514
515 #ifdef I2ODEBUG
516 printf("%s: %d TX descriptors\n", sc->sc_dv.dv_xname, count);
517 #endif
518
519 size = count * sizeof(*tx);
520 sc->sc_tx = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
521
522 for (i = 0, tx = sc->sc_tx; i < count; i++, tx++) {
523 rv = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
524 sc->sc_tx_maxsegs, MCLBYTES, 0,
525 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
526 &tx->tx_dmamap);
527 if (rv != 0) {
528 iopl_tx_free(sc);
529 return (rv);
530 }
531
532 tx->tx_ident = i;
533 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
534 sc->sc_tx_freecnt++;
535 }
536
537 return (0);
538 }
539
540 /*
541 * Free all TX descriptors.
542 */
543 static void
544 iopl_tx_free(struct iopl_softc *sc)
545 {
546 struct iopl_tx *tx;
547
548 while ((tx = SLIST_FIRST(&sc->sc_tx_free)) != NULL) {
549 SLIST_REMOVE_HEAD(&sc->sc_tx_free, tx_chain);
550 bus_dmamap_destroy(sc->sc_dmat, tx->tx_dmamap);
551 }
552
553 free(sc->sc_tx, M_DEVBUF);
554 sc->sc_tx = NULL;
555 sc->sc_tx_freecnt = 0;
556 }
557
558 /*
559 * Allocate the specified number of RX buckets and descriptors.
560 */
561 static int
562 iopl_rx_alloc(struct iopl_softc *sc, int count)
563 {
564 struct iopl_rx *rx;
565 struct mbuf *m;
566 int i, size, rv, state;
567
568 if (count > sc->sc_rx_maxbkt)
569 count = sc->sc_rx_maxbkt;
570
571 #ifdef I2ODEBUG
572 printf("%s: %d RX descriptors\n", sc->sc_dv.dv_xname, count);
573 #endif
574
575 size = count * sizeof(*rx);
576 sc->sc_rx = malloc(size, M_DEVBUF, M_NOWAIT|M_ZERO);
577
578 for (i = 0, rx = sc->sc_rx; i < count; i++, rx++) {
579 state = 0;
580
581 MGETHDR(m, M_DONTWAIT, MT_DATA);
582 if (m == NULL) {
583 rv = ENOBUFS;
584 goto bad;
585 }
586
587 state++;
588
589 MCLGET(m, M_DONTWAIT);
590 if ((m->m_flags & M_EXT) == 0) {
591 m_freem(m);
592 rv = ENOBUFS;
593 goto bad;
594 }
595
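		/*
		 * XXX The transmit S/G segment limit is also used to
		 * size the receive DMA maps.
		 */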
596 rv = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
597 sc->sc_tx_maxsegs, PAGE_SIZE, 0,
598 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &rx->rx_dmamap);
599 if (rv != 0)
600 goto bad;
601
602 state++;
603
604 rv = bus_dmamap_load_mbuf(sc->sc_dmat, rx->rx_dmamap, m,
605 BUS_DMA_READ | BUS_DMA_NOWAIT);
606 if (rv != 0)
607 goto bad;
 608 		rx->rx_mbuf = m;
 609 		rx->rx_ident = i;
610 SLIST_INSERT_HEAD(&sc->sc_rx_free, rx, rx_chain);
611 sc->sc_rx_freecnt++;
612 }
 613 	return (0);
 614  bad:
615 if (state > 1)
616 bus_dmamap_destroy(sc->sc_dmat, rx->rx_dmamap);
617 if (state > 0)
618 m_freem(m);
619
620 iopl_rx_free(sc);
621 return (rv);
622 }
623
624 /*
625 * Free all RX buckets and descriptors.
626 */
627 static void
628 iopl_rx_free(struct iopl_softc *sc)
629 {
630 struct iopl_rx *rx;
631
632 while ((rx = SLIST_FIRST(&sc->sc_rx_free)) != NULL) {
633 SLIST_REMOVE_HEAD(&sc->sc_rx_free, rx_chain);
634 bus_dmamap_destroy(sc->sc_dmat, rx->rx_dmamap);
635 m_freem(rx->rx_mbuf);
636 }
637
638 free(sc->sc_rx, M_DEVBUF);
639 sc->sc_rx = NULL;
640 sc->sc_rx_freecnt = 0;
641 }
642
643 /*
644 * Post all free RX buckets to the device.
645 */
646 static void
647 iopl_rx_post(struct iopl_softc *sc)
648 {
649 struct i2o_lan_receive_post *mf;
650 struct iopl_rx *rx;
651 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sp, *p, *ep, *lp;
652 bus_dmamap_t dm;
653 bus_dma_segment_t *ds;
654 bus_addr_t saddr, eaddr;
655 u_int i, slen, tlen;
656
657 mf = (struct i2o_lan_receive_post *)mb;
658 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_RECEIVE_POST);
659 mf->msgictx = sc->sc_ii_rx.ii_ictx;
660
661 ep = mb + (sizeof(mb) >> 2);
662 sp = (u_int32_t *)(mf + 1);
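	/*
	 * sp marks the start of S/G list space within the frame; ep is
	 * the first word past the end of the frame buffer.
	 */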
663
664 while (sc->sc_rx_freecnt != 0) {
665 mf->msgflags = I2O_MSGFLAGS(i2o_lan_receive_post);
666 mf->bktcnt = 0;
667 p = sp;
668
669 /*
670 * Remove RX descriptors from the list, sync their DMA maps,
671 * and add their buckets to the scatter/gather list for
672 * posting.
673 */
674 for (;;) {
675 rx = SLIST_FIRST(&sc->sc_rx_free);
676 SLIST_REMOVE_HEAD(&sc->sc_rx_free, rx_chain);
677 dm = rx->rx_dmamap;
678
679 bus_dmamap_sync(sc->sc_dmat, dm, 0, dm->dm_mapsize,
680 BUS_DMASYNC_PREREAD);
681
682 lp = p;
683 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
684 I2O_SGL_END_BUFFER | I2O_SGL_BC_32BIT;
685 *p++ = rx->rx_ident;
686
687 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--) {
688 slen = ds->ds_len;
689 saddr = ds->ds_addr;
690 ds++;
691
692 /*
693 * XXX This should be done with a bus_space
694 * flag.
695 */
696 while (slen > 0) {
697 eaddr = (saddr + PAGE_SIZE) &
698 ~(PAGE_SIZE - 1);
699 tlen = min(eaddr - saddr, slen);
700 slen -= tlen;
701 *p++ = le32toh(saddr);
702 saddr = eaddr;
703 }
704 }
705
706 if (p + 2 + sc->sc_tx_maxsegs >= ep)
707 break;
708 if (--sc->sc_rx_freecnt <= 0)
709 break;
710 }
711
712 /*
713 * Terminate the scatter/gather list and fix up the message
714 * frame size and free RX descriptor count.
715 */
716 *lp |= I2O_SGL_END;
717 mb[0] += ((p - sp) << 16);
718
719 /*
720 * Finally, post the message frame to the device.
721 */
722 iop_post((struct iop_softc *)device_parent(&sc->sc_dv), mb);
723 }
724 }
725
726 /*
727 * Handle completion of periodic parameter group retrievals.
728 */
729 static void
730 iopl_intr_pg(struct device *dv, struct iop_msg *im, void *reply)
731 {
732 struct i2o_param_lan_stats *ls;
733 struct i2o_param_lan_802_3_stats *les;
734 struct i2o_param_lan_media_operation *lmo;
735 struct iopl_softc *sc;
736 struct iop_softc *iop;
737 struct ifnet *ifp;
738 struct i2o_reply *rb;
739 int pg;
740
741 rb = (struct i2o_reply *)reply;
742 sc = (struct iopl_softc *)dv;
743 iop = (struct iop_softc *)device_parent(dv);
744 ifp = &sc->sc_if.sci_if;
745
746 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
747 iopl_tick_sched(sc);
748 return;
749 }
750
751 iop_msg_unmap(iop, im);
752 pg = le16toh(((struct iop_pgop *)im->im_dvcontext)->oat.group);
753 free(im->im_dvcontext, M_DEVBUF);
754 iop_msg_free(iop, im);
755
756 switch (pg) {
757 case I2O_PARAM_LAN_MEDIA_OPERATION:
758 lmo = &sc->sc_pb.p.lmo;
759
760 sc->sc_curmbps =
761 (int)(le64toh(lmo->currxbps) / (1000 * 1000));
762 sc->sc_conntype = le32toh(lmo->connectiontype);
763
764 if (lmo->linkstatus) {
765 /* Necessary only for initialisation. */
766 sc->sc_flags |= IOPL_LINK;
767 }
768
769 /* Chain the next retrieval. */
770 sc->sc_next_pg = I2O_PARAM_LAN_STATS;
771 break;
772
773 case I2O_PARAM_LAN_STATS:
774 ls = &sc->sc_pb.p.ls;
775
776 /* XXX Not all of these stats may be supported. */
777 ifp->if_ipackets = le64toh(ls->ipackets);
778 ifp->if_opackets = le64toh(ls->opackets);
779 ifp->if_ierrors = le64toh(ls->ierrors);
780 ifp->if_oerrors = le64toh(ls->oerrors);
781
782 /* Chain the next retrieval. */
783 sc->sc_next_pg = sc->sc_ms_pg;
784 break;
785
786 case I2O_PARAM_LAN_802_3_STATS:
787 les = &sc->sc_pb.p.les;
788
789 /*
790 * This isn't particularly meaningful: the sum of the number
 791 	 * of packets that encountered a single collision and the
792 * number of packets that encountered multiple collisions.
793 *
794 * XXX Not all of these stats may be supported.
795 */
796 ifp->if_collisions = le64toh(les->onecollision) +
797 le64toh(les->manycollisions);
798
799 sc->sc_next_pg = -1;
800 break;
801
802 case I2O_PARAM_LAN_FDDI_STATS:
803 sc->sc_next_pg = -1;
804 break;
805 }
806
807 iopl_tick_sched(sc);
808 }
809
810 /*
811 * Handle an event signalled by the interface.
812 */
813 static void
814 iopl_intr_evt(struct device *dv, struct iop_msg *im, void *reply)
815 {
816 struct i2o_util_event_register_reply *rb;
817 struct iopl_softc *sc;
818 u_int event;
819
820 rb = (struct i2o_util_event_register_reply *)reply;
821
822 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
823 return;
824
825 sc = (struct iopl_softc *)dv;
826 event = le32toh(rb->event);
827
828 switch (event) {
829 case I2O_EVENT_LAN_MEDIA_CHANGE:
830 sc->sc_flags |= IOPL_MEDIA_CHANGE;
831 break;
832 case I2O_EVENT_LAN_LINK_UP:
833 sc->sc_flags |= IOPL_LINK;
834 break;
835 case I2O_EVENT_LAN_LINK_DOWN:
836 sc->sc_flags &= ~IOPL_LINK;
837 break;
838 default:
839 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
840 break;
841 }
842 }
843
844 /*
845 * Bit-bucket initiator: ignore interrupts signaled by the interface.
846 */
847 static void
848 iopl_intr_null(struct device *dv, struct iop_msg *im, void *reply)
849 {
850
851 }
852
853 /*
854 * Handle a receive interrupt.
855 */
856 static void
857 iopl_intr_rx(struct device *dv, struct iop_msg *im, void *reply)
858 {
859 struct i2o_lan_receive_reply *rb;
860 struct iopl_softc *sc;
861 struct iopl_rx *rx;
862 struct ifnet *ifp;
863 struct mbuf *m, *m0;
864 u_int32_t *p;
865 int off, err, flg, first, lastpkt, lastbkt, rv;
866 int len, i, pkt, pktlen[IOPL_MAX_BATCH], csumflgs[IOPL_MAX_BATCH];
867 struct mbuf *head[IOPL_MAX_BATCH], *tail[IOPL_MAX_BATCH];
868
869 rb = (struct i2o_lan_receive_reply *)reply;
870 sc = (struct iopl_softc *)dv;
871 ifp = &sc->sc_if.sci_if;
872 p = (u_int32_t *)(rb + 1);
873
874 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
875 /* XXX We leak if we get here. */
876 return;
877 }
878
879 memset(head, 0, sizeof(head));
880 memset(pktlen, 0, sizeof(pktlen));
881 memset(csumflgs, 0, sizeof(csumflgs));
882
883 /*
884 * Scan through the transaction reply list. The TRL takes this
885 * form:
886 *
887 * 32-bits Bucket context
888 * 32-bits 1st packet offset (high 8-bits are control flags)
889 * 32-bits 1st packet length (high 8-bits are error status)
890 * 32-bits 2nd packet offset
891 * 32-bits 2nd packet length
892 * ...
893 * 32-bits Nth packet offset
894 * 32-bits Nth packet length
895 * ...
896 * 32-bits Bucket context
897 * 32-bits 1st packet offset
898 * 32-bits 1st packet length
899 * ...
900 */
901 for (lastbkt = 0; !lastbkt;) {
902 /*
903 * Return the RX descriptor for this bucket back to the free
904 * list.
905 */
906 rx = &sc->sc_rx[*p++];
907 SLIST_INSERT_HEAD(&sc->sc_rx_free, rx, rx_chain);
908 sc->sc_rx_freecnt++;
909
910 /*
911 * Sync the bucket's DMA map.
912 */
913 bus_dmamap_sync(sc->sc_dmat, rx->rx_dmamap, 0,
914 rx->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
915
916 /*
917 * If this is a valid receive, go through the PDB entries
918 * and re-assemble all the packet fragments that we find.
919 * Otherwise, just free up the buckets that we had posted -
920 * we have probably received this reply because the
921 * interface has been reset or suspended.
922 */
923 if ((rb->trlflags & I2O_LAN_RECEIVE_REPLY_PDB) == 0) {
924 lastbkt = (--rb->trlcount == 0);
925 continue;
926 }
927
928 m = rx->rx_mbuf;
929
930 for (lastpkt = 0, first = 1, pkt = 0; !lastpkt; pkt++) {
931 off = p[0] & 0x00ffffff;
932 len = p[1] & 0x00ffffff;
933 flg = p[0] >> 24;
934 err = p[1] >> 24;
935 p += 2;
936
937 #ifdef I2ODEBUG
938 if (pkt >= IOPL_MAX_BATCH)
939 panic("iopl_intr_rx: too many packets");
940 #endif
941 /*
942 * Break out at the right spot later on if this is
943 * the last packet in this bucket, or the last
944 * bucket.
945 */
946 if ((flg & 0x40) == 0x40) /* XXX */
947 lastpkt = 1;
948 if ((flg & 0xc8) == 0xc0) /* XXX */
949 lastbkt = 1;
950
951 /*
952 * Skip dummy PDB entries.
953 */
954 if ((flg & 0x07) == 0x02) /* XXX */
955 continue;
956
957 /*
958 * If the packet was received with errors, then
959 * arrange to dump it. We allow bad L3 and L4
960 * checksums through for accounting purposes.
961 */
962 if (pktlen[pkt] == -1)
963 continue;
964 if ((off & 0x03) == 0x01) { /* XXX */
965 pktlen[pkt] = -1;
966 continue;
967 }
968 if ((err & I2O_LAN_PDB_ERROR_CKSUM_MASK) != 0) {
969 if ((err & I2O_LAN_PDB_ERROR_L3_CKSUM_BAD) != 0)
970 csumflgs[pkt] |= M_CSUM_IPv4_BAD;
971 if ((err & I2O_LAN_PDB_ERROR_L4_CKSUM_BAD) != 0)
972 csumflgs[pkt] |= M_CSUM_TCP_UDP_BAD;
973 err &= ~I2O_LAN_PDB_ERROR_CKSUM_MASK;
974 }
975 if (err != I2O_LAN_PDB_ERROR_NONE) {
976 pktlen[pkt] = -1;
977 continue;
978 }
979
980 if (len <= (MHLEN - sc->sc_rx_prepad)) {
981 /*
982 * The fragment is small enough to fit in a
983 * single header mbuf - allocate one and
984 * copy the data into it. This greatly
985 * reduces memory consumption when we
986 * receive lots of small packets.
987 */
988 MGETHDR(m0, M_DONTWAIT, MT_DATA);
989 if (m0 == NULL) {
 990 					ifp->if_ierrors++;
 991 					pktlen[pkt] = -1;
992 continue;
993 }
994 m0->m_data += sc->sc_rx_prepad;
 995 				m_copydata(m, off, len, mtod(m0, caddr_t));
996 off = 0;
997 } else if (!first) {
998 /*
999 * The bucket contains multiple fragments
1000 * (each from a different packet). Allocate
1001 * an mbuf header and add a reference to the
1002 * storage from the bucket's mbuf.
1003 */
1004 m0 = m_copym(m, off, len, M_DONTWAIT);
1005 off = 0;
1006 } else {
1007 /*
1008 * This is the first "large" packet in the
1009 * bucket. Allocate replacement mbuf
1010 * storage. If we fail, drop the packet and
1011 * continue.
1012 */
1013 MGETHDR(m0, M_DONTWAIT, MT_DATA);
1014 if (m0 == NULL) {
1015 pktlen[pkt] = -1;
1016 continue;
1017 }
1018
1019 MCLGET(m0, M_DONTWAIT);
1020 if ((m0->m_flags & M_EXT) == 0) {
1021 pktlen[pkt] = -1;
1022 m_freem(m0);
1023 continue;
1024 }
1025
1026 /*
1027 * If we can't load the new mbuf, then drop
1028 * the bucket from the RX list. XXX Ouch.
1029 */
1030 bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
1031 rv = bus_dmamap_load_mbuf(sc->sc_dmat,
1032 rx->rx_dmamap, m0,
1033 BUS_DMA_READ | BUS_DMA_NOWAIT);
1034 if (rv != 0) {
1035 printf("%s: unable to load mbuf (%d),"
1036 " discarding bucket\n",
1037 sc->sc_dv.dv_xname, rv);
1038 SLIST_REMOVE_HEAD(&sc->sc_rx_free,
1039 rx_chain);
1040 sc->sc_rx_freecnt--;
1041 }
1042
1043 rx->rx_mbuf = m0;
1044 m0 = m;
1045 first = 0;
1046 }
1047
1048 /*
1049 * Fix up the mbuf header, and append the mbuf to
1050 * the chain for this packet.
1051 */
1052 m0->m_len = len;
1053 m0->m_data += off;
1054 if (head[pkt] != NULL)
1055 tail[pkt]->m_next = m0;
1056 else
1057 head[pkt] = m0;
1058 tail[pkt] = m0;
1059 pktlen[pkt] += len;
1060 }
1061 }
1062
1063 /*
1064 * Pass each received packet on.
1065 */
1066 for (i = 0; i < IOPL_MAX_BATCH; i++) {
1067 if ((m = head[i]) == NULL)
1068 continue;
1069
1070 /*
1071 * If the packet was received with errors, we dump it here.
1072 */
1073 if ((len = pktlen[i]) < 0) {
1074 m_freem(m);
1075 continue;
1076 }
1077
1078 /*
1079 * Otherwise, fix up the header, trim off the CRC, feed
1080 * a copy to BPF, and then pass it on up.
1081 */
1082 m->m_pkthdr.rcvif = ifp;
1083 m->m_pkthdr.len = len;
 1084 		m->m_pkthdr.csum_flags = csumflgs[i] | sc->sc_rx_csumflgs;
1085 m_adj(m, -ETHER_CRC_LEN);
1086
1087 #if NBPFILTER > 0
1088 if (ifp->if_bpf)
1089 bpf_mtap(ifp->if_bpf, m);
1090 #endif /* NBPFILTER > 0 */
1091
1092 (*ifp->if_input)(ifp, m);
1093 }
1094
1095 /*
1096 * Re-post the buckets back to the interface, and try to send more
1097 * packets.
1098 */
1099 iopl_rx_post(sc);
1100 iopl_start(&sc->sc_if.sci_if);
1101 }
1102
1103 /*
1104 * Handle a transmit interrupt.
1105 */
1106 static void
1107 iopl_intr_tx(struct device *dv, struct iop_msg *im, void *reply)
1108 {
1109 struct i2o_lan_send_reply *rb;
1110 struct iopl_softc *sc;
1111 struct iopl_tx *tx;
1112 struct ifnet *ifp;
1113 int i, bktcnt;
1114
 1115 	sc = (struct iopl_softc *)dv;
 1116 	rb = (struct i2o_lan_send_reply *)reply;
 	ifp = &sc->sc_if.sci_if;
1117
1118 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1119 /* XXX We leak if we get here. */
1120 return;
1121 }
1122
1123 if (rb->reqstatus != I2O_STATUS_SUCCESS)
1124 iopl_error(sc, le16toh(rb->detail));
1125
1126 /*
1127 * For each packet that has been transmitted, unload the DMA map,
1128 * free the source mbuf, and then release the transmit descriptor
1129 * back to the pool.
1130 */
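	/*
	 * The reply size (upper 16 bits of msgflags) is counted in 32-bit
	 * words; subtracting the fixed reply size leaves the number of
	 * transmit contexts beyond the first, hence the inclusive loop
	 * bound below.
	 */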
1131 bktcnt = (le32toh(rb->msgflags) >> 16) - (sizeof(*rb) >> 2);
1132
1133 for (i = 0; i <= bktcnt; i++) {
1134 tx = &sc->sc_tx[rb->tctx[i]];
1135
1136 bus_dmamap_sync(sc->sc_dmat, tx->tx_dmamap, 0,
1137 tx->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1138 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
1139
1140 m_freem(tx->tx_mbuf);
1141
1142 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
1143 sc->sc_tx_freecnt++;
1144 }
1145
1146 /*
1147 * Try to send more packets.
1148 */
1149 ifp->if_flags &= ~IFF_OACTIVE;
1150 iopl_start(&sc->sc_if.sci_if);
1151 }
1152
1153 /*
1154 * Describe an error code returned by the adapter.
1155 */
1156 static void
1157 iopl_error(struct iopl_softc *sc, u_int dsc)
1158 {
1159 #ifdef I2OVERBOSE
1160 const char *errstr;
1161 #endif
1162
1163 switch (dsc) {
1164 case I2O_LAN_DSC_RECEIVE_ERROR:
1165 case I2O_LAN_DSC_RECEIVE_ABORTED:
1166 case I2O_LAN_DSC_TRANSMIT_ERROR:
1167 case I2O_LAN_DSC_TRANSMIT_ABORTED:
1168 case I2O_LAN_DSC_TEMP_SUSPENDED_STATE: /* ??? */
1169 break;
1170
1171 default:
1172 #ifdef I2OVERBOSE
 1173 		if (dsc >= sizeof(iopl_errors) / sizeof(iopl_errors[0]))
1174 errstr = "<unknown>";
1175 else
1176 errstr = iopl_errors[dsc];
1177 printf("%s: error 0x%04x: %s\n", sc->sc_dv.dv_xname, dsc,
1178 errstr);
1179 #else
1180 printf("%s: error 0x%04x\n", sc->sc_dv.dv_xname, dsc);
1181 #endif
1182 break;
1183 }
1184 }
1185
1186 /*
1187 * Retrieve the next scheduled parameter group from the interface. Called
1188 * periodically.
1189 */
1190 static void
1191 iopl_tick(void *cookie)
1192 {
1193 struct iopl_softc *sc;
1194
1195 sc = cookie;
1196
1197 iopl_getpg(sc, sc->sc_next_pg);
1198 }
1199
1200 /*
1201 * Schedule the next PG retrieval.
1202 */
1203 static void
1204 iopl_tick_sched(struct iopl_softc *sc)
1205 {
1206 int s;
1207
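	/*
	 * A next-PG value of -1 marks the end of a retrieval cycle.  Start
	 * the next cycle with the media operation PG if a media change was
	 * signalled, otherwise with the generic statistics PG.
	 */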
1208 if (sc->sc_next_pg == -1) {
1209 s = splbio();
1210 if ((sc->sc_flags & IOPL_MEDIA_CHANGE) != 0) {
1211 sc->sc_next_pg = I2O_PARAM_LAN_MEDIA_OPERATION;
1212 sc->sc_flags &= ~IOPL_MEDIA_CHANGE;
1213 } else
1214 sc->sc_next_pg = I2O_PARAM_LAN_STATS;
1215 splx(s);
1216 }
1217
1218 callout_reset(&sc->sc_pg_callout, hz / IOPL_TICK_HZ, iopl_tick, sc);
1219 }
1220
1221 /*
1222 * Request the specified parameter group from the interface, to be delivered
1223 * to the PG initiator.
1224 */
1225 static void
1226 iopl_getpg(struct iopl_softc *sc, int pg)
1227 {
1228
1229 iop_field_get_all((struct iop_softc *)device_parent(&sc->sc_dv),
1230 sc->sc_tid, pg, &sc->sc_pb, sizeof(sc->sc_pb), &sc->sc_ii_pg);
1231 }
1232
1233 /*
1234 * Report on current media status.
1235 */
1236 static void
1237 iopl_ifmedia_status(struct ifnet *ifp, struct ifmediareq *req)
1238 {
1239 const struct iopl_media *ilm;
1240 struct iopl_softc *sc;
1241 int s, conntype;
1242
1243 sc = ifp->if_softc;
1244
1245 s = splbio();
1246 conntype = sc->sc_conntype;
1247 splx(s);
1248
1249 req->ifm_status = IFM_AVALID;
1250 if ((sc->sc_flags & IOPL_LINK) != 0)
1251 req->ifm_status |= IFM_ACTIVE;
1252
1253 switch (sc->sc_mtype) {
1254 case I2O_LAN_TYPE_100BASEVG:
1255 case I2O_LAN_TYPE_ETHERNET:
1256 ilm = iopl_ether_media;
1257 req->ifm_active = IFM_ETHER;
1258 break;
1259
1260 case I2O_LAN_TYPE_FDDI:
1261 ilm = iopl_fddi_media;
1262 req->ifm_active = IFM_FDDI;
1263 break;
1264 }
1265
1266 for (; ilm->ilm_i2o != I2O_LAN_CONNECTION_DEFAULT; ilm++)
1267 if (ilm->ilm_i2o == conntype)
1268 break;
1269 req->ifm_active |= ilm->ilm_ifmedia;
1270
1271 if (ilm->ilm_i2o == I2O_LAN_CONNECTION_DEFAULT)
1272 printf("%s: unknown connection type 0x%08x; defaulting\n",
1273 sc->sc_dv.dv_xname, conntype);
1274 }
1275
1276 /*
1277 * Change media parameters.
1278 */
1279 static int
1280 iopl_ifmedia_change(struct ifnet *ifp)
1281 {
1282 struct iop_softc *iop;
1283 struct iopl_softc *sc;
1284 const struct iopl_media *ilm;
1285 u_int subtype;
1286 u_int32_t ciontype;
1287 u_int8_t fdx;
1288
1289 sc = ifp->if_softc;
1290 iop = (struct iop_softc *)device_parent(&sc->sc_dv);
1291
1292 subtype = IFM_SUBTYPE(sc->sc_ifmedia.ifm_cur->ifm_media);
1293 if (subtype == IFM_AUTO)
1294 ciontype = I2O_LAN_CONNECTION_DEFAULT;
1295 else {
1296 switch (sc->sc_mtype) {
1297 case I2O_LAN_TYPE_100BASEVG:
1298 case I2O_LAN_TYPE_ETHERNET:
1299 ilm = iopl_ether_media;
1300 break;
1301
1302 case I2O_LAN_TYPE_FDDI:
1303 ilm = iopl_fddi_media;
1304 break;
1305 }
1306
1307 for (; ilm->ilm_i2o != I2O_LAN_CONNECTION_DEFAULT; ilm++)
1308 if (ilm->ilm_ifmedia == subtype)
1309 break;
1310 if (ilm->ilm_i2o == I2O_LAN_CONNECTION_DEFAULT)
1311 return (EINVAL);
1312 ciontype = le32toh(ilm->ilm_i2o);
1313 }
1314
1315 if ((sc->sc_ifmedia.ifm_cur->ifm_media & IFM_FDX) != 0)
1316 fdx = 1;
1317 else if ((sc->sc_ifmedia.ifm_cur->ifm_media & IFM_HDX) != 0)
1318 fdx = 0;
1319 else {
1320 /*
1321 * XXX Not defined as auto-detect, but as "default".
1322 */
1323 fdx = 0xff;
1324 }
1325
1326 /*
1327 * XXX Can we set all these independently? Will omitting the
1328 * connector type screw us up?
1329 */
1330 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1331 &ciontype, sizeof(ciontype),
1332 I2O_PARAM_LAN_MEDIA_OPERATION_connectiontarget);
1333 #if 0
1334 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1335 &certype, sizeof(certype),
1336 I2O_PARAM_LAN_MEDIA_OPERATION_connectertarget);
1337 #endif
1338 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MEDIA_OPERATION,
1339 &fdx, sizeof(fdx),
1340 I2O_PARAM_LAN_MEDIA_OPERATION_duplextarget);
1341
1342 ifp->if_baudrate = ifmedia_baudrate(sc->sc_ifmedia.ifm_cur->ifm_media);
1343 return (0);
1344 }
1345
1346 /*
1347 * Initialize the interface.
1348 */
1349 static int
1350 iopl_init(struct ifnet *ifp)
1351 {
1352 struct i2o_lan_reset mf;
1353 struct iopl_softc *sc;
1354 struct iop_softc *iop;
1355 int rv, s, flg;
1356 u_int8_t hwaddr[8];
1357 u_int32_t txmode, rxmode;
1358 uint64_t ifcap;
1359
1360 sc = ifp->if_softc;
1361 iop = (struct iop_softc *)device_parent(&sc->sc_dv);
1362
1363 s = splbio();
1364 flg = sc->sc_flags;
1365 splx(s);
1366
1367 if ((flg & IOPL_INITTED) == 0) {
1368 /*
1369 * Reset the interface hardware.
1370 */
1371 mf.msgflags = I2O_MSGFLAGS(i2o_lan_reset);
1372 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_RESET);
1373 mf.msgictx = sc->sc_ii_null.ii_ictx;
1374 mf.reserved = 0;
1375 mf.resrcflags = 0;
1376 iop_post(iop, (u_int32_t *)&mf);
1377 DELAY(5000);
1378
1379 /*
1380 * Register to receive events from the device.
1381 */
1382 if (iop_util_eventreg(iop, &sc->sc_ii_evt, 0xffffffff))
1383 printf("%s: unable to register for events\n",
1384 sc->sc_dv.dv_xname);
1385
1386 /*
1387 * Trigger periodic parameter group retrievals.
1388 */
1389 s = splbio();
1390 sc->sc_flags |= (IOPL_MEDIA_CHANGE | IOPL_INITTED);
1391 splx(s);
1392
1393 callout_init(&sc->sc_pg_callout);
1394
1395 sc->sc_next_pg = -1;
1396 iopl_tick_sched(sc);
1397 }
1398
1399 /*
1400 * Enable or disable hardware checksumming.
1401 */
1402 s = splbio();
 1403 #if IOPL_BATCHING_ENABLED
1404 sc->sc_tx_tcw = I2O_LAN_TCW_REPLY_BATCH;
1405 #else
1406 sc->sc_tx_tcw = I2O_LAN_TCW_REPLY_IMMEDIATELY;
1407 #endif
1408 sc->sc_rx_csumflgs = 0;
1409 rxmode = 0;
1410 txmode = 0;
1411
1412 ifcap = ifp->if_capenable;
1413 if ((ifcap & IFCAP_CSUM_IPv4_Tx) != 0) {
1414 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_NETWORK;
1415 txmode |= I2O_LAN_MODES_IPV4_CHECKSUM;
1416 }
1417 if ((ifcap & IFCAP_CSUM_IPv4_Rx) != 0) {
1418 sc->sc_rx_csumflgs |= M_CSUM_IPv4;
1419 rxmode |= I2O_LAN_MODES_IPV4_CHECKSUM;
1420 }
1421
1422 if ((ifcap & IFCAP_CSUM_TCPv4_Tx) != 0) {
1423 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_TRANSPORT;
1424 txmode |= I2O_LAN_MODES_TCP_CHECKSUM;
1425 }
1426 if ((ifcap & IFCAP_CSUM_TCPv4_Rx) != 0) {
1427 sc->sc_rx_csumflgs |= M_CSUM_TCPv4;
1428 rxmode |= I2O_LAN_MODES_TCP_CHECKSUM;
1429 }
1430
1431 if ((ifcap & IFCAP_CSUM_UDPv4_Tx) != 0) {
1432 sc->sc_tx_tcw |= I2O_LAN_TCW_CKSUM_TRANSPORT;
1433 txmode |= I2O_LAN_MODES_UDP_CHECKSUM;
1434 }
1435 if ((ifcap & IFCAP_CSUM_UDPv4_Rx) != 0) {
1436 sc->sc_rx_csumflgs |= M_CSUM_UDPv4;
 1437 		rxmode |= I2O_LAN_MODES_UDP_CHECKSUM;
1438 }
1439
1440 splx(s);
1441
 1442 	/* We always want a copy of the FCS; the receive path trims it. */
1443 rxmode |= I2O_LAN_MODES_FCS_RECEPTION;
1444 rxmode = htole32(rxmode);
1445 txmode = htole32(txmode);
1446
1447 rv = iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_OPERATION,
1448 &txmode, sizeof(txmode), I2O_PARAM_LAN_OPERATION_txmodesenable);
1449 if (rv == 0)
1450 rv = iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_OPERATION,
 1451 		    &rxmode, sizeof(rxmode),
1452 I2O_PARAM_LAN_OPERATION_rxmodesenable);
1453 if (rv != 0)
1454 return (rv);
1455
1456 /*
1457 * Try to set the active MAC address.
1458 */
1459 memset(hwaddr, 0, sizeof(hwaddr));
1460 memcpy(hwaddr, LLADDR(ifp->if_sadl), ifp->if_addrlen);
1461 iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MAC_ADDRESS,
1462 hwaddr, sizeof(hwaddr), I2O_PARAM_LAN_MAC_ADDRESS_localaddr);
1463
1464 ifp->if_flags = (ifp->if_flags | IFF_RUNNING) & ~IFF_OACTIVE;
1465
1466 /*
1467 * Program the receive filter.
1468 */
1469 switch (sc->sc_mtype) {
1470 case I2O_LAN_TYPE_ETHERNET:
1471 case I2O_LAN_TYPE_100BASEVG:
1472 case I2O_LAN_TYPE_FDDI:
1473 iopl_filter_ether(sc);
1474 break;
1475 }
1476
1477 /*
1478 * Post any free receive buckets to the interface.
1479 */
1480 s = splbio();
1481 iopl_rx_post(sc);
1482 splx(s);
1483 return (0);
1484 }
1485
1486 /*
1487 * Stop the interface.
1488 */
1489 static void
1490 iopl_stop(struct ifnet *ifp, int disable)
1491 {
1492 struct i2o_lan_suspend mf;
1493 struct iopl_softc *sc;
1494 struct iop_softc *iop;
1495 int flg, s;
1496
1497 sc = ifp->if_softc;
 1498 	iop = (struct iop_softc *)device_parent(&sc->sc_dv);
1499
1500 s = splbio();
1501 flg = sc->sc_flags;
1502 splx(s);
1503
1504 if ((flg & IOPL_INITTED) != 0) {
1505 /*
1506 * Block reception of events from the device.
1507 */
1508 if (iop_util_eventreg(iop, &sc->sc_ii_evt, 0))
1509 printf("%s: unable to register for events\n",
1510 sc->sc_dv.dv_xname);
1511
1512 /*
 1513 		 * Stop parameter group retrieval.
1514 */
1515 callout_stop(&sc->sc_pg_callout);
1516
1517 s = splbio();
1518 sc->sc_flags &= ~IOPL_INITTED;
1519 splx(s);
1520 }
1521
1522 /*
1523 * If requested, suspend the interface.
1524 */
1525 if (disable) {
1526 mf.msgflags = I2O_MSGFLAGS(i2o_lan_suspend);
1527 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_SUSPEND);
1528 mf.msgictx = sc->sc_ii_null.ii_ictx;
1529 mf.reserved = 0;
1530 mf.resrcflags = I2O_LAN_RESRC_RETURN_BUCKETS |
1531 I2O_LAN_RESRC_RETURN_XMITS;
1532 iop_post(iop, (u_int32_t *)&mf);
1533 }
1534
1535 ifp->if_timer = 0;
1536 ifp->if_flags &= ~IFF_RUNNING;
1537 }
1538
1539 /*
1540 * Start output on the interface.
1541 */
1542 static void
1543 iopl_start(struct ifnet *ifp)
1544 {
1545 struct iopl_softc *sc;
1546 struct iop_softc *iop;
1547 struct i2o_lan_packet_send *mf;
1548 struct iopl_tx *tx;
1549 struct mbuf *m;
1550 bus_dmamap_t dm;
1551 bus_dma_segment_t *ds;
1552 bus_addr_t saddr, eaddr;
1553 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *p, *lp;
1554 u_int rv, i, slen, tlen, size;
1555 int frameleft, nxmits;
1556 SLIST_HEAD(,iopl_tx) pending;
1557
1558 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1559 return;
1560
1561 sc = (struct iopl_softc *)ifp->if_softc;
1562 iop = (struct iop_softc *)device_parent(&sc->sc_dv);
1563 mf = (struct i2o_lan_packet_send *)mb;
1564 frameleft = -1;
1565 nxmits = 0;
1566 SLIST_INIT(&pending);
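	/*
	 * frameleft is the number of 32-bit words still free in the
	 * current message frame; -1 means no frame has been started yet.
	 */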
1567
1568 /*
1569 * Set static fields in the message frame header.
1570 */
1571 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_LAN_PACKET_SEND);
 1572 	mf->msgictx = sc->sc_ii_tx.ii_ictx;
1573 mf->tcw = sc->sc_tx_tcw;
1574
1575 for (;;) {
1576 /*
1577 * Grab a packet to send and a transmit descriptor for it.
1578 * If we don't get both, then bail out.
1579 */
1580 if ((tx = SLIST_FIRST(&sc->sc_tx_free)) == NULL) {
1581 ifp->if_flags |= IFF_OACTIVE;
1582 break;
1583 }
1584 IFQ_DEQUEUE(&ifp->if_snd, m);
1585 if (m == NULL)
1586 break;
1587
1588 /*
1589 * Load the mbuf into the descriptor's DMA map. If we fail,
1590 * drop the packet on the floor and get out.
1591 */
1592 dm = tx->tx_dmamap;
1593 rv = bus_dmamap_load_mbuf(sc->sc_dmat, dm, m,
1594 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 1595 		if (rv != 0) {
1596 printf("%s: unable to load TX buffer; error = %d\n",
1597 sc->sc_dv.dv_xname, rv);
1598 m_freem(m);
1599 break;
1600 }
1601 bus_dmamap_sync(sc->sc_dmat, dm, 0, dm->dm_mapsize,
1602 BUS_DMASYNC_PREWRITE);
1603
1604 /*
1605 * Now that the transmit descriptor has resources allocated
1606 * to it, remove it from the free list and add it to the
1607 * pending list.
1608 */
1609 SLIST_REMOVE_HEAD(&sc->sc_tx_free, tx_chain);
1610 SLIST_INSERT_HEAD(&pending, tx, tx_chain);
 1611 		sc->sc_tx_freecnt--;
 		tx->tx_mbuf = m;
1612
1613 /*
1614 * Determine whether we can cram this transmit into an
1615 * existing message frame (if any), or whether we need to
1616 * send a new one.
1617 */
1618 #if IOPL_BATCHING_ENABLED
1619 if (nxmits >= sc->sc_tx_maxreq)
1620 size = UINT_MAX;
1621 else
1622 size = sc->sc_tx_ohead + sc->sc_tx_maxsegs;
1623 #else
1624 size = UINT_MAX;
1625 #endif
1626
 1627 		if (frameleft < 0 || size > (u_int)frameleft) {
1628 if (frameleft >= 0) {
1629 /*
1630 * We have an old message frame to flush.
1631 * Clear the pending list if we send it
1632 * successfully.
1633 */
1634 *lp |= I2O_SGL_END;
1635 if (iop_post(iop, mb) == 0)
1636 SLIST_INIT(&pending);
1637 }
1638
1639 /*
1640 * Prepare a new message frame.
1641 */
1642 mf->msgflags = I2O_MSGFLAGS(i2o_lan_packet_send);
1643 p = (u_int32_t *)(mf + 1);
1644 frameleft = (sizeof(mb) - sizeof(*mf)) >> 2;
1645 nxmits = 0;
1646 }
1647
1648 /*
1649 * Fill the scatter/gather list. The interface may have
1650 * requested that the destination address be passed as part
1651 * of the buffer context.
1652 */
1653 lp = p;
1654
1655 if (sc->sc_tx_ohead > 2) {
1656 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
1657 I2O_SGL_BC_96BIT | I2O_SGL_END_BUFFER;
1658 *p++ = tx->tx_ident;
1659 (*sc->sc_munge)(m, (u_int8_t *)p);
1660 p += 2;
1661 } else {
1662 *p++ = dm->dm_mapsize | I2O_SGL_PAGE_LIST |
1663 I2O_SGL_BC_32BIT | I2O_SGL_END_BUFFER;
1664 *p++ = tx->tx_ident;
1665 }
1666
1667 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1668 slen = ds->ds_len;
1669 saddr = ds->ds_addr;
1670
1671 /* XXX This should be done with a bus_space flag. */
1672 while (slen > 0) {
1673 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1674 tlen = min(eaddr - saddr, slen);
1675 slen -= tlen;
1676 *p++ = le32toh(saddr);
1677 saddr = eaddr;
1678 }
1679 }
1680
1681 frameleft -= (p - lp);
1682 nxmits++;
1683
1684 #if NBPFILTER > 0
1685 /*
1686 * If BPF is enabled on this interface, feed it a copy of
1687 * the packet.
1688 */
1689 if (ifp->if_bpf)
1690 bpf_mtap(ifp->if_bpf, m);
1691 #endif
1692 }
1693
1694 /*
1695 * Flush any waiting message frame. If it's sent successfully, then
1696 * return straight away.
1697 */
1698 if (frameleft >= 0) {
1699 *lp |= I2O_SGL_END;
1700 if (iop_post(iop, mb) == 0)
1701 return;
1702 }
1703
1704 /*
1705 * Free resources for transmits that failed.
1706 */
1707 while ((tx = SLIST_FIRST(&pending)) != NULL) {
1708 SLIST_REMOVE_HEAD(&pending, tx_chain);
1709 SLIST_INSERT_HEAD(&sc->sc_tx_free, tx, tx_chain);
1710 sc->sc_tx_freecnt++;
1711 bus_dmamap_sync(sc->sc_dmat, tx->tx_dmamap, 0,
1712 tx->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1713 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
1714 m_freem(tx->tx_mbuf);
1715 }
1716 }
1717
1718 /*
1719 * Munge an Ethernet address into buffer context.
1720 */
1721 static void
1722 iopl_munge_ether(struct mbuf *m, u_int8_t *dp)
1723 {
1724 struct ether_header *eh;
1725 u_int8_t *sp;
1726 int i;
1727
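	/*
	 * Copy the 6-byte destination address into the buffer context,
	 * padded with two zero bytes to fill the 64-bit field.
	 */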
1728 eh = mtod(m, struct ether_header *);
1729 sp = (u_int8_t *)eh->ether_dhost;
1730 for (i = ETHER_ADDR_LEN; i > 0; i--)
1731 *dp++ = *sp++;
1732 *dp++ = 0;
1733 *dp++ = 0;
1734 }
1735
1736 /*
1737 * Munge an FDDI address into buffer context.
1738 */
1739 static void
1740 iopl_munge_fddi(struct mbuf *m, u_int8_t *dp)
1741 {
1742 struct fddi_header *fh;
1743 u_int8_t *sp;
1744 int i;
1745
1746 fh = mtod(m, struct fddi_header *);
1747 sp = (u_int8_t *)fh->fddi_dhost;
1748 for (i = 6; i > 0; i--)
1749 *dp++ = *sp++;
1750 *dp++ = 0;
1751 *dp++ = 0;
1752 }
1753
1754 /*
1755 * Program the receive filter for an Ethernet interface.
1756 */
1757 static int
1758 iopl_filter_ether(struct iopl_softc *sc)
1759 {
1760 struct ifnet *ifp;
1761 struct ethercom *ec;
1762 struct ether_multi *enm;
1763 u_int64_t *tbl;
1764 int i, rv, size;
1765 struct ether_multistep step;
1766
1767 ec = &sc->sc_if.sci_ec;
1768 ifp = &ec->ec_if;
1769
1770 /*
1771 * If there are more multicast addresses than will fit into the
1772 * filter table, or we fail to allocate memory for the table, then
1773 * enable reception of all multicast packets.
1774 */
1775 if (ec->ec_multicnt > sc->sc_mcast_max)
1776 goto allmulti;
1777
1778 size = sizeof(*tbl) * sc->sc_mcast_max;
1779 if ((tbl = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
1780 goto allmulti;
1781
 1782 	ETHER_FIRST_MULTI(step, ec, enm);
1783 for (i = 0; enm != NULL; i++) {
1784 /*
1785 * For the moment, if a range of multicast addresses was
1786 * specified, then just accept all multicast packets.
1787 */
1788 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1789 free(tbl, M_DEVBUF);
1790 goto allmulti;
1791 }
1792
1793 /*
1794 * Add the address to the table.
1795 */
1796 memset(&tbl[i], 0, sizeof(tbl[i]));
1797 memcpy(&tbl[i], enm->enm_addrlo, ETHER_ADDR_LEN);
1798
1799 ETHER_NEXT_MULTI(step, enm);
1800 }
1801
1802 sc->sc_mcast_cnt = i;
1803 ifp->if_flags &= ~IFF_ALLMULTI;
1804 rv = iopl_filter_generic(sc, tbl);
1805 free(tbl, M_DEVBUF);
 1806 	return (rv);
1807
1808 allmulti:
1809 sc->sc_mcast_cnt = 0;
1810 ifp->if_flags |= IFF_ALLMULTI;
1811 return (iopl_filter_generic(sc, NULL));
1812 }
1813
1814 /*
1815 * Generic receive filter programming.
1816 */
1817 static int
1818 iopl_filter_generic(struct iopl_softc *sc, u_int64_t *tbl)
1819 {
1820 struct iop_softc *iop;
1821 struct ifnet *ifp;
1822 int i, rv;
1823 u_int32_t tmp1;
1824
1825 ifp = &sc->sc_if.sci_if;
1826 iop = (struct iop_softc *)device_parent(&sc->sc_dv);
1827
1828 /*
1829 * Clear out the existing multicast table and set in the new one, if
1830 * any.
1831 */
1832 if (sc->sc_mcast_max != 0) {
1833 iop_table_clear(iop, sc->sc_tid,
1834 I2O_PARAM_LAN_MCAST_MAC_ADDRESS);
1835
1836 for (i = 0; i < sc->sc_mcast_cnt; i++) {
1837 rv = iop_table_add_row(iop, sc->sc_tid,
1838 I2O_PARAM_LAN_MCAST_MAC_ADDRESS,
1839 &tbl[i], sizeof(tbl[i]), i);
1840 if (rv != 0) {
1841 ifp->if_flags |= IFF_ALLMULTI;
1842 break;
1843 }
1844 }
1845 }
1846
1847 /*
1848 * Set the filter mask.
1849 */
1850 if ((ifp->if_flags & IFF_PROMISC) != 0)
1851 tmp1 = I2O_LAN_FILTERMASK_PROMISC_ENABLE;
1852 else {
1853 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1854 tmp1 = I2O_LAN_FILTERMASK_PROMISC_MCAST_ENABLE;
1855 else
1856 tmp1 = 0;
1857
1858 if ((ifp->if_flags & IFF_BROADCAST) == 0)
1859 tmp1 |= I2O_LAN_FILTERMASK_BROADCAST_DISABLE;
1860 }
1861 tmp1 = htole32(tmp1);
1862
1863 return (iop_field_set(iop, sc->sc_tid, I2O_PARAM_LAN_MAC_ADDRESS,
1864 &tmp1, sizeof(tmp1), I2O_PARAM_LAN_MAC_ADDRESS_filtermask));
1865 }
1866
1867 /*
1868 * Handle control operations.
1869 */
1870 static int
1871 iopl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1872 {
1873 struct iopl_softc *sc;
1874 struct ifaddr *ifa;
1875 struct ifreq *ifr;
1876 int s, rv;
1877
1878 ifr = (struct ifreq *)data;
1879 sc = ifp->if_softc;
1880 s = splnet();
1881 rv = 0;
1882
1883 switch (cmd) {
1884 case SIOCSIFMEDIA:
1885 case SIOCGIFMEDIA:
1886 rv = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
1887 goto out;
1888 }
1889
1890 switch (sc->sc_mtype) {
1891 case I2O_LAN_TYPE_ETHERNET:
1892 case I2O_LAN_TYPE_100BASEVG:
1893 rv = ether_ioctl(ifp, cmd, data);
1894 if (rv == ENETRESET) {
1895 /*
1896 * Flags and/or multicast list has changed; need to
1897 * set the hardware filter accordingly.
1898 */
1899 if (ifp->if_flags & IFF_RUNNING)
1900 rv = iopl_filter_ether(sc);
1901 else
1902 rv = 0;
1903 }
1904 break;
1905
1906 case I2O_LAN_TYPE_FDDI:
1907 /*
1908 * XXX This should be shared.
1909 */
1910 switch (cmd) {
1911 case SIOCSIFADDR:
1912 ifa = (struct ifaddr *)data;
1913 ifp->if_flags |= IFF_UP;
1914
1915 switch (ifa->ifa_addr->sa_family) {
1916 #if defined(INET)
1917 case AF_INET:
1918 iopl_init(ifp);
1919 arp_ifinit(ifp, ifa);
1920 break;
1921 #endif /* INET */
1922
1923 default:
1924 iopl_init(ifp);
1925 break;
1926 }
1927 break;
1928
1929 case SIOCGIFADDR:
1930 ifr = (struct ifreq *)data;
1931 memcpy(((struct sockaddr *)&ifr->ifr_data)->sa_data,
1932 LLADDR(ifp->if_sadl), 6);
1933 break;
1934
1935 case SIOCSIFFLAGS:
1936 iopl_init(ifp);
1937 break;
1938
1939 case SIOCADDMULTI:
1940 case SIOCDELMULTI:
1941 ifr = (struct ifreq *)data;
1942 if (cmd == SIOCADDMULTI)
1943 rv = ether_addmulti(ifr, &sc->sc_if.sci_ec);
1944 else
1945 rv = ether_delmulti(ifr, &sc->sc_if.sci_ec);
1946 if (rv == ENETRESET) {
1947 if (ifp->if_flags & IFF_RUNNING)
1948 rv = iopl_filter_ether(sc);
1949 else
1950 rv = 0;
1951 }
1952 break;
1953
1954 case SIOCSIFMTU:
1955 ifr = (struct ifreq *)data;
1956 if (ifr->ifr_mtu > FDDIMTU) {
1957 rv = EINVAL;
1958 break;
1959 }
1960 ifp->if_mtu = ifr->ifr_mtu;
1961 break;
1962
1963 default:
1964 rv = ENOTTY;
1965 break;
1966 }
1967 }
1968
1969 out:
1970 splx(s);
1971 return (rv);
1972 }