sys/dev/ic/i82596.c
1 /* $NetBSD: i82596.c,v 1.47 2022/07/06 15:41:47 andvar Exp $ */
2
3 /*
4 * Copyright (c) 2003 Jochen Kunz.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of Jochen Kunz may not be used to endorse or promote
16 * products derived from this software without specific prior
17 * written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JOCHEN KUNZ
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Driver for the Intel i82596CA and i82596DX/SX 10MBit/s Ethernet chips.
34 *
35  * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old
36  * i82586 ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596
37  * to i82586 compatibility mode.
38 *
39 * Documentation about these chips can be found at
40 *
41 * http://developer.intel.com/design/network/datashts/290218.htm
42 * http://developer.intel.com/design/network/datashts/290219.htm
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.47 2022/07/06 15:41:47 andvar Exp $");
47
48 /* autoconfig and device stuff */
49 #include <sys/param.h>
50 #include <sys/device.h>
51 #include <sys/conf.h>
52 #include "locators.h"
53 #include "ioconf.h"
54
55 /* bus_space / bus_dma etc. */
56 #include <sys/bus.h>
57 #include <sys/intr.h>
58
59 /* general system data and functions */
60 #include <sys/systm.h>
61 #include <sys/ioctl.h>
62
63 /* tsleep / sleep / wakeup */
64 #include <sys/proc.h>
65 /* hz for above */
66 #include <sys/kernel.h>
67
68 /* network stuff */
69 #include <net/if.h>
70 #include <net/if_dl.h>
71 #include <net/if_media.h>
72 #include <net/if_ether.h>
73 #include <net/bpf.h>
74 #include <sys/socket.h>
75 #include <sys/mbuf.h>
76
77 #include <dev/ic/i82596reg.h>
78 #include <dev/ic/i82596var.h>
79
80 /* Supported chip variants */
81 const char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };
82
83 /* media change and status callback */
84 static int iee_mediachange(struct ifnet *);
85 static void iee_mediastatus(struct ifnet *, struct ifmediareq *);
86
87 /* interface routines to upper protocols */
88 static void iee_start(struct ifnet *); /* initiate output */
89 static int iee_ioctl(struct ifnet *, u_long, void *); /* ioctl routine */
90 static int iee_init(struct ifnet *); /* init routine */
91 static void iee_stop(struct ifnet *, int); /* stop routine */
92 static void iee_watchdog(struct ifnet *); /* timer routine */
93
94 /* internal helper functions */
95 static void iee_cb_setup(struct iee_softc *, uint32_t);
96
97 /*
98  * Things an MD frontend has to provide:
99 *
100 * The functions via function pointers in the softc:
101 * int (*sc_iee_cmd)(struct iee_softc *sc, uint32_t cmd);
102 * int (*sc_iee_reset)(struct iee_softc *sc);
103 * void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
104 * int (*sc_mediachange)(struct ifnet *);
105 *
106  * sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
107  * to the SCB cmd word and issuing a Channel Attention.
108  * sc_iee_reset(): initiate a reset, supply the address of the SCP to the
109  * chip, wait for the chip to initialize and ACK any interrupts that
110  * this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK).
111  * These functions must carefully bus_dmamap_sync() all data they touch!
112 *
113  * sc_mediastatus() and sc_mediachange() are just MD hooks to the
114  * corresponding MI functions. The MD frontend may set these pointers to
115  * NULL when they are not needed.
116 *
117 * sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
118 * This is for printing out the correct chip type at attach time only. The
119 * MI backend doesn't distinguish different chip types when programming
120 * the chip.
121 *
122  * IEE_NEED_SWAP in sc->sc_flags has to be cleared on little endian hardware
123  * and set on big endian hardware when the endianness conversion is not done
124  * by the bus attachment but by the i82596 chip itself.
125  * Usually you need to set IEE_NEED_SWAP on big endian machines
126  * where the hardware (the LE/~BE pin) is configured for BE mode.
127 *
128  * If the chip is configured for BE mode, all 8 bit (byte) and 16 bit (word)
129  * entities can be written in big endian. But the Rev A chip doesn't support
130 * 32 bit (dword) entities with big endian byte ordering, so we have to
131 * treat all 32 bit (dword) entities as two 16 bit big endian entities.
132 * Rev B and C chips support big endian byte ordering for 32 bit entities,
133 * and this new feature is enabled by IEE_SYSBUS_BE in the sysbus byte.
134 *
135 * With the IEE_SYSBUS_BE feature, all 32 bit address pointers are
136 * treated as true 32 bit entities but the SCB absolute address and
137 * statistical counters are still treated as two 16 bit big endian entities,
138 * so we have to always swap high and low words for these entities.
139 * IEE_SWAP32() should be used for the SCB address and statistical counters,
140 * and IEE_SWAPA32() should be used for other 32 bit pointers in the shmem.
141 *
142  * The IEE_REV_A flag must be set in sc->sc_flags if the IEE_SYSBUS_BE feature
143  * has to stay disabled, even on big endian machines, because the backend has an old Rev A chip.
144 *
145  * sc->sc_cl_align must be set to 1 or to the cache line size. When set to
146  * 1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
147  * it forces alignment of the data structures in the shared memory to a
148  * multiple of sc->sc_cl_align. This is needed on some hppa machines whose
149  * caches are not DMA I/O coherent and that are unable to map the shared
150  * memory uncacheable. (At least pre-PA7100LC CPUs cannot map memory uncacheable.)
151 *
152  * The MD frontend also has to set sc->sc_cl_align and sc->sc_sysbus
153  * before calling the MI iee_attach(), which allocates and sets up the
154  * shared DMA memory. All communication with the chip is done via this
155  * shared memory, which is mapped with BUS_DMA_COHERENT so it will be
156  * uncached if possible on archs whose caches are not DMA I/O coherent.
157  * The base of the memory needs to be aligned to an even address
158  * if sc->sc_cl_align == 1 and aligned to a cache line if sc->sc_cl_align != 1.
159  * The offset of each descriptor is calculated in iee_attach() to handle this.
160 *
161 * An interrupt with iee_intr() as handler must be established.
162 *
163  * Call void iee_attach(struct iee_softc *sc, uint8_t *ether_address,
164  * int *media, int nmedia, int defmedia); when everything is set up. The
165  * first parameter is a pointer to the MI softc; ether_address is an array
166  * that contains the Ethernet address. media is an array of the media types
167  * provided by the hardware. The members of this array are supplied to
168  * ifmedia_add() in sequence. nmedia is the count of elements in media.
169  * defmedia is the default media that is set via ifmedia_set().
170  * nmedia and defmedia are ignored when media == NULL.
171 *
172  * The MD frontend may call iee_detach() to detach the device.
173 *
174 * See sys/arch/hppa/gsc/if_iee_gsc.c for an example.
175 */
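
/*
 * Editor's illustration: a minimal, hypothetical MD frontend that satisfies
 * the contract above.  "myiee" and the register offsets (MYIEE_RESET,
 * MYIEE_PORT, MYIEE_CA) are made-up placeholders; only the softc hooks, the
 * flags and the iee_attach() call are taken from this driver.  See
 * sys/arch/hppa/gsc/if_iee_gsc.c for a real frontend.
 */
#ifdef notdef
struct myiee_softc {
	struct iee_softc sc_iee;	/* MI state; must come first */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
};

static int
myiee_cmd(struct iee_softc *sc, uint32_t cmd)
{
	struct myiee_softc *msc = (struct myiee_softc *)sc;

	/* Write the command to the SCB and issue a Channel Attention. */
	SC_SCB(sc)->scb_cmd = cmd;
	IEE_SCBSYNC(sc, BUS_DMASYNC_PREWRITE);
	bus_space_write_4(msc->sc_iot, msc->sc_ioh, MYIEE_CA, 0);
	return 0;
}

static int
myiee_reset(struct iee_softc *sc)
{
	struct myiee_softc *msc = (struct myiee_softc *)sc;

	/* Hardware reset, then hand the chip the physical SCP address. */
	bus_space_write_4(msc->sc_iot, msc->sc_ioh, MYIEE_RESET, 0);
	bus_space_write_4(msc->sc_iot, msc->sc_ioh, MYIEE_PORT,
	    IEE_PHYS_SHMEM(sc->sc_scp_off));
	delay(1000);				/* let the chip initialize */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);	/* ACK the reset interrupt */
	return 0;
}

static void
myiee_attach(device_t parent, device_t self, void *aux)
{
	struct myiee_softc *msc = device_private(self);
	struct iee_softc *sc = &msc->sc_iee;
	uint8_t eth_addr[ETHER_ADDR_LEN];

	sc->sc_iee_cmd = myiee_cmd;
	sc->sc_iee_reset = myiee_reset;
	sc->sc_mediastatus = NULL;	/* no MD media handling */
	sc->sc_mediachange = NULL;
	sc->sc_type = I82596_CA;
	sc->sc_cl_align = 1;		/* caches are DMA I/O coherent */
#if BYTE_ORDER == BIG_ENDIAN
	sc->sc_flags |= IEE_NEED_SWAP;	/* LE/~BE pin wired for BE mode */
#endif
	/* ... map registers, establish iee_intr(), read eth_addr ... */
	iee_attach(sc, eth_addr, NULL, 0, 0);
}
#endif /* notdef */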
176
177
178 /*
179 * How frame reception is done:
180 * Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
181 * Each RBD points to the data area of an mbuf cluster. The RFDs are linked
182 * together in a circular list. sc->sc_rx_done is the count of RFDs in the
183 * list already processed / the number of the RFD that has to be checked for
184 * a new frame first at the next RX interrupt. Upon successful reception of
185  * a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
186  * cluster is allocated and the RFD / RBD are reinitialized accordingly.
187  *
188  * When an RFD list overrun occurs, the whole RFD and RBD lists are
189  * reinitialized and frame reception is started again.
190 */
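/*
 * Editor's sketch of the ring walk described above, reduced to its index
 * arithmetic; iee_intr() below is the full version with all of the
 * bus_dmamap_sync() calls and the mbuf handling.
 */
#ifdef notdef
static void
iee_rx_walk_sketch(struct iee_softc *sc)
{
	struct iee_rfd *rfd;

	for (;;) {
		rfd = SC_RFD(sc, sc->sc_rx_done);
		if ((rfd->rfd_status & IEE_RFD_C) == 0)
			break;		/* no completed frame yet */
		/* ... hand the full cluster up, attach a fresh one ... */
		rfd->rfd_status = 0;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
	}
}
#endif /* notdef */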
191 int
192 iee_intr(void *intarg)
193 {
194 struct iee_softc *sc = intarg;
195 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
196 struct iee_rfd *rfd;
197 struct iee_rbd *rbd;
198 bus_dmamap_t rx_map;
199 struct mbuf *rx_mbuf;
200 struct mbuf *new_mbuf;
201 int scb_status;
202 int scb_cmd;
203 int n, col;
204 uint16_t status, count, cmd;
205
206 if ((ifp->if_flags & IFF_RUNNING) == 0) {
207 (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
208 return 1;
209 }
210 IEE_SCBSYNC(sc, BUS_DMASYNC_POSTREAD);
211 scb_status = SC_SCB(sc)->scb_status;
212 scb_cmd = SC_SCB(sc)->scb_cmd;
213 for (;;) {
214 rfd = SC_RFD(sc, sc->sc_rx_done);
215 IEE_RFDSYNC(sc, sc->sc_rx_done,
216 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
217 status = rfd->rfd_status;
218 if ((status & IEE_RFD_C) == 0) {
219 IEE_RFDSYNC(sc, sc->sc_rx_done, BUS_DMASYNC_PREREAD);
220 break;
221 }
222 rfd->rfd_status = 0;
223 IEE_RFDSYNC(sc, sc->sc_rx_done,
224 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
225
226 /* At least one packet was received. */
227 rx_map = sc->sc_rx_map[sc->sc_rx_done];
228 rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
229 IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
230 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
231 SC_RBD(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
232 &= ~IEE_RBD_EL;
233 IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
234 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
235 rbd = SC_RBD(sc, sc->sc_rx_done);
236 IEE_RBDSYNC(sc, sc->sc_rx_done,
237 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
238 count = rbd->rbd_count;
239 if ((status & IEE_RFD_OK) == 0
240 || (count & IEE_RBD_EOF) == 0
241 || (count & IEE_RBD_F) == 0){
242 /* Receive error, skip frame and reuse buffer. */
243 rbd->rbd_count = 0;
244 rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
245 IEE_RBDSYNC(sc, sc->sc_rx_done,
246 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
247 printf("%s: iee_intr: receive error %d, rfd_status="
248 "0x%.4x, rfd_count=0x%.4x\n",
249 device_xname(sc->sc_dev),
250 ++sc->sc_rx_err, status, count);
251 sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
252 continue;
253 }
254 bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_map->dm_mapsize,
255 BUS_DMASYNC_POSTREAD);
256 rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
257 count & IEE_RBD_COUNT;
258 m_set_rcvif(rx_mbuf, ifp);
259 MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
260 if (new_mbuf == NULL) {
261 printf("%s: iee_intr: can't allocate mbuf\n",
262 device_xname(sc->sc_dev));
263 break;
264 }
265 MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
266 MCLGET(new_mbuf, M_DONTWAIT);
267 if ((new_mbuf->m_flags & M_EXT) == 0) {
268 printf("%s: iee_intr: can't alloc mbuf cluster\n",
269 device_xname(sc->sc_dev));
270 m_freem(new_mbuf);
271 break;
272 }
273 bus_dmamap_unload(sc->sc_dmat, rx_map);
274 new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES - 2;
275 new_mbuf->m_data += 2;
276 if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_map,
277 new_mbuf, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
278 panic("%s: iee_intr: can't load RX DMA map\n",
279 device_xname(sc->sc_dev));
280 bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
281 rx_map->dm_mapsize, BUS_DMASYNC_PREREAD);
282 if_percpuq_enqueue(ifp->if_percpuq, rx_mbuf);
283 sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
284 rbd->rbd_count = 0;
285 rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
286 rbd->rbd_rb_addr = IEE_SWAPA32(rx_map->dm_segs[0].ds_addr);
287 IEE_RBDSYNC(sc, sc->sc_rx_done,
288 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
289 sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
290 }
291 if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
292 || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
293 || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
294 /* Receive Overrun, reinit receive ring buffer. */
295 for (n = 0 ; n < IEE_NRFD ; n++) {
296 rfd = SC_RFD(sc, n);
297 rbd = SC_RBD(sc, n);
298 rfd->rfd_cmd = IEE_RFD_SF;
299 rfd->rfd_link_addr =
300 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
301 + sc->sc_rfd_sz * ((n + 1) % IEE_NRFD)));
302 rbd->rbd_next_rbd =
303 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
304 + sc->sc_rbd_sz * ((n + 1) % IEE_NRFD)));
305 rbd->rbd_size = IEE_RBD_EL |
306 sc->sc_rx_map[n]->dm_segs[0].ds_len;
307 rbd->rbd_rb_addr =
308 IEE_SWAPA32(sc->sc_rx_map[n]->dm_segs[0].ds_addr);
309 }
310 SC_RFD(sc, 0)->rfd_rbd_addr =
311 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
312 sc->sc_rx_done = 0;
313 bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, sc->sc_rfd_off,
314 sc->sc_rfd_sz * IEE_NRFD + sc->sc_rbd_sz * IEE_NRFD,
315 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
316 (sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
317 printf("%s: iee_intr: receive ring buffer overrun\n",
318 device_xname(sc->sc_dev));
319 }
320
321 if (sc->sc_next_cb != 0) {
322 IEE_CBSYNC(sc, sc->sc_next_cb - 1,
323 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
324 status = SC_CB(sc, sc->sc_next_cb - 1)->cb_status;
325 IEE_CBSYNC(sc, sc->sc_next_cb - 1,
326 BUS_DMASYNC_PREREAD);
327 if ((status & IEE_CB_C) != 0) {
328 /* CMD list finished */
329 ifp->if_timer = 0;
330 if (sc->sc_next_tbd != 0) {
331 /* A TX CMD list finished, cleanup */
332 for (n = 0 ; n < sc->sc_next_cb ; n++) {
333 bus_dmamap_unload(sc->sc_dmat,
334 sc->sc_tx_map[n]);
335 m_freem(sc->sc_tx_mbuf[n]);
336 sc->sc_tx_mbuf[n] = NULL;
337 IEE_CBSYNC(sc, n,
338 BUS_DMASYNC_POSTREAD |
339 BUS_DMASYNC_POSTWRITE);
340 status = SC_CB(sc, n)->cb_status;
341 IEE_CBSYNC(sc, n,
342 BUS_DMASYNC_PREREAD);
343 if ((status & IEE_CB_COL) != 0 &&
344 (status & IEE_CB_MAXCOL) == 0)
345 col = 16;
346 else
347 col = status
348 & IEE_CB_MAXCOL;
349 sc->sc_tx_col += col;
350 if ((status & IEE_CB_OK) != 0) {
351 if_statadd2(ifp,
352 if_opackets, 1,
353 if_collisions, col);
354 }
355 }
356 sc->sc_next_tbd = 0;
357 ifp->if_flags &= ~IFF_OACTIVE;
358 }
359 for (n = 0 ; n < sc->sc_next_cb; n++) {
360 /*
361 * Check if a CMD failed, but ignore TX errors.
362 */
363 IEE_CBSYNC(sc, n, BUS_DMASYNC_POSTREAD |
364 BUS_DMASYNC_POSTWRITE);
365 cmd = SC_CB(sc, n)->cb_cmd;
366 status = SC_CB(sc, n)->cb_status;
367 IEE_CBSYNC(sc, n, BUS_DMASYNC_PREREAD);
368 if ((cmd & IEE_CB_CMD) != IEE_CB_CMD_TR &&
369 (status & IEE_CB_OK) == 0)
370 printf("%s: iee_intr: scb_status=0x%x "
371 "scb_cmd=0x%x failed command %d: "
372 "cb_status[%d]=0x%.4x "
373 "cb_cmd[%d]=0x%.4x\n",
374 device_xname(sc->sc_dev),
375 scb_status, scb_cmd,
376 ++sc->sc_cmd_err,
377 n, status, n, cmd);
378 }
379 sc->sc_next_cb = 0;
380 if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
381 iee_cb_setup(sc, IEE_CB_CMD_MCS |
382 IEE_CB_S | IEE_CB_EL | IEE_CB_I);
383 (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
384 } else
385 /* Try to get deferred packets going. */
386 if_schedule_deferred_start(ifp);
387 }
388 }
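	/* Report SCB error counters that changed since the last interrupt. */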
389 if (IEE_SWAP32(SC_SCB(sc)->scb_crc_err) != sc->sc_crc_err) {
390 sc->sc_crc_err = IEE_SWAP32(SC_SCB(sc)->scb_crc_err);
391 printf("%s: iee_intr: crc_err=%d\n", device_xname(sc->sc_dev),
392 sc->sc_crc_err);
393 }
394 if (IEE_SWAP32(SC_SCB(sc)->scb_align_err) != sc->sc_align_err) {
395 sc->sc_align_err = IEE_SWAP32(SC_SCB(sc)->scb_align_err);
396 printf("%s: iee_intr: align_err=%d\n",
397 device_xname(sc->sc_dev), sc->sc_align_err);
398 }
399 if (IEE_SWAP32(SC_SCB(sc)->scb_resource_err) != sc->sc_resource_err) {
400 sc->sc_resource_err = IEE_SWAP32(SC_SCB(sc)->scb_resource_err);
401 printf("%s: iee_intr: resource_err=%d\n",
402 device_xname(sc->sc_dev), sc->sc_resource_err);
403 }
404 if (IEE_SWAP32(SC_SCB(sc)->scb_overrun_err) != sc->sc_overrun_err) {
405 sc->sc_overrun_err = IEE_SWAP32(SC_SCB(sc)->scb_overrun_err);
406 printf("%s: iee_intr: overrun_err=%d\n",
407 device_xname(sc->sc_dev), sc->sc_overrun_err);
408 }
409 if (IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
410 sc->sc_rcvcdt_err = IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err);
411 printf("%s: iee_intr: rcvcdt_err=%d\n",
412 device_xname(sc->sc_dev), sc->sc_rcvcdt_err);
413 }
414 if (IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err) != sc->sc_short_fr_err) {
415 sc->sc_short_fr_err = IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err);
416 printf("%s: iee_intr: short_fr_err=%d\n",
417 device_xname(sc->sc_dev), sc->sc_short_fr_err);
418 }
419 IEE_SCBSYNC(sc, BUS_DMASYNC_PREREAD);
420 (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
421 return 1;
422 }
423
424
425
426 /*
427 * How Command Block List Processing is done.
428 *
429 * A running CBL is never manipulated. If there is a CBL already running,
430 * further CMDs are deferred until the current list is done. A new list is
431  * set up when the old one has finished.
432 * This eases programming. To manipulate a running CBL it is necessary to
433 * suspend the Command Unit to avoid race conditions. After a suspend
434 * is sent we have to wait for an interrupt that ACKs the suspend. Then
435 * we can manipulate the CBL and resume operation. I am not sure that this
436 * is more effective than the current, much simpler approach. => KISS
437 * See i82596CA data sheet page 26.
438 *
439 * A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).
440 *
441 * A CBL may consist of TX CMDs, and _only_ TX CMDs.
442 * A TX CBL is running or on the way to be set up when
443 * ((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).
444 *
445 * A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
446 * non-TX CMDs.
447 *
448  * This comes mostly from the way an Ethernet driver works and
449  * because running CBLs are not manipulated while they are in flight. If
450  * if_start() is called there will be TX CMDs enqueued, so we have a running
451  * CBL, and other CMDs from e.g. if_ioctl() will be deferred, and vice versa.
452  *
453  * The Multicast Setup Command is special. An MCS needs more space than
454  * a single CB has. The actual space requirement depends on the length of the
455  * multicast list. So we always defer an MCS until other CBLs are finished,
456  * then we set up a CONF CMD in the first CB. The CONF CMD is needed to
457  * turn ALLMULTI on or off in the hardware. The MCS is the 2nd CB and may
458  * use all the remaining space in the CBL and the Transmit Buffer Descriptor
459  * List. (Therefore the CBL and TBDL must be contiguous in physical and virtual
460  * memory. This is guaranteed through the definitions of the list offsets
461  * in i82596reg.h and because only a single DMA segment is used for all
462  * lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
463  * a multicast list length of 0, thus disabling the multicast filter.
464  * A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0).
465 */
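/*
 * Editor's sketch: the space bound applied to the multicast list, written
 * out as a predicate.  iee_mcs_fits() is hypothetical and not part of the
 * driver; the expression mirrors the check in iee_cb_setup() below.
 */
#ifdef notdef
static int
iee_mcs_fits(struct iee_softc *sc, uint16_t mc_size)
{
	/* CBL and TBDL form one contiguous region; two CBs are occupied. */
	return mc_size * ETHER_ADDR_LEN + 2 * sc->sc_cb_sz <=
	    sc->sc_cb_sz * IEE_NCB + sc->sc_tbd_sz * IEE_NTBD * IEE_NCB;
}
#endif /* notdef */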
466 void
467 iee_cb_setup(struct iee_softc *sc, uint32_t cmd)
468 {
469 struct iee_cb *cb = SC_CB(sc, sc->sc_next_cb);
470 struct ethercom *ec = &sc->sc_ethercom;
471 struct ifnet *ifp = &ec->ec_if;
472 struct ether_multistep step;
473 struct ether_multi *enm;
474
475 memset(cb, 0, sc->sc_cb_sz);
476 cb->cb_cmd = cmd;
477 switch (cmd & IEE_CB_CMD) {
478 case IEE_CB_CMD_NOP: /* NOP CMD */
479 break;
480 case IEE_CB_CMD_IAS: /* Individual Address Setup */
481 memcpy(__UNVOLATILE(cb->cb_ind_addr), CLLADDR(ifp->if_sadl),
482 ETHER_ADDR_LEN);
483 break;
484 case IEE_CB_CMD_CONF: /* Configure */
485 memcpy(__UNVOLATILE(cb->cb_cf), sc->sc_cf, sc->sc_cf[0]
486 & IEE_CF_0_CNT_M);
487 break;
488 case IEE_CB_CMD_MCS: /* Multicast Setup */
489 if (sc->sc_next_cb != 0) {
490 sc->sc_flags |= IEE_WANT_MCAST;
491 return;
492 }
493 sc->sc_flags &= ~IEE_WANT_MCAST;
494 if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
495 /* Need no multicast filter in promisc mode. */
496 iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
497 | IEE_CB_I);
498 return;
499 }
500 		/* Leave room for a CONF CMD to enable/disable ALLMULTI mode. */
501 cb = SC_CB(sc, sc->sc_next_cb + 1);
502 cb->cb_cmd = cmd;
503 cb->cb_mcast.mc_size = 0;
504 ETHER_LOCK(ec);
505 ETHER_FIRST_MULTI(step, ec, enm);
506 while (enm != NULL) {
507 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
508 ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
509 * ETHER_ADDR_LEN + 2 * sc->sc_cb_sz >
510 sc->sc_cb_sz * IEE_NCB +
511 sc->sc_tbd_sz * IEE_NTBD * IEE_NCB) {
512 cb->cb_mcast.mc_size = 0;
513 break;
514 }
515 memcpy(__UNVOLATILE(&cb->cb_mcast.mc_addrs[
516 cb->cb_mcast.mc_size]),
517 enm->enm_addrlo, ETHER_ADDR_LEN);
518 ETHER_NEXT_MULTI(step, enm);
519 cb->cb_mcast.mc_size += ETHER_ADDR_LEN;
520 }
521 ETHER_UNLOCK(ec);
522 if (cb->cb_mcast.mc_size == 0) {
523 /* Can't do exact mcast filtering, do ALLMULTI mode. */
524 ifp->if_flags |= IFF_ALLMULTI;
525 sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
526 } else {
527 /* disable ALLMULTI and load mcast list */
528 ifp->if_flags &= ~IFF_ALLMULTI;
529 sc->sc_cf[11] |= IEE_CF_11_MCALL;
530 /* Mcast setup may need more than sc->sc_cb_sz bytes. */
531 bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
532 sc->sc_cb_off,
533 sc->sc_cb_sz * IEE_NCB +
534 sc->sc_tbd_sz * IEE_NTBD * IEE_NCB,
535 BUS_DMASYNC_PREWRITE);
536 }
537 iee_cb_setup(sc, IEE_CB_CMD_CONF);
538 break;
539 case IEE_CB_CMD_TR: /* Transmit */
540 cb->cb_transmit.tx_tbd_addr =
541 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off
542 + sc->sc_tbd_sz * sc->sc_next_tbd));
543 cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
544 break;
545 case IEE_CB_CMD_TDR: /* Time Domain Reflectometry */
546 break;
547 case IEE_CB_CMD_DUMP: /* Dump */
548 break;
549 case IEE_CB_CMD_DIAG: /* Diagnose */
550 break;
551 default:
552 /* can't happen */
553 break;
554 }
555 cb->cb_link_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off +
556 sc->sc_cb_sz * (sc->sc_next_cb + 1)));
557 IEE_CBSYNC(sc, sc->sc_next_cb,
558 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
559 sc->sc_next_cb++;
560 ifp->if_timer = 5;
561 }
562
563
564
565 void
566 iee_attach(struct iee_softc *sc, uint8_t *eth_addr, int *media, int nmedia,
567 int defmedia)
568 {
569 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
570 int n;
571
572 KASSERT(sc->sc_cl_align > 0 && powerof2(sc->sc_cl_align));
573
574 /*
575 * Calculate DMA descriptor offsets and sizes in shmem
576 * which should be cache line aligned.
577 */
578 sc->sc_scp_off = 0;
579 sc->sc_scp_sz = roundup2(sizeof(struct iee_scp), sc->sc_cl_align);
580 sc->sc_iscp_off = sc->sc_scp_sz;
581 sc->sc_iscp_sz = roundup2(sizeof(struct iee_iscp), sc->sc_cl_align);
582 sc->sc_scb_off = sc->sc_iscp_off + sc->sc_iscp_sz;
583 sc->sc_scb_sz = roundup2(sizeof(struct iee_scb), sc->sc_cl_align);
584 sc->sc_rfd_off = sc->sc_scb_off + sc->sc_scb_sz;
585 sc->sc_rfd_sz = roundup2(sizeof(struct iee_rfd), sc->sc_cl_align);
586 sc->sc_rbd_off = sc->sc_rfd_off + sc->sc_rfd_sz * IEE_NRFD;
587 sc->sc_rbd_sz = roundup2(sizeof(struct iee_rbd), sc->sc_cl_align);
588 sc->sc_cb_off = sc->sc_rbd_off + sc->sc_rbd_sz * IEE_NRFD;
589 sc->sc_cb_sz = roundup2(sizeof(struct iee_cb), sc->sc_cl_align);
590 sc->sc_tbd_off = sc->sc_cb_off + sc->sc_cb_sz * IEE_NCB;
591 sc->sc_tbd_sz = roundup2(sizeof(struct iee_tbd), sc->sc_cl_align);
592 sc->sc_shmem_sz = sc->sc_tbd_off + sc->sc_tbd_sz * IEE_NTBD * IEE_NCB;
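	/*
	 * Editor's note: the resulting layout is, in ascending offsets,
	 * SCP | ISCP | SCB | IEE_NRFD RFDs | IEE_NRFD RBDs | IEE_NCB CBs |
	 * IEE_NCB * IEE_NTBD TBDs, each element padded to sc->sc_cl_align.
	 */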
593
594 /* allocate memory for shared DMA descriptors */
595 if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_shmem_sz, PAGE_SIZE, 0,
596 &sc->sc_dma_segs, 1, &sc->sc_dma_rsegs, BUS_DMA_NOWAIT) != 0) {
597 aprint_error(": can't allocate %d bytes of DMA memory\n",
598 sc->sc_shmem_sz);
599 return;
600 }
601 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs,
602 sc->sc_shmem_sz, (void **)&sc->sc_shmem_addr,
603 BUS_DMA_COHERENT | BUS_DMA_NOWAIT) != 0) {
604 aprint_error(": can't map DMA memory\n");
605 bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
606 sc->sc_dma_rsegs);
607 return;
608 }
609 if (bus_dmamap_create(sc->sc_dmat, sc->sc_shmem_sz, sc->sc_dma_rsegs,
610 sc->sc_shmem_sz, 0, BUS_DMA_NOWAIT, &sc->sc_shmem_map) != 0) {
611 aprint_error(": can't create DMA map\n");
612 bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
613 sc->sc_shmem_sz);
614 bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
615 sc->sc_dma_rsegs);
616 return;
617 }
618 if (bus_dmamap_load(sc->sc_dmat, sc->sc_shmem_map, sc->sc_shmem_addr,
619 sc->sc_shmem_sz, NULL, BUS_DMA_NOWAIT) != 0) {
620 aprint_error(": can't load DMA map\n");
621 bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
622 bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr,
623 sc->sc_shmem_sz);
624 bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs,
625 sc->sc_dma_rsegs);
626 return;
627 }
628 memset(sc->sc_shmem_addr, 0, sc->sc_shmem_sz);
629
630 /*
631 * Set pointer to Intermediate System Configuration Pointer.
632 * Phys. addr. in big endian order. (Big endian as defined by Intel.)
633 */
634 SC_SCP(sc)->scp_iscp_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_iscp_off));
635 SC_SCP(sc)->scp_sysbus = sc->sc_sysbus;
636 /*
637 * Set pointer to System Control Block.
638 * Phys. addr. in big endian order. (Big endian as defined by Intel.)
639 */
640 SC_ISCP(sc)->iscp_scb_addr = IEE_SWAP32(IEE_PHYS_SHMEM(sc->sc_scb_off));
641 /* Set pointer to Receive Frame Area. (physical address) */
642 SC_SCB(sc)->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off));
643 /* Set pointer to Command Block. (physical address) */
644 SC_SCB(sc)->scb_cmd_blk_addr =
645 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_cb_off));
646
647 bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, sc->sc_shmem_sz,
648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
649
650 /* Initialize ifmedia structures. */
651 sc->sc_ethercom.ec_ifmedia = &sc->sc_ifmedia;
652 ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
653 if (media != NULL) {
654 for (n = 0 ; n < nmedia ; n++)
655 ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
656 ifmedia_set(&sc->sc_ifmedia, defmedia);
657 } else {
658 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
659 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
660 }
661
662 ifp->if_softc = sc;
663 strcpy(ifp->if_xname, device_xname(sc->sc_dev));
664 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
665 ifp->if_start = iee_start; /* initiate output routine */
666 ifp->if_ioctl = iee_ioctl; /* ioctl routine */
667 ifp->if_init = iee_init; /* init routine */
668 ifp->if_stop = iee_stop; /* stop routine */
669 ifp->if_watchdog = iee_watchdog; /* timer routine */
670 IFQ_SET_READY(&ifp->if_snd);
671 /* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
672 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
673
674 if_attach(ifp);
675 if_deferred_start_init(ifp, NULL);
676 ether_ifattach(ifp, eth_addr);
677
678 aprint_normal(": Intel 82596%s address %s\n",
679 i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));
680
681 for (n = 0 ; n < IEE_NCB ; n++)
682 sc->sc_tx_map[n] = NULL;
683 for (n = 0 ; n < IEE_NRFD ; n++) {
684 sc->sc_rx_mbuf[n] = NULL;
685 sc->sc_rx_map[n] = NULL;
686 }
687 sc->sc_tx_timeout = 0;
688 sc->sc_setup_timeout = 0;
689 (sc->sc_iee_reset)(sc);
690 }
691
692
693 void
694 iee_detach(struct iee_softc *sc, int flags)
695 {
696 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
697
698 if ((ifp->if_flags & IFF_RUNNING) != 0)
699 iee_stop(ifp, 1);
700 ether_ifdetach(ifp);
701 if_detach(ifp);
702 ifmedia_fini(&sc->sc_ifmedia);
703 bus_dmamap_unload(sc->sc_dmat, sc->sc_shmem_map);
704 bus_dmamap_destroy(sc->sc_dmat, sc->sc_shmem_map);
705 bus_dmamem_unmap(sc->sc_dmat, sc->sc_shmem_addr, sc->sc_shmem_sz);
706 bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_segs, sc->sc_dma_rsegs);
707 }
708
709
710 /* Media change and status callback */
711 int
712 iee_mediachange(struct ifnet *ifp)
713 {
714 struct iee_softc *sc = ifp->if_softc;
715
716 if (sc->sc_mediachange != NULL)
717 return (sc->sc_mediachange)(ifp);
718 return 0;
719 }
720
721
722 void
723 iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
724 {
725 struct iee_softc *sc = ifp->if_softc;
726
727 if (sc->sc_mediastatus != NULL)
728 (sc->sc_mediastatus)(ifp, ifmreq);
729 }
730
731
732 /* Initiate output routine */
733 void
734 iee_start(struct ifnet *ifp)
735 {
736 struct iee_softc *sc = ifp->if_softc;
737 struct mbuf *m = NULL;
738 struct iee_tbd *tbd;
739 int t;
740 int n;
741
742 if (sc->sc_next_cb != 0)
743 /* There is already a CMD running. Defer packet enqueuing. */
744 return;
745 for (t = 0 ; t < IEE_NCB ; t++) {
746 IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
747 if (sc->sc_tx_mbuf[t] == NULL)
748 break;
749 if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
750 sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
751 /*
752 			 * The packet needs more TBDs than we support.
753 * Copy the packet into a mbuf cluster to get it out.
754 */
755 printf("%s: iee_start: failed to load DMA map\n",
756 device_xname(sc->sc_dev));
757 MGETHDR(m, M_DONTWAIT, MT_DATA);
758 if (m == NULL) {
759 printf("%s: iee_start: can't allocate mbuf\n",
760 device_xname(sc->sc_dev));
761 m_freem(sc->sc_tx_mbuf[t]);
762 sc->sc_tx_mbuf[t] = NULL;
763 t--;
764 continue;
765 }
766 			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
767 MCLGET(m, M_DONTWAIT);
768 if ((m->m_flags & M_EXT) == 0) {
769 printf("%s: iee_start: can't allocate mbuf "
770 "cluster\n", device_xname(sc->sc_dev));
771 m_freem(sc->sc_tx_mbuf[t]);
772 sc->sc_tx_mbuf[t] = NULL;
773 m_freem(m);
774 t--;
775 continue;
776 }
777 m_copydata(sc->sc_tx_mbuf[t], 0,
778 sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
779 m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
780 m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
781 m_freem(sc->sc_tx_mbuf[t]);
782 sc->sc_tx_mbuf[t] = m;
783 if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
784 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
785 printf("%s: iee_start: can't load TX DMA map\n",
786 device_xname(sc->sc_dev));
787 m_freem(sc->sc_tx_mbuf[t]);
788 sc->sc_tx_mbuf[t] = NULL;
789 t--;
790 continue;
791 }
792 }
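		/* Chain one TBD per DMA segment of this packet. */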
793 for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
794 tbd = SC_TBD(sc, sc->sc_next_tbd + n);
795 tbd->tbd_tb_addr =
796 IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
797 tbd->tbd_size =
798 sc->sc_tx_map[t]->dm_segs[n].ds_len;
799 tbd->tbd_link_addr =
800 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
801 sc->sc_tbd_sz * (sc->sc_next_tbd + n + 1)));
802 }
803 SC_TBD(sc, sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
804 bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
805 sc->sc_tbd_off + sc->sc_next_tbd * sc->sc_tbd_sz,
806 sc->sc_tbd_sz * sc->sc_tx_map[t]->dm_nsegs,
807 BUS_DMASYNC_PREWRITE);
808 bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
809 sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
810 IFQ_POLL(&ifp->if_snd, m);
811 if (m == NULL)
812 iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
813 | IEE_CB_I);
814 else
815 iee_cb_setup(sc, IEE_CB_CMD_TR);
816 sc->sc_next_tbd += n;
817 /* Pass packet to bpf if someone listens. */
818 bpf_mtap(ifp, sc->sc_tx_mbuf[t], BPF_D_OUT);
819 }
820 if (t == 0)
821 /* No packets got set up for TX. */
822 return;
823 if (t == IEE_NCB)
824 ifp->if_flags |= IFF_OACTIVE;
825 (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
826 }
827
828
829
830 /* ioctl routine */
831 int
832 iee_ioctl(struct ifnet *ifp, u_long cmd, void *data)
833 {
834 struct iee_softc *sc = ifp->if_softc;
835 int s;
836 int err;
837
838 s = splnet();
839 switch (cmd) {
840 default:
841 err = ether_ioctl(ifp, cmd, data);
842 if (err == ENETRESET) {
843 /*
844 			 * Multicast list has changed; set the hardware filter
845 * accordingly.
846 */
847 if (ifp->if_flags & IFF_RUNNING) {
848 iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S |
849 IEE_CB_EL | IEE_CB_I);
850 if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
851 (*sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
852 }
853 err = 0;
854 }
855 break;
856 }
857 splx(s);
858 return err;
859 }
860
861
862
863 /* init routine */
864 int
865 iee_init(struct ifnet *ifp)
866 {
867 struct iee_softc *sc = ifp->if_softc;
868 int r;
869 int t;
870 int n;
871 int err;
872
873 sc->sc_next_cb = 0;
874 sc->sc_next_tbd = 0;
875 sc->sc_flags &= ~IEE_WANT_MCAST;
876 sc->sc_rx_done = 0;
877 SC_SCB(sc)->scb_crc_err = 0;
878 SC_SCB(sc)->scb_align_err = 0;
879 SC_SCB(sc)->scb_resource_err = 0;
880 SC_SCB(sc)->scb_overrun_err = 0;
881 SC_SCB(sc)->scb_rcvcdt_err = 0;
882 SC_SCB(sc)->scb_short_fr_err = 0;
883 sc->sc_crc_err = 0;
884 sc->sc_align_err = 0;
885 sc->sc_resource_err = 0;
886 sc->sc_overrun_err = 0;
887 sc->sc_rcvcdt_err = 0;
888 sc->sc_short_fr_err = 0;
889 sc->sc_tx_col = 0;
890 sc->sc_rx_err = 0;
891 sc->sc_cmd_err = 0;
892 /* Create Transmit DMA maps. */
893 for (t = 0 ; t < IEE_NCB ; t++) {
894 if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
895 MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
896 &sc->sc_tx_map[t]) != 0) {
897 printf("%s: iee_init: can't create TX DMA map\n",
898 device_xname(sc->sc_dev));
899 for (n = 0 ; n < t ; n++)
900 bus_dmamap_destroy(sc->sc_dmat,
901 sc->sc_tx_map[n]);
902 return ENOBUFS;
903 }
904 }
905 /* Initialize Receive Frame and Receive Buffer Descriptors */
906 err = 0;
907 memset(SC_RFD(sc, 0), 0, sc->sc_rfd_sz * IEE_NRFD);
908 memset(SC_RBD(sc, 0), 0, sc->sc_rbd_sz * IEE_NRFD);
909 for (r = 0 ; r < IEE_NRFD ; r++) {
910 SC_RFD(sc, r)->rfd_cmd = IEE_RFD_SF;
911 SC_RFD(sc, r)->rfd_link_addr =
912 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
913 + sc->sc_rfd_sz * ((r + 1) % IEE_NRFD)));
914
915 SC_RBD(sc, r)->rbd_next_rbd =
916 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
917 + sc->sc_rbd_sz * ((r + 1) % IEE_NRFD)));
918 if (sc->sc_rx_mbuf[r] == NULL) {
919 MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
920 if (sc->sc_rx_mbuf[r] == NULL) {
921 printf("%s: iee_init: can't allocate mbuf\n",
922 device_xname(sc->sc_dev));
923 err = 1;
924 break;
925 }
926 MCLAIM(sc->sc_rx_mbuf[r],
927 &sc->sc_ethercom.ec_rx_mowner);
928 MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
929 if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
930 printf("%s: iee_init: can't allocate mbuf"
931 " cluster\n", device_xname(sc->sc_dev));
932 m_freem(sc->sc_rx_mbuf[r]);
933 sc->sc_rx_mbuf[r] = NULL;
934 err = 1;
935 break;
936 }
937 sc->sc_rx_mbuf[r]->m_len =
938 sc->sc_rx_mbuf[r]->m_pkthdr.len = MCLBYTES - 2;
939 sc->sc_rx_mbuf[r]->m_data += 2;
940 }
941 if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
942 		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
943 &sc->sc_rx_map[r]) != 0) {
944 printf("%s: iee_init: can't create RX DMA map\n",
945 device_xname(sc->sc_dev));
946 m_freem(sc->sc_rx_mbuf[r]);
947 sc->sc_rx_mbuf[r] = NULL;
948 err = 1;
949 break;
950 }
951 if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_map[r],
952 sc->sc_rx_mbuf[r], BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
953 printf("%s: iee_init: can't load RX DMA map\n",
954 device_xname(sc->sc_dev));
955 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
956 m_freem(sc->sc_rx_mbuf[r]);
957 sc->sc_rx_mbuf[r] = NULL;
958 err = 1;
959 break;
960 }
961 bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
962 sc->sc_rx_map[r]->dm_mapsize, BUS_DMASYNC_PREREAD);
963 SC_RBD(sc, r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
964 SC_RBD(sc, r)->rbd_rb_addr =
965 IEE_SWAPA32(sc->sc_rx_map[r]->dm_segs[0].ds_addr);
966 }
967 SC_RFD(sc, 0)->rfd_rbd_addr =
968 IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
969 if (err != 0) {
970 for (n = 0 ; n < r; n++) {
971 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
972 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
973 sc->sc_rx_map[n] = NULL;
974 m_freem(sc->sc_rx_mbuf[n]);
975 sc->sc_rx_mbuf[n] = NULL;
976 }
977 for (n = 0 ; n < t ; n++) {
978 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
979 sc->sc_tx_map[n] = NULL;
980 }
981 return ENOBUFS;
982 }
983
984 (sc->sc_iee_reset)(sc);
985 iee_cb_setup(sc, IEE_CB_CMD_IAS);
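	/* Load the IEE_CF_* defaults for the CONF CMD set up below. */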
986 sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
987 sc->sc_cf[1] = IEE_CF_1_DEF;
988 sc->sc_cf[2] = IEE_CF_2_DEF;
989 sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
990 | IEE_CF_3_PREAMLEN_DEF;
991 sc->sc_cf[4] = IEE_CF_4_DEF;
992 sc->sc_cf[5] = IEE_CF_5_DEF;
993 sc->sc_cf[6] = IEE_CF_6_DEF;
994 sc->sc_cf[7] = IEE_CF_7_DEF;
995 sc->sc_cf[8] = IEE_CF_8_DEF;
996 sc->sc_cf[9] = IEE_CF_9_DEF;
997 sc->sc_cf[10] = IEE_CF_10_DEF;
998 sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
999 sc->sc_cf[12] = IEE_CF_12_DEF;
1000 sc->sc_cf[13] = IEE_CF_13_DEF;
1001 iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
1002 SC_SCB(sc)->scb_rfa_addr = IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off));
1003 bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, sc->sc_shmem_sz,
1004 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1005 (sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
1006 /* Issue a Channel Attention to ACK interrupts we may have caused. */
1007 (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
1008
1009 /* Mark the interface as running and ready to RX/TX packets. */
1010 ifp->if_flags |= IFF_RUNNING;
1011 ifp->if_flags &= ~IFF_OACTIVE;
1012 return 0;
1013 }
1014
1015
1016 /* Stop routine */
1017 void
1018 iee_stop(struct ifnet *ifp, int disable)
1019 {
1020 struct iee_softc *sc = ifp->if_softc;
1021 int n;
1022
1023 ifp->if_flags &= ~IFF_RUNNING;
1024 ifp->if_flags |= IFF_OACTIVE;
1025 ifp->if_timer = 0;
1026 /* Reset the chip to get it quiet. */
1027 (sc->sc_iee_reset)(ifp->if_softc);
1028 /* Issue a Channel Attention to ACK interrupts we may have caused. */
1029 (sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
1030 /* Release any dynamically allocated resources. */
1031 for (n = 0 ; n < IEE_NCB ; n++) {
1032 if (sc->sc_tx_map[n] != NULL)
1033 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
1034 sc->sc_tx_map[n] = NULL;
1035 }
1036 for (n = 0 ; n < IEE_NRFD ; n++) {
1037 if (sc->sc_rx_map[n] != NULL) {
1038 bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
1039 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
1040 }
1041 sc->sc_rx_map[n] = NULL;
1042 if (sc->sc_rx_mbuf[n] != NULL)
1043 m_freem(sc->sc_rx_mbuf[n]);
1044 sc->sc_rx_mbuf[n] = NULL;
1045 }
1046 }
1047
1048
1049 /* Timer routine */
1050 void
1051 iee_watchdog(struct ifnet *ifp)
1052 {
1053 struct iee_softc *sc = ifp->if_softc;
1054
1055 (sc->sc_iee_reset)(sc);
1056 if (sc->sc_next_tbd != 0)
1057 printf("%s: iee_watchdog: transmit timeout %d\n",
1058 device_xname(sc->sc_dev), ++sc->sc_tx_timeout);
1059 else
1060 printf("%s: iee_watchdog: setup timeout %d\n",
1061 device_xname(sc->sc_dev), ++sc->sc_setup_timeout);
1062 iee_init(ifp);
1063 }