FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/rrunner.c
1 /* $NetBSD: rrunner.c,v 1.67 2008/06/08 12:43:51 tsutsui Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code contributed to The NetBSD Foundation by Kevin M. Lahey
8 * of the Numerical Aerospace Simulation Facility, NASA Ames Research
9 * Center.
10 *
11 * Partially based on a HIPPI driver written by Essential Communications
12 * Corporation. Thanks to Jason Thorpe, Matt Jacob, and Fred Templin
13 * for invaluable advice and encouragement!
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: rrunner.c,v 1.67 2008/06/08 12:43:51 tsutsui Exp $");
39
40 #include "opt_inet.h"
41
42 #include "bpfilter.h"
43 #include "esh.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/mbuf.h>
48 #include <sys/buf.h>
49 #include <sys/bufq.h>
50 #include <sys/socket.h>
51 #include <sys/ioctl.h>
52 #include <sys/errno.h>
53 #include <sys/syslog.h>
54 #include <sys/select.h>
55 #include <sys/device.h>
56 #include <sys/proc.h>
57 #include <sys/kernel.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/route.h>
66
67 #include <net/if_hippi.h>
68 #include <net/if_media.h>
69
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/in_var.h>
74 #include <netinet/ip.h>
75 #include <netinet/if_inarp.h>
76 #endif
77
78
79 #if NBPFILTER > 0
80 #include <net/bpf.h>
81 #include <net/bpfdesc.h>
82 #endif
83
84 #include <sys/cpu.h>
85 #include <sys/bus.h>
86 #include <sys/intr.h>
87
88 #include <dev/ic/rrunnerreg.h>
89 #include <dev/ic/rrunnervar.h>
90
91 /*
92 #define ESH_PRINTF
93 */
94
95 /* Autoconfig definition of driver back-end */
96 extern struct cfdriver esh_cd;
97
98 struct esh_softc *esh_softc_debug[22]; /* for gdb */
99
100 #ifdef DIAGNOSTIC
101 u_int32_t max_write_len;
102 #endif
103
104 /* Network device driver and initialization framework routines */
105
106 void eshinit(struct esh_softc *);
107 int eshioctl(struct ifnet *, u_long, void *);
108 void eshreset(struct esh_softc *);
109 void eshstart(struct ifnet *);
110 static int eshstatus(struct esh_softc *);
111 void eshstop(struct esh_softc *);
112 void eshwatchdog(struct ifnet *);
113
114 /* Routines to support FP operation */
115
116 dev_type_open(esh_fpopen);
117 dev_type_close(esh_fpclose);
118 dev_type_read(esh_fpread);
119 dev_type_write(esh_fpwrite);
120 #ifdef MORE_DONE
121 dev_type_mmap(esh_fpmmap);
122 #endif
123 dev_type_strategy(esh_fpstrategy);
124
/*
 * Character-device switch for the raw Framing Protocol (FP) interface.
 * Initializers are positional: open, close, read, write, ioctl, stop,
 * tty, poll, mmap, kqfilter, flags.  ioctl/stop/tty/poll/kqfilter are
 * the generic null/no-op entry points.
 */
const struct cdevsw esh_cdevsw = {
	esh_fpopen, esh_fpclose, esh_fpread, esh_fpwrite, nullioctl,
	nostop, notty, nullpoll,
#ifdef MORE_DONE
	esh_fpmmap,	/* mmap support exists but is not finished */
#else
	nommap,
#endif
	nullkqfilter,
	D_OTHER,	/* neither a disk nor a tty */
};
136
137 /* General routines, not externally visable */
138
139 static struct mbuf *esh_adjust_mbufs(struct esh_softc *, struct mbuf *m);
140 static void esh_dma_sync(struct esh_softc *, void *,
141 int, int, int, int, int, int);
142 static void esh_fill_snap_ring(struct esh_softc *);
143 static void esh_init_snap_ring(struct esh_softc *);
144 static void esh_close_snap_ring(struct esh_softc *);
145 static void esh_read_snap_ring(struct esh_softc *, u_int16_t, int);
146 static void esh_fill_fp_ring(struct esh_softc *, struct esh_fp_ring_ctl *);
147 static void esh_flush_fp_ring(struct esh_softc *,
148 struct esh_fp_ring_ctl *,
149 struct esh_dmainfo *);
150 static void esh_init_fp_rings(struct esh_softc *);
151 static void esh_read_fp_ring(struct esh_softc *, u_int16_t, int, int);
152 static void esh_reset_runcode(struct esh_softc *);
153 static void esh_send(struct esh_softc *);
154 static void esh_send_cmd(struct esh_softc *, u_int8_t, u_int8_t, u_int8_t);
155 static u_int32_t esh_read_eeprom(struct esh_softc *, u_int32_t);
156 static void esh_write_addr(bus_space_tag_t, bus_space_handle_t,
157 bus_addr_t, bus_addr_t);
158 static int esh_write_eeprom(struct esh_softc *, u_int32_t, u_int32_t);
159 static void eshstart_cleanup(struct esh_softc *, u_int16_t, int);
160
161 static struct esh_dmainfo *esh_new_dmainfo(struct esh_softc *);
162 static void esh_free_dmainfo(struct esh_softc *, struct esh_dmainfo *);
163 static int esh_generic_ioctl(struct esh_softc *, u_long, void *, u_long,
164 struct lwp *);
165
166 #ifdef ESH_PRINTF
167 static int esh_check(struct esh_softc *);
168 #endif
169
170 #define ESHUNIT(x) ((minor(x) & 0xff00) >> 8)
171 #define ESHULP(x) (minor(x) & 0x00ff)
172
173
174 /*
175 * Back-end attach and configure. Allocate DMA space and initialize
176 * all structures.
177 */
178
179 void
180 eshconfig(sc)
181 struct esh_softc *sc;
182 {
183 struct ifnet *ifp = &sc->sc_if;
184 bus_space_tag_t iot = sc->sc_iot;
185 bus_space_handle_t ioh = sc->sc_ioh;
186 u_int32_t misc_host_ctl;
187 u_int32_t misc_local_ctl;
188 u_int32_t header_format;
189 u_int32_t ula_tmp;
190 bus_size_t size;
191 int rseg;
192 int error;
193 int i;
194
195 esh_softc_debug[device_unit(&sc->sc_dev)] = sc;
196 sc->sc_flags = 0;
197
198 TAILQ_INIT(&sc->sc_dmainfo_freelist);
199 sc->sc_dmainfo_freelist_count = 0;
200
201 /*
202 * Allocate and divvy up some host side memory that can hold
203 * data structures that will be DMA'ed over to the NIC
204 */
205
206 sc->sc_dma_size = sizeof(struct rr_gen_info) +
207 sizeof(struct rr_ring_ctl) * RR_ULP_COUNT +
208 sizeof(struct rr_descr) * RR_SEND_RING_SIZE +
209 sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE +
210 sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
211
212 error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size,
213 0, RR_DMA_BOUNDARY, &sc->sc_dmaseg, 1,
214 &rseg, BUS_DMA_NOWAIT);
215 if (error) {
216 aprint_error_dev(&sc->sc_dev, "couldn't allocate space for host-side"
217 "data structures\n");
218 return;
219 }
220 if (rseg > 1) {
221 aprint_error_dev(&sc->sc_dev, "contiguous memory not available\n");
222 goto bad_dmamem_map;
223 }
224
225 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, rseg,
226 sc->sc_dma_size, (void **)&sc->sc_dma_addr,
227 BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
228 if (error) {
229 aprint_error_dev(&sc->sc_dev,
230 "couldn't map memory for host-side structures\n");
231 goto bad_dmamem_map;
232 }
233
234 if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size,
235 1, sc->sc_dma_size, RR_DMA_BOUNDARY,
236 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
237 &sc->sc_dma)) {
238 aprint_error_dev(&sc->sc_dev, "couldn't create DMA map\n");
239 goto bad_dmamap_create;
240 }
241
242 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma, sc->sc_dma_addr,
243 sc->sc_dma_size, NULL, BUS_DMA_NOWAIT)) {
244 aprint_error_dev(&sc->sc_dev, "couldn't load DMA map\n");
245 goto bad_dmamap_load;
246 }
247
248 memset(sc->sc_dma_addr, 0, sc->sc_dma_size);
249
250 sc->sc_gen_info_dma = sc->sc_dma->dm_segs->ds_addr;
251 sc->sc_gen_info = (struct rr_gen_info *) sc->sc_dma_addr;
252 size = sizeof(struct rr_gen_info);
253
254 sc->sc_recv_ring_table_dma = sc->sc_dma->dm_segs->ds_addr + size;
255 sc->sc_recv_ring_table =
256 (struct rr_ring_ctl *) (sc->sc_dma_addr + size);
257 size += sizeof(struct rr_ring_ctl) * RR_ULP_COUNT;
258
259 sc->sc_send_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
260 sc->sc_send_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
261 sc->sc2_send_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
262 size += sizeof(struct rr_descr) * RR_SEND_RING_SIZE;
263
264 sc->sc_snap_recv_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
265 sc->sc_snap_recv_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
266 sc->sc2_snap_recv_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
267 size += sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE;
268
269 sc->sc_event_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
270 sc->sc_event_ring = (struct rr_event *) (sc->sc_dma_addr + size);
271 size += sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
272
273 #ifdef DIAGNOSTIC
274 if (size > sc->sc_dmaseg.ds_len) {
275 aprint_error_dev(&sc->sc_dev, "bogus size calculation\n");
276 goto bad_other;
277 }
278 #endif
279
280 /*
281 * Allocate DMA maps for transfers. We do this here and now
282 * so we won't have to wait for them in the middle of sending
283 * or receiving something.
284 */
285
286 if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
287 ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
288 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
289 &sc->sc_send.ec_dma)) {
290 aprint_error_dev(&sc->sc_dev, "failed bus_dmamap_create\n");
291 goto bad_other;
292 }
293 sc->sc_send.ec_offset = 0;
294 sc->sc_send.ec_descr = sc->sc_send_ring;
295 TAILQ_INIT(&sc->sc_send.ec_di_queue);
296 bufq_alloc(&sc->sc_send.ec_buf_queue, "fcfs", 0);
297
298 for (i = 0; i < RR_MAX_SNAP_RECV_RING_SIZE; i++)
299 if (bus_dmamap_create(sc->sc_dmat, RR_DMA_MAX, 1, RR_DMA_MAX,
300 RR_DMA_BOUNDARY,
301 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
302 &sc->sc_snap_recv.ec_dma[i])) {
303 aprint_error_dev(&sc->sc_dev, "failed bus_dmamap_create\n");
304 for (i--; i >= 0; i--)
305 bus_dmamap_destroy(sc->sc_dmat,
306 sc->sc_snap_recv.ec_dma[i]);
307 goto bad_ring_dmamap_create;
308 }
309
310 /*
311 * If this is a coldboot, the NIC RunCode should be operational.
312 * If it is a warmboot, it may or may not be operational.
313 * Just to be sure, we'll stop the RunCode and reset everything.
314 */
315
316 /* Halt the processor (preserve NO_SWAP, if set) */
317
318 misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
319 bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
320 (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
321
322 /* Make the EEPROM readable */
323
324 misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
325 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
326 misc_local_ctl & ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM |
327 RR_LC_PARITY_ON));
328
329 /* Extract interesting information from the EEPROM: */
330
331 header_format = esh_read_eeprom(sc, RR_EE_HEADER_FORMAT);
332 if (header_format != RR_EE_HEADER_FORMAT_MAGIC) {
333 aprint_error_dev(&sc->sc_dev, "bogus EEPROM header format value %x\n",
334 header_format);
335 goto bad_other;
336 }
337
338 /*
339 * As it is now, the runcode version in the EEPROM doesn't
340 * reflect the actual runcode version number. That is only
341 * available once the runcode starts up. We should probably
342 * change the firmware update code to modify this value,
343 * but Essential itself doesn't do it right now.
344 */
345
346 sc->sc_sram_size = 4 * esh_read_eeprom(sc, RR_EE_SRAM_SIZE);
347 sc->sc_runcode_start = esh_read_eeprom(sc, RR_EE_RUNCODE_START);
348 sc->sc_runcode_version = esh_read_eeprom(sc, RR_EE_RUNCODE_VERSION);
349
350 sc->sc_pci_latency = esh_read_eeprom(sc, RR_EE_PCI_LATENCY);
351 sc->sc_pci_lat_gnt = esh_read_eeprom(sc, RR_EE_PCI_LAT_GNT);
352
353 /* General tuning values */
354
355 sc->sc_tune.rt_mode_and_status =
356 esh_read_eeprom(sc, RR_EE_MODE_AND_STATUS);
357 sc->sc_tune.rt_conn_retry_count =
358 esh_read_eeprom(sc, RR_EE_CONN_RETRY_COUNT);
359 sc->sc_tune.rt_conn_retry_timer =
360 esh_read_eeprom(sc, RR_EE_CONN_RETRY_TIMER);
361 sc->sc_tune.rt_conn_timeout =
362 esh_read_eeprom(sc, RR_EE_CONN_TIMEOUT);
363 sc->sc_tune.rt_interrupt_timer =
364 esh_read_eeprom(sc, RR_EE_INTERRUPT_TIMER);
365 sc->sc_tune.rt_tx_timeout =
366 esh_read_eeprom(sc, RR_EE_TX_TIMEOUT);
367 sc->sc_tune.rt_rx_timeout =
368 esh_read_eeprom(sc, RR_EE_RX_TIMEOUT);
369 sc->sc_tune.rt_stats_timer =
370 esh_read_eeprom(sc, RR_EE_STATS_TIMER);
371 sc->sc_tune.rt_stats_timer = ESH_STATS_TIMER_DEFAULT;
372
373 /* DMA tuning values */
374
375 sc->sc_tune.rt_pci_state =
376 esh_read_eeprom(sc, RR_EE_PCI_STATE);
377 sc->sc_tune.rt_dma_write_state =
378 esh_read_eeprom(sc, RR_EE_DMA_WRITE_STATE);
379 sc->sc_tune.rt_dma_read_state =
380 esh_read_eeprom(sc, RR_EE_DMA_READ_STATE);
381 sc->sc_tune.rt_driver_param =
382 esh_read_eeprom(sc, RR_EE_DRIVER_PARAM);
383
384 /*
385 * Snag the ULA. The first two bytes are reserved.
386 * We don't really use it immediately, but it would be good to
387 * have for building IPv6 addresses, etc.
388 */
389
390 ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_HI);
391 sc->sc_ula[0] = (ula_tmp >> 8) & 0xff;
392 sc->sc_ula[1] = ula_tmp & 0xff;
393
394 ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_LO);
395 sc->sc_ula[2] = (ula_tmp >> 24) & 0xff;
396 sc->sc_ula[3] = (ula_tmp >> 16) & 0xff;
397 sc->sc_ula[4] = (ula_tmp >> 8) & 0xff;
398 sc->sc_ula[5] = ula_tmp & 0xff;
399
400 /* Reset EEPROM readability */
401
402 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
403
404 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
405 ifp->if_softc = sc;
406 ifp->if_start = eshstart;
407 ifp->if_ioctl = eshioctl;
408 ifp->if_watchdog = eshwatchdog;
409 ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_NOARP;
410 IFQ_SET_READY(&ifp->if_snd);
411
412 if_attach(ifp);
413 hippi_ifattach(ifp, sc->sc_ula);
414
415 sc->sc_misaligned_bufs = sc->sc_bad_lens = 0;
416 sc->sc_fp_rings = 0;
417
418 return;
419
420 bad_ring_dmamap_create:
421 bus_dmamap_destroy(sc->sc_dmat, sc->sc_send.ec_dma);
422 bad_other:
423 bus_dmamap_unload(sc->sc_dmat, sc->sc_dma);
424 bad_dmamap_load:
425 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma);
426 bad_dmamap_create:
427 bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_addr, sc->sc_dma_size);
428 bad_dmamem_map:
429 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, rseg);
430 return;
431 }
432
433
434 /*
435 * Bring device up.
436 *
437 * Assume that the on-board processor has already been stopped,
438 * the rings have been cleared of valid buffers, and everything
439 * is pretty much as it was when the system started.
440 *
441 * Stop the processor (just for good measure), clear the SRAM,
442 * reload the boot code, and start it all up again, with the PC
443 * pointing at the boot code. Once the boot code has had a chance
444 * to come up, adjust all of the appropriate parameters, and send
445 * the 'start firmware' command.
446 *
447 * The NIC won't actually be up until it gets an interrupt with an
448 * event indicating the RunCode is up.
449 */
450
void
eshinit(sc)
	struct esh_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct rr_ring_ctl *ring;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t value;
	u_int32_t mode;

	/* If we're already doing an init, don't try again simultaneously */

	if ((sc->sc_flags & ESH_FL_INITIALIZING) != 0)
		return;
	sc->sc_flags = ESH_FL_INITIALIZING;

	/* Give the CPU a coherent view of the shared structures before
	 * we start rewriting them below. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the processor (preserve NO_SWAP, if set) */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP)
			  | RR_MH_HALT_PROC | RR_MH_CLEAR_INT);

	/* Make the EEPROM readable */

	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
			  misc_local_ctl & ~(RR_LC_FAST_PROM |
					     RR_LC_ADD_SRAM |
					     RR_LC_PARITY_ON));

	/* Reset DMA */

	bus_space_write_4(iot, ioh, RR_RX_STATE, RR_RS_RESET);
	bus_space_write_4(iot, ioh, RR_TX_STATE, 0);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE, RR_DR_RESET);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE, RR_DW_RESET);
	bus_space_write_4(iot, ioh, RR_PCI_STATE, 0);
	bus_space_write_4(iot, ioh, RR_TIMER, 0);
	bus_space_write_4(iot, ioh, RR_TIMER_REF, 0);

	/*
	 * Reset the assist register that the documentation suggests
	 * resetting.  Too bad that the docs don't mention anything
	 * else about the register!
	 */

	bus_space_write_4(iot, ioh, 0x15C, 1);

	/* Clear BIST, set the PC to the start of the code and let 'er rip */

	value = bus_space_read_4(iot, ioh, RR_PCI_BIST);
	bus_space_write_4(iot, ioh, RR_PCI_BIST, (value & ~0xff) | 8);

	sc->sc_bist_write(sc, 0);
	esh_reset_runcode(sc);

	bus_space_write_4(iot, ioh, RR_PROC_PC, sc->sc_runcode_start);
	bus_space_write_4(iot, ioh, RR_PROC_BREAKPT, 0x00000001);

	/* Un-halt the on-board processor so it runs the boot code. */
	misc_host_ctl &= ~RR_MH_HALT_PROC;
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, misc_host_ctl);

	/* XXX: should we sleep rather than delaying for 1ms!? */

	delay(1000);	/* Need 500 us, but we'll give it more */

	/* A non-zero BIST result means the board failed self-test. */
	value = sc->sc_bist_read(sc);
	if (value != 0) {
		aprint_error_dev(&sc->sc_dev, "BIST is %d, not 0!\n",
		       value);
		goto bad_init;
	}

#ifdef ESH_PRINTF
	printf("%s: BIST is %x\n", device_xname(&sc->sc_dev), value);
	eshstatus(sc);
#endif

	/* RunCode is up. Initialize NIC */

	esh_write_addr(iot, ioh, RR_GEN_INFO_PTR, sc->sc_gen_info_dma);
	esh_write_addr(iot, ioh, RR_RECV_RING_PTR, sc->sc_recv_ring_table_dma);

	/* Reset the event/command producer-consumer bookkeeping. */
	sc->sc_event_consumer = 0;
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, sc->sc_event_consumer);
	sc->sc_event_producer = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_cmd_producer = RR_INIT_CMD;
	sc->sc_cmd_consumer = 0;

	mode = bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS);
	mode |= (RR_MS_WARNINGS |
		 RR_MS_ERR_TERM |
		 RR_MS_NO_RESTART |
		 RR_MS_SWAP_DATA);
	mode &= ~RR_MS_PH_MODE;
	bus_space_write_4(iot, ioh, RR_MODE_AND_STATUS, mode);

#if 0
#ifdef ESH_PRINTF
	printf("eshinit: misc_local_ctl %x, SRAM size %d\n", misc_local_ctl,
		sc->sc_sram_size);
#endif
	/*
	misc_local_ctl |= (RR_LC_FAST_PROM | RR_LC_PARITY_ON);
	*/
	if (sc->sc_sram_size > 256 * 1024) {
		misc_local_ctl |= RR_LC_ADD_SRAM;
	}
#endif

#ifdef ESH_PRINTF
	printf("eshinit: misc_local_ctl %x\n", misc_local_ctl);
#endif
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);

	/* Set tuning parameters */

	bus_space_write_4(iot, ioh, RR_CONN_RETRY_COUNT,
			  sc->sc_tune.rt_conn_retry_count);
	bus_space_write_4(iot, ioh, RR_CONN_RETRY_TIMER,
			  sc->sc_tune.rt_conn_retry_timer);
	bus_space_write_4(iot, ioh, RR_CONN_TIMEOUT,
			  sc->sc_tune.rt_conn_timeout);
	bus_space_write_4(iot, ioh, RR_INTERRUPT_TIMER,
			  sc->sc_tune.rt_interrupt_timer);
	bus_space_write_4(iot, ioh, RR_TX_TIMEOUT,
			  sc->sc_tune.rt_tx_timeout);
	bus_space_write_4(iot, ioh, RR_RX_TIMEOUT,
			  sc->sc_tune.rt_rx_timeout);
	bus_space_write_4(iot, ioh, RR_STATS_TIMER,
			  sc->sc_tune.rt_stats_timer);
	bus_space_write_4(iot, ioh, RR_PCI_STATE,
			  sc->sc_tune.rt_pci_state);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE,
			  sc->sc_tune.rt_dma_write_state);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE,
			  sc->sc_tune.rt_dma_read_state);

	sc->sc_max_rings = bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS);

	/* The register holds the real version; the EEPROM copy read at
	 * attach time is unreliable (see comment in eshconfig). */
	sc->sc_runcode_version =
		bus_space_read_4(iot, ioh, RR_RUNCODE_VERSION);
	sc->sc_version = sc->sc_runcode_version >> 16;
	if (sc->sc_version != 1 && sc->sc_version != 2) {
		aprint_error_dev(&sc->sc_dev, "bad version number %d in runcode\n",
		       sc->sc_version);
		goto bad_init;
	}

	if (sc->sc_version == 1) {
		sc->sc_options = 0;
	} else {
		value = bus_space_read_4(iot, ioh, RR_ULA);
		sc->sc_options = value >> 16;
	}

	/* Long descriptors are a version-2 option this driver can't do. */
	if (sc->sc_options & (RR_OP_LONG_TX | RR_OP_LONG_RX)) {
		aprint_error_dev(&sc->sc_dev, "unsupported firmware -- long descriptors\n");
		goto bad_init;
	}

	printf("%s: startup runcode version %d.%d.%d, options %x\n",
	       device_xname(&sc->sc_dev),
	       sc->sc_version,
	       (sc->sc_runcode_version >> 8) & 0xff,
	       sc->sc_runcode_version & 0xff,
	       sc->sc_options);

	/* Initialize the general ring information */

	memset(sc->sc_recv_ring_table, 0,
	       sizeof(struct rr_ring_ctl) * RR_ULP_COUNT);

	ring = &sc->sc_gen_info->ri_event_ring_ctl;
	ring->rr_ring_addr = sc->sc_event_ring_dma;
	ring->rr_entry_size = sizeof(struct rr_event);
	ring->rr_free_bufs = RR_EVENT_RING_SIZE / 4;
	ring->rr_entries = RR_EVENT_RING_SIZE;
	ring->rr_prod_index = 0;

	ring = &sc->sc_gen_info->ri_cmd_ring_ctl;
	ring->rr_free_bufs = 8;
	ring->rr_entry_size = sizeof(union rr_cmd);
	ring->rr_prod_index = RR_INIT_CMD;

	ring = &sc->sc_gen_info->ri_send_ring_ctl;
	ring->rr_ring_addr = sc->sc_send_ring_dma;
	if (sc->sc_version == 1) {
		ring->rr_free_bufs = RR_RR_DONT_COMPLAIN;
	} else {
		ring->rr_free_bufs = 0;
	}

	ring->rr_entries = RR_SEND_RING_SIZE;
	ring->rr_entry_size = sizeof(struct rr_descr);

	ring->rr_prod_index = sc->sc_send.ec_producer =
		sc->sc_send.ec_consumer = 0;
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;

	sc->sc_snap_recv.ec_descr = sc->sc_snap_recv_ring;
	sc->sc_snap_recv.ec_consumer = sc->sc_snap_recv.ec_producer = 0;

	/* Hand the shared structures back to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set up the watchdog to make sure something happens! */

	sc->sc_watchdog = 0;
	ifp->if_timer = 5;

	/*
	 * Can't actually turn on interface until we see some events,
	 * so set initialized flag, but don't start sending.
	 */

	sc->sc_flags = ESH_FL_INITIALIZED;
	esh_send_cmd(sc, RR_CC_START_RUNCODE, 0, 0);
	return;

bad_init:
	/* Clear all flags and wake anyone sleeping on the init (e.g.
	 * esh_fpopen) so they can observe the failure. */
	sc->sc_flags = 0;
	wakeup((void *) sc);
	return;
}
684
685
686 /*
687 * Code to handle the Framing Protocol (FP) interface to the esh.
688 * This will allow us to write directly to the wire, with no
689 * intervening memcpy's to slow us down.
690 */
691
692 int
693 esh_fpopen(dev_t dev, int oflags, int devtype,
694 struct lwp *l)
695 {
696 struct esh_softc *sc;
697 struct rr_ring_ctl *ring_ctl;
698 struct esh_fp_ring_ctl *recv;
699 int ulp = ESHULP(dev);
700 int error = 0;
701 bus_size_t size;
702 int rseg;
703 int s;
704
705 sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
706 if (sc == NULL || ulp == HIPPI_ULP_802)
707 return (ENXIO);
708
709 #ifdef ESH_PRINTF
710 printf("esh_fpopen: opening board %d, ulp %d\n",
711 device_unit(&sc->sc_dev), ulp);
712 #endif
713
714 /* If the card is not up, initialize it. */
715
716 s = splnet();
717
718 if (sc->sc_fp_rings >= sc->sc_max_rings - 1) {
719 splx(s);
720 return (ENOSPC);
721 }
722
723 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
724 eshinit(sc);
725 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0)
726 return EIO;
727 }
728
729 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
730 /*
731 * Wait for the runcode to indicate that it is up,
732 * while watching to make sure we haven't crashed.
733 */
734
735 error = 0;
736 while (error == 0 &&
737 (sc->sc_flags & ESH_FL_INITIALIZED) != 0 &&
738 (sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
739 error = tsleep((void *) sc, PCATCH | PRIBIO,
740 "eshinit", 0);
741 #ifdef ESH_PRINTF
742 printf("esh_fpopen: tslept\n");
743 #endif
744 }
745
746 if (error != 0) {
747 splx(s);
748 return error;
749 }
750
751 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
752 splx(s);
753 return EIO;
754 }
755 }
756
757
758 #ifdef ESH_PRINTF
759 printf("esh_fpopen: card up\n");
760 #endif
761
762 /* Look at the ring descriptor to see if the ULP is in use */
763
764 ring_ctl = &sc->sc_recv_ring_table[ulp];
765 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
766 (char *) ring_ctl - (char *) sc->sc_dma_addr,
767 sizeof(*ring_ctl),
768 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
769 if (ring_ctl->rr_entry_size != 0) {
770 splx(s);
771 return (EBUSY);
772 }
773
774 #ifdef ESH_PRINTF
775 printf("esh_fpopen: ring %d okay\n", ulp);
776 #endif
777
778 /*
779 * Allocate the DMA space for the ring; space for the
780 * ring control blocks has already been staticly allocated.
781 */
782
783 recv = (struct esh_fp_ring_ctl *)
784 malloc(sizeof(*recv), M_DEVBUF, M_WAITOK|M_ZERO);
785 if (recv == NULL)
786 return(ENOMEM);
787 TAILQ_INIT(&recv->ec_queue);
788
789 size = RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr);
790 error = bus_dmamem_alloc(sc->sc_dmat, size, 0, RR_DMA_BOUNDARY,
791 &recv->ec_dmaseg, 1,
792 &rseg, BUS_DMA_WAITOK);
793
794 if (error) {
795 aprint_error_dev(&sc->sc_dev, "couldn't allocate space for FP receive ring"
796 "data structures\n");
797 goto bad_fp_dmamem_alloc;
798 }
799
800 if (rseg > 1) {
801 aprint_error_dev(&sc->sc_dev, "contiguous memory not available for "
802 "FP receive ring\n");
803 goto bad_fp_dmamem_map;
804 }
805
806 error = bus_dmamem_map(sc->sc_dmat, &recv->ec_dmaseg, rseg,
807 size, (void **) &recv->ec_descr,
808 BUS_DMA_WAITOK | BUS_DMA_COHERENT);
809 if (error) {
810 aprint_error_dev(&sc->sc_dev, "couldn't map memory for FP receive ring\n");
811 goto bad_fp_dmamem_map;
812 }
813
814 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, RR_DMA_BOUNDARY,
815 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
816 &recv->ec_dma)) {
817 aprint_error_dev(&sc->sc_dev, "couldn't create DMA map for FP receive ring\n");
818 goto bad_fp_dmamap_create;
819 }
820
821 if (bus_dmamap_load(sc->sc_dmat, recv->ec_dma, recv->ec_descr,
822 size, NULL, BUS_DMA_WAITOK)) {
823 aprint_error_dev(&sc->sc_dev, "couldn't load DMA map for FP receive ring\n");
824 goto bad_fp_dmamap_load;
825 }
826
827 memset(recv->ec_descr, 0, size);
828
829 /*
830 * Create the ring:
831 *
832 * XXX: HTF are we gonna deal with the fact that we don't know
833 * if the open succeeded until we get a response from
834 * the event handler? I guess we could go to sleep waiting
835 * for the interrupt, and get woken up by the eshintr
836 * case handling it.
837 */
838
839 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
840 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
841 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
842 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
843 ring_ctl->rr_prod_index = recv->ec_producer = recv->ec_consumer = 0;
844 ring_ctl->rr_mode = RR_RR_CHARACTER;
845 recv->ec_ulp = ulp;
846 recv->ec_index = -1;
847
848 sc->sc_fp_recv[ulp] = recv;
849
850 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
851 (char *) ring_ctl - (char *) sc->sc_dma_addr,
852 sizeof(*ring_ctl),
853 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
854
855 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma, 0, size,
856 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
857
858 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
859
860 #ifdef ESH_PRINTF
861 printf("esh_fpopen: sent create ring cmd\n");
862 #endif
863
864 while (recv->ec_index == -1) {
865 error = tsleep((void *) &recv->ec_ulp, PCATCH | PRIBIO,
866 "eshfpopen", 0);
867 if (error != 0 || recv->ec_index == -1) {
868 splx(s);
869 goto bad_fp_ring_create;
870 }
871 }
872 #ifdef ESH_PRINTF
873 printf("esh_fpopen: created ring\n");
874 #endif
875
876 /*
877 * Ring is created. Set up various pointers to the ring
878 * information, fill the ring, and get going...
879 */
880
881 sc->sc_fp_rings++;
882 splx(s);
883 return 0;
884
885 bad_fp_ring_create:
886 #ifdef ESH_PRINTF
887 printf("esh_fpopen: bad ring create\n");
888 #endif
889 sc->sc_fp_recv[ulp] = NULL;
890 memset(ring_ctl, 0, sizeof(*ring_ctl));
891 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma);
892 bad_fp_dmamap_load:
893 bus_dmamap_destroy(sc->sc_dmat, recv->ec_dma);
894 bad_fp_dmamap_create:
895 bus_dmamem_unmap(sc->sc_dmat, (void *) recv->ec_descr, size);
896 bad_fp_dmamem_map:
897 bus_dmamem_free(sc->sc_dmat, &recv->ec_dmaseg, rseg);
898 bad_fp_dmamem_alloc:
899 free(recv, M_DEVBUF);
900 if (error == 0)
901 error = ENOMEM;
902 splx(s);
903 return (error);
904 }
905
906
907 int
908 esh_fpclose(dev_t dev, int fflag, int devtype,
909 struct lwp *l)
910 {
911 struct esh_softc *sc;
912 struct rr_ring_ctl *ring_ctl;
913 struct esh_fp_ring_ctl *ring;
914 int ulp = ESHULP(dev);
915 int index;
916 int error = 0;
917 int s;
918
919 sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
920 if (sc == NULL || ulp == HIPPI_ULP_802)
921 return (ENXIO);
922
923 s = splnet();
924
925 ring = sc->sc_fp_recv[ulp];
926 ring_ctl = &sc->sc_recv_ring_table[ulp];
927 index = ring->ec_index;
928
929 #ifdef ESH_PRINTF
930 printf("esh_fpclose: closing unit %d, ulp %d\n",
931 device_unit(&sc->sc_dev), ulp);
932 #endif
933 assert(ring);
934 assert(ring_ctl);
935
936 /*
937 * Disable the ring, wait for notification, and get rid of DMA
938 * stuff and dynamically allocated memory. Loop, waiting to
939 * learn that the ring has been disabled, or the card
940 * has been shut down.
941 */
942
943 do {
944 esh_send_cmd(sc, RR_CC_DISABLE_RING, ulp, ring->ec_producer);
945
946 error = tsleep((void *) &ring->ec_index, PCATCH | PRIBIO,
947 "esh_fpclose", 0);
948 if (error != 0 && error != EAGAIN) {
949 aprint_error_dev(&sc->sc_dev, "esh_fpclose: wait on ring disable bad\n");
950 ring->ec_index = -1;
951 break;
952 }
953 } while (ring->ec_index != -1 && sc->sc_flags != 0);
954
955 /*
956 * XXX: Gotta unload the ring, removing old descriptors!
957 * *Can* there be outstanding reads with a close issued!?
958 */
959
960 bus_dmamap_unload(sc->sc_dmat, ring->ec_dma);
961 bus_dmamap_destroy(sc->sc_dmat, ring->ec_dma);
962 bus_dmamem_unmap(sc->sc_dmat, (void *) ring->ec_descr,
963 RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr));
964 bus_dmamem_free(sc->sc_dmat, &ring->ec_dmaseg, ring->ec_dma->dm_nsegs);
965 free(ring, M_DEVBUF);
966 memset(ring_ctl, 0, sizeof(*ring_ctl));
967 sc->sc_fp_recv[ulp] = NULL;
968 sc->sc_fp_recv_index[index] = NULL;
969
970 sc->sc_fp_rings--;
971 if (sc->sc_fp_rings == 0)
972 sc->sc_flags &= ~ESH_FL_FP_RING_UP;
973
974 splx(s);
975 return 0;
976 }
977
978 int
979 esh_fpread(dev_t dev, struct uio *uio, int ioflag)
980 {
981 struct lwp *l = curlwp;
982 struct proc *p = l->l_proc;
983 struct iovec *iovp;
984 struct esh_softc *sc;
985 struct esh_fp_ring_ctl *ring;
986 struct esh_dmainfo *di;
987 int ulp = ESHULP(dev);
988 int error;
989 int i;
990 int s;
991
992 #ifdef ESH_PRINTF
993 printf("esh_fpread: dev %x\n", dev);
994 #endif
995
996 sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
997 if (sc == NULL || ulp == HIPPI_ULP_802)
998 return (ENXIO);
999
1000 s = splnet();
1001
1002 ring = sc->sc_fp_recv[ulp];
1003
1004 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1005 error = ENXIO;
1006 goto fpread_done;
1007 }
1008
1009 /* Check for validity */
1010 for (i = 0; i < uio->uio_iovcnt; i++) {
1011 /* Check for valid offsets and sizes */
1012 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1013 (i < uio->uio_iovcnt - 1 &&
1014 (uio->uio_iov[i].iov_len & 3) != 0)) {
1015 error = EFAULT;
1016 goto fpread_done;
1017 }
1018 }
1019
1020 uvm_lwp_hold(l); /* Lock process info into memory */
1021
1022 /* Lock down the pages */
1023 for (i = 0; i < uio->uio_iovcnt; i++) {
1024 iovp = &uio->uio_iov[i];
1025 error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
1026 VM_PROT_WRITE);
1027 if (error) {
1028 /* Unlock what we've locked so far. */
1029 for (--i; i >= 0; i--) {
1030 iovp = &uio->uio_iov[i];
1031 uvm_vsunlock(p->p_vmspace, iovp->iov_base,
1032 iovp->iov_len);
1033 }
1034 goto fpread_done;
1035 }
1036 }
1037
1038 /*
1039 * Perform preliminary DMA mapping and throw the buffers
1040 * onto the queue to be sent.
1041 */
1042
1043 di = esh_new_dmainfo(sc);
1044 if (di == NULL) {
1045 error = ENOMEM;
1046 goto fpread_done;
1047 }
1048 di->ed_buf = NULL;
1049 di->ed_error = 0;
1050 di->ed_read_len = 0;
1051
1052 #ifdef ESH_PRINTF
1053 printf("esh_fpread: ulp %d, uio offset %qd, resid %d, iovcnt %d\n",
1054 ulp, uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1055 #endif
1056
1057 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1058 uio, BUS_DMA_READ|BUS_DMA_WAITOK);
1059 if (error) {
1060 aprint_error_dev(&sc->sc_dev, "esh_fpread: bus_dmamap_load_uio "
1061 "failed\terror code %d\n",
1062 error);
1063 error = ENOBUFS;
1064 esh_free_dmainfo(sc, di);
1065 goto fpread_done;
1066 }
1067
1068 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1069 0, di->ed_dma->dm_mapsize,
1070 BUS_DMASYNC_PREREAD);
1071
1072 #ifdef ESH_PRINTF
1073 printf("esh_fpread: ulp %d, di %p, nsegs %d, uio len %d\n",
1074 ulp, di, di->ed_dma->dm_nsegs, uio->uio_resid);
1075 #endif
1076
1077 di->ed_flags |= ESH_DI_BUSY;
1078
1079 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1080 esh_fill_fp_ring(sc, ring);
1081
1082 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1083 error = tsleep((void *) di, PCATCH | PRIBIO, "esh_fpread", 0);
1084 #ifdef ESH_PRINTF
1085 printf("esh_fpread: ulp %d, tslept %d\n", ulp, error);
1086 #endif
1087 if (error) {
1088 /*
1089 * Remove the buffer entries from the ring; this
1090 * is gonna require a DISCARD_PKT command, and
1091 * will certainly disrupt things. This is why we
1092 * can have only one outstanding read on a ring
1093 * at a time. :-(
1094 */
1095
1096 printf("esh_fpread: was that a ^C!? error %d, ulp %d\n",
1097 error, ulp);
1098 if (error == EINTR || error == ERESTART)
1099 error = 0;
1100 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1101 esh_flush_fp_ring(sc, ring, di);
1102 error = EINTR;
1103 break;
1104 }
1105 }
1106 }
1107
1108 if (error == 0 && di->ed_error != 0)
1109 error = EIO;
1110
1111 /*
1112 * How do we let the caller know how much has been read?
1113 * Adjust the uio_resid stuff!?
1114 */
1115
1116 assert(uio->uio_resid >= di->ed_read_len);
1117
1118 uio->uio_resid -= di->ed_read_len;
1119 for (i = 0; i < uio->uio_iovcnt; i++) {
1120 iovp = &uio->uio_iov[i];
1121 uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
1122 }
1123
1124 uvm_lwp_rele(l); /* Release process info */
1125 esh_free_dmainfo(sc, di);
1126
1127 fpread_done:
1128 #ifdef ESH_PRINTF
1129 printf("esh_fpread: ulp %d, error %d\n", ulp, error);
1130 #endif
1131 splx(s);
1132 return error;
1133 }
1134
1135
1136 int
1137 esh_fpwrite(dev_t dev, struct uio *uio, int ioflag)
1138 {
1139 struct lwp *l = curlwp;
1140 struct proc *p = l->l_proc;
1141 struct iovec *iovp;
1142 struct esh_softc *sc;
1143 struct esh_send_ring_ctl *ring;
1144 struct esh_dmainfo *di;
1145 int ulp = ESHULP(dev);
1146 int error;
1147 int len;
1148 int i;
1149 int s;
1150
1151 #ifdef ESH_PRINTF
1152 printf("esh_fpwrite: dev %x\n", dev);
1153 #endif
1154
1155 sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
1156 if (sc == NULL || ulp == HIPPI_ULP_802)
1157 return (ENXIO);
1158
1159 s = splnet();
1160
1161 ring = &sc->sc_send;
1162
1163 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1164 error = ENXIO;
1165 goto fpwrite_done;
1166 }
1167
1168 /* Check for validity */
1169 for (i = 0; i < uio->uio_iovcnt; i++) {
1170 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1171 (i < uio->uio_iovcnt - 1 &&
1172 (uio->uio_iov[i].iov_len & 3) != 0)) {
1173 error = EFAULT;
1174 goto fpwrite_done;
1175 }
1176 }
1177
1178 uvm_lwp_hold(l); /* Lock process info into memory */
1179
1180 /* Lock down the pages */
1181 for (i = 0; i < uio->uio_iovcnt; i++) {
1182 iovp = &uio->uio_iov[i];
1183 error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
1184 VM_PROT_READ);
1185 if (error) {
1186 /* Unlock what we've locked so far. */
1187 for (--i; i >= 0; i--) {
1188 iovp = &uio->uio_iov[i];
1189 uvm_vsunlock(p->p_vmspace, iovp->iov_base,
1190 iovp->iov_len);
1191 }
1192 goto fpwrite_done;
1193 }
1194 }
1195
1196 /*
1197 * Perform preliminary DMA mapping and throw the buffers
1198 * onto the queue to be sent.
1199 */
1200
1201 di = esh_new_dmainfo(sc);
1202 if (di == NULL) {
1203 error = ENOMEM;
1204 goto fpwrite_done;
1205 }
1206 di->ed_buf = NULL;
1207 di->ed_error = 0;
1208
1209 #ifdef ESH_PRINTF
1210 printf("esh_fpwrite: uio offset %qd, resid %d, iovcnt %d\n",
1211 uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1212 #endif
1213
1214 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1215 uio, BUS_DMA_WRITE|BUS_DMA_WAITOK);
1216 if (error) {
1217 aprint_error_dev(&sc->sc_dev, "esh_fpwrite: bus_dmamap_load_uio "
1218 "failed\terror code %d\n",
1219 error);
1220 error = ENOBUFS;
1221 esh_free_dmainfo(sc, di);
1222 goto fpwrite_done;
1223 }
1224
1225 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1226 0, di->ed_dma->dm_mapsize,
1227 BUS_DMASYNC_PREWRITE);
1228
1229 #ifdef ESH_PRINTF
1230 printf("esh_fpwrite: di %p, nsegs %d, uio len %d\n",
1231 di, di->ed_dma->dm_nsegs, uio->uio_resid);
1232 #endif
1233
1234 len = di->ed_dma->dm_mapsize;
1235 di->ed_flags |= ESH_DI_BUSY;
1236
1237 TAILQ_INSERT_TAIL(&ring->ec_di_queue, di, ed_list);
1238 eshstart(&sc->sc_if);
1239
1240 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1241 error = tsleep((void *) di, PRIBIO, "esh_fpwrite", 0);
1242 #ifdef ESH_PRINTF
1243 printf("esh_fpwrite: tslept %d\n", error);
1244 #endif
1245 if (error) {
1246 printf("esh_fpwrite: was that a ^C!? Shouldn't be! Error %d\n",
1247 error);
1248 if (error == EINTR || error == ERESTART)
1249 error = 0;
1250 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1251 panic("interrupted eshwrite!");
1252 #if 0
1253 /* Better do *something* here! */
1254 esh_flush_send_ring(sc, di);
1255 #endif
1256 error = EINTR;
1257 break;
1258 }
1259 }
1260 }
1261
1262 if (error == 0 && di->ed_error != 0)
1263 error = EIO;
1264
1265 /*
1266 * How do we let the caller know how much has been written?
1267 * Adjust the uio_resid stuff!?
1268 */
1269
1270 uio->uio_resid -= len;
1271 uio->uio_offset += len;
1272
1273 for (i = 0; i < uio->uio_iovcnt; i++) {
1274 iovp = &uio->uio_iov[i];
1275 uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
1276 }
1277
1278 uvm_lwp_rele(l); /* Release process info */
1279 esh_free_dmainfo(sc, di);
1280
1281 fpwrite_done:
1282 #ifdef ESH_PRINTF
1283 printf("esh_fpwrite: error %d\n", error);
1284 #endif
1285 splx(s);
1286 return error;
1287 }
1288
1289 void
1290 esh_fpstrategy(bp)
1291 struct buf *bp;
1292 {
1293 struct esh_softc *sc;
1294 int ulp = ESHULP(bp->b_dev);
1295 int error = 0;
1296 int s;
1297
1298 #ifdef ESH_PRINTF
1299 printf("esh_fpstrategy: starting, bcount %ld, flags %lx, dev %x\n"
1300 "\tunit %x, ulp %d\n",
1301 bp->b_bcount, bp->b_flags, bp->b_dev, unit, ulp);
1302 #endif
1303
1304 sc = device_lookup_private(&esh_cd, ESHUNIT(bp->b_dev));
1305
1306 s = splnet();
1307 if (sc == NULL || ulp == HIPPI_ULP_802) {
1308 bp->b_error = ENXIO;
1309 goto done;
1310 }
1311
1312 if (bp->b_bcount == 0)
1313 goto done;
1314
1315 #define UP_FLAGS (ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
1316
1317 if ((sc->sc_flags & UP_FLAGS) != UP_FLAGS) {
1318 bp->b_error = EBUSY;
1319 goto done;
1320 }
1321 #undef UP_FLAGS
1322
1323 if (bp->b_flags & B_READ) {
1324 /*
1325 * Perform preliminary DMA mapping and throw the buffers
1326 * onto the queue to be sent.
1327 */
1328
1329 struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[ulp];
1330 struct esh_dmainfo *di = esh_new_dmainfo(sc);
1331
1332 if (di == NULL) {
1333 bp->b_error = ENOMEM;
1334 goto done;
1335 }
1336 di->ed_buf = bp;
1337 error = bus_dmamap_load(sc->sc_dmat, di->ed_dma,
1338 bp->b_data, bp->b_bcount,
1339 bp->b_proc,
1340 BUS_DMA_READ|BUS_DMA_WAITOK);
1341 if (error) {
1342 aprint_error_dev(&sc->sc_dev, "esh_fpstrategy: "
1343 "bus_dmamap_load "
1344 "failed\terror code %d\n",
1345 error);
1346 bp->b_error = ENOBUFS;
1347 esh_free_dmainfo(sc, di);
1348 goto done;
1349 }
1350
1351 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1352 0, di->ed_dma->dm_mapsize,
1353 BUS_DMASYNC_PREREAD);
1354
1355 #ifdef ESH_PRINTF
1356 printf("fpstrategy: di %p\n", di);
1357 #endif
1358
1359 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1360 esh_fill_fp_ring(sc, ring);
1361 } else {
1362 /*
1363 * Queue up the buffer for future sending. If the card
1364 * isn't already transmitting, give it a kick.
1365 */
1366
1367 struct esh_send_ring_ctl *ring = &sc->sc_send;
1368 BUFQ_PUT(ring->ec_buf_queue, bp);
1369 #ifdef ESH_PRINTF
1370 printf("esh_fpstrategy: ready to call eshstart to write!\n");
1371 #endif
1372 eshstart(&sc->sc_if);
1373 }
1374 splx(s);
1375 return;
1376
1377 done:
1378 splx(s);
1379 #ifdef ESH_PRINTF
1380 printf("esh_fpstrategy: failing, bp->b_error %d!\n",
1381 bp->b_error);
1382 #endif
1383 biodone(bp);
1384 }
1385
/*
 * Handle interrupts.  This is basically event handling code;  version two
 * firmware tries to speed things up by just telling us the location
 * of the producer and consumer indices, rather than sending us an event.
 */
1391
/*
 * Interrupt handler:  drain the event ring, dispatching on each event
 * code, then (for version-2 RunCode) process the send/receive consumer
 * indices the firmware publishes directly in NIC registers.  Returns
 * nonzero when the interrupt was ours.
 */
int
eshintr(arg)
	void *arg;
{
	struct esh_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rc_offsets;
	u_int32_t misc_host_ctl;
	int rc_send_consumer = 0;	/* shut up compiler */
	int rc_snap_ring_consumer = 0;	/* ditto */
	u_int8_t fp_ring_consumer[RR_MAX_RECV_RING];
	int start_consumer;
	int ret = 0;

	/* okay/blah/sbuf/t are only for the ESH_PRINTF debug traces. */
	int okay = 0;
	int blah = 0;
	char sbuf[100];
	char t[100];


	/* Check to see if this is our interrupt. */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	if ((misc_host_ctl & RR_MH_INTERRUPT) == 0)
		return 0;

	/* If we can't do anything with the interrupt, just drop it */

	if (sc->sc_flags == 0)
		return 1;

	rc_offsets = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_event_producer = rc_offsets & 0xff;
	if (sc->sc_version == 2) {
		int i;

		/*
		 * Version-2 firmware packs the send and SNAP-ring
		 * consumers into the event-producer register, and
		 * publishes the per-FP-ring consumers (one byte each)
		 * in RR_RUNCODE_RECV_CONS, read a word at a time.
		 */
		sbuf[0] = '\0';
		strlcat(sbuf, "rc: ", sizeof(sbuf));
		rc_send_consumer = (rc_offsets >> 8) & 0xff;
		rc_snap_ring_consumer = (rc_offsets >> 16) & 0xff;
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			rc_offsets =
				bus_space_read_4(iot, ioh,
						 RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(rc_offsets);
			/*
			 * NOTE(review):  word-store through a cast into a
			 * u_int8_t array -- relies on fp_ring_consumer[]
			 * being word-aligned on the stack; flagged "XXX"
			 * above for a reason.
			 */
			*((u_int32_t *) &fp_ring_consumer[i]) = rc_offsets;
			snprintf(t, sizeof(t), "%.8x|", rc_offsets);
			strlcat(sbuf, t, sizeof(sbuf));
		}
	}
	start_consumer = sc->sc_event_consumer;

	/* Take care of synchronizing DMA with entries we read... */

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Consume events until we catch up with the producer. */
	while (sc->sc_event_consumer != sc->sc_event_producer) {
		struct rr_event *event =
			&sc->sc_event_ring[sc->sc_event_consumer];

#ifdef ESH_PRINTF
		if (event->re_code != RR_EC_WATCHDOG &&
		    event->re_code != RR_EC_STATS_UPDATE &&
		    event->re_code != RR_EC_SET_CMD_CONSUMER) {
			printf("%s:  event code %x, ring %d, index %d\n",
			       device_xname(&sc->sc_dev), event->re_code,
			       event->re_ring, event->re_index);
			if (okay == 0)
				printf("%s\n", sbuf);
			okay = 1;
		}
#endif
		ret = 1;   /* some action was taken by card */

		switch(event->re_code) {
		case RR_EC_RUNCODE_UP:
			/* Firmware finished booting:  bring rings back up. */
			printf("%s:  firmware up\n", device_xname(&sc->sc_dev));
			sc->sc_flags |= ESH_FL_RUNCODE_UP;
			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			if ((ifp->if_flags & IFF_UP) != 0)
				esh_init_snap_ring(sc);
			if (sc->sc_fp_rings > 0)
				esh_init_fp_rings(sc);

			/*
			 * XXX:  crank up FP rings that might be
			 *       in use after a reset!
			 */
			wakeup((void *) sc);
			break;

		case RR_EC_WATCHDOG:
			/*
			 * Record the watchdog event.
			 * This is checked by eshwatchdog
			 */

			sc->sc_watchdog = 1;
			break;

		case RR_EC_SET_CMD_CONSUMER:
			sc->sc_cmd_consumer = event->re_index;
			break;

		case RR_EC_LINK_ON:
			printf("%s:  link up\n", device_xname(&sc->sc_dev));
			sc->sc_flags |= ESH_FL_LINK_UP;

			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
			if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
				/*
				 * Interface is now `running', with no
				 * output active.
				 */
				ifp->if_flags |= IFF_RUNNING;
				ifp->if_flags &= ~IFF_OACTIVE;

				/* Attempt to start output, if any. */
			}
			eshstart(ifp);
			break;

		case RR_EC_LINK_OFF:
			sc->sc_flags &= ~ESH_FL_LINK_UP;
			printf("%s:  link down\n", device_xname(&sc->sc_dev));
			break;

		/*
		 * These are all unexpected.  We need to handle all
		 * of them, though.
		 */

		case RR_EC_INVALID_CMD:
		case RR_EC_INTERNAL_ERROR:
		case RR2_EC_INTERNAL_ERROR:
		case RR_EC_BAD_SEND_RING:
		case RR_EC_BAD_SEND_BUF:
		case RR_EC_BAD_SEND_DESC:
		case RR_EC_RECV_RING_FLUSH:
		case RR_EC_RECV_ERROR_INFO:
		case RR_EC_BAD_RECV_BUF:
		case RR_EC_BAD_RECV_DESC:
		case RR_EC_BAD_RECV_RING:
		case RR_EC_UNIMPLEMENTED:
			/* Fatal firmware complaints:  mark the card crashed. */
			aprint_error_dev(&sc->sc_dev, "unexpected event %x;"
			       "shutting down interface\n",
			       event->re_code);
			ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
			sc->sc_flags = ESH_FL_CRASHED;
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			break;

#define CALLOUT(a) case a:						\
	printf("%s:  Event " #a " received -- "				\
	       "ring %d index %d timestamp %x\n",			\
	       device_xname(&sc->sc_dev), event->re_ring, event->re_index,	\
	       event->re_timestamp);					\
	break;

		CALLOUT(RR_EC_NO_RING_FOR_ULP);
		CALLOUT(RR_EC_REJECTING);  /* dropping packets */
#undef CALLOUT

			/* Send events */

		case RR_EC_PACKET_SENT:  	/* not used in firmware 2.x */
			ifp->if_opackets++;
			/* FALLTHROUGH */

		case RR_EC_SET_SND_CONSUMER:
			assert(sc->sc_version == 1);
			/* FALLTHROUGH */

		case RR_EC_SEND_RING_LOW:
			eshstart_cleanup(sc, event->re_index, 0);
			break;


		case RR_EC_CONN_REJECT:
		case RR_EC_CAMPON_TIMEOUT:
		case RR_EC_CONN_TIMEOUT:
		case RR_EC_DISCONN_ERR:
		case RR_EC_INTERNAL_PARITY:
		case RR_EC_TX_IDLE:
		case RR_EC_SEND_LINK_OFF:
			/* Send-side errors:  clean up, propagating the code. */
			eshstart_cleanup(sc, event->re_index, event->re_code);
			break;

			/* Receive events */

		case RR_EC_RING_ENABLED:
			if (event->re_ring == HIPPI_ULP_802) {
				rc_snap_ring_consumer = 0;	/* prevent read */
				sc->sc_flags |= ESH_FL_SNAP_RING_UP;
				esh_fill_snap_ring(sc);

				if (sc->sc_flags & ESH_FL_LINK_UP) {
					/*
					 * Interface is now `running', with no
					 * output active.
					 */
					ifp->if_flags |= IFF_RUNNING;
					ifp->if_flags &= ~IFF_OACTIVE;

					/* Attempt to start output, if any. */

					eshstart(ifp);
				}
#ifdef ESH_PRINTF
				if (event->re_index != 0)
					printf("ENABLE snap ring -- index %d instead of 0!\n",
					       event->re_index);
#endif
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];

				sc->sc_flags |= ESH_FL_FP_RING_UP;
#ifdef ESH_PRINTF
				printf("eshintr:  FP ring %d up\n",
				       event->re_ring);
#endif

				/* Publish ring index; esh_fpopen sleeps on ec_ulp. */
				sc->sc_fp_recv_index[event->re_index] = ring;
				ring->ec_index = event->re_index;
				wakeup((void *) &ring->ec_ulp);
			}
			break;

		case RR_EC_RING_DISABLED:
#ifdef ESH_PRINTF
			printf("eshintr:  disabling ring %d\n",
			       event->re_ring);
#endif
			if (event->re_ring == HIPPI_ULP_802) {
				struct rr_ring_ctl *ring =
					sc->sc_recv_ring_table + HIPPI_ULP_802;
				memset(ring, 0, sizeof(*ring));
				sc->sc_flags &= ~ESH_FL_CLOSING_SNAP;
				sc->sc_flags &= ~ESH_FL_SNAP_RING_UP;
				/* Reclaim every mbuf still on the SNAP ring. */
				while (sc->sc_snap_recv.ec_consumer
				       != sc->sc_snap_recv.ec_producer) {
					struct mbuf *m0;
					u_int16_t offset = sc->sc_snap_recv.ec_consumer;

					bus_dmamap_unload(sc->sc_dmat,
							  sc->sc_snap_recv.ec_dma[offset]);
					MFREE(sc->sc_snap_recv.ec_m[offset], m0);
					sc->sc_snap_recv.ec_m[offset] = NULL;
					sc->sc_snap_recv.ec_consumer =
						NEXT_RECV(sc->sc_snap_recv.ec_consumer);
				}
				sc->sc_snap_recv.ec_consumer =
					rc_snap_ring_consumer;
				sc->sc_snap_recv.ec_producer =
					rc_snap_ring_consumer;
				wakeup((void *) &sc->sc_snap_recv);
			} else {
				struct esh_fp_ring_ctl *recv =
					sc->sc_fp_recv[event->re_ring];
				assert(recv != NULL);
				/*
				 * NOTE(review):  fp_ring_consumer[] is only
				 * filled in for version-2 firmware; presumably
				 * this event only arrives then -- confirm.
				 */
				recv->ec_consumer = recv->ec_producer =
					fp_ring_consumer[recv->ec_index];
				recv->ec_index = -1;
				wakeup((void *) &recv->ec_index);
			}
			break;

		case RR_EC_RING_ENABLE_ERR:
			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(&sc->sc_dev, "unable to enable SNAP ring!?\n\t"
				       "shutting down interface\n");
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef ESH_PRINTF
				eshstatus(sc);
#endif
			} else {
				/*
				 * If we just leave the ring index as-is,
				 * the driver will figure out that
				 * we failed to open the ring.
				 */
				wakeup((void *) &(sc->sc_fp_recv[event->re_ring]->ec_ulp));
			}
			break;

		case RR_EC_PACKET_DISCARDED:
		        /*
			 * Determine the dmainfo for the current packet
			 * we just discarded and wake up the waiting
			 * process.
			 *
			 * This should never happen on the network ring!
			 */

			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(&sc->sc_dev, "discard on SNAP ring!?\n\t"
				       "shutting down interface\n");
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
				sc->sc_flags = ESH_FL_CRASHED;
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];
				struct esh_dmainfo *di =
					ring->ec_cur_dmainfo;

				if (di == NULL)
					di = ring->ec_dmainfo[ring->ec_producer];
				printf("eshintr:  DISCARD:  index %d,"
				       "ring prod %d, di %p, ring[index] %p\n",
				       event->re_index, ring->ec_producer, di,
				       ring->ec_dmainfo[event->re_index]);

				if (di == NULL)
					di = ring->ec_dmainfo[event->re_index];

				if (di == NULL) {
					printf("eshintr:  DISCARD:  NULL di, skipping...\n");
					break;
				}

				di->ed_flags &=
					~(ESH_DI_READING | ESH_DI_BUSY);
				wakeup((void *) &di->ed_flags);
			}
			break;

		case RR_EC_OUT_OF_BUF:
		case RR_EC_RECV_RING_OUT:
		case RR_EC_RECV_RING_LOW:
			/* Benign flow-control notifications; ignored. */
			break;

		case RR_EC_SET_RECV_CONSUMER:
		case RR_EC_PACKET_RECVED:
			if (event->re_ring == HIPPI_ULP_802)
				esh_read_snap_ring(sc, event->re_index, 0);
			else if (sc->sc_fp_recv[event->re_ring] != NULL)
				esh_read_fp_ring(sc, event->re_index, 0,
						 event->re_ring);
			break;

		case RR_EC_RECV_IDLE:
		case RR_EC_PARITY_ERR:
		case RR_EC_LLRC_ERR:
		case RR_EC_PKT_LENGTH_ERR:
		case RR_EC_IP_HDR_CKSUM_ERR:
		case RR_EC_DATA_CKSUM_ERR:
		case RR_EC_SHORT_BURST_ERR:
		case RR_EC_RECV_LINK_OFF:
		case RR_EC_FLAG_SYNC_ERR:
		case RR_EC_FRAME_ERR:
		case RR_EC_STATE_TRANS_ERR:
		case RR_EC_NO_READY_PULSE:
			/* Receive-side errors:  hand the code to the reader. */
			if (event->re_ring == HIPPI_ULP_802) {
				esh_read_snap_ring(sc, event->re_index,
						   event->re_code);
			} else {
				struct esh_fp_ring_ctl *r;

				r = sc->sc_fp_recv[event->re_ring];
				if (r)
					r->ec_error = event->re_code;
			}
			break;

		/*
		 * Statistics events can be ignored for now.  They might become
		 * necessary if we have to deliver stats on demand, rather than
		 * just returning the statistics block of memory.
		 */

		case RR_EC_STATS_UPDATE:
		case RR_EC_STATS_RETRIEVED:
		case RR_EC_TRACE:
			break;

		default:
			aprint_error_dev(&sc->sc_dev, "Bogus event code %x, "
			       "ring %d, index %d, timestamp %x\n",
			       event->re_code,
			       event->re_ring, event->re_index,
			       event->re_timestamp);
			break;
		}

		sc->sc_event_consumer = NEXT_EVENT(sc->sc_event_consumer);
	}

	/* Do the receive and send ring processing for version 2 RunCode */

	if (sc->sc_version == 2) {
		int i;
		if (sc->sc_send.ec_consumer != rc_send_consumer) {
			eshstart_cleanup(sc, rc_send_consumer, 0);
			ret = 1;
			blah++;
		}
		if (sc->sc_snap_recv.ec_consumer != rc_snap_ring_consumer &&
		    (sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
			esh_read_snap_ring(sc, rc_snap_ring_consumer, 0);
			ret = 1;
			blah++;
		}
		for (i = 0; i < RR_MAX_RECV_RING; i++) {
			struct esh_fp_ring_ctl *r = sc->sc_fp_recv_index[i];

			if (r != NULL &&
			    r->ec_consumer != fp_ring_consumer[i]) {
#ifdef ESH_PRINTF
				printf("eshintr:  performed read on ring %d, index %d\n",
				       r->ec_ulp, i);
#endif
				blah++;
				esh_read_fp_ring(sc, fp_ring_consumer[i],
						 0, r->ec_ulp);
				fp_ring_consumer[i] = r->ec_consumer;
			}
		}
		if (blah != 0 && okay == 0) {
			okay = 1;
#ifdef ESH_PRINTF
			printf("%s\n", sbuf);
#endif
		}
		/* Pack our consumers for the RR_EVENT_CONSUMER write below. */
		rc_offsets = (sc->sc_snap_recv.ec_consumer << 16) |
			(sc->sc_send.ec_consumer << 8) | sc->sc_event_consumer;
	} else {
		rc_offsets = sc->sc_event_consumer;
	}

	/*
	 * NOTE(review):  this second sync of the event ring uses POSTREAD |
	 * POSTWRITE again; giving the ring back to the device normally takes
	 * PREREAD | PREWRITE -- confirm against bus_dma(9) usage elsewhere.
	 */
	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Write out new values for the FP segments... */

	if (sc->sc_version == 2) {
		int i;
		u_int32_t u;

		sbuf[0] = '\0';
		strlcat(sbuf, "drv: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			/* XXX: should do this right! */
			u = *((u_int32_t *) &fp_ring_consumer[i]);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
			NTOHL(u);
			bus_space_write_4(iot, ioh,
					  RR_DRIVER_RECV_CONS + i, u);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif

		sbuf[0] = '\0';
		strlcat(sbuf, "rcn: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			u = bus_space_read_4(iot, ioh,
					     RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(u);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif
	}

	/* Clear interrupt */
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, rc_offsets);

	return (ret);
}
1883
1884
1885 /*
1886 * Start output on the interface. Always called at splnet().
1887 * Check to see if there are any mbufs that didn't get sent the
1888 * last time this was called. If there are none, get more mbufs
1889 * and send 'em.
1890 *
1891 * For now, we only send one packet at a time.
1892 */
1893
1894 void
1895 eshstart(ifp)
1896 struct ifnet *ifp;
1897 {
1898 struct esh_softc *sc = ifp->if_softc;
1899 struct esh_send_ring_ctl *send = &sc->sc_send;
1900 struct mbuf *m = NULL;
1901 int error;
1902
1903 /* Don't transmit if interface is busy or not running */
1904
1905 #ifdef ESH_PRINTF
1906 printf("eshstart: ready to look; flags %x\n", sc->sc_flags);
1907 #endif
1908
1909 #define LINK_UP_FLAGS (ESH_FL_LINK_UP | ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
1910 if ((sc->sc_flags & LINK_UP_FLAGS) != LINK_UP_FLAGS)
1911 return;
1912 #undef LINK_UP_FLAGS
1913
1914 #ifdef ESH_PRINTF
1915 if (esh_check(sc))
1916 return;
1917 #endif
1918
1919 /* If we have sent the current packet, get another */
1920
1921 while ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0 &&
1922 (m = send->ec_cur_mbuf) == NULL && send->ec_cur_buf == NULL &&
1923 send->ec_cur_dmainfo == NULL) {
1924 IFQ_DEQUEUE(&ifp->if_snd, m);
1925 if (m == 0) /* not really needed */
1926 break;
1927
1928 #if NBPFILTER > 0
1929 if (ifp->if_bpf) {
1930 /*
1931 * On output, the raw packet has a eight-byte CCI
1932 * field prepended. On input, there is no such field.
1933 * The bpf expects the packet to look the same in both
1934 * places, so we temporarily lop off the prepended CCI
1935 * field here, then replace it. Ugh.
1936 *
1937 * XXX: Need to use standard mbuf manipulation
1938 * functions, first mbuf may be less than
1939 * 8 bytes long.
1940 */
1941
1942 m->m_len -= 8;
1943 m->m_data += 8;
1944 m->m_pkthdr.len -= 8;
1945 bpf_mtap(ifp->if_bpf, m);
1946 m->m_len += 8;
1947 m->m_data -= 8;
1948 m->m_pkthdr.len += 8;
1949 }
1950 #endif
1951
1952 send->ec_len = m->m_pkthdr.len;
1953 m = send->ec_cur_mbuf = esh_adjust_mbufs(sc, m);
1954 if (m == NULL)
1955 continue;
1956
1957 error = bus_dmamap_load_mbuf(sc->sc_dmat, send->ec_dma,
1958 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1959 if (error)
1960 panic("%s: eshstart: "
1961 "bus_dmamap_load_mbuf failed err %d\n",
1962 device_xname(&sc->sc_dev), error);
1963 send->ec_offset = 0;
1964 }
1965
1966 /*
1967 * If there are no network packets to send, see if there
1968 * are any FP packets to send.
1969 *
1970 * XXX: Some users may disagree with these priorities;
1971 * this reduces network latency by increasing FP latency...
1972 * Note that it also means that FP packets can get
1973 * locked out so that they *never* get sent, if the
1974 * network constantly fills up the pipe. Not good!
1975 */
1976
1977 if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
1978 send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
1979 send->ec_cur_dmainfo == NULL &&
1980 BUFQ_PEEK(send->ec_buf_queue) != NULL) {
1981 struct buf *bp;
1982
1983 #ifdef ESH_PRINTF
1984 printf("eshstart: getting a buf from send->ec_queue %p\n",
1985 send->ec_queue);
1986 #endif
1987
1988 bp = send->ec_cur_buf = BUFQ_GET(send->ec_buf_queue);
1989 send->ec_offset = 0;
1990 send->ec_len = bp->b_bcount;
1991
1992 /*
1993 * Determine the DMA mapping for the buffer.
1994 * If this is too large, what do we do!?
1995 */
1996
1997 error = bus_dmamap_load(sc->sc_dmat, send->ec_dma,
1998 bp->b_data, bp->b_bcount,
1999 bp->b_proc,
2000 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2001
2002 if (error)
2003 panic("%s: eshstart: "
2004 "bus_dmamap_load failed err %d\n",
2005 device_xname(&sc->sc_dev), error);
2006 }
2007
2008 /*
2009 * If there are no packets from strategy to send, see if there
2010 * are any FP packets to send from fpwrite.
2011 */
2012
2013 if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
2014 send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
2015 send->ec_cur_dmainfo == NULL) {
2016 struct esh_dmainfo *di;
2017
2018 di = TAILQ_FIRST(&send->ec_di_queue);
2019 if (di == NULL)
2020 return;
2021 TAILQ_REMOVE(&send->ec_di_queue, di, ed_list);
2022
2023 #ifdef ESH_PRINTF
2024 printf("eshstart: getting a di from send->ec_di_queue %p\n",
2025 &send->ec_di_queue);
2026 #endif
2027
2028 send->ec_cur_dmainfo = di;
2029 send->ec_offset = 0;
2030 send->ec_len = di->ed_dma->dm_mapsize;
2031 }
2032
2033 if (send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
2034 send->ec_cur_dmainfo == NULL)
2035 return;
2036
2037 assert(send->ec_len);
2038 assert(send->ec_dma->dm_nsegs ||
2039 send->ec_cur_dmainfo->ed_dma->dm_nsegs);
2040 assert(send->ec_cur_mbuf || send->ec_cur_buf || send->ec_cur_dmainfo);
2041
2042 esh_send(sc);
2043 return;
2044 }
2045
2046
2047 /*
2048 * Put the buffers from the send dmamap into the descriptors and
2049 * send 'em off...
2050 */
2051
/*
 * Copy the segments of the currently-loaded send dmamap (chosen by
 * eshstart()) into send-ring descriptors, then notify the NIC of the
 * new producer index.  If the ring fills before all segments are
 * placed, ec_offset records how far we got; eshstart_cleanup() will
 * re-invoke us to place the remainder.
 */
static void
esh_send(sc)
	struct esh_softc *sc;
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	u_int start_producer = send->ec_producer;
	bus_dmamap_t dma;

	/* fpwrite traffic carries its own dmamap; everything else shares. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("esh_send:  producer %x  consumer %x  nsegs %d\n",
	       send->ec_producer, send->ec_consumer, dma->dm_nsegs);
#endif

	esh_dma_sync(sc, send->ec_descr, send->ec_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Fill descriptors until the ring is full or segments run out. */
	while (NEXT_SEND(send->ec_producer) != send->ec_consumer &&
	       send->ec_offset < dma->dm_nsegs) {
		int offset = send->ec_producer;

		send->ec_descr[offset].rd_buffer_addr =
			dma->dm_segs[send->ec_offset].ds_addr;
		send->ec_descr[offset].rd_length =
			dma->dm_segs[send->ec_offset].ds_len;
		send->ec_descr[offset].rd_control = 0;

		if (send->ec_offset == 0) {
			/* Start of the dmamap... */
			send->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		if (send->ec_offset + 1 == dma->dm_nsegs) {
			/* Last segment:  mark end of packet. */
			send->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
		}

		send->ec_offset++;
		send->ec_producer = NEXT_SEND(send->ec_producer);
	}

	/*
	 * XXX:  we could optimize the dmamap_sync to just get what we've
	 *       just set up, rather than the whole buffer...
	 */

	bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
			BUS_DMASYNC_PREWRITE);
	esh_dma_sync(sc, send->ec_descr,
		     start_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef ESH_PRINTF
	if (send->ec_offset != dma->dm_nsegs)
		printf("eshstart:  couldn't fit packet in send ring!\n");
#endif

	/* Version-1 firmware learns the producer via a command;  v2 by register. */
	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_SEND_PRODUCER,
			     0, send->ec_producer);
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_SEND_PRODUCER, send->ec_producer);
	}
	return;
}
2124
2125
2126 /*
2127 * Cleanup for the send routine. When the NIC sends us an event to
2128 * let us know that it has consumed our buffers, we need to free the
2129 * buffers, and possibly send another packet.
2130 */
2131
/*
 * Reclaim send-ring descriptors the NIC has consumed (up to `consumer'),
 * completing whichever source owned the packet:  free the mbuf chain,
 * wake the fpwrite sleeper via its dmainfo, or biodone() the buf.
 * `error' (an RR event code, or 0) is latched into ec_error and
 * delivered to the dmainfo owner at packet end.  Finishes by calling
 * eshstart() to keep the pipeline moving.
 */
static void
eshstart_cleanup(sc, consumer, error)
	struct esh_softc *sc;
	u_int16_t consumer;
	int error;
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	int start_consumer = send->ec_consumer;
	bus_dmamap_t dma;

	/* Same dmamap-selection rule as esh_send(). */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("eshstart_cleanup:  consumer %x, send->consumer %x\n",
	       consumer, send->ec_consumer);
#endif

	esh_dma_sync(sc, send->ec_descr,
		     send->ec_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (send->ec_consumer != consumer) {
		assert(dma->dm_nsegs);
		assert(send->ec_cur_mbuf || send->ec_cur_buf ||
		       send->ec_cur_dmainfo);

		/* Only complete the packet once its END descriptor is consumed. */
		if (send->ec_descr[send->ec_consumer].rd_control &
		    RR_CT_PACKET_END) {
#ifdef ESH_PRINT
			printf("eshstart_cleanup:  dmamap_sync mapsize %d\n",
			       send->ec_dma->dm_mapsize);
#endif
			bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma);
			if (send->ec_cur_mbuf) {
				m_freem(send->ec_cur_mbuf);
				send->ec_cur_mbuf = NULL;
			} else if (send->ec_cur_dmainfo) {
				/* fpwrite sleeper:  clear BUSY and wake it. */
				send->ec_cur_dmainfo->ed_flags &= ~ESH_DI_BUSY;
				send->ec_cur_dmainfo->ed_error =
					(send->ec_error ? send->ec_error : error);
				send->ec_error = 0;
				wakeup((void *) send->ec_cur_dmainfo);
				send->ec_cur_dmainfo = NULL;
			} else if (send->ec_cur_buf) {
				biodone(send->ec_cur_buf);
				send->ec_cur_buf = NULL;
			} else {
				panic("%s:  eshstart_cleanup:  "
				      "no current mbuf, buf, or dmainfo!\n",
				      device_xname(&sc->sc_dev));
			}

			/*
			 * Version 1 of the firmware sent an event each
			 * time it sent out a packet.  Later versions do not
			 * (which results in a considerable speedup), so we
			 * have to keep track here.
			 */

			if (sc->sc_version != 1)
				sc->sc_if.if_opackets++;
		}
		if (error != 0)
			send->ec_error = error;

		send->ec_consumer = NEXT_SEND(send->ec_consumer);
	}

	/* Hand the reclaimed descriptors back to the device. */
	esh_dma_sync(sc, send->ec_descr,
		     start_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	eshstart(&sc->sc_if);
}
2213
2214
2215 /*
2216 * XXX: Ouch: The NIC can only send word-aligned buffers, and only
2217 * the last buffer in the packet can have a length that is not
2218 * a multiple of four!
2219 *
2220 * Here we traverse the packet, pick out the bogus mbufs, and fix 'em
2221 * if possible. The fix is amazingly expensive, so we sure hope that
2222 * this is a rare occurance (it seems to be).
2223 */
2224
2225 static struct mbuf *
2226 esh_adjust_mbufs(sc, m)
2227 struct esh_softc *sc;
2228 struct mbuf *m;
2229 {
2230 struct mbuf *m0, *n, *n0;
2231 u_int32_t write_len;
2232
2233 write_len = m->m_pkthdr.len;
2234 #ifdef DIAGNOSTIC
2235 if (write_len > max_write_len)
2236 max_write_len = write_len;
2237 #endif
2238
2239 for (n0 = n = m; n; n = n->m_next) {
2240 while (n && n->m_len == 0) {
2241 MFREE(n, m0);
2242 if (n == m)
2243 n = n0 = m = m0;
2244 else
2245 n = n0->m_next = m0;
2246 }
2247 if (n == NULL)
2248 break;
2249
2250 if (mtod(n, long) & 3 || (n->m_next && n->m_len & 3)) {
2251 /* Gotta clean it up */
2252 struct mbuf *o;
2253 u_int32_t len;
2254
2255 sc->sc_misaligned_bufs++;
2256 MGETHDR(o, M_DONTWAIT, MT_DATA);
2257 if (!o)
2258 goto bogosity;
2259
2260 MCLGET(o, M_DONTWAIT);
2261 if (!(o->m_flags & M_EXT)) {
2262 MFREE(o, m0);
2263 goto bogosity;
2264 }
2265
2266 /*
2267 * XXX: Copy as much as we can into the
2268 * cluster. For now we can't have more
2269 * than a cluster in there. May change.
2270 * I'd prefer not to get this
2271 * down-n-dirty, but we have to be able
2272 * to do this kind of funky copy.
2273 */
2274
2275 len = min(MCLBYTES, write_len);
2276 #ifdef DIAGNOSTIC
2277 assert(n->m_len <= len);
2278 assert(len <= MCLBYTES);
2279 #endif
2280
2281 m_copydata(n, 0, len, mtod(o, void *));
2282 o->m_pkthdr.len = len;
2283 m_adj(n, len);
2284 o->m_len = len;
2285 o->m_next = n;
2286
2287 if (n == m)
2288 m = o;
2289 else
2290 n0->m_next = o;
2291 n = o;
2292 }
2293 n0 = n;
2294 write_len -= n->m_len;
2295 }
2296 return m;
2297
2298 bogosity:
2299 aprint_error_dev(&sc->sc_dev, "esh_adjust_mbuf: unable to allocate cluster for "
2300 "mbuf %p, len %x\n",
2301 mtod(m, void *), m->m_len);
2302 m_freem(m);
2303 return NULL;
2304 }
2305
2306
2307 /*
2308 * Read in the current valid entries from the ring and forward
2309 * them to the upper layer protocols. It is possible that we
2310 * haven't received the whole packet yet, in which case we just
2311 * add each of the buffers into the packet until we have the whole
2312 * thing.
2313 */
2314
2315 static void
2316 esh_read_snap_ring(sc, consumer, error)
2317 struct esh_softc *sc;
2318 u_int16_t consumer;
2319 int error;
2320 {
2321 struct ifnet *ifp = &sc->sc_if;
2322 struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
2323 int start_consumer = recv->ec_consumer;
2324 u_int16_t control;
2325
2326 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2327 return;
2328
2329 if (error)
2330 recv->ec_error = error;
2331
2332 esh_dma_sync(sc, recv->ec_descr,
2333 start_consumer, consumer,
2334 RR_SNAP_RECV_RING_SIZE,
2335 sizeof(struct rr_descr), 0,
2336 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2337
2338 while (recv->ec_consumer != consumer) {
2339 u_int16_t offset = recv->ec_consumer;
2340 struct mbuf *m;
2341
2342 m = recv->ec_m[offset];
2343 m->m_len = recv->ec_descr[offset].rd_length;
2344 control = recv->ec_descr[offset].rd_control;
2345 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, m->m_len,
2346 BUS_DMASYNC_POSTREAD);
2347 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma[offset]);
2348
2349 #ifdef ESH_PRINTF
2350 printf("esh_read_snap_ring: offset %x addr %p len %x flags %x\n",
2351 offset, mtod(m, void *), m->m_len, control);
2352 #endif
2353 if (control & RR_CT_PACKET_START || !recv->ec_cur_mbuf) {
2354 if (recv->ec_cur_pkt) {
2355 m_freem(recv->ec_cur_pkt);
2356 recv->ec_cur_pkt = NULL;
2357 printf("%s: possible skipped packet!\n",
2358 device_xname(&sc->sc_dev));
2359 }
2360 recv->ec_cur_pkt = recv->ec_cur_mbuf = m;
2361 /* allocated buffers all have pkthdrs... */
2362 m->m_pkthdr.rcvif = ifp;
2363 m->m_pkthdr.len = m->m_len;
2364 } else {
2365 if (!recv->ec_cur_pkt)
2366 panic("esh_read_snap_ring: no cur_pkt");
2367
2368 recv->ec_cur_mbuf->m_next = m;
2369 recv->ec_cur_mbuf = m;
2370 recv->ec_cur_pkt->m_pkthdr.len += m->m_len;
2371 }
2372
2373 recv->ec_m[offset] = NULL;
2374 recv->ec_descr[offset].rd_length = 0;
2375 recv->ec_descr[offset].rd_buffer_addr = 0;
2376
2377 /* Note that we can START and END on the same buffer */
2378
2379 if (control & RR_CT_PACKET_END) { /* XXX: RR2_ matches */
2380 m = recv->ec_cur_pkt;
2381 if (!error && !recv->ec_error) {
2382 /*
2383 * We have a complete packet, send it up
2384 * the stack...
2385 */
2386 ifp->if_ipackets++;
2387
2388 #if NBPFILTER > 0
2389 /*
2390 * Check if there's a BPF listener on this
2391 * interface. If so, hand off the raw packet
2392 * to BPF.
2393 */
2394 if (ifp->if_bpf) {
2395 /*
2396 * Incoming packets start with the FP
2397 * data, so no alignment problems
2398 * here...
2399 */
2400 bpf_mtap(ifp->if_bpf, m);
2401 }
2402 #endif
2403 if ((ifp->if_flags & IFF_RUNNING) == 0) {
2404 m_freem(m);
2405 } else {
2406 m = m_pullup(m,
2407 sizeof(struct hippi_header));
2408 (*ifp->if_input)(ifp, m);
2409 }
2410 } else {
2411 ifp->if_ierrors++;
2412 recv->ec_error = 0;
2413 m_freem(m);
2414 }
2415 recv->ec_cur_pkt = recv->ec_cur_mbuf = NULL;
2416 }
2417
2418 recv->ec_descr[offset].rd_control = 0;
2419 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2420 }
2421
2422 esh_dma_sync(sc, recv->ec_descr,
2423 start_consumer, consumer,
2424 RR_SNAP_RECV_RING_SIZE,
2425 sizeof(struct rr_descr), 0,
2426 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2427
2428 esh_fill_snap_ring(sc);
2429 }
2430
2431
2432 /*
2433 * Add the SNAP (IEEE 802) receive ring to the NIC. It is possible
2434 * that we are doing this after resetting the card, in which case
2435 * the structures have already been filled in and we may need to
2436 * resume sending data.
2437 */
2438
2439 static void
2440 esh_init_snap_ring(sc)
2441 struct esh_softc *sc;
2442 {
2443 struct rr_ring_ctl *ring = sc->sc_recv_ring_table + HIPPI_ULP_802;
2444
2445 if ((sc->sc_flags & ESH_FL_CLOSING_SNAP) != 0) {
2446 aprint_error_dev(&sc->sc_dev, "can't reopen SNAP ring until ring disable is completed\n");
2447 return;
2448 }
2449
2450 if (ring->rr_entry_size == 0) {
2451 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2452 (char *) ring - (char *) sc->sc_dma_addr,
2453 sizeof(*ring),
2454 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2455
2456 ring->rr_ring_addr = sc->sc_snap_recv_ring_dma;
2457 ring->rr_free_bufs = RR_SNAP_RECV_RING_SIZE / 4;
2458 ring->rr_entries = RR_SNAP_RECV_RING_SIZE;
2459 ring->rr_entry_size = sizeof(struct rr_descr);
2460 ring->rr_prod_index = 0;
2461 sc->sc_snap_recv.ec_producer = 0;
2462 sc->sc_snap_recv.ec_consumer = 0;
2463 ring->rr_mode = RR_RR_IP;
2464
2465 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2466 (char *) ring - (char *) sc->sc_dma_addr,
2467 sizeof(ring),
2468 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2469 esh_send_cmd(sc, RR_CC_ENABLE_RING, HIPPI_ULP_802,
2470 sc->sc_snap_recv.ec_producer);
2471 } else {
2472 printf("%s: snap receive ring already initialized!\n",
2473 device_xname(&sc->sc_dev));
2474 }
2475 }
2476
2477 static void
2478 esh_close_snap_ring(sc)
2479 struct esh_softc *sc;
2480 {
2481 #ifdef ESH_PRINTF
2482 printf("esh_close_snap_ring: starting\n");
2483 #endif
2484
2485 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2486 return;
2487
2488 sc->sc_flags |= ESH_FL_CLOSING_SNAP;
2489 esh_send_cmd(sc, RR_CC_DISABLE_RING, HIPPI_ULP_802, 0);
2490
2491 /* Disable event will trigger the rest of the cleanup. */
2492 }
2493
2494 /*
2495 * Fill in the snap ring with more mbuf buffers so that we can
2496 * receive traffic.
2497 */
2498
2499 static void
2500 esh_fill_snap_ring(sc)
2501 struct esh_softc *sc;
2502 {
2503 struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
2504 int start_producer = recv->ec_producer;
2505 int error;
2506
2507 esh_dma_sync(sc, recv->ec_descr,
2508 recv->ec_producer, recv->ec_consumer,
2509 RR_SNAP_RECV_RING_SIZE,
2510 sizeof(struct rr_descr), 1,
2511 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2512
2513 while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
2514 int offset = recv->ec_producer;
2515 struct mbuf *m, *m0;
2516
2517 MGETHDR(m, M_DONTWAIT, MT_DATA);
2518 if (!m)
2519 break;
2520 MCLGET(m, M_DONTWAIT);
2521 if ((m->m_flags & M_EXT) == 0) {
2522 MFREE(m, m0);
2523 break;
2524 }
2525
2526 error = bus_dmamap_load(sc->sc_dmat, recv->ec_dma[offset],
2527 mtod(m, void *), MCLBYTES,
2528 NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
2529 if (error) {
2530 printf("%s: esh_fill_recv_ring: bus_dmamap_load "
2531 "failed\toffset %x, error code %d\n",
2532 device_xname(&sc->sc_dev), offset, error);
2533 MFREE(m, m0);
2534 break;
2535 }
2536
2537 /*
2538 * In this implementation, we should only see one segment
2539 * per DMA.
2540 */
2541
2542 assert(recv->ec_dma[offset]->dm_nsegs == 1);
2543
2544 /*
2545 * Load into the descriptors.
2546 */
2547
2548 recv->ec_descr[offset].rd_ring =
2549 (sc->sc_version == 1) ? HIPPI_ULP_802 : 0;
2550 recv->ec_descr[offset].rd_buffer_addr =
2551 recv->ec_dma[offset]->dm_segs->ds_addr;
2552 recv->ec_descr[offset].rd_length =
2553 recv->ec_dma[offset]->dm_segs->ds_len;
2554 recv->ec_descr[offset].rd_control = 0;
2555
2556 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, MCLBYTES,
2557 BUS_DMASYNC_PREREAD);
2558
2559 recv->ec_m[offset] = m;
2560
2561 recv->ec_producer = NEXT_RECV(recv->ec_producer);
2562 }
2563
2564 esh_dma_sync(sc, recv->ec_descr,
2565 start_producer, recv->ec_consumer,
2566 RR_SNAP_RECV_RING_SIZE,
2567 sizeof(struct rr_descr), 1,
2568 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2569
2570 if (sc->sc_version == 1)
2571 esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, HIPPI_ULP_802,
2572 recv->ec_producer);
2573 else
2574 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
2575 RR_SNAP_RECV_PRODUCER, recv->ec_producer);
2576 }
2577
2578 static void
2579 esh_init_fp_rings(sc)
2580 struct esh_softc *sc;
2581 {
2582 struct esh_fp_ring_ctl *recv;
2583 struct rr_ring_ctl *ring_ctl;
2584 int ulp;
2585
2586 for (ulp = 0; ulp < RR_ULP_COUNT; ulp++) {
2587 ring_ctl = &sc->sc_recv_ring_table[ulp];
2588 recv = sc->sc_fp_recv[ulp];
2589
2590 if (recv == NULL)
2591 continue;
2592
2593 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
2594 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
2595 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
2596 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
2597 ring_ctl->rr_prod_index = 0;
2598 ring_ctl->rr_mode = RR_RR_CHARACTER;
2599 recv->ec_producer = 0;
2600 recv->ec_consumer = 0;
2601 recv->ec_index = -1;
2602
2603 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
2604 }
2605 }
2606
2607 static void
2608 esh_read_fp_ring(sc, consumer, error, ulp)
2609 struct esh_softc *sc;
2610 u_int16_t consumer;
2611 int error;
2612 int ulp;
2613 {
2614 struct esh_fp_ring_ctl *recv = sc->sc_fp_recv[ulp];
2615 int start_consumer = recv->ec_consumer;
2616 u_int16_t control;
2617
2618 #ifdef ESH_PRINTF
2619 printf("esh_read_fp_ring: ulp %d, consumer %d, producer %d, old consumer %d\n",
2620 recv->ec_ulp, consumer, recv->ec_producer, recv->ec_consumer);
2621 #endif
2622 if ((sc->sc_flags & ESH_FL_FP_RING_UP) == 0)
2623 return;
2624
2625 if (error != 0)
2626 recv->ec_error = error;
2627
2628 esh_dma_sync(sc, recv->ec_descr,
2629 start_consumer, consumer,
2630 RR_FP_RECV_RING_SIZE,
2631 sizeof(struct rr_descr), 0,
2632 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2633
2634 while (recv->ec_consumer != consumer) {
2635 u_int16_t offset = recv->ec_consumer;
2636
2637 control = recv->ec_descr[offset].rd_control;
2638
2639 if (control & RR_CT_PACKET_START) {
2640 if (recv->ec_read_len) {
2641 recv->ec_error = 0;
2642 printf("%s: ulp %d: possible skipped FP packet!\n",
2643 device_xname(&sc->sc_dev), recv->ec_ulp);
2644 }
2645 recv->ec_seen_end = 0;
2646 recv->ec_read_len = 0;
2647 }
2648 if (recv->ec_seen_end == 0)
2649 recv->ec_read_len += recv->ec_descr[offset].rd_length;
2650
2651 #if NOT_LAME
2652 recv->ec_descr[offset].rd_length = 0;
2653 recv->ec_descr[offset].rd_buffer_addr = 0;
2654 #endif
2655
2656 #ifdef ESH_PRINTF
2657 printf("esh_read_fp_ring: offset %d addr %d len %d flags %x, total %d\n",
2658 offset, recv->ec_descr[offset].rd_buffer_addr,
2659 recv->ec_descr[offset].rd_length, control, recv->ec_read_len);
2660 #endif
2661 /* Note that we can START and END on the same buffer */
2662
2663 if ((control & RR_CT_PACKET_END) == RR_CT_PACKET_END) {
2664 if (recv->ec_dmainfo[offset] != NULL) {
2665 struct esh_dmainfo *di =
2666 recv->ec_dmainfo[offset];
2667
2668 recv->ec_dmainfo[offset] = NULL;
2669 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
2670 0, recv->ec_read_len,
2671 BUS_DMASYNC_POSTREAD);
2672 bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
2673
2674 if (!error && !recv->ec_error) {
2675 /*
2676 * XXX: we oughta do this right, with full
2677 * BPF support and the rest...
2678 */
2679 if (di->ed_buf != NULL) {
2680 di->ed_buf->b_resid =
2681 di->ed_buf->b_bcount -
2682 recv->ec_read_len;
2683 } else {
2684 di->ed_read_len =
2685 recv->ec_read_len;
2686 }
2687 } else {
2688 if (di->ed_buf != NULL) {
2689 di->ed_buf->b_resid =
2690 di->ed_buf->b_bcount;
2691 di->ed_buf->b_error = EIO;
2692 } else {
2693 di->ed_error = EIO;
2694 recv->ec_error = 0;
2695 }
2696 }
2697
2698 #ifdef ESH_PRINTF
2699 printf("esh_read_fp_ring: ulp %d, read %d, resid %ld\n",
2700 recv->ec_ulp, recv->ec_read_len, (di->ed_buf ? di->ed_buf->b_resid : di->ed_read_len));
2701 #endif
2702 di->ed_flags &=
2703 ~(ESH_DI_BUSY | ESH_DI_READING);
2704 if (di->ed_buf != NULL)
2705 biodone(di->ed_buf);
2706 else
2707 wakeup((void *) di);
2708 recv->ec_read_len = 0;
2709 } else {
2710 #ifdef ESH_PRINTF
2711 printf("esh_read_fp_ring: ulp %d, seen end at %d\n",
2712 recv->ec_ulp, offset);
2713 #endif
2714 recv->ec_seen_end = 1;
2715 }
2716 }
2717
2718 #if NOT_LAME
2719 recv->ec_descr[offset].rd_control = 0;
2720 #endif
2721 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2722 }
2723
2724 esh_dma_sync(sc, recv->ec_descr,
2725 start_consumer, consumer,
2726 RR_SNAP_RECV_RING_SIZE,
2727 sizeof(struct rr_descr), 0,
2728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2729
2730 esh_fill_fp_ring(sc, recv);
2731 }
2732
2733
/*
 * Refill an FP receive ring from the queue of pending user reads,
 * loading one dmainfo's DMA segments into consecutive descriptors.
 * Only one read may occupy the ring at a time.
 */
static void
esh_fill_fp_ring(sc, recv)
	struct esh_softc *sc;
	struct esh_fp_ring_ctl *recv;
{
	struct esh_dmainfo *di = recv->ec_cur_dmainfo;
	int start_producer = recv->ec_producer;

#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, di %p, producer %d\n",
		recv->ec_ulp, di, start_producer);
#endif

	/*
	 * NOTE(review): these syncs use RR_SNAP_RECV_RING_SIZE although
	 * this is an FP ring -- confirm RR_FP_RECV_RING_SIZE and
	 * RR_SNAP_RECV_RING_SIZE agree, or this syncs the wrong span.
	 */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;

		if (di == NULL) {
			/*
			 * Must allow only one reader at a time; see
			 * esh_flush_fp_ring().
			 */

			if (offset != start_producer)
				goto fp_fill_done;

			/* Pull the next queued read, if any. */
			di = TAILQ_FIRST(&recv->ec_queue);
			if (di == NULL)
				goto fp_fill_done;
			TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = di;
			di->ed_flags |= ESH_DI_READING;
#ifdef ESH_PRINTF
			printf("\toffset %d nsegs %d\n",
			       recv->ec_offset, di->ed_dma->dm_nsegs);
#endif
		}

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring = 0;
		recv->ec_descr[offset].rd_buffer_addr =
			di->ed_dma->dm_segs[recv->ec_offset].ds_addr;
		recv->ec_descr[offset].rd_length =
			di->ed_dma->dm_segs[recv->ec_offset].ds_len;
		recv->ec_descr[offset].rd_control = 0;
		recv->ec_dmainfo[offset] = NULL;

		if (recv->ec_offset == 0) {
			/* Start of the dmamap... */
			recv->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		assert(recv->ec_offset < di->ed_dma->dm_nsegs);

		recv->ec_offset++;
		if (recv->ec_offset == di->ed_dma->dm_nsegs) {
			/*
			 * Last segment:  mark the packet end and stash the
			 * dmainfo on that slot so the read can be completed
			 * when this descriptor is consumed.
			 */
			recv->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
			recv->ec_dmainfo[offset] = di;
			di = NULL;
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = NULL;
		}

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

fp_fill_done:
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);


	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, recv->ec_ulp,
			     recv->ec_producer);
	} else {
		/*
		 * Later firmware packs four ring producer indices into
		 * one 32-bit register; rebuild the whole word from our
		 * software state before writing it.
		 */
		union {
			u_int32_t producer;
			u_int8_t indices[4];
		} v;
		int which;
		int i;
		struct esh_fp_ring_ctl *r;

		which = (recv->ec_index / 4) * 4;
#if BAD_PRODUCER
		v.producer = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					      RR_RECVS_PRODUCER + which);
		NTOHL(v.producer);
#endif
		for (i = 0; i < 4; i++) {
			r = sc->sc_fp_recv_index[i + which];
			if (r != NULL)
				v.indices[i] = r->ec_producer;
			else
				v.indices[i] = 0;
		}
#ifdef ESH_PRINTF
		printf("esh_fill_fp_ring: ulp %d, updating producer %d: %.8x\n",
			recv->ec_ulp, which, v.producer);
#endif
		HTONL(v.producer);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_RECVS_PRODUCER + which, v.producer);
	}
#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring: ulp %d, final producer %d\n",
		recv->ec_ulp, recv->ec_producer);
#endif
}
2856
2857 /*
2858 * When a read is interrupted, we need to flush the buffers out of
2859 * the ring; otherwise, a driver error could lock a process up,
2860 * with no way to exit.
2861 */
2862
2863 static void
2864 esh_flush_fp_ring(sc, recv, di)
2865 struct esh_softc *sc;
2866 struct esh_fp_ring_ctl *recv;
2867 struct esh_dmainfo *di;
2868 {
2869 int error = 0;
2870
2871 /*
2872 * If the read request hasn't yet made it to the top of the queue,
2873 * just remove it from the queue, and return.
2874 */
2875
2876 if ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING) {
2877 TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
2878 return;
2879 }
2880
2881 #ifdef ESH_PRINTF
2882 printf("esh_flush_fp_ring: di->ed_flags %x, ulp %d, producer %x\n",
2883 di->ed_flags, recv->ec_ulp, recv->ec_producer);
2884 #endif
2885
2886 /* Now we gotta get tough. Issue a discard packet command */
2887
2888 esh_send_cmd(sc, RR_CC_DISCARD_PKT, recv->ec_ulp,
2889 recv->ec_producer - 1);
2890
2891 /* Wait for it to finish */
2892
2893 while ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING &&
2894 error == 0) {
2895 error = tsleep((void *) &di->ed_flags, PRIBIO,
2896 "esh_flush_fp_ring", hz);
2897 printf("esh_flush_fp_ring: di->ed_flags %x, error %d\n",
2898 di->ed_flags, error);
2899 /*
2900 * What do I do if this times out or gets interrupted?
2901 * Reset the card? I could get an interrupt before
2902 * giving it a chance to check. Perhaps I oughta wait
2903 * awhile? What about not giving the user a chance
2904 * to interrupt, and just expecting a quick answer?
2905 * That way I could reset the card if it doesn't
2906 * come back right away!
2907 */
2908 if (error != 0) {
2909 eshreset(sc);
2910 break;
2911 }
2912 }
2913
2914 /* XXX: Do we need to clear out the dmainfo pointers */
2915 }
2916
2917
/*
 * Interface ioctl handler: address assignment, up/down transitions,
 * and the pass-through to driver-specific configuration commands.
 */
int
eshioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	void *data;
{
	int error = 0;
	struct esh_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifdrv *ifd = (struct ifdrv *) data;
	u_long len;
	int s;

	s = splnet();

	/*
	 * An EEPROM write monopolizes the card; wait for it to finish
	 * (interruptibly) before touching anything.
	 */
	while (sc->sc_flags & ESH_FL_EEPROM_BUSY) {
		error = tsleep(&sc->sc_flags, PCATCH | PRIBIO,
			       "esheeprom", 0);
		if (error != 0)
			goto ioctl_done;
	}

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* First address assignment brings the hardware up. */
		if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
			eshinit(sc);
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				error = EIO;
				goto ioctl_done;
			}
		}

		/*
		 * Runcode is up but the SNAP ring is not:  open it,
		 * after waiting out any pending ring close.
		 */
		if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP))
		    == ESH_FL_RUNCODE_UP) {
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}
			esh_init_snap_ring(sc);
		}

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			/* The driver doesn't really care about IP addresses */
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */

			ifp->if_flags &= ~IFF_RUNNING;
			esh_close_snap_ring(sc);
			/* Wait for the ring-disable event to complete. */
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {

			/* Marked up while stopped:  same path as SIOCSIFADDR. */
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				eshinit(sc);
				if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
					error = EIO;
					goto ioctl_done;
				}
			}

			if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) == ESH_FL_RUNCODE_UP) {
				while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
					error = tsleep((void *) &sc->sc_snap_recv, PRIBIO, "esh_closing_fp_ring", hz);
					if (error != 0)
						goto ioctl_done;
				}
				esh_init_snap_ring(sc);
			}
		}
		break;

	case SIOCSDRVSPEC: /* Driver-specific configuration calls */
		cmd = ifd->ifd_cmd;
		len = ifd->ifd_len;
		data = ifd->ifd_data;

		/* No lwp pointer:  privilege was checked at entry. */
		esh_generic_ioctl(sc, cmd, data, len, NULL);
		break;

	default:
		error = EINVAL;
		break;
	}

ioctl_done:
	splx(s);
	return (error);
}
3032
3033
/*
 * Driver-specific ioctls:  tuning parameters, statistics, EEPROM
 * read/write, and card reset.  When called with an lwp (from the
 * character device path), privileged operations require superuser;
 * when l is NULL the caller has already performed the check.
 */
static int
esh_generic_ioctl(struct esh_softc *sc, u_long cmd, void *data,
		  u_long len, struct lwp *l)
{
	struct ifnet *ifp = &sc->sc_if;
	struct rr_eeprom rr_eeprom;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t address;
	u_int32_t value;
	u_int32_t offset;
	u_int32_t length;
	int error = 0;
	int i;

	/*
	 * If we have a LWP pointer, check to make sure that the
	 * user is privileged before performing any destruction operations.
	 */

	if (l != NULL) {
		switch (cmd) {
		case EIOCGTUNE:
		case EIOCGEEPROM:
		case EIOCGSTATS:
			/* Read-only operations are unprivileged. */
			break;

		default:
			error = kauth_authorize_generic(l->l_cred,
			    KAUTH_GENERIC_ISSUSER, NULL);
			if (error)
				return (error);
		}
	}

	switch (cmd) {
	case EIOCGTUNE:
		/* Copy out the current tuning parameters. */
		if (len != sizeof(struct rr_tuning))
			error = EMSGSIZE;
		else {
			error = copyout((void *) &sc->sc_tune, data,
					sizeof(struct rr_tuning));
		}
		break;

	case EIOCSTUNE:
		/* Tuning may only change while the interface is down. */
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (len != sizeof(struct rr_tuning)) {
				error = EMSGSIZE;
			} else {
				error = copyin(data, (void *) &sc->sc_tune,
					       sizeof(struct rr_tuning));
			}
		} else {
			error = EBUSY;
		}
		break;

	case EIOCGSTATS:
		if (len != sizeof(struct rr_stats))
			error = EMSGSIZE;
		else
			error = copyout((void *) &sc->sc_gen_info->ri_stats,
					data, sizeof(struct rr_stats));
		break;

	case EIOCGEEPROM:
	case EIOCSEEPROM:
		/* EEPROM access requires the interface to be down. */
		if ((ifp->if_flags & IFF_UP) != 0) {
			error = EBUSY;
			break;
		}

		if (len != sizeof(struct rr_eeprom)) {
			error = EMSGSIZE;
			break;
		}

		error = copyin(data, (void *) &rr_eeprom, sizeof(rr_eeprom));
		if (error != 0)
			break;

		offset = rr_eeprom.ifr_offset;
		length = rr_eeprom.ifr_length;

		/* Validate the requested range against the EEPROM size. */
		if (length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFBIG;
			break;
		}

		if (offset + length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFAULT;
			break;
		}

		/* Access is word-granular only. */
		if (offset % 4 || length % 4) {
			error = EIO;
			break;
		}

		/* Halt the processor (preserve NO_SWAP, if set) */

		misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
		bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
				  (misc_host_ctl & RR_MH_NO_SWAP) |
				  RR_MH_HALT_PROC);

		/* Make the EEPROM accessible */

		misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
		value = misc_local_ctl &
			~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | RR_LC_PARITY_ON);
		if (cmd == EIOCSEEPROM)   /* make writable! */
			value |= RR_LC_WRITE_PROM;
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, value);

		if (cmd == EIOCSEEPROM) {
			printf("%s:  writing EEPROM\n", device_xname(&sc->sc_dev));
			/* Block other ioctls for the duration of the write. */
			sc->sc_flags |= ESH_FL_EEPROM_BUSY;
		}

		/* Do that EEPROM voodoo that you do so well... */

		address = offset * RR_EE_BYTE_LEN;
		for (i = 0; i < length; i += 4) {
			if (cmd == EIOCGEEPROM) {
				value = esh_read_eeprom(sc, address);
				address += RR_EE_WORD_LEN;
				if (copyout(&value,
					    (char *) rr_eeprom.ifr_buffer + i,
					    sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
			} else {
				if (copyin((char *) rr_eeprom.ifr_buffer + i,
					   &value, sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
				if (esh_write_eeprom(sc, address,
						     value) != 0) {
					error = EIO;
					break;
				}

				/*
				 * Have to give up control now and
				 * then, so sleep for a clock tick.
				 * Might be good to figure out how
				 * long a tick is, so that we could
				 * intelligently chose the frequency
				 * of these pauses.
				 */

				if (i % 40 == 0) {
					tsleep(&sc->sc_flags,
					       PRIBIO, "eshweeprom", 1);
				}

				address += RR_EE_WORD_LEN;
			}
		}

		/* Restore the saved local-control bits (undoes WRITE_PROM). */
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
		if (cmd == EIOCSEEPROM) {
			sc->sc_flags &= ~ESH_FL_EEPROM_BUSY;
			wakeup(&sc->sc_flags);
			printf("%s:  done writing EEPROM\n",
			       device_xname(&sc->sc_dev));
		}
		break;

	case EIOCRESET:
		eshreset(sc);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
3220
3221
/*
 * Full reset of the interface:  quiesce the hardware, then bring it
 * back up.  Runs at splnet to keep the interrupt path out of the way.
 */
void
eshreset(sc)
	struct esh_softc *sc;
{
	int spl;

	spl = splnet();
	eshstop(sc);
	eshinit(sc);
	splx(spl);
}
3233
3234 /*
3235 * The NIC expects a watchdog command every 10 seconds. If it doesn't
3236 * get the watchdog, it figures the host is dead and stops. When it does
3237 * get the command, it'll generate a watchdog event to let the host know
3238 * that it is still alive. We watch for this.
3239 */
3240
3241 void
3242 eshwatchdog(ifp)
3243 struct ifnet *ifp;
3244 {
3245 struct esh_softc *sc = ifp->if_softc;
3246
3247 if (!sc->sc_watchdog) {
3248 printf("%s: watchdog timer expired. "
3249 "Should reset interface!\n",
3250 device_xname(&sc->sc_dev));
3251 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3252 eshstatus(sc);
3253 #if 0
3254 eshstop(sc); /* DON'T DO THIS, it'll clear data we
3255 could use to debug it! */
3256 #endif
3257 } else {
3258 sc->sc_watchdog = 0;
3259
3260 esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
3261 ifp->if_timer = 5;
3262 }
3263 }
3264
3265
3266 /*
3267 * Stop the NIC and throw away packets that have started to be sent,
3268 * but didn't make it all the way. Re-adjust the various queue
3269 * pointers to account for this.
3270 */
3271
3272 void
3273 eshstop(sc)
3274 struct esh_softc *sc;
3275 {
3276 struct ifnet *ifp = &sc->sc_if;
3277 bus_space_tag_t iot = sc->sc_iot;
3278 bus_space_handle_t ioh = sc->sc_ioh;
3279 u_int32_t misc_host_ctl;
3280 int i;
3281
3282 if (!(sc->sc_flags & ESH_FL_INITIALIZED))
3283 return;
3284
3285 /* Just shut it all down. This isn't pretty, but it works */
3286
3287 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
3288 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3289
3290 misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
3291 bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
3292 (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
3293 sc->sc_flags = 0;
3294 ifp->if_timer = 0; /* turn off watchdog timer */
3295
3296 while (sc->sc_snap_recv.ec_consumer
3297 != sc->sc_snap_recv.ec_producer) {
3298 struct mbuf *m0;
3299 u_int16_t offset = sc->sc_snap_recv.ec_consumer;
3300
3301 bus_dmamap_unload(sc->sc_dmat,
3302 sc->sc_snap_recv.ec_dma[offset]);
3303 MFREE(sc->sc_snap_recv.ec_m[offset], m0);
3304 sc->sc_snap_recv.ec_m[offset] = NULL;
3305 sc->sc_snap_recv.ec_consumer =
3306 NEXT_RECV(sc->sc_snap_recv.ec_consumer);
3307 wakeup((void *) &sc->sc_snap_recv);
3308 }
3309
3310 /* Handle FP rings */
3311
3312 for (i = 0; i < RR_ULP_COUNT; i++) {
3313 struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[i];
3314 struct esh_dmainfo *di = NULL;
3315
3316 if (ring == NULL)
3317 continue;
3318
3319 /* Get rid of outstanding buffers */
3320
3321 esh_dma_sync(sc, ring->ec_descr,
3322 ring->ec_consumer, ring->ec_producer,
3323 RR_FP_RECV_RING_SIZE, sizeof(struct rr_descr), 0,
3324 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3325
3326 while (ring->ec_consumer != ring->ec_producer) {
3327 di = ring->ec_dmainfo[ring->ec_consumer];
3328 if (di != NULL)
3329 break;
3330 ring->ec_consumer = NEXT_RECV(ring->ec_consumer);
3331 }
3332 if (di == NULL)
3333 di = ring->ec_cur_dmainfo;
3334
3335 if (di != NULL) {
3336 bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
3337 di->ed_error = EIO;
3338 di->ed_flags = 0;
3339 wakeup((void *) &di->ed_flags); /* packet discard */
3340 wakeup((void *) di); /* wait on read */
3341 }
3342 wakeup((void *) &ring->ec_ulp); /* ring create */
3343 wakeup((void *) &ring->ec_index); /* ring disable */
3344 }
3345
3346 /* XXX: doesn't clear bufs being sent */
3347
3348 bus_dmamap_unload(sc->sc_dmat, sc->sc_send.ec_dma);
3349 if (sc->sc_send.ec_cur_mbuf) {
3350 m_freem(sc->sc_send.ec_cur_mbuf);
3351 } else if (sc->sc_send.ec_cur_buf) {
3352 struct buf *bp = sc->sc_send.ec_cur_buf;
3353
3354 bp->b_resid = bp->b_bcount;
3355 bp->b_error = EIO;
3356 biodone(bp);
3357 } else if (sc->sc_send.ec_cur_dmainfo) {
3358 struct esh_dmainfo *di = sc->sc_send.ec_cur_dmainfo;
3359
3360 di->ed_flags &= ~ESH_DI_BUSY;
3361 di->ed_error = EIO;
3362 wakeup((void *) di);
3363 }
3364 sc->sc_send.ec_cur_mbuf = NULL;
3365 sc->sc_send.ec_cur_buf = NULL;
3366 sc->sc_send.ec_cur_dmainfo = NULL;
3367
3368 /*
3369 * Clear out the index values, since they'll be useless
3370 * when we restart.
3371 */
3372
3373 memset(sc->sc_fp_recv_index, 0,
3374 sizeof(struct esh_fp_ring_ctl *) * RR_MAX_RECV_RING);
3375
3376 /* Be sure to wake up any other processes waiting on driver action. */
3377
3378 wakeup(sc); /* Wait on initialization */
3379 wakeup(&sc->sc_flags); /* Wait on EEPROM write */
3380
3381 /*
3382 * XXX: I have to come up with a way to avoid handling interrupts
3383 * received before this shuts down the card, but processed
3384 * afterwards!
3385 */
3386 }
3387
3388 /*
3389 * Read a value from the eeprom. This expects that the NIC has already
3390 * been tweaked to put it into the right state for reading from the
3391 * EEPROM -- the HALT bit is set in the MISC_HOST_CTL register,
3392 * and the FAST_PROM, ADD_SRAM, and PARITY flags have been cleared
3393 * in the MISC_LOCAL_CTL register.
3394 *
3395 * The EEPROM layout is a little weird. There is a valid byte every
3396 * eight bytes. Words are then smeared out over 32 bytes.
3397 * All addresses listed here are the actual starting addresses.
3398 */
3399
3400 static u_int32_t
3401 esh_read_eeprom(sc, addr)
3402 struct esh_softc *sc;
3403 u_int32_t addr;
3404 {
3405 int i;
3406 u_int32_t tmp;
3407 u_int32_t value = 0;
3408
3409 /* If the offset hasn't been added, add it. Otherwise pass through */
3410
3411 if (!(addr & RR_EE_OFFSET))
3412 addr += RR_EE_OFFSET;
3413
3414 for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
3415 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3416 RR_WINDOW_BASE, addr);
3417 tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
3418 RR_WINDOW_DATA);
3419 value = (value << 8) | ((tmp >> 24) & 0xff);
3420 }
3421 return value;
3422 }
3423
3424
3425 /*
3426 * Write a value to the eeprom. Just like esh_read_eeprom, this routine
3427 * expects that the NIC has already been tweaked to put it into the right
3428 * state for reading from the EEPROM. Things are further complicated
3429 * in that we need to read each byte after we write it to ensure that
3430 * the new value has been successfully written. It can take as long
3431 * as 1ms (!) to write a byte.
3432 */
3433
3434 static int
3435 esh_write_eeprom(sc, addr, value)
3436 struct esh_softc *sc;
3437 u_int32_t addr;
3438 u_int32_t value;
3439 {
3440 int i, j;
3441 u_int32_t shifted_value, tmp = 0;
3442
3443 /* If the offset hasn't been added, add it. Otherwise pass through */
3444
3445 if (!(addr & RR_EE_OFFSET))
3446 addr += RR_EE_OFFSET;
3447
3448 for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
3449 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3450 RR_WINDOW_BASE, addr);
3451
3452 /*
3453 * Get the byte out of value, starting with the top, and
3454 * put it into the top byte of the word to write.
3455 */
3456
3457 shifted_value = ((value >> ((3 - i) * 8)) & 0xff) << 24;
3458 bus_space_write_4(sc->sc_iot, sc->sc_ioh, RR_WINDOW_DATA,
3459 shifted_value);
3460 for (j = 0; j < 50; j++) {
3461 tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
3462 RR_WINDOW_DATA);
3463 if (tmp == shifted_value)
3464 break;
3465 delay(500); /* 50us break * 20 = 1ms */
3466 }
3467 if (tmp != shifted_value)
3468 return -1;
3469 }
3470
3471 return 0;
3472 }
3473
3474
/*
 * Send a command to the NIC.  Note that no room check is performed
 * before issuing the command; the producer index simply advances to
 * the next of the 16 command-ring slots.
 */
3479
3480 static void
3481 esh_send_cmd(sc, cmd, ring, index)
3482 struct esh_softc *sc;
3483 u_int8_t cmd;
3484 u_int8_t ring;
3485 u_int8_t index;
3486 {
3487 union rr_cmd c;
3488
3489 #define NEXT_CMD(i) (((i) + 0x10 - 1) & 0x0f)
3490
3491 c.l = 0;
3492 c.b.rc_code = cmd;
3493 c.b.rc_ring = ring;
3494 c.b.rc_index = index;
3495
3496 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3497 RR_COMMAND_RING + sizeof(c) * sc->sc_cmd_producer,
3498 c.l);
3499
3500 #ifdef ESH_PRINTF
3501 /* avoid annoying messages when possible */
3502 if (cmd != RR_CC_WATCHDOG)
3503 printf("esh_send_cmd: cmd %x ring %d index %d slot %x\n",
3504 cmd, ring, index, sc->sc_cmd_producer);
3505 #endif
3506
3507 sc->sc_cmd_producer = NEXT_CMD(sc->sc_cmd_producer);
3508 }
3509
3510
3511 /*
3512 * Write an address to the device.
3513 * XXX: This belongs in bus-dependent land!
3514 */
3515
3516 static void
3517 esh_write_addr(iot, ioh, addr, value)
3518 bus_space_tag_t iot;
3519 bus_space_handle_t ioh;
3520 bus_addr_t addr;
3521 bus_addr_t value;
3522 {
3523 bus_space_write_4(iot, ioh, addr, 0);
3524 bus_space_write_4(iot, ioh, addr + sizeof(u_int32_t), value);
3525 }
3526
3527
3528 /* Copy the RunCode from EEPROM to SRAM. Ughly. */
3529
3530 static void
3531 esh_reset_runcode(sc)
3532 struct esh_softc *sc;
3533 {
3534 bus_space_tag_t iot = sc->sc_iot;
3535 bus_space_handle_t ioh = sc->sc_ioh;
3536 u_int32_t value;
3537 u_int32_t len;
3538 u_int32_t i;
3539 u_int32_t segments;
3540 u_int32_t ee_addr;
3541 u_int32_t rc_addr;
3542 u_int32_t sram_addr;
3543
3544 /* Zero the SRAM */
3545
3546 for (i = 0; i < sc->sc_sram_size; i += 4) {
3547 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, i);
3548 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, 0);
3549 }
3550
3551 /* Find the address of the segment description section */
3552
3553 rc_addr = esh_read_eeprom(sc, RR_EE_RUNCODE_SEGMENTS);
3554 segments = esh_read_eeprom(sc, rc_addr);
3555
3556 for (i = 0; i < segments; i++) {
3557 rc_addr += RR_EE_WORD_LEN;
3558 sram_addr = esh_read_eeprom(sc, rc_addr);
3559 rc_addr += RR_EE_WORD_LEN;
3560 len = esh_read_eeprom(sc, rc_addr);
3561 rc_addr += RR_EE_WORD_LEN;
3562 ee_addr = esh_read_eeprom(sc, rc_addr);
3563
3564 while (len--) {
3565 value = esh_read_eeprom(sc, ee_addr);
3566 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, sram_addr);
3567 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, value);
3568
3569 ee_addr += RR_EE_WORD_LEN;
3570 sram_addr += 4;
3571 }
3572 }
3573 }
3574
3575
3576 /*
3577 * Perform bus DMA syncing operations on various rings.
3578 * We have to worry about our relative position in the ring,
3579 * and whether the ring has wrapped. All of this code should take
3580 * care of those worries.
3581 */
3582
/*
 * Sync the host-memory copy of a descriptor ring over [start, end).
 * The ring lives inside the single sc_dma map; the ring's byte offset
 * within that map is derived from sc_dma_addr.  When the range wraps
 * past the end of the ring (start > end), the sync is issued as two
 * pieces.  With do_equal set, start == end means "sync the whole ring";
 * with do_equal clear, start == end syncs nothing.
 */
static void
esh_dma_sync(sc, mem, start, end, entries, size, do_equal, ops)
	struct esh_softc *sc;
	void *mem;		/* first descriptor of the ring */
	int start;		/* first entry to sync */
	int end;		/* entry after the last one to sync */
	int entries;		/* total number of entries in the ring */
	int size;		/* size in bytes of one entry */
	int do_equal;		/* treat start == end as the full ring */
	int ops;		/* BUS_DMASYNC_* operation flags */
{
	/* Byte offset of this ring within the shared DMA region. */
	int offset = (char *)mem - (char *)sc->sc_dma_addr;

	if (start < end) {
		/* Contiguous range:  a single sync covers it. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset + start * size,
				(end - start) * size, ops);
	} else if (do_equal || start != end) {
		/* Wrapped (or full) range:  sync head and tail separately. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset,
				end * size, ops);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset + start * size,
				(entries - start) * size, ops);
	}
}
3609
3610
3611 static struct esh_dmainfo *
3612 esh_new_dmainfo(sc)
3613 struct esh_softc *sc;
3614 {
3615 struct esh_dmainfo *di;
3616 int s;
3617
3618 s = splnet();
3619
3620 di = TAILQ_FIRST(&sc->sc_dmainfo_freelist);
3621 if (di != NULL) {
3622 TAILQ_REMOVE(&sc->sc_dmainfo_freelist, di, ed_list);
3623 sc->sc_dmainfo_freelist_count--;
3624 splx(s);
3625 return di;
3626 }
3627
3628 /* None sitting around, so build one now... */
3629
3630 di = (struct esh_dmainfo *) malloc(sizeof(*di), M_DEVBUF,
3631 M_WAITOK|M_ZERO);
3632 assert(di != NULL);
3633
3634 if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
3635 ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
3636 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3637 &di->ed_dma)) {
3638 printf("%s: failed dmainfo bus_dmamap_create\n",
3639 device_xname(&sc->sc_dev));
3640 free(di, M_DEVBUF);
3641 di = NULL;
3642 }
3643
3644 splx(s);
3645 return di;
3646 }
3647
3648 static void
3649 esh_free_dmainfo(sc, di)
3650 struct esh_softc *sc;
3651 struct esh_dmainfo *di;
3652 {
3653 int s = splnet();
3654
3655 assert(di != NULL);
3656 di->ed_buf = NULL;
3657 TAILQ_INSERT_TAIL(&sc->sc_dmainfo_freelist, di, ed_list);
3658 sc->sc_dmainfo_freelist_count++;
3659 #ifdef ESH_PRINTF
3660 printf("esh_free_dmainfo: freelist count %d\n", sc->sc_dmainfo_freelist_count);
3661 #endif
3662
3663 splx(s);
3664 }
3665
3666
3667 /* ------------------------- debugging functions --------------------------- */
3668
3669 /*
3670 * Print out status information about the NIC and the driver.
3671 */
3672
3673 static int
3674 eshstatus(sc)
3675 struct esh_softc *sc;
3676 {
3677 bus_space_tag_t iot = sc->sc_iot;
3678 bus_space_handle_t ioh = sc->sc_ioh;
3679 int i;
3680
3681 /* XXX: This looks pathetic, and should be improved! */
3682
3683 printf("%s: status -- fail1 %x fail2 %x\n",
3684 device_xname(&sc->sc_dev),
3685 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL1),
3686 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL2));
3687 printf("\tmisc host ctl %x misc local ctl %x\n",
3688 bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL),
3689 bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL));
3690 printf("\toperating mode %x event producer %x\n",
3691 bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS),
3692 bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER));
3693 printf("\tPC %x max rings %x\n",
3694 bus_space_read_4(iot, ioh, RR_PROC_PC),
3695 bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS));
3696 printf("\tHIPPI tx state %x rx state %x\n",
3697 bus_space_read_4(iot, ioh, RR_TX_STATE),
3698 bus_space_read_4(iot, ioh, RR_RX_STATE));
3699 printf("\tDMA write state %x read state %x\n",
3700 bus_space_read_4(iot, ioh, RR_DMA_WRITE_STATE),
3701 bus_space_read_4(iot, ioh, RR_DMA_READ_STATE));
3702 printf("\tDMA write addr %x%x read addr %x%x\n",
3703 bus_space_read_4(iot, ioh, RR_WRITE_HOST),
3704 bus_space_read_4(iot, ioh, RR_WRITE_HOST + 4),
3705 bus_space_read_4(iot, ioh, RR_READ_HOST),
3706 bus_space_read_4(iot, ioh, RR_READ_HOST + 4));
3707
3708 for (i = 0; i < 64; i++)
3709 if (sc->sc_gen_info->ri_stats.rs_stats[i])
3710 printf("stat %x is %x\n", i * 4,
3711 sc->sc_gen_info->ri_stats.rs_stats[i]);
3712
3713 return 0;
3714 }
3715
3716
3717 #ifdef ESH_PRINTF
3718
3719 /* Check to make sure that the NIC is still running */
3720
3721 static int
3722 esh_check(sc)
3723 struct esh_softc *sc;
3724 {
3725 bus_space_tag_t iot = sc->sc_iot;
3726 bus_space_handle_t ioh = sc->sc_ioh;
3727
3728 if (bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL) & RR_MH_HALT_PROC) {
3729 printf("esh_check: NIC stopped\n");
3730 eshstatus(sc);
3731 return 1;
3732 } else {
3733 return 0;
3734 }
3735 }
3736 #endif
3737
Cache object: 970d1b00f8cb4d5e001e80bc271d3339
|