FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/rrunner.c
1 /* $NetBSD: rrunner.c,v 1.59 2006/11/16 01:32:52 christos Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code contributed to The NetBSD Foundation by Kevin M. Lahey
8 * of the Numerical Aerospace Simulation Facility, NASA Ames Research
9 * Center.
10 *
11 * Partially based on a HIPPI driver written by Essential Communications
12 * Corporation. Thanks to Jason Thorpe, Matt Jacob, and Fred Templin
13 * for invaluable advice and encouragement!
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the NetBSD
26 * Foundation, Inc. and its contributors.
27 * 4. Neither the name of The NetBSD Foundation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
32 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
33 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
34 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
35 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
38 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
39 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: rrunner.c,v 1.59 2006/11/16 01:32:52 christos Exp $");
46
47 #include "opt_inet.h"
48
49 #include "bpfilter.h"
50 #include "esh.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/mbuf.h>
55 #include <sys/buf.h>
56 #include <sys/bufq.h>
57 #include <sys/socket.h>
58 #include <sys/ioctl.h>
59 #include <sys/errno.h>
60 #include <sys/syslog.h>
61 #include <sys/select.h>
62 #include <sys/device.h>
63 #include <sys/proc.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66 #include <sys/kauth.h>
67
68 #include <uvm/uvm_extern.h>
69
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/route.h>
73
74 #include <net/if_hippi.h>
75 #include <net/if_media.h>
76
77 #ifdef INET
78 #include <netinet/in.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip.h>
82 #include <netinet/if_inarp.h>
83 #endif
84
85
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #include <net/bpfdesc.h>
89 #endif
90
91 #include <machine/cpu.h>
92 #include <machine/bus.h>
93 #include <machine/intr.h>
94
95 #include <dev/ic/rrunnerreg.h>
96 #include <dev/ic/rrunnervar.h>
97
98 /*
99 #define ESH_PRINTF
100 */
101
102 /* Autoconfig definition of driver back-end */
103 extern struct cfdriver esh_cd;
104
105 struct esh_softc *esh_softc_debug[22]; /* for gdb */
106
107 #ifdef DIAGNOSTIC
108 u_int32_t max_write_len;
109 #endif
110
111 /* Network device driver and initialization framework routines */
112
113 void eshinit(struct esh_softc *);
114 int eshioctl(struct ifnet *, u_long, caddr_t);
115 void eshreset(struct esh_softc *);
116 void eshstart(struct ifnet *);
117 static int eshstatus(struct esh_softc *);
118 void eshstop(struct esh_softc *);
119 void eshwatchdog(struct ifnet *);
120
121 /* Routines to support FP operation */
122
123 dev_type_open(esh_fpopen);
124 dev_type_close(esh_fpclose);
125 dev_type_read(esh_fpread);
126 dev_type_write(esh_fpwrite);
127 #ifdef MORE_DONE
128 dev_type_mmap(esh_fpmmap);
129 #endif
130 dev_type_strategy(esh_fpstrategy);
131
/*
 * Character-device switch for the Framing Protocol (FP) interface.
 * Entries: open, close, read, write, ioctl, stop, tty, poll, mmap,
 * kqfilter, flags.  ioctl/stop/tty/poll/kqfilter use the null/no-op
 * stubs; mmap is stubbed too unless the unfinished MORE_DONE support
 * is compiled in.
 */
const struct cdevsw esh_cdevsw = {
	esh_fpopen, esh_fpclose, esh_fpread, esh_fpwrite, nullioctl,
	nostop, notty, nullpoll,
#ifdef MORE_DONE
	esh_fpmmap,
#else
	nommap,
#endif
	nullkqfilter,
	D_OTHER,		/* neither a disk nor a tty */
};
143
144 /* General routines, not externally visable */
145
146 static struct mbuf *esh_adjust_mbufs(struct esh_softc *, struct mbuf *m);
147 static void esh_dma_sync(struct esh_softc *, void *,
148 int, int, int, int, int, int);
149 static void esh_fill_snap_ring(struct esh_softc *);
150 static void esh_init_snap_ring(struct esh_softc *);
151 static void esh_close_snap_ring(struct esh_softc *);
152 static void esh_read_snap_ring(struct esh_softc *, u_int16_t, int);
153 static void esh_fill_fp_ring(struct esh_softc *, struct esh_fp_ring_ctl *);
154 static void esh_flush_fp_ring(struct esh_softc *,
155 struct esh_fp_ring_ctl *,
156 struct esh_dmainfo *);
157 static void esh_init_fp_rings(struct esh_softc *);
158 static void esh_read_fp_ring(struct esh_softc *, u_int16_t, int, int);
159 static void esh_reset_runcode(struct esh_softc *);
160 static void esh_send(struct esh_softc *);
161 static void esh_send_cmd(struct esh_softc *, u_int8_t, u_int8_t, u_int8_t);
162 static u_int32_t esh_read_eeprom(struct esh_softc *, u_int32_t);
163 static void esh_write_addr(bus_space_tag_t, bus_space_handle_t,
164 bus_addr_t, bus_addr_t);
165 static int esh_write_eeprom(struct esh_softc *, u_int32_t, u_int32_t);
166 static void eshstart_cleanup(struct esh_softc *, u_int16_t, int);
167
168 static struct esh_dmainfo *esh_new_dmainfo(struct esh_softc *);
169 static void esh_free_dmainfo(struct esh_softc *, struct esh_dmainfo *);
170 static int esh_generic_ioctl(struct esh_softc *, u_long, caddr_t, u_long,
171 struct lwp *);
172
173 #ifdef ESH_PRINTF
174 static int esh_check(struct esh_softc *);
175 #endif
176
177 #define ESHUNIT(x) ((minor(x) & 0xff00) >> 8)
178 #define ESHULP(x) (minor(x) & 0x00ff)
179
180
181 /*
182 * Back-end attach and configure. Allocate DMA space and initialize
183 * all structures.
184 */
185
/*
 * eshconfig: back-end attach and configure.
 *
 * Carves one physically contiguous, DMA-coherent region into the
 * host-side structures shared with the NIC (general info block,
 * receive ring table, send ring, SNAP receive ring, event ring),
 * pre-creates the DMA maps used for transmit and SNAP receive, halts
 * the on-board processor and pulls identification/tuning values out
 * of the EEPROM, and finally attaches the HIPPI network interface.
 *
 * On any failure the goto ladder at the bottom unwinds exactly the
 * resources acquired so far and the routine returns without attaching
 * the interface (autoconf back-ends return void).
 */
void
eshconfig(sc)
	struct esh_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t header_format;
	u_int32_t ula_tmp;
	bus_size_t size;
	int rseg;
	int error;
	int i;

	/* Stash the softc where gdb can find it by unit number. */
	esh_softc_debug[device_unit(&sc->sc_dev)] = sc;
	sc->sc_flags = 0;

	TAILQ_INIT(&sc->sc_dmainfo_freelist);
	sc->sc_dmainfo_freelist_count = 0;

	/*
	 * Allocate and divvy up some host side memory that can hold
	 * data structures that will be DMA'ed over to the NIC
	 */

	sc->sc_dma_size = sizeof(struct rr_gen_info) +
	    sizeof(struct rr_ring_ctl) * RR_ULP_COUNT +
	    sizeof(struct rr_descr) * RR_SEND_RING_SIZE +
	    sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE +
	    sizeof(struct rr_event) * RR_EVENT_RING_SIZE;

	error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size,
	    0, RR_DMA_BOUNDARY, &sc->sc_dmaseg, 1,
	    &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error("%s: couldn't allocate space for host-side"
		    "data structures\n", sc->sc_dev.dv_xname);
		return;
	}

	/* We asked for one segment; more than one can't be made contiguous. */
	if (rseg > 1) {
		aprint_error("%s: contiguous memory not available\n",
		    sc->sc_dev.dv_xname);
		goto bad_dmamem_map;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, rseg,
	    sc->sc_dma_size, &sc->sc_dma_addr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error(
		    "%s: couldn't map memory for host-side structures\n",
		    sc->sc_dev.dv_xname);
		goto bad_dmamem_map;
	}

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size,
	    1, sc->sc_dma_size, RR_DMA_BOUNDARY,
	    BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
	    &sc->sc_dma)) {
		aprint_error("%s: couldn't create DMA map\n",
		    sc->sc_dev.dv_xname);
		goto bad_dmamap_create;
	}

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma, sc->sc_dma_addr,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: couldn't load DMA map\n",
		    sc->sc_dev.dv_xname);
		goto bad_dmamap_load;
	}

	memset(sc->sc_dma_addr, 0, sc->sc_dma_size);

	/*
	 * Carve the shared region up.  For each piece, remember both the
	 * DMA (bus) address handed to the NIC and the kernel-virtual
	 * address used by the driver; `size' is the running offset.
	 */
	sc->sc_gen_info_dma = sc->sc_dma->dm_segs->ds_addr;
	sc->sc_gen_info = (struct rr_gen_info *) sc->sc_dma_addr;
	size = sizeof(struct rr_gen_info);

	sc->sc_recv_ring_table_dma = sc->sc_dma->dm_segs->ds_addr + size;
	sc->sc_recv_ring_table =
	    (struct rr_ring_ctl *) (sc->sc_dma_addr + size);
	size += sizeof(struct rr_ring_ctl) * RR_ULP_COUNT;

	sc->sc_send_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
	sc->sc_send_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
	sc->sc2_send_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
	size += sizeof(struct rr_descr) * RR_SEND_RING_SIZE;

	sc->sc_snap_recv_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
	sc->sc_snap_recv_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
	sc->sc2_snap_recv_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
	size += sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE;

	sc->sc_event_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
	sc->sc_event_ring = (struct rr_event *) (sc->sc_dma_addr + size);
	size += sizeof(struct rr_event) * RR_EVENT_RING_SIZE;

#ifdef DIAGNOSTIC
	/* Sanity-check that the carve-up fit in what we allocated. */
	if (size > sc->sc_dmaseg.ds_len) {
		aprint_error("%s: bogus size calculation\n",
		    sc->sc_dev.dv_xname);
		goto bad_other;
	}
#endif

	/*
	 * Allocate DMA maps for transfers.  We do this here and now
	 * so we won't have to wait for them in the middle of sending
	 * or receiving something.
	 */

	if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
	    ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
	    BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
	    &sc->sc_send.ec_dma)) {
		aprint_error("%s: failed bus_dmamap_create\n",
		    sc->sc_dev.dv_xname);
		goto bad_other;
	}
	sc->sc_send.ec_offset = 0;
	sc->sc_send.ec_descr = sc->sc_send_ring;
	TAILQ_INIT(&sc->sc_send.ec_di_queue);
	bufq_alloc(&sc->sc_send.ec_buf_queue, "fcfs", 0);

	/* One single-segment map per SNAP receive ring slot. */
	for (i = 0; i < RR_MAX_SNAP_RECV_RING_SIZE; i++)
		if (bus_dmamap_create(sc->sc_dmat, RR_DMA_MAX, 1, RR_DMA_MAX,
		    RR_DMA_BOUNDARY,
		    BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
		    &sc->sc_snap_recv.ec_dma[i])) {
			aprint_error("%s: failed bus_dmamap_create\n",
			    sc->sc_dev.dv_xname);
			/* Destroy the maps already created, then unwind. */
			for (i--; i >= 0; i--)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_snap_recv.ec_dma[i]);
			goto bad_ring_dmamap_create;
		}

	/*
	 * If this is a coldboot, the NIC RunCode should be operational.
	 * If it is a warmboot, it may or may not be operational.
	 * Just to be sure, we'll stop the RunCode and reset everything.
	 */

	/* Halt the processor (preserve NO_SWAP, if set) */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
	    (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);

	/* Make the EEPROM readable */

	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
	    misc_local_ctl & ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM |
	    RR_LC_PARITY_ON));

	/* Extract interesting information from the EEPROM: */

	header_format = esh_read_eeprom(sc, RR_EE_HEADER_FORMAT);
	if (header_format != RR_EE_HEADER_FORMAT_MAGIC) {
		aprint_error("%s: bogus EEPROM header format value %x\n",
		    sc->sc_dev.dv_xname, header_format);
		goto bad_other;
	}

	/*
	 * As it is now, the runcode version in the EEPROM doesn't
	 * reflect the actual runcode version number.  That is only
	 * available once the runcode starts up.  We should probably
	 * change the firmware update code to modify this value,
	 * but Essential itself doesn't do it right now.
	 */

	sc->sc_sram_size = 4 * esh_read_eeprom(sc, RR_EE_SRAM_SIZE);
	sc->sc_runcode_start = esh_read_eeprom(sc, RR_EE_RUNCODE_START);
	sc->sc_runcode_version = esh_read_eeprom(sc, RR_EE_RUNCODE_VERSION);

	sc->sc_pci_latency = esh_read_eeprom(sc, RR_EE_PCI_LATENCY);
	sc->sc_pci_lat_gnt = esh_read_eeprom(sc, RR_EE_PCI_LAT_GNT);

	/* General tuning values */

	sc->sc_tune.rt_mode_and_status =
	    esh_read_eeprom(sc, RR_EE_MODE_AND_STATUS);
	sc->sc_tune.rt_conn_retry_count =
	    esh_read_eeprom(sc, RR_EE_CONN_RETRY_COUNT);
	sc->sc_tune.rt_conn_retry_timer =
	    esh_read_eeprom(sc, RR_EE_CONN_RETRY_TIMER);
	sc->sc_tune.rt_conn_timeout =
	    esh_read_eeprom(sc, RR_EE_CONN_TIMEOUT);
	sc->sc_tune.rt_interrupt_timer =
	    esh_read_eeprom(sc, RR_EE_INTERRUPT_TIMER);
	sc->sc_tune.rt_tx_timeout =
	    esh_read_eeprom(sc, RR_EE_TX_TIMEOUT);
	sc->sc_tune.rt_rx_timeout =
	    esh_read_eeprom(sc, RR_EE_RX_TIMEOUT);
	sc->sc_tune.rt_stats_timer =
	    esh_read_eeprom(sc, RR_EE_STATS_TIMER);
	/* The EEPROM stats timer is then overridden with the driver default. */
	sc->sc_tune.rt_stats_timer = ESH_STATS_TIMER_DEFAULT;

	/* DMA tuning values */

	sc->sc_tune.rt_pci_state =
	    esh_read_eeprom(sc, RR_EE_PCI_STATE);
	sc->sc_tune.rt_dma_write_state =
	    esh_read_eeprom(sc, RR_EE_DMA_WRITE_STATE);
	sc->sc_tune.rt_dma_read_state =
	    esh_read_eeprom(sc, RR_EE_DMA_READ_STATE);
	sc->sc_tune.rt_driver_param =
	    esh_read_eeprom(sc, RR_EE_DRIVER_PARAM);

	/*
	 * Snag the ULA.  The first two bytes are reserved.
	 * We don't really use it immediately, but it would be good to
	 * have for building IPv6 addresses, etc.
	 */

	ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_HI);
	sc->sc_ula[0] = (ula_tmp >> 8) & 0xff;
	sc->sc_ula[1] = ula_tmp & 0xff;

	ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_LO);
	sc->sc_ula[2] = (ula_tmp >> 24) & 0xff;
	sc->sc_ula[3] = (ula_tmp >> 16) & 0xff;
	sc->sc_ula[4] = (ula_tmp >> 8) & 0xff;
	sc->sc_ula[5] = ula_tmp & 0xff;

	/* Reset EEPROM readability */

	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);

	/* Hook up the network interface. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_start = eshstart;
	ifp->if_ioctl = eshioctl;
	ifp->if_watchdog = eshwatchdog;
	ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_NOARP;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	hippi_ifattach(ifp, sc->sc_ula);

	sc->sc_misaligned_bufs = sc->sc_bad_lens = 0;
	sc->sc_fp_rings = 0;

	return;

	/* Error unwind: each label releases what was acquired above it. */
bad_ring_dmamap_create:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_send.ec_dma);
bad_other:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma);
bad_dmamap_load:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma);
bad_dmamap_create:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_addr, sc->sc_dma_size);
bad_dmamem_map:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, rseg);
	return;
}
447
448
449 /*
450 * Bring device up.
451 *
452 * Assume that the on-board processor has already been stopped,
453 * the rings have been cleared of valid buffers, and everything
454 * is pretty much as it was when the system started.
455 *
456 * Stop the processor (just for good measure), clear the SRAM,
457 * reload the boot code, and start it all up again, with the PC
458 * pointing at the boot code. Once the boot code has had a chance
459 * to come up, adjust all of the appropriate parameters, and send
460 * the 'start firmware' command.
461 *
462 * The NIC won't actually be up until it gets an interrupt with an
463 * event indicating the RunCode is up.
464 */
465
/*
 * eshinit: bring the device up.
 *
 * Halts the on-board processor, resets the DMA engines, reloads and
 * restarts the RunCode, verifies it via the BIST handshake, programs
 * the tuning registers from sc_tune, and initializes the host-shared
 * general-info/ring structures.  Finishes by issuing the
 * START_RUNCODE command; the NIC is not actually operational until an
 * interrupt delivers the "RunCode up" event, so only
 * ESH_FL_INITIALIZED is set here.
 *
 * On failure, sc_flags is cleared and anyone sleeping on the softc is
 * woken so openers blocked in esh_fpopen() can notice the failure.
 */
void
eshinit(sc)
	struct esh_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct rr_ring_ctl *ring;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t value;
	u_int32_t mode;

	/* If we're already doing an init, don't try again simultaniously */

	if ((sc->sc_flags & ESH_FL_INITIALIZING) != 0)
		return;
	sc->sc_flags = ESH_FL_INITIALIZING;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the processor (preserve NO_SWAP, if set) */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
	    (misc_host_ctl & RR_MH_NO_SWAP)
	    | RR_MH_HALT_PROC | RR_MH_CLEAR_INT);

	/* Make the EEPROM readable */

	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
	    misc_local_ctl & ~(RR_LC_FAST_PROM |
	    RR_LC_ADD_SRAM |
	    RR_LC_PARITY_ON));

	/* Reset DMA */

	bus_space_write_4(iot, ioh, RR_RX_STATE, RR_RS_RESET);
	bus_space_write_4(iot, ioh, RR_TX_STATE, 0);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE, RR_DR_RESET);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE, RR_DW_RESET);
	bus_space_write_4(iot, ioh, RR_PCI_STATE, 0);
	bus_space_write_4(iot, ioh, RR_TIMER, 0);
	bus_space_write_4(iot, ioh, RR_TIMER_REF, 0);

	/*
	 * Reset the assist register that the documentation suggests
	 * resetting.  Too bad that the docs don't mention anything
	 * else about the register!
	 */

	bus_space_write_4(iot, ioh, 0x15C, 1);

	/* Clear BIST, set the PC to the start of the code and let 'er rip */

	value = bus_space_read_4(iot, ioh, RR_PCI_BIST);
	bus_space_write_4(iot, ioh, RR_PCI_BIST, (value & ~0xff) | 8);

	sc->sc_bist_write(sc, 0);
	esh_reset_runcode(sc);

	bus_space_write_4(iot, ioh, RR_PROC_PC, sc->sc_runcode_start);
	bus_space_write_4(iot, ioh, RR_PROC_BREAKPT, 0x00000001);

	/* Un-halt the processor so the RunCode starts executing. */
	misc_host_ctl &= ~RR_MH_HALT_PROC;
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, misc_host_ctl);

	/* XXX: should we sleep rather than delaying for 1ms!? */

	delay(1000);		/* Need 500 us, but we'll give it more */

	/* The RunCode clears the BIST value to 0 when it has come up. */
	value = sc->sc_bist_read(sc);
	if (value != 0) {
		printf("%s: BIST is %d, not 0!\n",
		    sc->sc_dev.dv_xname, value);
		goto bad_init;
	}

#ifdef ESH_PRINTF
	printf("%s: BIST is %x\n", sc->sc_dev.dv_xname, value);
	eshstatus(sc);
#endif

	/* RunCode is up.  Initialize NIC */

	esh_write_addr(iot, ioh, RR_GEN_INFO_PTR, sc->sc_gen_info_dma);
	esh_write_addr(iot, ioh, RR_RECV_RING_PTR, sc->sc_recv_ring_table_dma);

	sc->sc_event_consumer = 0;
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, sc->sc_event_consumer);
	sc->sc_event_producer = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_cmd_producer = RR_INIT_CMD;
	sc->sc_cmd_consumer = 0;

	mode = bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS);
	mode |= (RR_MS_WARNINGS |
	    RR_MS_ERR_TERM |
	    RR_MS_NO_RESTART |
	    RR_MS_SWAP_DATA);
	mode &= ~RR_MS_PH_MODE;
	bus_space_write_4(iot, ioh, RR_MODE_AND_STATUS, mode);

#if 0
#ifdef ESH_PRINTF
	printf("eshinit:  misc_local_ctl %x, SRAM size %d\n", misc_local_ctl,
	    sc->sc_sram_size);
#endif
	/*
	misc_local_ctl |= (RR_LC_FAST_PROM | RR_LC_PARITY_ON);
	*/
	if (sc->sc_sram_size > 256 * 1024) {
		misc_local_ctl |= RR_LC_ADD_SRAM;
	}
#endif

#ifdef ESH_PRINTF
	printf("eshinit:  misc_local_ctl %x\n", misc_local_ctl);
#endif
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);

	/* Set tuning parameters */

	bus_space_write_4(iot, ioh, RR_CONN_RETRY_COUNT,
	    sc->sc_tune.rt_conn_retry_count);
	bus_space_write_4(iot, ioh, RR_CONN_RETRY_TIMER,
	    sc->sc_tune.rt_conn_retry_timer);
	bus_space_write_4(iot, ioh, RR_CONN_TIMEOUT,
	    sc->sc_tune.rt_conn_timeout);
	bus_space_write_4(iot, ioh, RR_INTERRUPT_TIMER,
	    sc->sc_tune.rt_interrupt_timer);
	bus_space_write_4(iot, ioh, RR_TX_TIMEOUT,
	    sc->sc_tune.rt_tx_timeout);
	bus_space_write_4(iot, ioh, RR_RX_TIMEOUT,
	    sc->sc_tune.rt_rx_timeout);
	bus_space_write_4(iot, ioh, RR_STATS_TIMER,
	    sc->sc_tune.rt_stats_timer);
	bus_space_write_4(iot, ioh, RR_PCI_STATE,
	    sc->sc_tune.rt_pci_state);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE,
	    sc->sc_tune.rt_dma_write_state);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE,
	    sc->sc_tune.rt_dma_read_state);

	sc->sc_max_rings = bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS);

	/*
	 * Now that the RunCode is up we can read its real version
	 * (high 16 bits = major); only major versions 1 and 2 are driven.
	 */
	sc->sc_runcode_version =
	    bus_space_read_4(iot, ioh, RR_RUNCODE_VERSION);
	sc->sc_version = sc->sc_runcode_version >> 16;
	if (sc->sc_version != 1 && sc->sc_version != 2) {
		printf("%s: bad version number %d in runcode\n",
		    sc->sc_dev.dv_xname, sc->sc_version);
		goto bad_init;
	}

	if (sc->sc_version == 1) {
		sc->sc_options = 0;
	} else {
		value = bus_space_read_4(iot, ioh, RR_ULA);
		sc->sc_options = value >> 16;
	}

	/* Long-descriptor firmware variants are not supported. */
	if (sc->sc_options & (RR_OP_LONG_TX | RR_OP_LONG_RX)) {
		printf("%s: unsupported firmware -- long descriptors\n",
		    sc->sc_dev.dv_xname);
		goto bad_init;
	}

	printf("%s: startup runcode version %d.%d.%d, options %x\n",
	    sc->sc_dev.dv_xname,
	    sc->sc_version,
	    (sc->sc_runcode_version >> 8) & 0xff,
	    sc->sc_runcode_version & 0xff,
	    sc->sc_options);

	/* Initialize the general ring information */

	memset(sc->sc_recv_ring_table, 0,
	    sizeof(struct rr_ring_ctl) * RR_ULP_COUNT);

	ring = &sc->sc_gen_info->ri_event_ring_ctl;
	ring->rr_ring_addr = sc->sc_event_ring_dma;
	ring->rr_entry_size = sizeof(struct rr_event);
	ring->rr_free_bufs = RR_EVENT_RING_SIZE / 4;
	ring->rr_entries = RR_EVENT_RING_SIZE;
	ring->rr_prod_index = 0;

	ring = &sc->sc_gen_info->ri_cmd_ring_ctl;
	ring->rr_free_bufs = 8;
	ring->rr_entry_size = sizeof(union rr_cmd);
	ring->rr_prod_index = RR_INIT_CMD;

	ring = &sc->sc_gen_info->ri_send_ring_ctl;
	ring->rr_ring_addr = sc->sc_send_ring_dma;
	if (sc->sc_version == 1) {
		ring->rr_free_bufs = RR_RR_DONT_COMPLAIN;
	} else {
		ring->rr_free_bufs = 0;
	}

	ring->rr_entries = RR_SEND_RING_SIZE;
	ring->rr_entry_size = sizeof(struct rr_descr);

	ring->rr_prod_index = sc->sc_send.ec_producer =
	    sc->sc_send.ec_consumer = 0;
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;

	sc->sc_snap_recv.ec_descr = sc->sc_snap_recv_ring;
	sc->sc_snap_recv.ec_consumer = sc->sc_snap_recv.ec_producer = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set up the watchdog to make sure something happens! */

	sc->sc_watchdog = 0;
	ifp->if_timer = 5;

	/*
	 * Can't actually turn on interface until we see some events,
	 * so set initialized flag, but don't start sending.
	 */

	sc->sc_flags = ESH_FL_INITIALIZED;
	esh_send_cmd(sc, RR_CC_START_RUNCODE, 0, 0);
	return;

bad_init:
	sc->sc_flags = 0;
	/* Wake anyone (e.g. esh_fpopen) waiting for the init to finish. */
	wakeup((void *) sc);
	return;
}
700
701
702 /*
703 * Code to handle the Framing Protocol (FP) interface to the esh.
704 * This will allow us to write directly to the wire, with no
705 * intervening memcpy's to slow us down.
706 */
707
708 int
709 esh_fpopen(dev_t dev, int oflags, int devtype,
710 struct lwp *l)
711 {
712 struct esh_softc *sc;
713 struct rr_ring_ctl *ring_ctl;
714 struct esh_fp_ring_ctl *recv;
715 int ulp = ESHULP(dev);
716 int error = 0;
717 bus_size_t size;
718 int rseg;
719 int s;
720
721 sc = device_lookup(&esh_cd, ESHUNIT(dev));
722 if (sc == NULL || ulp == HIPPI_ULP_802)
723 return (ENXIO);
724
725 #ifdef ESH_PRINTF
726 printf("esh_fpopen: opening board %d, ulp %d\n",
727 device_unit(&sc->sc_dev), ulp);
728 #endif
729
730 /* If the card is not up, initialize it. */
731
732 s = splnet();
733
734 if (sc->sc_fp_rings >= sc->sc_max_rings - 1) {
735 splx(s);
736 return (ENOSPC);
737 }
738
739 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
740 eshinit(sc);
741 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0)
742 return EIO;
743 }
744
745 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
746 /*
747 * Wait for the runcode to indicate that it is up,
748 * while watching to make sure we haven't crashed.
749 */
750
751 error = 0;
752 while (error == 0 &&
753 (sc->sc_flags & ESH_FL_INITIALIZED) != 0 &&
754 (sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
755 error = tsleep((void *) sc, PCATCH | PRIBIO,
756 "eshinit", 0);
757 #ifdef ESH_PRINTF
758 printf("esh_fpopen: tslept\n");
759 #endif
760 }
761
762 if (error != 0) {
763 splx(s);
764 return error;
765 }
766
767 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
768 splx(s);
769 return EIO;
770 }
771 }
772
773
774 #ifdef ESH_PRINTF
775 printf("esh_fpopen: card up\n");
776 #endif
777
778 /* Look at the ring descriptor to see if the ULP is in use */
779
780 ring_ctl = &sc->sc_recv_ring_table[ulp];
781 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
782 (caddr_t) ring_ctl - (caddr_t) sc->sc_dma_addr,
783 sizeof(*ring_ctl),
784 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
785 if (ring_ctl->rr_entry_size != 0) {
786 splx(s);
787 return (EBUSY);
788 }
789
790 #ifdef ESH_PRINTF
791 printf("esh_fpopen: ring %d okay\n", ulp);
792 #endif
793
794 /*
795 * Allocate the DMA space for the ring; space for the
796 * ring control blocks has already been staticly allocated.
797 */
798
799 recv = (struct esh_fp_ring_ctl *)
800 malloc(sizeof(*recv), M_DEVBUF, M_WAITOK|M_ZERO);
801 if (recv == NULL)
802 return(ENOMEM);
803 TAILQ_INIT(&recv->ec_queue);
804
805 size = RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr);
806 error = bus_dmamem_alloc(sc->sc_dmat, size, 0, RR_DMA_BOUNDARY,
807 &recv->ec_dmaseg, 1,
808 &rseg, BUS_DMA_WAITOK);
809
810 if (error) {
811 printf("%s: couldn't allocate space for FP receive ring"
812 "data structures\n", sc->sc_dev.dv_xname);
813 goto bad_fp_dmamem_alloc;
814 }
815
816 if (rseg > 1) {
817 printf("%s: contiguous memory not available for "
818 "FP receive ring\n", sc->sc_dev.dv_xname);
819 goto bad_fp_dmamem_map;
820 }
821
822 error = bus_dmamem_map(sc->sc_dmat, &recv->ec_dmaseg, rseg,
823 size, (caddr_t *) &recv->ec_descr,
824 BUS_DMA_WAITOK | BUS_DMA_COHERENT);
825 if (error) {
826 printf("%s: couldn't map memory for FP receive ring\n",
827 sc->sc_dev.dv_xname);
828 goto bad_fp_dmamem_map;
829 }
830
831 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, RR_DMA_BOUNDARY,
832 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
833 &recv->ec_dma)) {
834 printf("%s: couldn't create DMA map for FP receive ring\n",
835 sc->sc_dev.dv_xname);
836 goto bad_fp_dmamap_create;
837 }
838
839 if (bus_dmamap_load(sc->sc_dmat, recv->ec_dma, recv->ec_descr,
840 size, NULL, BUS_DMA_WAITOK)) {
841 printf("%s: couldn't load DMA map for FP receive ring\n",
842 sc->sc_dev.dv_xname);
843 goto bad_fp_dmamap_load;
844 }
845
846 memset(recv->ec_descr, 0, size);
847
848 /*
849 * Create the ring:
850 *
851 * XXX: HTF are we gonna deal with the fact that we don't know
852 * if the open succeeded until we get a response from
853 * the event handler? I guess we could go to sleep waiting
854 * for the interrupt, and get woken up by the eshintr
855 * case handling it.
856 */
857
858 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
859 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
860 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
861 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
862 ring_ctl->rr_prod_index = recv->ec_producer = recv->ec_consumer = 0;
863 ring_ctl->rr_mode = RR_RR_CHARACTER;
864 recv->ec_ulp = ulp;
865 recv->ec_index = -1;
866
867 sc->sc_fp_recv[ulp] = recv;
868
869 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
870 (caddr_t) ring_ctl - (caddr_t) sc->sc_dma_addr,
871 sizeof(*ring_ctl),
872 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
873
874 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma, 0, size,
875 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
876
877 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
878
879 #ifdef ESH_PRINTF
880 printf("esh_fpopen: sent create ring cmd\n");
881 #endif
882
883 while (recv->ec_index == -1) {
884 error = tsleep((void *) &recv->ec_ulp, PCATCH | PRIBIO,
885 "eshfpopen", 0);
886 if (error != 0 || recv->ec_index == -1) {
887 splx(s);
888 goto bad_fp_ring_create;
889 }
890 }
891 #ifdef ESH_PRINTF
892 printf("esh_fpopen: created ring\n");
893 #endif
894
895 /*
896 * Ring is created. Set up various pointers to the ring
897 * information, fill the ring, and get going...
898 */
899
900 sc->sc_fp_rings++;
901 splx(s);
902 return 0;
903
904 bad_fp_ring_create:
905 #ifdef ESH_PRINTF
906 printf("esh_fpopen: bad ring create\n");
907 #endif
908 sc->sc_fp_recv[ulp] = NULL;
909 memset(ring_ctl, 0, sizeof(*ring_ctl));
910 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma);
911 bad_fp_dmamap_load:
912 bus_dmamap_destroy(sc->sc_dmat, recv->ec_dma);
913 bad_fp_dmamap_create:
914 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) recv->ec_descr, size);
915 bad_fp_dmamem_map:
916 bus_dmamem_free(sc->sc_dmat, &recv->ec_dmaseg, rseg);
917 bad_fp_dmamem_alloc:
918 free(recv, M_DEVBUF);
919 if (error == 0)
920 error = ENOMEM;
921 splx(s);
922 return (error);
923 }
924
925
926 int
927 esh_fpclose(dev_t dev, int fflag, int devtype,
928 struct lwp *l)
929 {
930 struct esh_softc *sc;
931 struct rr_ring_ctl *ring_ctl;
932 struct esh_fp_ring_ctl *ring;
933 int ulp = ESHULP(dev);
934 int index;
935 int error = 0;
936 int s;
937
938 sc = device_lookup(&esh_cd, ESHUNIT(dev));
939 if (sc == NULL || ulp == HIPPI_ULP_802)
940 return (ENXIO);
941
942 s = splnet();
943
944 ring = sc->sc_fp_recv[ulp];
945 ring_ctl = &sc->sc_recv_ring_table[ulp];
946 index = ring->ec_index;
947
948 #ifdef ESH_PRINTF
949 printf("esh_fpclose: closing unit %d, ulp %d\n",
950 device_unit(&sc->sc_dev), ulp);
951 #endif
952 assert(ring);
953 assert(ring_ctl);
954
955 /*
956 * Disable the ring, wait for notification, and get rid of DMA
957 * stuff and dynamically allocated memory. Loop, waiting to
958 * learn that the ring has been disabled, or the card
959 * has been shut down.
960 */
961
962 do {
963 esh_send_cmd(sc, RR_CC_DISABLE_RING, ulp, ring->ec_producer);
964
965 error = tsleep((void *) &ring->ec_index, PCATCH | PRIBIO,
966 "esh_fpclose", 0);
967 if (error != 0 && error != EAGAIN) {
968 printf("%s: esh_fpclose: wait on ring disable bad\n",
969 sc->sc_dev.dv_xname);
970 ring->ec_index = -1;
971 break;
972 }
973 } while (ring->ec_index != -1 && sc->sc_flags != 0);
974
975 /*
976 * XXX: Gotta unload the ring, removing old descriptors!
977 * *Can* there be outstanding reads with a close issued!?
978 */
979
980 bus_dmamap_unload(sc->sc_dmat, ring->ec_dma);
981 bus_dmamap_destroy(sc->sc_dmat, ring->ec_dma);
982 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) ring->ec_descr,
983 RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr));
984 bus_dmamem_free(sc->sc_dmat, &ring->ec_dmaseg, ring->ec_dma->dm_nsegs);
985 free(ring, M_DEVBUF);
986 memset(ring_ctl, 0, sizeof(*ring_ctl));
987 sc->sc_fp_recv[ulp] = NULL;
988 sc->sc_fp_recv_index[index] = NULL;
989
990 sc->sc_fp_rings--;
991 if (sc->sc_fp_rings == 0)
992 sc->sc_flags &= ~ESH_FL_FP_RING_UP;
993
994 splx(s);
995 return 0;
996 }
997
998 int
999 esh_fpread(dev_t dev, struct uio *uio, int ioflag)
1000 {
1001 struct lwp *l = curlwp;
1002 struct proc *p = l->l_proc;
1003 struct iovec *iovp;
1004 struct esh_softc *sc;
1005 struct esh_fp_ring_ctl *ring;
1006 struct esh_dmainfo *di;
1007 int ulp = ESHULP(dev);
1008 int error;
1009 int i;
1010 int s;
1011
1012 #ifdef ESH_PRINTF
1013 printf("esh_fpread: dev %x\n", dev);
1014 #endif
1015
1016 sc = device_lookup(&esh_cd, ESHUNIT(dev));
1017 if (sc == NULL || ulp == HIPPI_ULP_802)
1018 return (ENXIO);
1019
1020 s = splnet();
1021
1022 ring = sc->sc_fp_recv[ulp];
1023
1024 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1025 error = ENXIO;
1026 goto fpread_done;
1027 }
1028
1029 /* Check for validity */
1030 for (i = 0; i < uio->uio_iovcnt; i++) {
1031 /* Check for valid offsets and sizes */
1032 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1033 (i < uio->uio_iovcnt - 1 &&
1034 (uio->uio_iov[i].iov_len & 3) != 0)) {
1035 error = EFAULT;
1036 goto fpread_done;
1037 }
1038 }
1039
1040 PHOLD(l); /* Lock process info into memory */
1041
1042 /* Lock down the pages */
1043 for (i = 0; i < uio->uio_iovcnt; i++) {
1044 iovp = &uio->uio_iov[i];
1045 error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
1046 VM_PROT_WRITE);
1047 if (error) {
1048 /* Unlock what we've locked so far. */
1049 for (--i; i >= 0; i--) {
1050 iovp = &uio->uio_iov[i];
1051 uvm_vsunlock(p->p_vmspace, iovp->iov_base,
1052 iovp->iov_len);
1053 }
1054 goto fpread_done;
1055 }
1056 }
1057
1058 /*
1059 * Perform preliminary DMA mapping and throw the buffers
1060 * onto the queue to be sent.
1061 */
1062
1063 di = esh_new_dmainfo(sc);
1064 if (di == NULL) {
1065 error = ENOMEM;
1066 goto fpread_done;
1067 }
1068 di->ed_buf = NULL;
1069 di->ed_error = 0;
1070 di->ed_read_len = 0;
1071
1072 #ifdef ESH_PRINTF
1073 printf("esh_fpread: ulp %d, uio offset %qd, resid %d, iovcnt %d\n",
1074 ulp, uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1075 #endif
1076
1077 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1078 uio, BUS_DMA_READ|BUS_DMA_WAITOK);
1079 if (error) {
1080 printf("%s: esh_fpread: bus_dmamap_load_uio "
1081 "failed\terror code %d\n",
1082 sc->sc_dev.dv_xname, error);
1083 error = ENOBUFS;
1084 esh_free_dmainfo(sc, di);
1085 goto fpread_done;
1086 }
1087
1088 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1089 0, di->ed_dma->dm_mapsize,
1090 BUS_DMASYNC_PREREAD);
1091
1092 #ifdef ESH_PRINTF
1093 printf("esh_fpread: ulp %d, di %p, nsegs %d, uio len %d\n",
1094 ulp, di, di->ed_dma->dm_nsegs, uio->uio_resid);
1095 #endif
1096
1097 di->ed_flags |= ESH_DI_BUSY;
1098
1099 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1100 esh_fill_fp_ring(sc, ring);
1101
1102 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1103 error = tsleep((void *) di, PCATCH | PRIBIO, "esh_fpread", 0);
1104 #ifdef ESH_PRINTF
1105 printf("esh_fpread: ulp %d, tslept %d\n", ulp, error);
1106 #endif
1107 if (error) {
1108 /*
1109 * Remove the buffer entries from the ring; this
1110 * is gonna require a DISCARD_PKT command, and
1111 * will certainly disrupt things. This is why we
1112 * can have only one outstanding read on a ring
1113 * at a time. :-(
1114 */
1115
1116 printf("esh_fpread: was that a ^C!? error %d, ulp %d\n",
1117 error, ulp);
1118 if (error == EINTR || error == ERESTART)
1119 error = 0;
1120 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1121 esh_flush_fp_ring(sc, ring, di);
1122 error = EINTR;
1123 break;
1124 }
1125 }
1126 }
1127
1128 if (error == 0 && di->ed_error != 0)
1129 error = EIO;
1130
1131 /*
1132 * How do we let the caller know how much has been read?
1133 * Adjust the uio_resid stuff!?
1134 */
1135
1136 assert(uio->uio_resid >= di->ed_read_len);
1137
1138 uio->uio_resid -= di->ed_read_len;
1139 for (i = 0; i < uio->uio_iovcnt; i++) {
1140 iovp = &uio->uio_iov[i];
1141 uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
1142 }
1143
1144 PRELE(l); /* Release process info */
1145 esh_free_dmainfo(sc, di);
1146
1147 fpread_done:
1148 #ifdef ESH_PRINTF
1149 printf("esh_fpread: ulp %d, error %d\n", ulp, error);
1150 #endif
1151 splx(s);
1152 return error;
1153 }
1154
1155
1156 int
1157 esh_fpwrite(dev_t dev, struct uio *uio, int ioflag)
1158 {
1159 struct lwp *l = curlwp;
1160 struct proc *p = l->l_proc;
1161 struct iovec *iovp;
1162 struct esh_softc *sc;
1163 struct esh_send_ring_ctl *ring;
1164 struct esh_dmainfo *di;
1165 int ulp = ESHULP(dev);
1166 int error;
1167 int len;
1168 int i;
1169 int s;
1170
1171 #ifdef ESH_PRINTF
1172 printf("esh_fpwrite: dev %x\n", dev);
1173 #endif
1174
1175 sc = device_lookup(&esh_cd, ESHUNIT(dev));
1176 if (sc == NULL || ulp == HIPPI_ULP_802)
1177 return (ENXIO);
1178
1179 s = splnet();
1180
1181 ring = &sc->sc_send;
1182
1183 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1184 error = ENXIO;
1185 goto fpwrite_done;
1186 }
1187
1188 /* Check for validity */
1189 for (i = 0; i < uio->uio_iovcnt; i++) {
1190 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1191 (i < uio->uio_iovcnt - 1 &&
1192 (uio->uio_iov[i].iov_len & 3) != 0)) {
1193 error = EFAULT;
1194 goto fpwrite_done;
1195 }
1196 }
1197
1198 PHOLD(l); /* Lock process info into memory */
1199
1200 /* Lock down the pages */
1201 for (i = 0; i < uio->uio_iovcnt; i++) {
1202 iovp = &uio->uio_iov[i];
1203 error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len,
1204 VM_PROT_READ);
1205 if (error) {
1206 /* Unlock what we've locked so far. */
1207 for (--i; i >= 0; i--) {
1208 iovp = &uio->uio_iov[i];
1209 uvm_vsunlock(p->p_vmspace, iovp->iov_base,
1210 iovp->iov_len);
1211 }
1212 goto fpwrite_done;
1213 }
1214 }
1215
1216 /*
1217 * Perform preliminary DMA mapping and throw the buffers
1218 * onto the queue to be sent.
1219 */
1220
1221 di = esh_new_dmainfo(sc);
1222 if (di == NULL) {
1223 error = ENOMEM;
1224 goto fpwrite_done;
1225 }
1226 di->ed_buf = NULL;
1227 di->ed_error = 0;
1228
1229 #ifdef ESH_PRINTF
1230 printf("esh_fpwrite: uio offset %qd, resid %d, iovcnt %d\n",
1231 uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1232 #endif
1233
1234 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1235 uio, BUS_DMA_WRITE|BUS_DMA_WAITOK);
1236 if (error) {
1237 printf("%s: esh_fpwrite: bus_dmamap_load_uio "
1238 "failed\terror code %d\n",
1239 sc->sc_dev.dv_xname, error);
1240 error = ENOBUFS;
1241 esh_free_dmainfo(sc, di);
1242 goto fpwrite_done;
1243 }
1244
1245 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1246 0, di->ed_dma->dm_mapsize,
1247 BUS_DMASYNC_PREWRITE);
1248
1249 #ifdef ESH_PRINTF
1250 printf("esh_fpwrite: di %p, nsegs %d, uio len %d\n",
1251 di, di->ed_dma->dm_nsegs, uio->uio_resid);
1252 #endif
1253
1254 len = di->ed_dma->dm_mapsize;
1255 di->ed_flags |= ESH_DI_BUSY;
1256
1257 TAILQ_INSERT_TAIL(&ring->ec_di_queue, di, ed_list);
1258 eshstart(&sc->sc_if);
1259
1260 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1261 error = tsleep((void *) di, PRIBIO, "esh_fpwrite", 0);
1262 #ifdef ESH_PRINTF
1263 printf("esh_fpwrite: tslept %d\n", error);
1264 #endif
1265 if (error) {
1266 printf("esh_fpwrite: was that a ^C!? Shouldn't be! Error %d\n",
1267 error);
1268 if (error == EINTR || error == ERESTART)
1269 error = 0;
1270 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1271 panic("interrupted eshwrite!");
1272 #if 0
1273 /* Better do *something* here! */
1274 esh_flush_send_ring(sc, di);
1275 #endif
1276 error = EINTR;
1277 break;
1278 }
1279 }
1280 }
1281
1282 if (error == 0 && di->ed_error != 0)
1283 error = EIO;
1284
1285 /*
1286 * How do we let the caller know how much has been written?
1287 * Adjust the uio_resid stuff!?
1288 */
1289
1290 uio->uio_resid -= len;
1291 uio->uio_offset += len;
1292
1293 for (i = 0; i < uio->uio_iovcnt; i++) {
1294 iovp = &uio->uio_iov[i];
1295 uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len);
1296 }
1297
1298 PRELE(l); /* Release process info */
1299 esh_free_dmainfo(sc, di);
1300
1301 fpwrite_done:
1302 #ifdef ESH_PRINTF
1303 printf("esh_fpwrite: error %d\n", error);
1304 #endif
1305 splx(s);
1306 return error;
1307 }
1308
1309 void
1310 esh_fpstrategy(bp)
1311 struct buf *bp;
1312 {
1313 struct esh_softc *sc;
1314 int ulp = ESHULP(bp->b_dev);
1315 int error = 0;
1316 int s;
1317
1318 #ifdef ESH_PRINTF
1319 printf("esh_fpstrategy: starting, bcount %ld, flags %lx, dev %x\n"
1320 "\tunit %x, ulp %d\n",
1321 bp->b_bcount, bp->b_flags, bp->b_dev, unit, ulp);
1322 #endif
1323
1324 sc = device_lookup(&esh_cd, ESHUNIT(bp->b_dev));
1325
1326 s = splnet();
1327 if (sc == NULL || ulp == HIPPI_ULP_802) {
1328 bp->b_error = ENXIO;
1329 bp->b_flags |= B_ERROR;
1330 goto done;
1331 }
1332
1333 if (bp->b_bcount == 0)
1334 goto done;
1335
1336 #define UP_FLAGS (ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
1337
1338 if ((sc->sc_flags & UP_FLAGS) != UP_FLAGS) {
1339 bp->b_error = EBUSY;
1340 bp->b_flags |= B_ERROR;
1341 goto done;
1342 }
1343 #undef UP_FLAGS
1344
1345 if (bp->b_flags & B_READ) {
1346 /*
1347 * Perform preliminary DMA mapping and throw the buffers
1348 * onto the queue to be sent.
1349 */
1350
1351 struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[ulp];
1352 struct esh_dmainfo *di = esh_new_dmainfo(sc);
1353
1354 if (di == NULL) {
1355 bp->b_error = ENOMEM;
1356 bp->b_flags |= B_ERROR;
1357 goto done;
1358 }
1359 di->ed_buf = bp;
1360 error = bus_dmamap_load(sc->sc_dmat, di->ed_dma,
1361 bp->b_data, bp->b_bcount,
1362 bp->b_proc,
1363 BUS_DMA_READ|BUS_DMA_WAITOK);
1364 if (error) {
1365 printf("%s: esh_fpstrategy: "
1366 "bus_dmamap_load "
1367 "failed\terror code %d\n",
1368 sc->sc_dev.dv_xname, error);
1369 bp->b_error = ENOBUFS;
1370 bp->b_flags |= B_ERROR;
1371 esh_free_dmainfo(sc, di);
1372 goto done;
1373 }
1374
1375 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1376 0, di->ed_dma->dm_mapsize,
1377 BUS_DMASYNC_PREREAD);
1378
1379 #ifdef ESH_PRINTF
1380 printf("fpstrategy: di %p\n", di);
1381 #endif
1382
1383 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1384 esh_fill_fp_ring(sc, ring);
1385 } else {
1386 /*
1387 * Queue up the buffer for future sending. If the card
1388 * isn't already transmitting, give it a kick.
1389 */
1390
1391 struct esh_send_ring_ctl *ring = &sc->sc_send;
1392 BUFQ_PUT(ring->ec_buf_queue, bp);
1393 #ifdef ESH_PRINTF
1394 printf("esh_fpstrategy: ready to call eshstart to write!\n");
1395 #endif
1396 eshstart(&sc->sc_if);
1397 }
1398 splx(s);
1399 return;
1400
1401 done:
1402 splx(s);
1403 #ifdef ESH_PRINTF
1404 printf("esh_fpstrategy: failing, bp->b_error %d!\n",
1405 bp->b_error);
1406 #endif
1407 biodone(bp);
1408 }
1409
1410 /*
1411 * Handle interrupts. This is basicly event handling code; version two
1412 * firmware tries to speed things up by just telling us the location
1413 * of the producer and consumer indices, rather than sending us an event.
1414 */
1415
int
eshintr(arg)
	void *arg;
{
	/*
	 * Interrupt handler.  Drains the event ring, dispatching on each
	 * event code; for version-2 firmware, additionally processes the
	 * send/receive consumer indices that the card reports directly
	 * (instead of via events).  Returns nonzero iff the interrupt
	 * was ours / some action was taken.
	 */
	struct esh_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rc_offsets;
	u_int32_t misc_host_ctl;
	int rc_send_consumer = 0;	/* shut up compiler */
	int rc_snap_ring_consumer = 0;	/* ditto */
	u_int8_t fp_ring_consumer[RR_MAX_RECV_RING];
	int start_consumer;
	int ret = 0;

	/* Debug-trace state; sbuf/t only printed under ESH_PRINTF. */
	int okay = 0;
	int blah = 0;
	char sbuf[100];
	char t[100];


	/* Check to see if this is our interrupt. */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	if ((misc_host_ctl & RR_MH_INTERRUPT) == 0)
		return 0;

	/* If we can't do anything with the interrupt, just drop it */

	if (sc->sc_flags == 0)
		return 1;

	rc_offsets = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_event_producer = rc_offsets & 0xff;
	if (sc->sc_version == 2) {
		int i;

		/*
		 * V2 firmware packs the send and SNAP-ring consumer
		 * indices into the same register, and reports the FP
		 * ring consumers in a block of registers, four 8-bit
		 * values per 32-bit read.
		 */
		sbuf[0] = '\0';
		strlcat(sbuf, "rc: ", sizeof(sbuf));
		rc_send_consumer = (rc_offsets >> 8) & 0xff;
		rc_snap_ring_consumer = (rc_offsets >> 16) & 0xff;
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			rc_offsets =
				bus_space_read_4(iot, ioh,
						 RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(rc_offsets);
			*((u_int32_t *) &fp_ring_consumer[i]) = rc_offsets;
			snprintf(t, sizeof(t), "%.8x|", rc_offsets);
			strlcat(sbuf, t, sizeof(sbuf));
		}
	}
	start_consumer = sc->sc_event_consumer;

	/* Take care of synchronizing DMA with entries we read... */

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Consume events until we catch up with the producer. */
	while (sc->sc_event_consumer != sc->sc_event_producer) {
		struct rr_event *event =
			&sc->sc_event_ring[sc->sc_event_consumer];

#ifdef ESH_PRINTF
		if (event->re_code != RR_EC_WATCHDOG &&
		    event->re_code != RR_EC_STATS_UPDATE &&
		    event->re_code != RR_EC_SET_CMD_CONSUMER) {
			printf("%s:  event code %x, ring %d, index %d\n",
			       sc->sc_dev.dv_xname, event->re_code,
			       event->re_ring, event->re_index);
			if (okay == 0)
				printf("%s\n", sbuf);
			okay = 1;
		}
#endif
		ret = 1;   /* some action was taken by card */

		switch(event->re_code) {
		case RR_EC_RUNCODE_UP:
			/* Firmware finished booting; bring rings back up. */
			printf("%s:  firmware up\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_RUNCODE_UP;
			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			if ((ifp->if_flags & IFF_UP) != 0)
				esh_init_snap_ring(sc);
			if (sc->sc_fp_rings > 0)
				esh_init_fp_rings(sc);

			/*
			 * XXX:  crank up FP rings that might be
			 *       in use after a reset!
			 */
			wakeup((void *) sc);
			break;

		case RR_EC_WATCHDOG:
			/*
			 * Record the watchdog event.
			 * This is checked by eshwatchdog
			 */

			sc->sc_watchdog = 1;
			break;

		case RR_EC_SET_CMD_CONSUMER:
			sc->sc_cmd_consumer = event->re_index;
			break;

		case RR_EC_LINK_ON:
			printf("%s:  link up\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_LINK_UP;

			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
			if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
				/*
				 * Interface is now `running', with no
				 * output active.
				 */
				ifp->if_flags |= IFF_RUNNING;
				ifp->if_flags &= ~IFF_OACTIVE;

				/* Attempt to start output, if any. */
			}
			eshstart(ifp);
			break;

		case RR_EC_LINK_OFF:
			sc->sc_flags &= ~ESH_FL_LINK_UP;
			printf("%s:  link down\n", sc->sc_dev.dv_xname);
			break;

		/*
		 * These are all unexpected.  We need to handle all
		 * of them, though.
		 */

		case RR_EC_INVALID_CMD:
		case RR_EC_INTERNAL_ERROR:
		case RR2_EC_INTERNAL_ERROR:
		case RR_EC_BAD_SEND_RING:
		case RR_EC_BAD_SEND_BUF:
		case RR_EC_BAD_SEND_DESC:
		case RR_EC_RECV_RING_FLUSH:
		case RR_EC_RECV_ERROR_INFO:
		case RR_EC_BAD_RECV_BUF:
		case RR_EC_BAD_RECV_DESC:
		case RR_EC_BAD_RECV_RING:
		case RR_EC_UNIMPLEMENTED:
			/* Fatal firmware complaint:  mark card crashed. */
			printf("%s:  unexpected event %x;"
			       "shutting down interface\n",
			       sc->sc_dev.dv_xname, event->re_code);
			ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
			sc->sc_flags = ESH_FL_CRASHED;
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			break;

#define CALLOUT(a) case a:						\
	printf("%s:  Event " #a " received -- "				\
	       "ring %d index %d timestamp %x\n",			\
	       sc->sc_dev.dv_xname, event->re_ring, event->re_index,	\
	       event->re_timestamp);					\
	break;

		CALLOUT(RR_EC_NO_RING_FOR_ULP);
		CALLOUT(RR_EC_REJECTING);  /* dropping packets */
#undef CALLOUT

			/* Send events */

		case RR_EC_PACKET_SENT:   	/* not used in firmware 2.x */
			ifp->if_opackets++;
			/* FALLTHROUGH */

		case RR_EC_SET_SND_CONSUMER:
			assert(sc->sc_version == 1);
			/* FALLTHROUGH */

		case RR_EC_SEND_RING_LOW:
			eshstart_cleanup(sc, event->re_index, 0);
			break;


		case RR_EC_CONN_REJECT:
		case RR_EC_CAMPON_TIMEOUT:
		case RR_EC_CONN_TIMEOUT:
		case RR_EC_DISCONN_ERR:
		case RR_EC_INTERNAL_PARITY:
		case RR_EC_TX_IDLE:
		case RR_EC_SEND_LINK_OFF:
			/* Send-side errors:  reap with the error code. */
			eshstart_cleanup(sc, event->re_index, event->re_code);
			break;

			/* Receive events */

		case RR_EC_RING_ENABLED:
			if (event->re_ring == HIPPI_ULP_802) {
				rc_snap_ring_consumer = 0; /* prevent read */
				sc->sc_flags |= ESH_FL_SNAP_RING_UP;
				esh_fill_snap_ring(sc);

				if (sc->sc_flags & ESH_FL_LINK_UP) {
					/*
					 * Interface is now `running', with no
					 * output active.
					 */
					ifp->if_flags |= IFF_RUNNING;
					ifp->if_flags &= ~IFF_OACTIVE;

					/* Attempt to start output, if any. */

					eshstart(ifp);
				}
#ifdef ESH_PRINTF
				if (event->re_index != 0)
					printf("ENABLE snap ring -- index %d instead of 0!\n",
					       event->re_index);
#endif
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];

				sc->sc_flags |= ESH_FL_FP_RING_UP;
#ifdef ESH_PRINTF
				printf("eshintr:  FP ring %d up\n",
				       event->re_ring);
#endif

				sc->sc_fp_recv_index[event->re_index] = ring;
				ring->ec_index = event->re_index;
				/* Waker:  esh_fpopen waiting on ec_ulp. */
				wakeup((void *) &ring->ec_ulp);
			}
			break;

		case RR_EC_RING_DISABLED:
#ifdef ESH_PRINTF
			printf("eshintr:  disabling ring %d\n",
			       event->re_ring);
#endif
			if (event->re_ring == HIPPI_ULP_802) {
				/* Tear down the SNAP ring's mbufs. */
				struct rr_ring_ctl *ring =
					sc->sc_recv_ring_table + HIPPI_ULP_802;
				memset(ring, 0, sizeof(*ring));
				sc->sc_flags &= ~ESH_FL_CLOSING_SNAP;
				sc->sc_flags &= ~ESH_FL_SNAP_RING_UP;
				while (sc->sc_snap_recv.ec_consumer
				       != sc->sc_snap_recv.ec_producer) {
					struct mbuf *m0;
					u_int16_t offset = sc->sc_snap_recv.ec_consumer;

					bus_dmamap_unload(sc->sc_dmat,
							  sc->sc_snap_recv.ec_dma[offset]);
					MFREE(sc->sc_snap_recv.ec_m[offset], m0);
					sc->sc_snap_recv.ec_m[offset] = NULL;
					sc->sc_snap_recv.ec_consumer =
						NEXT_RECV(sc->sc_snap_recv.ec_consumer);
				}
				sc->sc_snap_recv.ec_consumer =
					rc_snap_ring_consumer;
				sc->sc_snap_recv.ec_producer =
					rc_snap_ring_consumer;
				wakeup((void *) &sc->sc_snap_recv);
			} else {
				struct esh_fp_ring_ctl *recv =
					sc->sc_fp_recv[event->re_ring];
				assert(recv != NULL);
				recv->ec_consumer = recv->ec_producer =
					fp_ring_consumer[recv->ec_index];
				/* ec_index == -1 tells esh_fpclose we're done. */
				recv->ec_index = -1;
				wakeup((void *) &recv->ec_index);
			}
			break;

		case RR_EC_RING_ENABLE_ERR:
			if (event->re_ring == HIPPI_ULP_802) {
				printf("%s:  unable to enable SNAP ring!?\n\t"
				       "shutting down interface\n",
				       sc->sc_dev.dv_xname);
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef ESH_PRINTF
				eshstatus(sc);
#endif
			} else {
				/*
				 * If we just leave the ring index as-is,
				 * the driver will figure out that
				 * we failed to open the ring.
				 */
				wakeup((void *) &(sc->sc_fp_recv[event->re_ring]->ec_ulp));
			}
			break;

		case RR_EC_PACKET_DISCARDED:
		        /*
			 * Determine the dmainfo for the current packet
			 * we just discarded and wake up the waiting
			 * process.
			 *
			 * This should never happen on the network ring!
			 */

			if (event->re_ring == HIPPI_ULP_802) {
				printf("%s:  discard on SNAP ring!?\n\t"
				       "shutting down interface\n",
				       sc->sc_dev.dv_xname);
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
				sc->sc_flags = ESH_FL_CRASHED;
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];
				struct esh_dmainfo *di =
					ring->ec_cur_dmainfo;

				if (di == NULL)
					di = ring->ec_dmainfo[ring->ec_producer];
				printf("eshintr:  DISCARD:  index %d,"
				       "ring prod %d, di %p, ring[index] %p\n",
				       event->re_index, ring->ec_producer, di,
				       ring->ec_dmainfo[event->re_index]);

				if (di == NULL)
					di = ring->ec_dmainfo[event->re_index];

				if (di == NULL) {
					printf("eshintr:  DISCARD:  NULL di, skipping...\n");
					break;
				}

				di->ed_flags &=
					~(ESH_DI_READING | ESH_DI_BUSY);
				wakeup((void *) &di->ed_flags);
			}
			break;

		case RR_EC_OUT_OF_BUF:
		case RR_EC_RECV_RING_OUT:
		case RR_EC_RECV_RING_LOW:
			break;

		case RR_EC_SET_RECV_CONSUMER:
		case RR_EC_PACKET_RECVED:
			if (event->re_ring == HIPPI_ULP_802)
				esh_read_snap_ring(sc, event->re_index, 0);
			else if (sc->sc_fp_recv[event->re_ring] != NULL)
				esh_read_fp_ring(sc, event->re_index, 0,
						 event->re_ring);
			break;

		case RR_EC_RECV_IDLE:
		case RR_EC_PARITY_ERR:
		case RR_EC_LLRC_ERR:
		case RR_EC_PKT_LENGTH_ERR:
		case RR_EC_IP_HDR_CKSUM_ERR:
		case RR_EC_DATA_CKSUM_ERR:
		case RR_EC_SHORT_BURST_ERR:
		case RR_EC_RECV_LINK_OFF:
		case RR_EC_FLAG_SYNC_ERR:
		case RR_EC_FRAME_ERR:
		case RR_EC_STATE_TRANS_ERR:
		case RR_EC_NO_READY_PULSE:
			/* Receive-side errors:  propagate the error code. */
			if (event->re_ring == HIPPI_ULP_802) {
				esh_read_snap_ring(sc, event->re_index,
						   event->re_code);
			} else {
				struct esh_fp_ring_ctl *r;

				r = sc->sc_fp_recv[event->re_ring];
				if (r)
					r->ec_error = event->re_code;
			}
			break;

		/*
		 * Statistics events can be ignored for now.  They might become
		 * necessary if we have to deliver stats on demand, rather than
		 * just returning the statistics block of memory.
		 */

		case RR_EC_STATS_UPDATE:
		case RR_EC_STATS_RETRIEVED:
		case RR_EC_TRACE:
			break;

		default:
			printf("%s:  Bogus event code %x, "
			       "ring %d, index %d, timestamp %x\n",
			       sc->sc_dev.dv_xname, event->re_code,
			       event->re_ring, event->re_index,
			       event->re_timestamp);
			break;
		}

		sc->sc_event_consumer = NEXT_EVENT(sc->sc_event_consumer);
	}

	/* Do the receive and send ring processing for version 2 RunCode */

	if (sc->sc_version == 2) {
		int i;
		if (sc->sc_send.ec_consumer != rc_send_consumer) {
			eshstart_cleanup(sc, rc_send_consumer, 0);
			ret = 1;
			blah++;
		}
		if (sc->sc_snap_recv.ec_consumer != rc_snap_ring_consumer &&
		    (sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
			esh_read_snap_ring(sc, rc_snap_ring_consumer, 0);
			ret = 1;
			blah++;
		}
		for (i = 0; i < RR_MAX_RECV_RING; i++) {
			struct esh_fp_ring_ctl *r = sc->sc_fp_recv_index[i];

			if (r != NULL &&
			    r->ec_consumer != fp_ring_consumer[i]) {
#ifdef ESH_PRINTF
				printf("eshintr:  performed read on ring %d, index %d\n",
				       r->ec_ulp, i);
#endif
				blah++;
				esh_read_fp_ring(sc, fp_ring_consumer[i],
						 0, r->ec_ulp);
				fp_ring_consumer[i] = r->ec_consumer;
			}
		}
		if (blah != 0 && okay == 0) {
			okay = 1;
#ifdef ESH_PRINTF
			printf("%s\n", sbuf);
#endif
		}
		rc_offsets = (sc->sc_snap_recv.ec_consumer << 16) |
			(sc->sc_send.ec_consumer << 8) | sc->sc_event_consumer;
	} else {
		rc_offsets = sc->sc_event_consumer;
	}

	/*
	 * NOTE(review):  this second sync uses POSTREAD|POSTWRITE like the
	 * first; other rings in this driver pair POST on entry with PRE on
	 * exit -- confirm intent before changing.
	 */
	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Write out new values for the FP segments... */

	if (sc->sc_version == 2) {
		int i;
		u_int32_t u;

		sbuf[0] = '\0';
		strlcat(sbuf, "drv: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			/* XXX: should do this right! */
			u = *((u_int32_t *) &fp_ring_consumer[i]);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
			NTOHL(u);
			bus_space_write_4(iot, ioh,
					  RR_DRIVER_RECV_CONS + i, u);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif

		sbuf[0] = '\0';
		strlcat(sbuf, "rcn: ", sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			u = bus_space_read_4(iot, ioh,
					     RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(u);
			snprintf(t, sizeof(t), "%.8x|", u);
			strlcat(sbuf, t, sizeof(sbuf));
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", sbuf);
#endif
	}

	/* Clear interrupt */
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, rc_offsets);

	return (ret);
}
1909
1910
1911 /*
1912 * Start output on the interface. Always called at splnet().
1913 * Check to see if there are any mbufs that didn't get sent the
1914 * last time this was called. If there are none, get more mbufs
1915 * and send 'em.
1916 *
1917 * For now, we only send one packet at a time.
1918 */
1919
void
eshstart(ifp)
	struct ifnet *ifp;
{
	/*
	 * Select the next thing to transmit, in priority order:
	 *   1. network (SNAP) mbufs from the interface send queue,
	 *   2. FP buffers queued by esh_fpstrategy (buf queue),
	 *   3. FP dmainfos queued by esh_fpwrite (di queue),
	 * DMA-map it, then hand it to esh_send() to fill descriptors.
	 * Only one packet is in flight at a time (see XXX below).
	 */
	struct esh_softc *sc = ifp->if_softc;
	struct esh_send_ring_ctl *send = &sc->sc_send;
	struct mbuf *m = NULL;
	int error;

	/* Don't transmit if interface is busy or not running */

#ifdef ESH_PRINTF
	printf("eshstart:  ready to look;  flags %x\n", sc->sc_flags);
#endif

#define LINK_UP_FLAGS (ESH_FL_LINK_UP | ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
	if ((sc->sc_flags & LINK_UP_FLAGS) != LINK_UP_FLAGS)
		return;
#undef LINK_UP_FLAGS

#ifdef ESH_PRINTF
	if (esh_check(sc))
		return;
#endif

	/* If we have sent the current packet, get another */

	while ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0 &&
	       (m = send->ec_cur_mbuf) == NULL && send->ec_cur_buf == NULL &&
	       send->ec_cur_dmainfo == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)		/* not really needed */
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf) {
			/*
			 * On output, the raw packet has a eight-byte CCI
			 * field prepended.  On input, there is no such field.
			 * The bpf expects the packet to look the same in both
			 * places, so we temporarily lop off the prepended CCI
			 * field here, then replace it.  Ugh.
			 *
			 * XXX:  Need to use standard mbuf manipulation
			 *       functions, first mbuf may be less than
			 *       8 bytes long.
			 */

			m->m_len -= 8;
			m->m_data += 8;
			m->m_pkthdr.len -= 8;
			bpf_mtap(ifp->if_bpf, m);
			m->m_len += 8;
			m->m_data -= 8;
			m->m_pkthdr.len += 8;
		}
#endif

		send->ec_len = m->m_pkthdr.len;
		/* Re-layout the chain to satisfy NIC alignment rules. */
		m = send->ec_cur_mbuf = esh_adjust_mbufs(sc, m);
		if (m == NULL)
			continue;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, send->ec_dma,
					     m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error)
			panic("%s:  eshstart:  "
			      "bus_dmamap_load_mbuf failed err %d\n",
			      sc->sc_dev.dv_xname, error);
		send->ec_offset = 0;
	}

	/*
	 * If there are no network packets to send, see if there
	 * are any FP packets to send.
	 *
	 * XXX:  Some users may disagree with these priorities;
	 *       this reduces network latency by increasing FP latency...
	 *	 Note that it also means that FP packets can get
	 *	 locked out so that they *never* get sent, if the
	 *	 network constantly fills up the pipe.  Not good!
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL &&
	    BUFQ_PEEK(send->ec_buf_queue) != NULL) {
		struct buf *bp;

#ifdef ESH_PRINTF
		printf("eshstart:  getting a buf from send->ec_queue %p\n",
		       send->ec_queue);
#endif

		bp = send->ec_cur_buf = BUFQ_GET(send->ec_buf_queue);
		send->ec_offset = 0;
		send->ec_len = bp->b_bcount;

		/*
		 * Determine the DMA mapping for the buffer.
		 * If this is too large, what do we do!?
		 */

		error = bus_dmamap_load(sc->sc_dmat, send->ec_dma,
					bp->b_data, bp->b_bcount,
					bp->b_proc,
					BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (error)
			panic("%s:  eshstart:  "
			      "bus_dmamap_load failed err %d\n",
			      sc->sc_dev.dv_xname, error);
	}

	/*
	 * If there are no packets from strategy to send, see if there
	 * are any FP packets to send from fpwrite.
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL) {
		struct esh_dmainfo *di;

		di = TAILQ_FIRST(&send->ec_di_queue);
		if (di == NULL)
			return;
		TAILQ_REMOVE(&send->ec_di_queue, di, ed_list);

#ifdef ESH_PRINTF
		printf("eshstart:  getting a di from send->ec_di_queue %p\n",
		       &send->ec_di_queue);
#endif

		send->ec_cur_dmainfo = di;
		send->ec_offset = 0;
		send->ec_len = di->ed_dma->dm_mapsize;
	}

	/* Nothing to do at all?  Then we're done. */
	if (send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL)
		return;

	assert(send->ec_len);
	assert(send->ec_dma->dm_nsegs ||
	       send->ec_cur_dmainfo->ed_dma->dm_nsegs);
	assert(send->ec_cur_mbuf || send->ec_cur_buf || send->ec_cur_dmainfo);

	esh_send(sc);
	return;
}
2071
2072
2073 /*
2074 * Put the buffers from the send dmamap into the descriptors and
2075 * send 'em off...
2076 */
2077
static void
esh_send(sc)
	struct esh_softc *sc;
{
	/*
	 * Copy the current transmit dmamap's segments into send-ring
	 * descriptors (marking packet start/end), then advance the
	 * producer index on the NIC -- via command on v1 firmware,
	 * via direct register write on v2.
	 */
	struct esh_send_ring_ctl *send = &sc->sc_send;
	u_int start_producer = send->ec_producer;
	bus_dmamap_t dma;

	/* fpwrite requests carry their own dmamap; others share ec_dma. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("esh_send:  producer %x  consumer %x  nsegs %d\n",
	       send->ec_producer, send->ec_consumer, dma->dm_nsegs);
#endif

	esh_dma_sync(sc, send->ec_descr, send->ec_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Fill descriptors until the ring is full or the map is consumed. */
	while (NEXT_SEND(send->ec_producer) != send->ec_consumer &&
	       send->ec_offset < dma->dm_nsegs) {
		int offset = send->ec_producer;

		send->ec_descr[offset].rd_buffer_addr =
			dma->dm_segs[send->ec_offset].ds_addr;
		send->ec_descr[offset].rd_length =
			dma->dm_segs[send->ec_offset].ds_len;
		send->ec_descr[offset].rd_control = 0;

		if (send->ec_offset == 0) {
			/* Start of the dmamap... */
			send->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		if (send->ec_offset + 1 == dma->dm_nsegs) {
			send->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
		}

		send->ec_offset++;
		send->ec_producer = NEXT_SEND(send->ec_producer);
	}

	/*
	 * XXX:  we could optimize the dmamap_sync to just get what we've
	 *       just set up, rather than the whole buffer...
	 */

	bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
			BUS_DMASYNC_PREWRITE);
	esh_dma_sync(sc, send->ec_descr,
		     start_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef ESH_PRINTF
	if (send->ec_offset != dma->dm_nsegs)
		printf("eshstart:  couldn't fit packet in send ring!\n");
#endif

	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_SEND_PRODUCER,
			     0, send->ec_producer);
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_SEND_PRODUCER, send->ec_producer);
	}
	return;
}
2150
2151
2152 /*
2153 * Cleanup for the send routine. When the NIC sends us an event to
2154 * let us know that it has consumed our buffers, we need to free the
2155 * buffers, and possibly send another packet.
2156 */
2157
static void
eshstart_cleanup(sc, consumer, error)
	struct esh_softc *sc;
	u_int16_t consumer;
	int error;
{
	/*
	 * Reap completed send descriptors up to `consumer' (the index
	 * the NIC reports it has consumed).  When a PACKET_END
	 * descriptor is reached, unload the dmamap and complete the
	 * originating request -- free the mbuf, biodone() the buf, or
	 * wake the fpwrite sleeper -- then try to start more output.
	 * `error' is a card-reported send error code (0 if none).
	 */
	struct esh_send_ring_ctl *send = &sc->sc_send;
	int start_consumer = send->ec_consumer;
	bus_dmamap_t dma;

	/* fpwrite requests carry their own dmamap; others share ec_dma. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("eshstart_cleanup:  consumer %x, send->consumer %x\n",
	       consumer, send->ec_consumer);
#endif

	esh_dma_sync(sc, send->ec_descr,
		     send->ec_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (send->ec_consumer != consumer) {
		assert(dma->dm_nsegs);
		assert(send->ec_cur_mbuf || send->ec_cur_buf ||
		       send->ec_cur_dmainfo);

		if (send->ec_descr[send->ec_consumer].rd_control &
		    RR_CT_PACKET_END) {
#ifdef ESH_PRINT
			printf("eshstart_cleanup:  dmamap_sync mapsize %d\n",
			       send->ec_dma->dm_mapsize);
#endif
			bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma);
			if (send->ec_cur_mbuf) {
				m_freem(send->ec_cur_mbuf);
				send->ec_cur_mbuf = NULL;
			} else if (send->ec_cur_dmainfo) {
				/* Report any ring error to the sleeper. */
				send->ec_cur_dmainfo->ed_flags &= ~ESH_DI_BUSY;
				send->ec_cur_dmainfo->ed_error =
					(send->ec_error ? send->ec_error : error);
				send->ec_error = 0;
				wakeup((void *) send->ec_cur_dmainfo);
				send->ec_cur_dmainfo = NULL;
			} else if (send->ec_cur_buf) {
				biodone(send->ec_cur_buf);
				send->ec_cur_buf = NULL;
			} else {
				panic("%s:  eshstart_cleanup:  "
				      "no current mbuf, buf, or dmainfo!\n",
				      sc->sc_dev.dv_xname);
			}

			/*
			 * Version 1 of the firmware sent an event each
			 * time it sent out a packet.  Later versions do not
			 * (which results in a considerable speedup), so we
			 * have to keep track here.
			 */

			if (sc->sc_version != 1)
				sc->sc_if.if_opackets++;
		}
		/* Latch the first error seen until packet end. */
		if (error != 0)
			send->ec_error = error;

		send->ec_consumer = NEXT_SEND(send->ec_consumer);
	}

	esh_dma_sync(sc, send->ec_descr,
		     start_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* More work may now fit on the ring. */
	eshstart(&sc->sc_if);
}
2239
2240
2241 /*
2242 * XXX: Ouch: The NIC can only send word-aligned buffers, and only
2243 * the last buffer in the packet can have a length that is not
2244 * a multiple of four!
2245 *
2246 * Here we traverse the packet, pick out the bogus mbufs, and fix 'em
2247 * if possible. The fix is amazingly expensive, so we sure hope that
2248 * this is a rare occurance (it seems to be).
2249 */
2250
/*
 * Rewrite an outgoing mbuf chain so every buffer satisfies the NIC's
 * alignment rules:  each buffer must start word-aligned, and only the
 * last buffer may have a length that is not a multiple of four.
 * Zero-length mbufs are stripped; offending mbufs are copied (together
 * with following data) into a fresh cluster.  Returns the (possibly
 * new) head of the chain, or NULL on allocation failure (the original
 * chain is freed in that case).
 */
static struct mbuf *
esh_adjust_mbufs(sc, m)
	struct esh_softc *sc;
	struct mbuf *m;
{
	struct mbuf *m0, *n, *n0;
	u_int32_t write_len;

	write_len = m->m_pkthdr.len;
#ifdef DIAGNOSTIC
	if (write_len > max_write_len)
		max_write_len = write_len;
#endif

	/* n walks the chain; n0 trails as the predecessor for relinking. */
	for (n0 = n = m; n; n = n->m_next) {
		/*
		 * Drop empty mbufs, repairing the links (and the chain
		 * head, if the head itself was empty).  MFREE returns
		 * the successor in m0.
		 */
		while (n && n->m_len == 0) {
			MFREE(n, m0);
			if (n == m)
				n = n0 = m = m0;
			else
				n = n0->m_next = m0;
		}
		if (n == NULL)
			break;

		/*
		 * Misaligned start, or a non-final mbuf with a length
		 * that isn't a multiple of four:  copy into a cluster.
		 */
		if (mtod(n, long) & 3 || (n->m_next && n->m_len & 3)) {
			/* Gotta clean it up */
			struct mbuf *o;
			u_int32_t len;

			sc->sc_misaligned_bufs++;
			MGETHDR(o, M_DONTWAIT, MT_DATA);
			if (!o)
				goto bogosity;

			MCLGET(o, M_DONTWAIT);
			if (!(o->m_flags & M_EXT)) {
				MFREE(o, m0);
				goto bogosity;
			}

			/*
			 * XXX: Copy as much as we can into the
			 * cluster.  For now we can't have more
			 * than a cluster in there.  May change.
			 * I'd prefer not to get this
			 * down-n-dirty, but we have to be able
			 * to do this kind of funky copy.
			 */

			len = min(MCLBYTES, write_len);
#ifdef DIAGNOSTIC
			assert(n->m_len <= len);
			assert(len <= MCLBYTES);
#endif

			/* Slurp len bytes starting at n into the cluster,
			 * trim them off the old chain, and splice the
			 * cluster in front of what remains. */
			m_copydata(n, 0, len, mtod(o, void *));
			o->m_pkthdr.len = len;
			m_adj(n, len);
			o->m_len = len;
			o->m_next = n;

			if (n == m)
				m = o;
			else
				n0->m_next = o;
			n = o;
		}
		n0 = n;
		write_len -= n->m_len;
	}
	return m;

bogosity:
	printf("%s: esh_adjust_mbuf: unable to allocate cluster for "
	       "mbuf %p, len %x\n",
	       sc->sc_dev.dv_xname, mtod(m, void *), m->m_len);
	m_freem(m);
	return NULL;
}
2331
2332
2333 /*
2334 * Read in the current valid entries from the ring and forward
2335 * them to the upper layer protocols. It is possible that we
2336 * haven't received the whole packet yet, in which case we just
2337 * add each of the buffers into the packet until we have the whole
2338 * thing.
2339 */
2340
2341 static void
2342 esh_read_snap_ring(sc, consumer, error)
2343 struct esh_softc *sc;
2344 u_int16_t consumer;
2345 int error;
2346 {
2347 struct ifnet *ifp = &sc->sc_if;
2348 struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
2349 int start_consumer = recv->ec_consumer;
2350 u_int16_t control;
2351
2352 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2353 return;
2354
2355 if (error)
2356 recv->ec_error = error;
2357
2358 esh_dma_sync(sc, recv->ec_descr,
2359 start_consumer, consumer,
2360 RR_SNAP_RECV_RING_SIZE,
2361 sizeof(struct rr_descr), 0,
2362 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2363
2364 while (recv->ec_consumer != consumer) {
2365 u_int16_t offset = recv->ec_consumer;
2366 struct mbuf *m;
2367
2368 m = recv->ec_m[offset];
2369 m->m_len = recv->ec_descr[offset].rd_length;
2370 control = recv->ec_descr[offset].rd_control;
2371 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, m->m_len,
2372 BUS_DMASYNC_POSTREAD);
2373 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma[offset]);
2374
2375 #ifdef ESH_PRINTF
2376 printf("esh_read_snap_ring: offset %x addr %p len %x flags %x\n",
2377 offset, mtod(m, void *), m->m_len, control);
2378 #endif
2379 if (control & RR_CT_PACKET_START || !recv->ec_cur_mbuf) {
2380 if (recv->ec_cur_pkt) {
2381 m_freem(recv->ec_cur_pkt);
2382 recv->ec_cur_pkt = NULL;
2383 printf("%s: possible skipped packet!\n",
2384 sc->sc_dev.dv_xname);
2385 }
2386 recv->ec_cur_pkt = recv->ec_cur_mbuf = m;
2387 /* allocated buffers all have pkthdrs... */
2388 m->m_pkthdr.rcvif = ifp;
2389 m->m_pkthdr.len = m->m_len;
2390 } else {
2391 if (!recv->ec_cur_pkt)
2392 panic("esh_read_snap_ring: no cur_pkt");
2393
2394 recv->ec_cur_mbuf->m_next = m;
2395 recv->ec_cur_mbuf = m;
2396 recv->ec_cur_pkt->m_pkthdr.len += m->m_len;
2397 }
2398
2399 recv->ec_m[offset] = NULL;
2400 recv->ec_descr[offset].rd_length = 0;
2401 recv->ec_descr[offset].rd_buffer_addr = 0;
2402
2403 /* Note that we can START and END on the same buffer */
2404
2405 if (control & RR_CT_PACKET_END) { /* XXX: RR2_ matches */
2406 m = recv->ec_cur_pkt;
2407 if (!error && !recv->ec_error) {
2408 /*
2409 * We have a complete packet, send it up
2410 * the stack...
2411 */
2412 ifp->if_ipackets++;
2413
2414 #if NBPFILTER > 0
2415 /*
2416 * Check if there's a BPF listener on this
2417 * interface. If so, hand off the raw packet
2418 * to BPF.
2419 */
2420 if (ifp->if_bpf) {
2421 /*
2422 * Incoming packets start with the FP
2423 * data, so no alignment problems
2424 * here...
2425 */
2426 bpf_mtap(ifp->if_bpf, m);
2427 }
2428 #endif
2429 if ((ifp->if_flags & IFF_RUNNING) == 0) {
2430 m_freem(m);
2431 } else {
2432 m = m_pullup(m,
2433 sizeof(struct hippi_header));
2434 (*ifp->if_input)(ifp, m);
2435 }
2436 } else {
2437 ifp->if_ierrors++;
2438 recv->ec_error = 0;
2439 m_freem(m);
2440 }
2441 recv->ec_cur_pkt = recv->ec_cur_mbuf = NULL;
2442 }
2443
2444 recv->ec_descr[offset].rd_control = 0;
2445 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2446 }
2447
2448 esh_dma_sync(sc, recv->ec_descr,
2449 start_consumer, consumer,
2450 RR_SNAP_RECV_RING_SIZE,
2451 sizeof(struct rr_descr), 0,
2452 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2453
2454 esh_fill_snap_ring(sc);
2455 }
2456
2457
2458 /*
2459 * Add the SNAP (IEEE 802) receive ring to the NIC. It is possible
2460 * that we are doing this after resetting the card, in which case
2461 * the structures have already been filled in and we may need to
2462 * resume sending data.
2463 */
2464
2465 static void
2466 esh_init_snap_ring(sc)
2467 struct esh_softc *sc;
2468 {
2469 struct rr_ring_ctl *ring = sc->sc_recv_ring_table + HIPPI_ULP_802;
2470
2471 if ((sc->sc_flags & ESH_FL_CLOSING_SNAP) != 0) {
2472 printf("%s: can't reopen SNAP ring until ring disable is completed\n", sc->sc_dev.dv_xname);
2473 return;
2474 }
2475
2476 if (ring->rr_entry_size == 0) {
2477 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2478 (caddr_t) ring - (caddr_t) sc->sc_dma_addr,
2479 sizeof(*ring),
2480 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2481
2482 ring->rr_ring_addr = sc->sc_snap_recv_ring_dma;
2483 ring->rr_free_bufs = RR_SNAP_RECV_RING_SIZE / 4;
2484 ring->rr_entries = RR_SNAP_RECV_RING_SIZE;
2485 ring->rr_entry_size = sizeof(struct rr_descr);
2486 ring->rr_prod_index = 0;
2487 sc->sc_snap_recv.ec_producer = 0;
2488 sc->sc_snap_recv.ec_consumer = 0;
2489 ring->rr_mode = RR_RR_IP;
2490
2491 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2492 (caddr_t) ring - (caddr_t) sc->sc_dma_addr,
2493 sizeof(ring),
2494 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2495 esh_send_cmd(sc, RR_CC_ENABLE_RING, HIPPI_ULP_802,
2496 sc->sc_snap_recv.ec_producer);
2497 } else {
2498 printf("%s: snap receive ring already initialized!\n",
2499 sc->sc_dev.dv_xname);
2500 }
2501 }
2502
2503 static void
2504 esh_close_snap_ring(sc)
2505 struct esh_softc *sc;
2506 {
2507 #ifdef ESH_PRINTF
2508 printf("esh_close_snap_ring: starting\n");
2509 #endif
2510
2511 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2512 return;
2513
2514 sc->sc_flags |= ESH_FL_CLOSING_SNAP;
2515 esh_send_cmd(sc, RR_CC_DISABLE_RING, HIPPI_ULP_802, 0);
2516
2517 /* Disable event will trigger the rest of the cleanup. */
2518 }
2519
2520 /*
2521 * Fill in the snap ring with more mbuf buffers so that we can
2522 * receive traffic.
2523 */
2524
/*
 * Fill in the snap ring with more mbuf buffers so that we can
 * receive traffic.  Allocates a cluster mbuf per free slot, DMA-loads
 * it, and writes the descriptor; stops early on allocation or mapping
 * failure.  Finally tells the NIC about the new producer index.
 */
static void
esh_fill_snap_ring(sc)
	struct esh_softc *sc;
{
	struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
	int start_producer = recv->ec_producer;
	int error;

	/* Take ownership of the descriptors we may rewrite. */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Leave one empty slot so producer == consumer means "empty". */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;
		struct mbuf *m, *m0;

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (!m)
			break;
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			MFREE(m, m0);
			break;
		}

		error = bus_dmamap_load(sc->sc_dmat, recv->ec_dma[offset],
					mtod(m, void *), MCLBYTES,
					NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: esh_fill_recv_ring: bus_dmamap_load "
			       "failed\toffset %x, error code %d\n",
			       sc->sc_dev.dv_xname, offset, error);
			MFREE(m, m0);
			break;
		}

		/*
		 * In this implementation, we should only see one segment
		 * per DMA.
		 */

		assert(recv->ec_dma[offset]->dm_nsegs == 1);

		/*
		 * Load into the descriptors.
		 */

		/* Version 1 firmware wants the ULP in each descriptor. */
		recv->ec_descr[offset].rd_ring =
			(sc->sc_version == 1) ? HIPPI_ULP_802 : 0;
		recv->ec_descr[offset].rd_buffer_addr =
			recv->ec_dma[offset]->dm_segs->ds_addr;
		recv->ec_descr[offset].rd_length =
			recv->ec_dma[offset]->dm_segs->ds_len;
		recv->ec_descr[offset].rd_control = 0;

		bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, MCLBYTES,
				BUS_DMASYNC_PREREAD);

		recv->ec_m[offset] = m;

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

	/* Hand the rewritten descriptors back to the device. */
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Publish the new producer (command on v1, register later). */
	if (sc->sc_version == 1)
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, HIPPI_ULP_802,
			     recv->ec_producer);
	else
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_SNAP_RECV_PRODUCER, recv->ec_producer);
}
2603
2604 static void
2605 esh_init_fp_rings(sc)
2606 struct esh_softc *sc;
2607 {
2608 struct esh_fp_ring_ctl *recv;
2609 struct rr_ring_ctl *ring_ctl;
2610 int ulp;
2611
2612 for (ulp = 0; ulp < RR_ULP_COUNT; ulp++) {
2613 ring_ctl = &sc->sc_recv_ring_table[ulp];
2614 recv = sc->sc_fp_recv[ulp];
2615
2616 if (recv == NULL)
2617 continue;
2618
2619 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
2620 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
2621 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
2622 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
2623 ring_ctl->rr_prod_index = 0;
2624 ring_ctl->rr_mode = RR_RR_CHARACTER;
2625 recv->ec_producer = 0;
2626 recv->ec_consumer = 0;
2627 recv->ec_index = -1;
2628
2629 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
2630 }
2631 }
2632
2633 static void
2634 esh_read_fp_ring(sc, consumer, error, ulp)
2635 struct esh_softc *sc;
2636 u_int16_t consumer;
2637 int error;
2638 int ulp;
2639 {
2640 struct esh_fp_ring_ctl *recv = sc->sc_fp_recv[ulp];
2641 int start_consumer = recv->ec_consumer;
2642 u_int16_t control;
2643
2644 #ifdef ESH_PRINTF
2645 printf("esh_read_fp_ring: ulp %d, consumer %d, producer %d, old consumer %d\n",
2646 recv->ec_ulp, consumer, recv->ec_producer, recv->ec_consumer);
2647 #endif
2648 if ((sc->sc_flags & ESH_FL_FP_RING_UP) == 0)
2649 return;
2650
2651 if (error != 0)
2652 recv->ec_error = error;
2653
2654 esh_dma_sync(sc, recv->ec_descr,
2655 start_consumer, consumer,
2656 RR_FP_RECV_RING_SIZE,
2657 sizeof(struct rr_descr), 0,
2658 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2659
2660 while (recv->ec_consumer != consumer) {
2661 u_int16_t offset = recv->ec_consumer;
2662
2663 control = recv->ec_descr[offset].rd_control;
2664
2665 if (control & RR_CT_PACKET_START) {
2666 if (recv->ec_read_len) {
2667 recv->ec_error = 0;
2668 printf("%s: ulp %d: possible skipped FP packet!\n",
2669 sc->sc_dev.dv_xname, recv->ec_ulp);
2670 }
2671 recv->ec_seen_end = 0;
2672 recv->ec_read_len = 0;
2673 }
2674 if (recv->ec_seen_end == 0)
2675 recv->ec_read_len += recv->ec_descr[offset].rd_length;
2676
2677 #if NOT_LAME
2678 recv->ec_descr[offset].rd_length = 0;
2679 recv->ec_descr[offset].rd_buffer_addr = 0;
2680 #endif
2681
2682 #ifdef ESH_PRINTF
2683 printf("esh_read_fp_ring: offset %d addr %d len %d flags %x, total %d\n",
2684 offset, recv->ec_descr[offset].rd_buffer_addr,
2685 recv->ec_descr[offset].rd_length, control, recv->ec_read_len);
2686 #endif
2687 /* Note that we can START and END on the same buffer */
2688
2689 if ((control & RR_CT_PACKET_END) == RR_CT_PACKET_END) {
2690 if (recv->ec_dmainfo[offset] != NULL) {
2691 struct esh_dmainfo *di =
2692 recv->ec_dmainfo[offset];
2693
2694 recv->ec_dmainfo[offset] = NULL;
2695 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
2696 0, recv->ec_read_len,
2697 BUS_DMASYNC_POSTREAD);
2698 bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
2699
2700 if (!error && !recv->ec_error) {
2701 /*
2702 * XXX: we oughta do this right, with full
2703 * BPF support and the rest...
2704 */
2705 if (di->ed_buf != NULL) {
2706 di->ed_buf->b_resid =
2707 di->ed_buf->b_bcount -
2708 recv->ec_read_len;
2709 } else {
2710 di->ed_read_len =
2711 recv->ec_read_len;
2712 }
2713 } else {
2714 if (di->ed_buf != NULL) {
2715 di->ed_buf->b_resid =
2716 di->ed_buf->b_bcount;
2717 di->ed_buf->b_error = EIO;
2718 di->ed_buf->b_flags |= B_ERROR;
2719 } else {
2720 di->ed_error = EIO;
2721 recv->ec_error = 0;
2722 }
2723 }
2724
2725 #ifdef ESH_PRINTF
2726 printf("esh_read_fp_ring: ulp %d, read %d, resid %ld\n",
2727 recv->ec_ulp, recv->ec_read_len, (di->ed_buf ? di->ed_buf->b_resid : di->ed_read_len));
2728 #endif
2729 di->ed_flags &=
2730 ~(ESH_DI_BUSY | ESH_DI_READING);
2731 if (di->ed_buf != NULL)
2732 biodone(di->ed_buf);
2733 else
2734 wakeup((void *) di);
2735 recv->ec_read_len = 0;
2736 } else {
2737 #ifdef ESH_PRINTF
2738 printf("esh_read_fp_ring: ulp %d, seen end at %d\n",
2739 recv->ec_ulp, offset);
2740 #endif
2741 recv->ec_seen_end = 1;
2742 }
2743 }
2744
2745 #if NOT_LAME
2746 recv->ec_descr[offset].rd_control = 0;
2747 #endif
2748 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2749 }
2750
2751 esh_dma_sync(sc, recv->ec_descr,
2752 start_consumer, consumer,
2753 RR_SNAP_RECV_RING_SIZE,
2754 sizeof(struct rr_descr), 0,
2755 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2756
2757 esh_fill_fp_ring(sc, recv);
2758 }
2759
2760
/*
 * Load queued FP read requests into the FP receive ring.  Only one
 * reader's dmainfo occupies the ring at a time (see esh_flush_fp_ring);
 * each DMA segment of the current dmainfo gets its own descriptor,
 * marked PACKET_START on the first and PACKET_END on the last.
 * Finally the new producer index is published to the NIC -- via
 * command on version-1 firmware, or by packing four rings' producer
 * bytes into one 32-bit register write on later firmware.
 *
 * NOTE(review): the esh_dma_sync calls here use RR_SNAP_RECV_RING_SIZE
 * although this is an FP ring; this is only correct if
 * RR_FP_RECV_RING_SIZE has the same value -- confirm against the
 * header.
 */
static void
esh_fill_fp_ring(sc, recv)
	struct esh_softc *sc;
	struct esh_fp_ring_ctl *recv;
{
	struct esh_dmainfo *di = recv->ec_cur_dmainfo;
	int start_producer = recv->ec_producer;

#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, di %p, producer %d\n",
	       recv->ec_ulp, di, start_producer);
#endif

	/* Take ownership of the descriptors we may rewrite. */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Leave one empty slot so producer == consumer means "empty". */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;

		if (di == NULL) {
			/*
			 * Must allow only one reader at a time; see
			 * esh_flush_fp_ring().
			 */

			if (offset != start_producer)
				goto fp_fill_done;

			/* Dequeue the next pending read, if any. */
			di = TAILQ_FIRST(&recv->ec_queue);
			if (di == NULL)
				goto fp_fill_done;
			TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = di;
			di->ed_flags |= ESH_DI_READING;
#ifdef ESH_PRINTF
			printf("\toffset %d nsegs %d\n",
			       recv->ec_offset, di->ed_dma->dm_nsegs);
#endif
		}

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring = 0;
		recv->ec_descr[offset].rd_buffer_addr =
			di->ed_dma->dm_segs[recv->ec_offset].ds_addr;
		recv->ec_descr[offset].rd_length =
			di->ed_dma->dm_segs[recv->ec_offset].ds_len;
		recv->ec_descr[offset].rd_control = 0;
		recv->ec_dmainfo[offset] = NULL;

		if (recv->ec_offset == 0) {
			/* Start of the dmamap... */
			recv->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		assert(recv->ec_offset < di->ed_dma->dm_nsegs);

		recv->ec_offset++;
		if (recv->ec_offset == di->ed_dma->dm_nsegs) {
			/* Last segment:  mark the end and stash the
			 * dmainfo so the read path can complete it. */
			recv->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
			recv->ec_dmainfo[offset] = di;
			di = NULL;
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = NULL;
		}

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

fp_fill_done:
	/* Hand the rewritten descriptors back to the device. */
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);


	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, recv->ec_ulp,
			     recv->ec_producer);
	} else {
		/*
		 * Later firmware packs four rings' producer indices into
		 * one 32-bit register; rebuild the whole word from our
		 * per-ring state rather than read-modify-write.
		 */
		union {
			u_int32_t producer;
			u_int8_t indices[4];
		} v;
		int which;
		int i;
		struct esh_fp_ring_ctl *r;

		which = (recv->ec_index / 4) * 4;
#if BAD_PRODUCER
		v.producer = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					      RR_RECVS_PRODUCER + which);
		NTOHL(v.producer);
#endif
		for (i = 0; i < 4; i++) {
			r = sc->sc_fp_recv_index[i + which];
			if (r != NULL)
				v.indices[i] = r->ec_producer;
			else
				v.indices[i] = 0;
		}
#ifdef ESH_PRINTF
		printf("esh_fill_fp_ring:  ulp %d, updating producer %d:  %.8x\n",
		       recv->ec_ulp, which, v.producer);
#endif
		HTONL(v.producer);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_RECVS_PRODUCER + which, v.producer);
	}
#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, final producer %d\n",
	       recv->ec_ulp, recv->ec_producer);
#endif
}
2883
2884 /*
2885 * When a read is interrupted, we need to flush the buffers out of
2886 * the ring; otherwise, a driver error could lock a process up,
2887 * with no way to exit.
2888 */
2889
2890 static void
2891 esh_flush_fp_ring(sc, recv, di)
2892 struct esh_softc *sc;
2893 struct esh_fp_ring_ctl *recv;
2894 struct esh_dmainfo *di;
2895 {
2896 int error = 0;
2897
2898 /*
2899 * If the read request hasn't yet made it to the top of the queue,
2900 * just remove it from the queue, and return.
2901 */
2902
2903 if ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING) {
2904 TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
2905 return;
2906 }
2907
2908 #ifdef ESH_PRINTF
2909 printf("esh_flush_fp_ring: di->ed_flags %x, ulp %d, producer %x\n",
2910 di->ed_flags, recv->ec_ulp, recv->ec_producer);
2911 #endif
2912
2913 /* Now we gotta get tough. Issue a discard packet command */
2914
2915 esh_send_cmd(sc, RR_CC_DISCARD_PKT, recv->ec_ulp,
2916 recv->ec_producer - 1);
2917
2918 /* Wait for it to finish */
2919
2920 while ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING &&
2921 error == 0) {
2922 error = tsleep((void *) &di->ed_flags, PRIBIO,
2923 "esh_flush_fp_ring", hz);
2924 printf("esh_flush_fp_ring: di->ed_flags %x, error %d\n",
2925 di->ed_flags, error);
2926 /*
2927 * What do I do if this times out or gets interrupted?
2928 * Reset the card? I could get an interrupt before
2929 * giving it a chance to check. Perhaps I oughta wait
2930 * awhile? What about not giving the user a chance
2931 * to interrupt, and just expecting a quick answer?
2932 * That way I could reset the card if it doesn't
2933 * come back right away!
2934 */
2935 if (error != 0) {
2936 eshreset(sc);
2937 break;
2938 }
2939 }
2940
2941 /* XXX: Do we need to clear out the dmainfo pointers */
2942 }
2943
2944
/*
 * Interface ioctl handler.  Serializes against in-progress EEPROM
 * writes, brings the interface (and SNAP ring) up on SIOCSIFADDR /
 * SIOCSIFFLAGS transitions, tears the SNAP ring down when the
 * interface is marked down, and forwards driver-specific requests
 * (SIOCSDRVSPEC) to esh_generic_ioctl().  Returns 0 or an errno.
 */
int
eshioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int error = 0;
	struct esh_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifdrv *ifd = (struct ifdrv *) data;
	u_long len;
	int s;

	s = splnet();

	/* Don't touch the device while the EEPROM is being rewritten. */
	while (sc->sc_flags & ESH_FL_EEPROM_BUSY) {
		error = tsleep(&sc->sc_flags, PCATCH | PRIBIO,
		    "esheeprom", 0);
		if (error != 0)
			goto ioctl_done;
	}

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* First address assignment brings the hardware up. */
		if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
			eshinit(sc);
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				error = EIO;
				goto ioctl_done;
			}
		}

		/*
		 * Runcode is up but the SNAP ring isn't:  wait out any
		 * pending ring-disable, then (re)enable the ring.
		 */
		if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP))
		    == ESH_FL_RUNCODE_UP) {
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}
			esh_init_snap_ring(sc);
		}

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			/* The driver doesn't really care about IP addresses */
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */

			ifp->if_flags &= ~IFF_RUNNING;
			esh_close_snap_ring(sc);
			/* Wait for the ring-disable event to complete. */
			while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
				error = tsleep((void *) &sc->sc_snap_recv,
					       PRIBIO, "esh_closing_fp_ring",
					       hz);
				if (error != 0)
					goto ioctl_done;
			}

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {

			/* Marked up but not running:  bring it up. */
			if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
				eshinit(sc);
				if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
					error = EIO;
					goto ioctl_done;
				}
			}

			if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) == ESH_FL_RUNCODE_UP) {
				while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
					error = tsleep((void *) &sc->sc_snap_recv, PRIBIO, "esh_closing_fp_ring", hz);
					if (error != 0)
						goto ioctl_done;
				}
				esh_init_snap_ring(sc);
			}
		}
		break;

	case SIOCSDRVSPEC: /* Driver-specific configuration calls */
		cmd = ifd->ifd_cmd;
		len = ifd->ifd_len;
		data = ifd->ifd_data;

		esh_generic_ioctl(sc, cmd, data, len, NULL);
		break;

	default:
		error = EINVAL;
		break;
	}

ioctl_done:
	splx(s);
	return (error);
}
3059
3060
/*
 * Driver-specific ioctl backend shared by the network interface and
 * (presumably) the character-device entry point.  Handles tuning
 * get/set, statistics, EEPROM read/write, and card reset.  `l' is the
 * calling LWP for privilege checks, or NULL when invoked from a path
 * that has already been checked.  Returns an errno.
 */
static int
esh_generic_ioctl(struct esh_softc *sc, u_long cmd, caddr_t data,
		  u_long len, struct lwp *l)
{
	struct ifnet *ifp = &sc->sc_if;
	struct rr_eeprom rr_eeprom;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t address;
	u_int32_t value;
	u_int32_t offset;
	u_int32_t length;
	int error = 0;
	int i;

	/*
	 * If we have a LWP pointer, check to make sure that the
	 * user is privileged before performing any destruction operations.
	 */

	if (l != NULL) {
		switch (cmd) {
		/* Read-only requests need no privilege. */
		case EIOCGTUNE:
		case EIOCGEEPROM:
		case EIOCGSTATS:
			break;

		default:
			error = kauth_authorize_generic(l->l_cred,
			    KAUTH_GENERIC_ISSUSER, &l->l_acflag);
			if (error)
				return (error);
		}
	}

	switch (cmd) {
	case EIOCGTUNE:
		/* Copy the current tuning parameters out to the user. */
		if (len != sizeof(struct rr_tuning))
			error = EMSGSIZE;
		else {
			error = copyout((caddr_t) &sc->sc_tune, data,
					sizeof(struct rr_tuning));
		}
		break;

	case EIOCSTUNE:
		/* Tuning may only change while the interface is down. */
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (len != sizeof(struct rr_tuning)) {
				error = EMSGSIZE;
			} else {
				error = copyin(data, (caddr_t) &sc->sc_tune,
					       sizeof(struct rr_tuning));
			}
		} else {
			error = EBUSY;
		}
		break;

	case EIOCGSTATS:
		if (len != sizeof(struct rr_stats))
			error = EMSGSIZE;
		else
			error = copyout((caddr_t) &sc->sc_gen_info->ri_stats,
					data, sizeof(struct rr_stats));
		break;

	case EIOCGEEPROM:
	case EIOCSEEPROM:
		/* EEPROM access requires the interface to be down. */
		if ((ifp->if_flags & IFF_UP) != 0) {
			error = EBUSY;
			break;
		}

		if (len != sizeof(struct rr_eeprom)) {
			error = EMSGSIZE;
			break;
		}

		error = copyin(data, (caddr_t) &rr_eeprom, sizeof(rr_eeprom));
		if (error != 0)
			break;

		offset = rr_eeprom.ifr_offset;
		length = rr_eeprom.ifr_length;

		/* Validate the requested window:  in range and
		 * word-aligned. */
		if (length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFBIG;
			break;
		}

		if (offset + length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFAULT;
			break;
		}

		if (offset % 4 || length % 4) {
			error = EIO;
			break;
		}

		/* Halt the processor (preserve NO_SWAP, if set) */

		misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
		bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
				  (misc_host_ctl & RR_MH_NO_SWAP) |
				  RR_MH_HALT_PROC);

		/* Make the EEPROM accessible */

		misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
		value = misc_local_ctl &
			~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | RR_LC_PARITY_ON);
		if (cmd == EIOCSEEPROM)   /* make writable! */
			value |= RR_LC_WRITE_PROM;
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, value);

		/* Block other device access until the write completes. */
		if (cmd == EIOCSEEPROM) {
			printf("%s:  writing EEPROM\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_EEPROM_BUSY;
		}

		/* Do that EEPROM voodoo that you do so well... */

		/* One valid 32-bit word per RR_EE_WORD_LEN of address
		 * space; transfer one word per user-buffer word. */
		address = offset * RR_EE_BYTE_LEN;
		for (i = 0; i < length; i += 4) {
			if (cmd == EIOCGEEPROM) {
				value = esh_read_eeprom(sc, address);
				address += RR_EE_WORD_LEN;
				if (copyout(&value,
					    (caddr_t) rr_eeprom.ifr_buffer + i,
					    sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
			} else {
				if (copyin((caddr_t) rr_eeprom.ifr_buffer + i,
					   &value, sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
				if (esh_write_eeprom(sc, address,
						     value) != 0) {
					error = EIO;
					break;
				}

				/*
				 * Have to give up control now and
				 * then, so sleep for a clock tick.
				 * Might be good to figure out how
				 * long a tick is, so that we could
				 * intelligently chose the frequency
				 * of these pauses.
				 */

				if (i % 40 == 0) {
					tsleep(&sc->sc_flags,
					       PRIBIO, "eshweeprom", 1);
				}

				address += RR_EE_WORD_LEN;
			}
		}

		/* Restore the register state and wake any waiters. */
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
		if (cmd == EIOCSEEPROM) {
			sc->sc_flags &= ~ESH_FL_EEPROM_BUSY;
			wakeup(&sc->sc_flags);
			printf("%s:  done writing EEPROM\n",
			       sc->sc_dev.dv_xname);
		}
		break;

	case EIOCRESET:
		eshreset(sc);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
3247
3248
/*
 * Hard-reset the interface:  quiesce the NIC, then bring it back up
 * from scratch.  Runs at splnet to keep interrupts out of the
 * stop/init transition.
 */
void
eshreset(sc)
	struct esh_softc *sc;
{
	int spl;

	spl = splnet();
	eshstop(sc);
	eshinit(sc);
	splx(spl);
}
3260
3261 /*
3262 * The NIC expects a watchdog command every 10 seconds. If it doesn't
3263 * get the watchdog, it figures the host is dead and stops. When it does
3264 * get the command, it'll generate a watchdog event to let the host know
3265 * that it is still alive. We watch for this.
3266 */
3267
3268 void
3269 eshwatchdog(ifp)
3270 struct ifnet *ifp;
3271 {
3272 struct esh_softc *sc = ifp->if_softc;
3273
3274 if (!sc->sc_watchdog) {
3275 printf("%s: watchdog timer expired. "
3276 "Should reset interface!\n",
3277 sc->sc_dev.dv_xname);
3278 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3279 eshstatus(sc);
3280 #if 0
3281 eshstop(sc); /* DON'T DO THIS, it'll clear data we
3282 could use to debug it! */
3283 #endif
3284 } else {
3285 sc->sc_watchdog = 0;
3286
3287 esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
3288 ifp->if_timer = 5;
3289 }
3290 }
3291
3292
3293 /*
3294 * Stop the NIC and throw away packets that have started to be sent,
3295 * but didn't make it all the way. Re-adjust the various queue
3296 * pointers to account for this.
3297 */
3298
/*
 * Stop the NIC and throw away packets that have started to be sent,
 * but didn't make it all the way.  Re-adjust the various queue
 * pointers to account for this.  Halts the processor, drains the
 * SNAP and FP receive rings, fails the in-flight transmit (if any),
 * and wakes every process that may be sleeping on driver state.
 */
void
eshstop(sc)
	struct esh_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	int i;

	if (!(sc->sc_flags & ESH_FL_INITIALIZED))
		return;

	/* Just shut it all down.  This isn't pretty, but it works */

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the processor (preserving NO_SWAP) and kill all state. */
	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
	sc->sc_flags = 0;
	ifp->if_timer = 0;  /* turn off watchdog timer */

	/* Free every outstanding SNAP receive buffer. */
	while (sc->sc_snap_recv.ec_consumer
	       != sc->sc_snap_recv.ec_producer) {
		struct mbuf *m0;
		u_int16_t offset = sc->sc_snap_recv.ec_consumer;

		bus_dmamap_unload(sc->sc_dmat,
				  sc->sc_snap_recv.ec_dma[offset]);
		MFREE(sc->sc_snap_recv.ec_m[offset], m0);
		sc->sc_snap_recv.ec_m[offset] = NULL;
		sc->sc_snap_recv.ec_consumer =
			NEXT_RECV(sc->sc_snap_recv.ec_consumer);
		wakeup((void *) &sc->sc_snap_recv);
	}

	/* Handle FP rings */

	for (i = 0; i < RR_ULP_COUNT; i++) {
		struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[i];
		struct esh_dmainfo *di = NULL;

		if (ring == NULL)
			continue;

		/* Get rid of outstanding buffers */

		esh_dma_sync(sc, ring->ec_descr,
			     ring->ec_consumer, ring->ec_producer,
			     RR_FP_RECV_RING_SIZE, sizeof(struct rr_descr), 0,
			     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Find the dmainfo still parked in the ring (at most one
		 * reader at a time; see esh_fill_fp_ring). */
		while (ring->ec_consumer != ring->ec_producer) {
			di = ring->ec_dmainfo[ring->ec_consumer];
			if (di != NULL)
				break;
			ring->ec_consumer = NEXT_RECV(ring->ec_consumer);
		}
		if (di == NULL)
			di = ring->ec_cur_dmainfo;

		/* Fail the reader with EIO and wake both wait channels. */
		if (di != NULL) {
			bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
			di->ed_error = EIO;
			di->ed_flags = 0;
			wakeup((void *) &di->ed_flags);	/* packet discard */
			wakeup((void *) di);		/* wait on read */
		}
		wakeup((void *) &ring->ec_ulp);		/* ring create */
		wakeup((void *) &ring->ec_index);	/* ring disable */
	}

	/* XXX:  doesn't clear bufs being sent */

	/* Fail whatever transmit was in flight:  mbuf, buf, or FP write. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_send.ec_dma);
	if (sc->sc_send.ec_cur_mbuf) {
		m_freem(sc->sc_send.ec_cur_mbuf);
	} else if (sc->sc_send.ec_cur_buf) {
		struct buf *bp = sc->sc_send.ec_cur_buf;

		bp->b_resid = bp->b_bcount;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
	} else if (sc->sc_send.ec_cur_dmainfo) {
		struct esh_dmainfo *di = sc->sc_send.ec_cur_dmainfo;

		di->ed_flags &= ~ESH_DI_BUSY;
		di->ed_error = EIO;
		wakeup((void *) di);
	}
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;
	sc->sc_send.ec_cur_dmainfo = NULL;

	/*
	 * Clear out the index values, since they'll be useless
	 * when we restart.
	 */

	memset(sc->sc_fp_recv_index, 0,
	       sizeof(struct esh_fp_ring_ctl *) * RR_MAX_RECV_RING);

	/* Be sure to wake up any other processes waiting on driver action. */

	wakeup(sc);		/* Wait on initialization */
	wakeup(&sc->sc_flags);	/* Wait on EEPROM write */

	/*
	 * XXX:  I have to come up with a way to avoid handling interrupts
	 *       received before this shuts down the card, but processed
	 *       afterwards!
	 */
}
3415
3416 /*
3417 * Read a value from the eeprom. This expects that the NIC has already
3418 * been tweaked to put it into the right state for reading from the
3419 * EEPROM -- the HALT bit is set in the MISC_HOST_CTL register,
3420 * and the FAST_PROM, ADD_SRAM, and PARITY flags have been cleared
3421 * in the MISC_LOCAL_CTL register.
3422 *
3423 * The EEPROM layout is a little weird. There is a valid byte every
3424 * eight bytes. Words are then smeared out over 32 bytes.
3425 * All addresses listed here are the actual starting addresses.
3426 */
3427
3428 static u_int32_t
3429 esh_read_eeprom(sc, addr)
3430 struct esh_softc *sc;
3431 u_int32_t addr;
3432 {
3433 int i;
3434 u_int32_t tmp;
3435 u_int32_t value = 0;
3436
3437 /* If the offset hasn't been added, add it. Otherwise pass through */
3438
3439 if (!(addr & RR_EE_OFFSET))
3440 addr += RR_EE_OFFSET;
3441
3442 for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
3443 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3444 RR_WINDOW_BASE, addr);
3445 tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
3446 RR_WINDOW_DATA);
3447 value = (value << 8) | ((tmp >> 24) & 0xff);
3448 }
3449 return value;
3450 }
3451
3452
3453 /*
3454 * Write a value to the eeprom. Just like esh_read_eeprom, this routine
3455 * expects that the NIC has already been tweaked to put it into the right
3456 * state for reading from the EEPROM. Things are further complicated
3457 * in that we need to read each byte after we write it to ensure that
3458 * the new value has been successfully written. It can take as long
3459 * as 1ms (!) to write a byte.
3460 */
3461
3462 static int
3463 esh_write_eeprom(sc, addr, value)
3464 struct esh_softc *sc;
3465 u_int32_t addr;
3466 u_int32_t value;
3467 {
3468 int i, j;
3469 u_int32_t shifted_value, tmp = 0;
3470
3471 /* If the offset hasn't been added, add it. Otherwise pass through */
3472
3473 if (!(addr & RR_EE_OFFSET))
3474 addr += RR_EE_OFFSET;
3475
3476 for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
3477 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3478 RR_WINDOW_BASE, addr);
3479
3480 /*
3481 * Get the byte out of value, starting with the top, and
3482 * put it into the top byte of the word to write.
3483 */
3484
3485 shifted_value = ((value >> ((3 - i) * 8)) & 0xff) << 24;
3486 bus_space_write_4(sc->sc_iot, sc->sc_ioh, RR_WINDOW_DATA,
3487 shifted_value);
3488 for (j = 0; j < 50; j++) {
3489 tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
3490 RR_WINDOW_DATA);
3491 if (tmp == shifted_value)
3492 break;
3493 delay(500); /* 50us break * 20 = 1ms */
3494 }
3495 if (tmp != shifted_value)
3496 return -1;
3497 }
3498
3499 return 0;
3500 }
3501
3502
3503 /*
3504 * Send a command to the NIC. If there is no room in the command ring,
3505 * panic.
3506 */
3507
3508 static void
3509 esh_send_cmd(sc, cmd, ring, index)
3510 struct esh_softc *sc;
3511 u_int8_t cmd;
3512 u_int8_t ring;
3513 u_int8_t index;
3514 {
3515 union rr_cmd c;
3516
3517 #define NEXT_CMD(i) (((i) + 0x10 - 1) & 0x0f)
3518
3519 c.l = 0;
3520 c.b.rc_code = cmd;
3521 c.b.rc_ring = ring;
3522 c.b.rc_index = index;
3523
3524 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3525 RR_COMMAND_RING + sizeof(c) * sc->sc_cmd_producer,
3526 c.l);
3527
3528 #ifdef ESH_PRINTF
3529 /* avoid annoying messages when possible */
3530 if (cmd != RR_CC_WATCHDOG)
3531 printf("esh_send_cmd: cmd %x ring %d index %d slot %x\n",
3532 cmd, ring, index, sc->sc_cmd_producer);
3533 #endif
3534
3535 sc->sc_cmd_producer = NEXT_CMD(sc->sc_cmd_producer);
3536 }
3537
3538
3539 /*
3540 * Write an address to the device.
3541 * XXX: This belongs in bus-dependent land!
3542 */
3543
3544 static void
3545 esh_write_addr(iot, ioh, addr, value)
3546 bus_space_tag_t iot;
3547 bus_space_handle_t ioh;
3548 bus_addr_t addr;
3549 bus_addr_t value;
3550 {
3551 bus_space_write_4(iot, ioh, addr, 0);
3552 bus_space_write_4(iot, ioh, addr + sizeof(u_int32_t), value);
3553 }
3554
3555
3556 /* Copy the RunCode from EEPROM to SRAM. Ughly. */
3557
3558 static void
3559 esh_reset_runcode(sc)
3560 struct esh_softc *sc;
3561 {
3562 bus_space_tag_t iot = sc->sc_iot;
3563 bus_space_handle_t ioh = sc->sc_ioh;
3564 u_int32_t value;
3565 u_int32_t len;
3566 u_int32_t i;
3567 u_int32_t segments;
3568 u_int32_t ee_addr;
3569 u_int32_t rc_addr;
3570 u_int32_t sram_addr;
3571
3572 /* Zero the SRAM */
3573
3574 for (i = 0; i < sc->sc_sram_size; i += 4) {
3575 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, i);
3576 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, 0);
3577 }
3578
3579 /* Find the address of the segment description section */
3580
3581 rc_addr = esh_read_eeprom(sc, RR_EE_RUNCODE_SEGMENTS);
3582 segments = esh_read_eeprom(sc, rc_addr);
3583
3584 for (i = 0; i < segments; i++) {
3585 rc_addr += RR_EE_WORD_LEN;
3586 sram_addr = esh_read_eeprom(sc, rc_addr);
3587 rc_addr += RR_EE_WORD_LEN;
3588 len = esh_read_eeprom(sc, rc_addr);
3589 rc_addr += RR_EE_WORD_LEN;
3590 ee_addr = esh_read_eeprom(sc, rc_addr);
3591
3592 while (len--) {
3593 value = esh_read_eeprom(sc, ee_addr);
3594 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, sram_addr);
3595 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, value);
3596
3597 ee_addr += RR_EE_WORD_LEN;
3598 sram_addr += 4;
3599 }
3600 }
3601 }
3602
3603
3604 /*
3605 * Perform bus DMA syncing operations on various rings.
3606 * We have to worry about our relative position in the ring,
3607 * and whether the ring has wrapped. All of this code should take
3608 * care of those worries.
3609 */
3610
3611 static void
3612 esh_dma_sync(sc, mem, start, end, entries, size, do_equal, ops)
3613 struct esh_softc *sc;
3614 void *mem;
3615 int start;
3616 int end;
3617 int entries;
3618 int size;
3619 int do_equal;
3620 int ops;
3621 {
3622 int offset = (char *)mem - (char *)sc->sc_dma_addr;
3623
3624 if (start < end) {
3625 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3626 offset + start * size,
3627 (end - start) * size, ops);
3628 } else if (do_equal || start != end) {
3629 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3630 offset,
3631 end * size, ops);
3632 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3633 offset + start * size,
3634 (entries - start) * size, ops);
3635 }
3636 }
3637
3638
3639 static struct esh_dmainfo *
3640 esh_new_dmainfo(sc)
3641 struct esh_softc *sc;
3642 {
3643 struct esh_dmainfo *di;
3644 int s;
3645
3646 s = splnet();
3647
3648 di = TAILQ_FIRST(&sc->sc_dmainfo_freelist);
3649 if (di != NULL) {
3650 TAILQ_REMOVE(&sc->sc_dmainfo_freelist, di, ed_list);
3651 sc->sc_dmainfo_freelist_count--;
3652 splx(s);
3653 return di;
3654 }
3655
3656 /* None sitting around, so build one now... */
3657
3658 di = (struct esh_dmainfo *) malloc(sizeof(*di), M_DEVBUF,
3659 M_WAITOK|M_ZERO);
3660 assert(di != NULL);
3661
3662 if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
3663 ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
3664 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3665 &di->ed_dma)) {
3666 printf("%s: failed dmainfo bus_dmamap_create\n",
3667 sc->sc_dev.dv_xname);
3668 free(di, M_DEVBUF);
3669 di = NULL;
3670 }
3671
3672 splx(s);
3673 return di;
3674 }
3675
3676 static void
3677 esh_free_dmainfo(sc, di)
3678 struct esh_softc *sc;
3679 struct esh_dmainfo *di;
3680 {
3681 int s = splnet();
3682
3683 assert(di != NULL);
3684 di->ed_buf = NULL;
3685 TAILQ_INSERT_TAIL(&sc->sc_dmainfo_freelist, di, ed_list);
3686 sc->sc_dmainfo_freelist_count++;
3687 #ifdef ESH_PRINTF
3688 printf("esh_free_dmainfo: freelist count %d\n", sc->sc_dmainfo_freelist_count);
3689 #endif
3690
3691 splx(s);
3692 }
3693
3694
3695 /* ------------------------- debugging functions --------------------------- */
3696
3697 /*
3698 * Print out status information about the NIC and the driver.
3699 */
3700
3701 static int
3702 eshstatus(sc)
3703 struct esh_softc *sc;
3704 {
3705 bus_space_tag_t iot = sc->sc_iot;
3706 bus_space_handle_t ioh = sc->sc_ioh;
3707 int i;
3708
3709 /* XXX: This looks pathetic, and should be improved! */
3710
3711 printf("%s: status -- fail1 %x fail2 %x\n",
3712 sc->sc_dev.dv_xname,
3713 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL1),
3714 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL2));
3715 printf("\tmisc host ctl %x misc local ctl %x\n",
3716 bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL),
3717 bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL));
3718 printf("\toperating mode %x event producer %x\n",
3719 bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS),
3720 bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER));
3721 printf("\tPC %x max rings %x\n",
3722 bus_space_read_4(iot, ioh, RR_PROC_PC),
3723 bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS));
3724 printf("\tHIPPI tx state %x rx state %x\n",
3725 bus_space_read_4(iot, ioh, RR_TX_STATE),
3726 bus_space_read_4(iot, ioh, RR_RX_STATE));
3727 printf("\tDMA write state %x read state %x\n",
3728 bus_space_read_4(iot, ioh, RR_DMA_WRITE_STATE),
3729 bus_space_read_4(iot, ioh, RR_DMA_READ_STATE));
3730 printf("\tDMA write addr %x%x read addr %x%x\n",
3731 bus_space_read_4(iot, ioh, RR_WRITE_HOST),
3732 bus_space_read_4(iot, ioh, RR_WRITE_HOST + 4),
3733 bus_space_read_4(iot, ioh, RR_READ_HOST),
3734 bus_space_read_4(iot, ioh, RR_READ_HOST + 4));
3735
3736 for (i = 0; i < 64; i++)
3737 if (sc->sc_gen_info->ri_stats.rs_stats[i])
3738 printf("stat %x is %x\n", i * 4,
3739 sc->sc_gen_info->ri_stats.rs_stats[i]);
3740
3741 return 0;
3742 }
3743
3744
3745 #ifdef ESH_PRINTF
3746
3747 /* Check to make sure that the NIC is still running */
3748
3749 static int
3750 esh_check(sc)
3751 struct esh_softc *sc;
3752 {
3753 bus_space_tag_t iot = sc->sc_iot;
3754 bus_space_handle_t ioh = sc->sc_ioh;
3755
3756 if (bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL) & RR_MH_HALT_PROC) {
3757 printf("esh_check: NIC stopped\n");
3758 eshstatus(sc);
3759 return 1;
3760 } else {
3761 return 0;
3762 }
3763 }
3764 #endif
3765
Cache object: ecae8fc0da755bbd5aa4c3329d3c23a7
|