FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/rrunner.c
1 /* $NetBSD: rrunner.c,v 1.44 2003/11/03 03:05:25 ichiro Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code contributed to The NetBSD Foundation by Kevin M. Lahey
8 * of the Numerical Aerospace Simulation Facility, NASA Ames Research
9 * Center.
10 *
11 * Partially based on a HIPPI driver written by Essential Communications
12 * Corporation. Thanks to Jason Thorpe, Matt Jacob, and Fred Templin
13 * for invaluable advice and encouragement!
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the NetBSD
26 * Foundation, Inc. and its contributors.
27 * 4. Neither the name of The NetBSD Foundation nor the names of its
28 * contributors may be used to endorse or promote products derived
29 * from this software without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
32 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
33 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
34 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
35 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
38 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
39 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: rrunner.c,v 1.44 2003/11/03 03:05:25 ichiro Exp $");
46
47 #include "opt_inet.h"
48 #include "opt_ns.h"
49
50 #include "bpfilter.h"
51 #include "esh.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mbuf.h>
56 #include <sys/buf.h>
57 #include <sys/socket.h>
58 #include <sys/ioctl.h>
59 #include <sys/errno.h>
60 #include <sys/syslog.h>
61 #include <sys/select.h>
62 #include <sys/device.h>
63 #include <sys/proc.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66
67 #include <uvm/uvm_extern.h>
68
69 #include <net/if.h>
70 #include <net/if_dl.h>
71 #include <net/route.h>
72
73 #include <net/if_hippi.h>
74 #include <net/if_media.h>
75
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #include <netinet/if_inarp.h>
82 #endif
83
84 #ifdef NS
85 #include <netns/ns.h>
86 #include <netns/ns_if.h>
87 #endif
88
89 #if NBPFILTER > 0
90 #include <net/bpf.h>
91 #include <net/bpfdesc.h>
92 #endif
93
94 #include <machine/cpu.h>
95 #include <machine/bus.h>
96 #include <machine/intr.h>
97
98 #include <dev/ic/rrunnerreg.h>
99 #include <dev/ic/rrunnervar.h>
100
101 /*
102 #define ESH_PRINTF
103 */
104
105 /* Autoconfig definition of driver back-end */
106 extern struct cfdriver esh_cd;
107
108 struct esh_softc *esh_softc_debug[22]; /* for gdb */
109
110 #ifdef DIAGNOSTIC
111 u_int32_t max_write_len;
112 #endif
113
114 /* Network device driver and initialization framework routines */
115
116 void eshinit __P((struct esh_softc *));
117 int eshioctl __P((struct ifnet *, u_long, caddr_t));
118 void eshreset __P((struct esh_softc *));
119 void eshstart __P((struct ifnet *));
120 static int eshstatus __P((struct esh_softc *));
121 void eshstop __P((struct esh_softc *));
122 void eshwatchdog __P((struct ifnet *));
123
124 /* Routines to support FP operation */
125
126 dev_type_open(esh_fpopen);
127 dev_type_close(esh_fpclose);
128 dev_type_read(esh_fpread);
129 dev_type_write(esh_fpwrite);
130 #ifdef MORE_DONE
131 dev_type_mmap(esh_fpmmap);
132 #endif
133 dev_type_strategy(esh_fpstrategy);
134
/*
 * Character-device switch for the Framing Protocol (FP) raw interface.
 * Only open/close/read/write are implemented (plus mmap when MORE_DONE
 * is defined); the remaining slots are the standard no-op stubs.
 */
135 const struct cdevsw esh_cdevsw = {
136 esh_fpopen, esh_fpclose, esh_fpread, esh_fpwrite, nullioctl,
137 nostop, notty, nullpoll,
138 #ifdef MORE_DONE
139 esh_fpmmap,
140 #else
141 nommap,
142 #endif
143 nullkqfilter,
144 };
145
146 /* General routines, not externally visable */
147
148 static struct mbuf *esh_adjust_mbufs __P((struct esh_softc *, struct mbuf *m));
149 static void esh_dma_sync __P((struct esh_softc *, void *,
150 int, int, int, int, int, int));
151 static void esh_fill_snap_ring __P((struct esh_softc *));
152 static void esh_init_snap_ring __P((struct esh_softc *));
153 static void esh_close_snap_ring __P((struct esh_softc *));
154 static void esh_read_snap_ring __P((struct esh_softc *, u_int16_t, int));
155 static void esh_fill_fp_ring __P((struct esh_softc *,
156 struct esh_fp_ring_ctl *));
157 static void esh_flush_fp_ring __P((struct esh_softc *,
158 struct esh_fp_ring_ctl *,
159 struct esh_dmainfo *));
160 static void esh_init_fp_rings __P((struct esh_softc *));
161 static void esh_read_fp_ring __P((struct esh_softc *, u_int16_t, int, int));
162 static void esh_reset_runcode __P((struct esh_softc *));
163 static void esh_send __P((struct esh_softc *));
164 static void esh_send_cmd __P((struct esh_softc *,
165 u_int8_t, u_int8_t, u_int8_t));
166 static u_int32_t esh_read_eeprom __P((struct esh_softc *, u_int32_t));
167 static void esh_write_addr __P((bus_space_tag_t, bus_space_handle_t,
168 bus_addr_t, bus_addr_t));
169 static int esh_write_eeprom __P((struct esh_softc *,
170 u_int32_t, u_int32_t));
171 static void eshstart_cleanup __P((struct esh_softc *, u_int16_t, int));
172
173 static struct esh_dmainfo *esh_new_dmainfo __P((struct esh_softc *));
174 static void esh_free_dmainfo __P((struct esh_softc *, struct esh_dmainfo *));
175 static int esh_generic_ioctl __P((struct esh_softc *, u_long, caddr_t, u_long,
176 struct proc *));
177
178 #ifdef ESH_PRINTF
179 static int esh_check __P((struct esh_softc *));
180 #endif
181
182 #define ESHUNIT(x) ((minor(x) & 0xff00) >> 8)
183 #define ESHULP(x) (minor(x) & 0x00ff)
184
185
186 /*
187 * Back-end attach and configure. Allocate DMA space and initialize
188 * all structures.
189 */
190
191 void
192 eshconfig(sc)
193 struct esh_softc *sc;
194 {
195 struct ifnet *ifp = &sc->sc_if;
196 bus_space_tag_t iot = sc->sc_iot;
197 bus_space_handle_t ioh = sc->sc_ioh;
198 u_int32_t misc_host_ctl;
199 u_int32_t misc_local_ctl;
200 u_int32_t header_format;
201 u_int32_t ula_tmp;
202 bus_size_t size;
203 int rseg;
204 int error;
205 int i;
206
207 esh_softc_debug[sc->sc_dev.dv_unit] = sc;
208 sc->sc_flags = 0;
209
210 TAILQ_INIT(&sc->sc_dmainfo_freelist);
211 sc->sc_dmainfo_freelist_count = 0;
212
213 /*
214 * Allocate and divvy up some host side memory that can hold
215 * data structures that will be DMA'ed over to the NIC
216 */
217
218 sc->sc_dma_size = sizeof(struct rr_gen_info) +
219 sizeof(struct rr_ring_ctl) * RR_ULP_COUNT +
220 sizeof(struct rr_descr) * RR_SEND_RING_SIZE +
221 sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE +
222 sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
223
224 error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size,
225 0, RR_DMA_BOUNDARY, &sc->sc_dmaseg, 1,
226 &rseg, BUS_DMA_NOWAIT);
227 if (error) {
228 aprint_error("%s: couldn't allocate space for host-side"
229 "data structures\n", sc->sc_dev.dv_xname);
230 return;
231 }
232
233 if (rseg > 1) {
234 aprint_error("%s: contiguous memory not available\n",
235 sc->sc_dev.dv_xname);
236 goto bad_dmamem_map;
237 }
238
239 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, rseg,
240 sc->sc_dma_size, &sc->sc_dma_addr,
241 BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
242 if (error) {
243 aprint_error(
244 "%s: couldn't map memory for host-side structures\n",
245 sc->sc_dev.dv_xname);
246 goto bad_dmamem_map;
247 }
248
249 if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size,
250 1, sc->sc_dma_size, RR_DMA_BOUNDARY,
251 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
252 &sc->sc_dma)) {
253 aprint_error("%s: couldn't create DMA map\n",
254 sc->sc_dev.dv_xname);
255 goto bad_dmamap_create;
256 }
257
258 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma, sc->sc_dma_addr,
259 sc->sc_dma_size, NULL, BUS_DMA_NOWAIT)) {
260 aprint_error("%s: couldn't load DMA map\n",
261 sc->sc_dev.dv_xname);
262 goto bad_dmamap_load;
263 }
264
265 memset(sc->sc_dma_addr, 0, sc->sc_dma_size);
266
267 sc->sc_gen_info_dma = sc->sc_dma->dm_segs->ds_addr;
268 sc->sc_gen_info = (struct rr_gen_info *) sc->sc_dma_addr;
269 size = sizeof(struct rr_gen_info);
270
271 sc->sc_recv_ring_table_dma = sc->sc_dma->dm_segs->ds_addr + size;
272 sc->sc_recv_ring_table =
273 (struct rr_ring_ctl *) (sc->sc_dma_addr + size);
274 size += sizeof(struct rr_ring_ctl) * RR_ULP_COUNT;
275
276 sc->sc_send_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
277 sc->sc_send_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
278 sc->sc2_send_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
279 size += sizeof(struct rr_descr) * RR_SEND_RING_SIZE;
280
281 sc->sc_snap_recv_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
282 sc->sc_snap_recv_ring = (struct rr_descr *) (sc->sc_dma_addr + size);
283 sc->sc2_snap_recv_ring = (struct rr2_descr *) (sc->sc_dma_addr + size);
284 size += sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE;
285
286 sc->sc_event_ring_dma = sc->sc_dma->dm_segs->ds_addr + size;
287 sc->sc_event_ring = (struct rr_event *) (sc->sc_dma_addr + size);
288 size += sizeof(struct rr_event) * RR_EVENT_RING_SIZE;
289
290 #ifdef DIAGNOSTIC
291 if (size > sc->sc_dmaseg.ds_len) {
292 aprint_error("%s: bogus size calculation\n",
293 sc->sc_dev.dv_xname);
294 goto bad_other;
295 }
296 #endif
297
298 /*
299 * Allocate DMA maps for transfers. We do this here and now
300 * so we won't have to wait for them in the middle of sending
301 * or receiving something.
302 */
303
304 if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
305 ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
306 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
307 &sc->sc_send.ec_dma)) {
308 aprint_error("%s: failed bus_dmamap_create\n",
309 sc->sc_dev.dv_xname);
310 goto bad_other;
311 }
312 sc->sc_send.ec_offset = 0;
313 sc->sc_send.ec_descr = sc->sc_send_ring;
314 TAILQ_INIT(&sc->sc_send.ec_di_queue);
315 bufq_alloc(&sc->sc_send.ec_buf_queue, BUFQ_FCFS);
316
317 for (i = 0; i < RR_MAX_SNAP_RECV_RING_SIZE; i++)
318 if (bus_dmamap_create(sc->sc_dmat, RR_DMA_MAX, 1, RR_DMA_MAX,
319 RR_DMA_BOUNDARY,
320 BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT,
321 &sc->sc_snap_recv.ec_dma[i])) {
322 aprint_error("%s: failed bus_dmamap_create\n",
323 sc->sc_dev.dv_xname);
324 for (i--; i >= 0; i--)
325 bus_dmamap_destroy(sc->sc_dmat,
326 sc->sc_snap_recv.ec_dma[i]);
327 goto bad_ring_dmamap_create;
328 }
329
330 /*
331 * If this is a coldboot, the NIC RunCode should be operational.
332 * If it is a warmboot, it may or may not be operational.
333 * Just to be sure, we'll stop the RunCode and reset everything.
334 */
335
336 /* Halt the processor (preserve NO_SWAP, if set) */
337
338 misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
339 bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
340 (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
341
342 /* Make the EEPROM readable */
343
344 misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
345 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
346 misc_local_ctl & ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM |
347 RR_LC_PARITY_ON));
348
349 /* Extract interesting information from the EEPROM: */
350
351 header_format = esh_read_eeprom(sc, RR_EE_HEADER_FORMAT);
352 if (header_format != RR_EE_HEADER_FORMAT_MAGIC) {
353 aprint_error("%s: bogus EEPROM header format value %x\n",
354 sc->sc_dev.dv_xname, header_format);
355 goto bad_other;
356 }
357
358 /*
359 * As it is now, the runcode version in the EEPROM doesn't
360 * reflect the actual runcode version number. That is only
361 * available once the runcode starts up. We should probably
362 * change the firmware update code to modify this value,
363 * but Essential itself doesn't do it right now.
364 */
365
366 sc->sc_sram_size = 4 * esh_read_eeprom(sc, RR_EE_SRAM_SIZE);
367 sc->sc_runcode_start = esh_read_eeprom(sc, RR_EE_RUNCODE_START);
368 sc->sc_runcode_version = esh_read_eeprom(sc, RR_EE_RUNCODE_VERSION);
369
370 sc->sc_pci_latency = esh_read_eeprom(sc, RR_EE_PCI_LATENCY);
371 sc->sc_pci_lat_gnt = esh_read_eeprom(sc, RR_EE_PCI_LAT_GNT);
372
373 /* General tuning values */
374
375 sc->sc_tune.rt_mode_and_status =
376 esh_read_eeprom(sc, RR_EE_MODE_AND_STATUS);
377 sc->sc_tune.rt_conn_retry_count =
378 esh_read_eeprom(sc, RR_EE_CONN_RETRY_COUNT);
379 sc->sc_tune.rt_conn_retry_timer =
380 esh_read_eeprom(sc, RR_EE_CONN_RETRY_TIMER);
381 sc->sc_tune.rt_conn_timeout =
382 esh_read_eeprom(sc, RR_EE_CONN_TIMEOUT);
383 sc->sc_tune.rt_interrupt_timer =
384 esh_read_eeprom(sc, RR_EE_INTERRUPT_TIMER);
385 sc->sc_tune.rt_tx_timeout =
386 esh_read_eeprom(sc, RR_EE_TX_TIMEOUT);
387 sc->sc_tune.rt_rx_timeout =
388 esh_read_eeprom(sc, RR_EE_RX_TIMEOUT);
389 sc->sc_tune.rt_stats_timer =
390 esh_read_eeprom(sc, RR_EE_STATS_TIMER);
391 sc->sc_tune.rt_stats_timer = ESH_STATS_TIMER_DEFAULT;
392
393 /* DMA tuning values */
394
395 sc->sc_tune.rt_pci_state =
396 esh_read_eeprom(sc, RR_EE_PCI_STATE);
397 sc->sc_tune.rt_dma_write_state =
398 esh_read_eeprom(sc, RR_EE_DMA_WRITE_STATE);
399 sc->sc_tune.rt_dma_read_state =
400 esh_read_eeprom(sc, RR_EE_DMA_READ_STATE);
401 sc->sc_tune.rt_driver_param =
402 esh_read_eeprom(sc, RR_EE_DRIVER_PARAM);
403
404 /*
405 * Snag the ULA. The first two bytes are reserved.
406 * We don't really use it immediately, but it would be good to
407 * have for building IPv6 addresses, etc.
408 */
409
410 ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_HI);
411 sc->sc_ula[0] = (ula_tmp >> 8) & 0xff;
412 sc->sc_ula[1] = ula_tmp & 0xff;
413
414 ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_LO);
415 sc->sc_ula[2] = (ula_tmp >> 24) & 0xff;
416 sc->sc_ula[3] = (ula_tmp >> 16) & 0xff;
417 sc->sc_ula[4] = (ula_tmp >> 8) & 0xff;
418 sc->sc_ula[5] = ula_tmp & 0xff;
419
420 /* Reset EEPROM readability */
421
422 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
423
424 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
425 ifp->if_softc = sc;
426 ifp->if_start = eshstart;
427 ifp->if_ioctl = eshioctl;
428 ifp->if_watchdog = eshwatchdog;
429 ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_NOARP;
430 IFQ_SET_READY(&ifp->if_snd);
431
432 if_attach(ifp);
433 hippi_ifattach(ifp, sc->sc_ula);
434
435 sc->sc_misaligned_bufs = sc->sc_bad_lens = 0;
436 sc->sc_fp_rings = 0;
437
438 return;
439
440 bad_ring_dmamap_create:
441 bus_dmamap_destroy(sc->sc_dmat, sc->sc_send.ec_dma);
442 bad_other:
443 bus_dmamap_unload(sc->sc_dmat, sc->sc_dma);
444 bad_dmamap_load:
445 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma);
446 bad_dmamap_create:
447 bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_addr, sc->sc_dma_size);
448 bad_dmamem_map:
449 bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, rseg);
450 return;
451 }
452
453
454 /*
455 * Bring device up.
456 *
457 * Assume that the on-board processor has already been stopped,
458 * the rings have been cleared of valid buffers, and everything
459 * is pretty much as it was when the system started.
460 *
461 * Stop the processor (just for good measure), clear the SRAM,
462 * reload the boot code, and start it all up again, with the PC
463 * pointing at the boot code. Once the boot code has had a chance
464 * to come up, adjust all of the appropriate parameters, and send
465 * the 'start firmware' command.
466 *
467 * The NIC won't actually be up until it gets an interrupt with an
468 * event indicating the RunCode is up.
469 */
470
471 void
472 eshinit(sc)
473 struct esh_softc *sc;
474 {
475 struct ifnet *ifp = &sc->sc_if;
476 bus_space_tag_t iot = sc->sc_iot;
477 bus_space_handle_t ioh = sc->sc_ioh;
478 struct rr_ring_ctl *ring;
479 u_int32_t misc_host_ctl;
480 u_int32_t misc_local_ctl;
481 u_int32_t value;
482 u_int32_t mode;
483
484 /* If we're already doing an init, don't try again simultaniously */
485
486 if ((sc->sc_flags & ESH_FL_INITIALIZING) != 0)
487 return;
488 sc->sc_flags = ESH_FL_INITIALIZING;
489
490 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
491 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
492
493 /* Halt the processor (preserve NO_SWAP, if set) */
494
495 misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
496 bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
497 (misc_host_ctl & RR_MH_NO_SWAP)
498 | RR_MH_HALT_PROC | RR_MH_CLEAR_INT);
499
500 /* Make the EEPROM readable */
501
502 misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
503 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
504 misc_local_ctl & ~(RR_LC_FAST_PROM |
505 RR_LC_ADD_SRAM |
506 RR_LC_PARITY_ON));
507
508 /* Reset DMA */
509
510 bus_space_write_4(iot, ioh, RR_RX_STATE, RR_RS_RESET);
511 bus_space_write_4(iot, ioh, RR_TX_STATE, 0);
512 bus_space_write_4(iot, ioh, RR_DMA_READ_STATE, RR_DR_RESET);
513 bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE, RR_DW_RESET);
514 bus_space_write_4(iot, ioh, RR_PCI_STATE, 0);
515 bus_space_write_4(iot, ioh, RR_TIMER, 0);
516 bus_space_write_4(iot, ioh, RR_TIMER_REF, 0);
517
518 /*
519 * Reset the assist register that the documentation suggests
520 * resetting. Too bad that the docs don't mention anything
521 * else about the register!
522 */
523
524 bus_space_write_4(iot, ioh, 0x15C, 1);
525
526 /* Clear BIST, set the PC to the start of the code and let 'er rip */
527
528 value = bus_space_read_4(iot, ioh, RR_PCI_BIST);
529 bus_space_write_4(iot, ioh, RR_PCI_BIST, (value & ~0xff) | 8);
530
531 sc->sc_bist_write(sc, 0);
532 esh_reset_runcode(sc);
533
534 bus_space_write_4(iot, ioh, RR_PROC_PC, sc->sc_runcode_start);
535 bus_space_write_4(iot, ioh, RR_PROC_BREAKPT, 0x00000001);
536
537 misc_host_ctl &= ~RR_MH_HALT_PROC;
538 bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, misc_host_ctl);
539
540 /* XXX: should we sleep rather than delaying for 1ms!? */
541
542 delay(1000); /* Need 500 us, but we'll give it more */
543
544 value = sc->sc_bist_read(sc);
545 if (value != 0) {
546 printf("%s: BIST is %d, not 0!\n",
547 sc->sc_dev.dv_xname, value);
548 goto bad_init;
549 }
550
551 #ifdef ESH_PRINTF
552 printf("%s: BIST is %x\n", sc->sc_dev.dv_xname, value);
553 eshstatus(sc);
554 #endif
555
556 /* RunCode is up. Initialize NIC */
557
558 esh_write_addr(iot, ioh, RR_GEN_INFO_PTR, sc->sc_gen_info_dma);
559 esh_write_addr(iot, ioh, RR_RECV_RING_PTR, sc->sc_recv_ring_table_dma);
560
561 sc->sc_event_consumer = 0;
562 bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, sc->sc_event_consumer);
563 sc->sc_event_producer = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
564 sc->sc_cmd_producer = RR_INIT_CMD;
565 sc->sc_cmd_consumer = 0;
566
567 mode = bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS);
568 mode |= (RR_MS_WARNINGS |
569 RR_MS_ERR_TERM |
570 RR_MS_NO_RESTART |
571 RR_MS_SWAP_DATA);
572 mode &= ~RR_MS_PH_MODE;
573 bus_space_write_4(iot, ioh, RR_MODE_AND_STATUS, mode);
574
575 #if 0
576 #ifdef ESH_PRINTF
577 printf("eshinit: misc_local_ctl %x, SRAM size %d\n", misc_local_ctl,
578 sc->sc_sram_size);
579 #endif
580 /*
581 misc_local_ctl |= (RR_LC_FAST_PROM | RR_LC_PARITY_ON);
582 */
583 if (sc->sc_sram_size > 256 * 1024) {
584 misc_local_ctl |= RR_LC_ADD_SRAM;
585 }
586 #endif
587
588 #ifdef ESH_PRINTF
589 printf("eshinit: misc_local_ctl %x\n", misc_local_ctl);
590 #endif
591 bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
592
593 /* Set tuning parameters */
594
595 bus_space_write_4(iot, ioh, RR_CONN_RETRY_COUNT,
596 sc->sc_tune.rt_conn_retry_count);
597 bus_space_write_4(iot, ioh, RR_CONN_RETRY_TIMER,
598 sc->sc_tune.rt_conn_retry_timer);
599 bus_space_write_4(iot, ioh, RR_CONN_TIMEOUT,
600 sc->sc_tune.rt_conn_timeout);
601 bus_space_write_4(iot, ioh, RR_INTERRUPT_TIMER,
602 sc->sc_tune.rt_interrupt_timer);
603 bus_space_write_4(iot, ioh, RR_TX_TIMEOUT,
604 sc->sc_tune.rt_tx_timeout);
605 bus_space_write_4(iot, ioh, RR_RX_TIMEOUT,
606 sc->sc_tune.rt_rx_timeout);
607 bus_space_write_4(iot, ioh, RR_STATS_TIMER,
608 sc->sc_tune.rt_stats_timer);
609 bus_space_write_4(iot, ioh, RR_PCI_STATE,
610 sc->sc_tune.rt_pci_state);
611 bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE,
612 sc->sc_tune.rt_dma_write_state);
613 bus_space_write_4(iot, ioh, RR_DMA_READ_STATE,
614 sc->sc_tune.rt_dma_read_state);
615
616 sc->sc_max_rings = bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS);
617
618 sc->sc_runcode_version =
619 bus_space_read_4(iot, ioh, RR_RUNCODE_VERSION);
620 sc->sc_version = sc->sc_runcode_version >> 16;
621 if (sc->sc_version != 1 && sc->sc_version != 2) {
622 printf("%s: bad version number %d in runcode\n",
623 sc->sc_dev.dv_xname, sc->sc_version);
624 goto bad_init;
625 }
626
627 if (sc->sc_version == 1) {
628 sc->sc_options = 0;
629 } else {
630 value = bus_space_read_4(iot, ioh, RR_ULA);
631 sc->sc_options = value >> 16;
632 }
633
634 if (sc->sc_options & (RR_OP_LONG_TX | RR_OP_LONG_RX)) {
635 printf("%s: unsupported firmware -- long descriptors\n",
636 sc->sc_dev.dv_xname);
637 goto bad_init;
638 }
639
640 printf("%s: startup runcode version %d.%d.%d, options %x\n",
641 sc->sc_dev.dv_xname,
642 sc->sc_version,
643 (sc->sc_runcode_version >> 8) & 0xff,
644 sc->sc_runcode_version & 0xff,
645 sc->sc_options);
646
647 /* Initialize the general ring information */
648
649 memset(sc->sc_recv_ring_table, 0,
650 sizeof(struct rr_ring_ctl) * RR_ULP_COUNT);
651
652 ring = &sc->sc_gen_info->ri_event_ring_ctl;
653 ring->rr_ring_addr = sc->sc_event_ring_dma;
654 ring->rr_entry_size = sizeof(struct rr_event);
655 ring->rr_free_bufs = RR_EVENT_RING_SIZE / 4;
656 ring->rr_entries = RR_EVENT_RING_SIZE;
657 ring->rr_prod_index = 0;
658
659 ring = &sc->sc_gen_info->ri_cmd_ring_ctl;
660 ring->rr_free_bufs = 8;
661 ring->rr_entry_size = sizeof(union rr_cmd);
662 ring->rr_prod_index = RR_INIT_CMD;
663
664 ring = &sc->sc_gen_info->ri_send_ring_ctl;
665 ring->rr_ring_addr = sc->sc_send_ring_dma;
666 if (sc->sc_version == 1) {
667 ring->rr_free_bufs = RR_RR_DONT_COMPLAIN;
668 } else {
669 ring->rr_free_bufs = 0;
670 }
671
672 ring->rr_entries = RR_SEND_RING_SIZE;
673 ring->rr_entry_size = sizeof(struct rr_descr);
674
675 ring->rr_prod_index = sc->sc_send.ec_producer =
676 sc->sc_send.ec_consumer = 0;
677 sc->sc_send.ec_cur_mbuf = NULL;
678 sc->sc_send.ec_cur_buf = NULL;
679
680 sc->sc_snap_recv.ec_descr = sc->sc_snap_recv_ring;
681 sc->sc_snap_recv.ec_consumer = sc->sc_snap_recv.ec_producer = 0;
682
683 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
684 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
685
686 /* Set up the watchdog to make sure something happens! */
687
688 sc->sc_watchdog = 0;
689 ifp->if_timer = 5;
690
691 /*
692 * Can't actually turn on interface until we see some events,
693 * so set initialized flag, but don't start sending.
694 */
695
696 sc->sc_flags = ESH_FL_INITIALIZED;
697 esh_send_cmd(sc, RR_CC_START_RUNCODE, 0, 0);
698 return;
699
700 bad_init:
701 sc->sc_flags = 0;
702 wakeup((void *) sc);
703 return;
704 }
705
706
707 /*
708 * Code to handle the Framing Protocol (FP) interface to the esh.
709 * This will allow us to write directly to the wire, with no
710 * intervening memcpy's to slow us down.
711 */
712
713 int
714 esh_fpopen(dev, oflags, devtype, p)
715 dev_t dev;
716 int oflags;
717 int devtype;
718 struct proc *p;
719 {
720 struct esh_softc *sc;
721 struct rr_ring_ctl *ring_ctl;
722 struct esh_fp_ring_ctl *recv;
723 int ulp = ESHULP(dev);
724 int error = 0;
725 bus_size_t size;
726 int rseg;
727 int s;
728
729 sc = device_lookup(&esh_cd, ESHUNIT(dev));
730 if (sc == NULL || ulp == HIPPI_ULP_802)
731 return (ENXIO);
732
733 #ifdef ESH_PRINTF
734 printf("esh_fpopen: opening board %d, ulp %d\n",
735 sc->sc_dev.dv_unit, ulp);
736 #endif
737
738 /* If the card is not up, initialize it. */
739
740 s = splnet();
741
742 if (sc->sc_fp_rings >= sc->sc_max_rings - 1) {
743 splx(s);
744 return (ENOSPC);
745 }
746
747 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
748 eshinit(sc);
749 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0)
750 return EIO;
751 }
752
753 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
754 /*
755 * Wait for the runcode to indicate that it is up,
756 * while watching to make sure we haven't crashed.
757 */
758
759 error = 0;
760 while (error == 0 &&
761 (sc->sc_flags & ESH_FL_INITIALIZED) != 0 &&
762 (sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
763 error = tsleep((void *) sc, PCATCH | PRIBIO,
764 "eshinit", 0);
765 #ifdef ESH_PRINTF
766 printf("esh_fpopen: tslept\n");
767 #endif
768 }
769
770 if (error != 0) {
771 splx(s);
772 return error;
773 }
774
775 if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) {
776 splx(s);
777 return EIO;
778 }
779 }
780
781
782 #ifdef ESH_PRINTF
783 printf("esh_fpopen: card up\n");
784 #endif
785
786 /* Look at the ring descriptor to see if the ULP is in use */
787
788 ring_ctl = &sc->sc_recv_ring_table[ulp];
789 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
790 (caddr_t) ring_ctl - (caddr_t) sc->sc_dma_addr,
791 sizeof(*ring_ctl),
792 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
793 if (ring_ctl->rr_entry_size != 0) {
794 splx(s);
795 return (EBUSY);
796 }
797
798 #ifdef ESH_PRINTF
799 printf("esh_fpopen: ring %d okay\n", ulp);
800 #endif
801
802 /*
803 * Allocate the DMA space for the ring; space for the
804 * ring control blocks has already been staticly allocated.
805 */
806
807 recv = (struct esh_fp_ring_ctl *)
808 malloc(sizeof(*recv), M_DEVBUF, M_WAITOK|M_ZERO);
809 if (recv == NULL)
810 return(ENOMEM);
811 TAILQ_INIT(&recv->ec_queue);
812
813 size = RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr);
814 error = bus_dmamem_alloc(sc->sc_dmat, size, 0, RR_DMA_BOUNDARY,
815 &recv->ec_dmaseg, 1,
816 &rseg, BUS_DMA_WAITOK);
817
818 if (error) {
819 printf("%s: couldn't allocate space for FP receive ring"
820 "data structures\n", sc->sc_dev.dv_xname);
821 goto bad_fp_dmamem_alloc;
822 }
823
824 if (rseg > 1) {
825 printf("%s: contiguous memory not available for "
826 "FP receive ring\n", sc->sc_dev.dv_xname);
827 goto bad_fp_dmamem_map;
828 }
829
830 error = bus_dmamem_map(sc->sc_dmat, &recv->ec_dmaseg, rseg,
831 size, (caddr_t *) &recv->ec_descr,
832 BUS_DMA_WAITOK | BUS_DMA_COHERENT);
833 if (error) {
834 printf("%s: couldn't map memory for FP receive ring\n",
835 sc->sc_dev.dv_xname);
836 goto bad_fp_dmamem_map;
837 }
838
839 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, RR_DMA_BOUNDARY,
840 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
841 &recv->ec_dma)) {
842 printf("%s: couldn't create DMA map for FP receive ring\n",
843 sc->sc_dev.dv_xname);
844 goto bad_fp_dmamap_create;
845 }
846
847 if (bus_dmamap_load(sc->sc_dmat, recv->ec_dma, recv->ec_descr,
848 size, NULL, BUS_DMA_WAITOK)) {
849 printf("%s: couldn't load DMA map for FP receive ring\n",
850 sc->sc_dev.dv_xname);
851 goto bad_fp_dmamap_load;
852 }
853
854 memset(recv->ec_descr, 0, size);
855
856 /*
857 * Create the ring:
858 *
859 * XXX: HTF are we gonna deal with the fact that we don't know
860 * if the open succeeded until we get a response from
861 * the event handler? I guess we could go to sleep waiting
862 * for the interrupt, and get woken up by the eshintr
863 * case handling it.
864 */
865
866 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
867 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
868 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
869 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
870 ring_ctl->rr_prod_index = recv->ec_producer = recv->ec_consumer = 0;
871 ring_ctl->rr_mode = RR_RR_CHARACTER;
872 recv->ec_ulp = ulp;
873 recv->ec_index = -1;
874
875 sc->sc_fp_recv[ulp] = recv;
876
877 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
878 (caddr_t) ring_ctl - (caddr_t) sc->sc_dma_addr,
879 sizeof(*ring_ctl),
880 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
881
882 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma, 0, size,
883 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
884
885 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
886
887 #ifdef ESH_PRINTF
888 printf("esh_fpopen: sent create ring cmd\n");
889 #endif
890
891 while (recv->ec_index == -1) {
892 error = tsleep((void *) &recv->ec_ulp, PCATCH | PRIBIO,
893 "eshfpopen", 0);
894 if (error != 0 || recv->ec_index == -1) {
895 splx(s);
896 goto bad_fp_ring_create;
897 }
898 }
899 #ifdef ESH_PRINTF
900 printf("esh_fpopen: created ring\n");
901 #endif
902
903 /*
904 * Ring is created. Set up various pointers to the ring
905 * information, fill the ring, and get going...
906 */
907
908 sc->sc_fp_rings++;
909 splx(s);
910 return 0;
911
912 bad_fp_ring_create:
913 #ifdef ESH_PRINTF
914 printf("esh_fpopen: bad ring create\n");
915 #endif
916 sc->sc_fp_recv[ulp] = NULL;
917 memset(ring_ctl, 0, sizeof(*ring_ctl));
918 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma);
919 bad_fp_dmamap_load:
920 bus_dmamap_destroy(sc->sc_dmat, recv->ec_dma);
921 bad_fp_dmamap_create:
922 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) recv->ec_descr, size);
923 bad_fp_dmamem_map:
924 bus_dmamem_free(sc->sc_dmat, &recv->ec_dmaseg, rseg);
925 bad_fp_dmamem_alloc:
926 free(recv, M_DEVBUF);
927 if (error == 0)
928 error = ENOMEM;
929 splx(s);
930 return (error);
931 }
932
933
934 int
935 esh_fpclose(dev, fflag, devtype, p)
936 dev_t dev;
937 int fflag;
938 int devtype;
939 struct proc *p;
940 {
941 struct esh_softc *sc;
942 struct rr_ring_ctl *ring_ctl;
943 struct esh_fp_ring_ctl *ring;
944 int ulp = ESHULP(dev);
945 int index;
946 int error = 0;
947 int s;
948
949 sc = device_lookup(&esh_cd, ESHUNIT(dev));
950 if (sc == NULL || ulp == HIPPI_ULP_802)
951 return (ENXIO);
952
953 s = splnet();
954
955 ring = sc->sc_fp_recv[ulp];
956 ring_ctl = &sc->sc_recv_ring_table[ulp];
957 index = ring->ec_index;
958
959 #ifdef ESH_PRINTF
960 printf("esh_fpclose: closing unit %d, ulp %d\n",
961 sc->sc_dev.dv_unit, ulp);
962 #endif
963 assert(ring);
964 assert(ring_ctl);
965
966 /*
967 * Disable the ring, wait for notification, and get rid of DMA
968 * stuff and dynamically allocated memory. Loop, waiting to
969 * learn that the ring has been disabled, or the card
970 * has been shut down.
971 */
972
973 do {
974 esh_send_cmd(sc, RR_CC_DISABLE_RING, ulp, ring->ec_producer);
975
976 error = tsleep((void *) &ring->ec_index, PCATCH | PRIBIO,
977 "esh_fpclose", 0);
978 if (error != 0 && error != EAGAIN) {
979 printf("%s: esh_fpclose: wait on ring disable bad\n",
980 sc->sc_dev.dv_xname);
981 ring->ec_index = -1;
982 break;
983 }
984 } while (ring->ec_index != -1 && sc->sc_flags != 0);
985
986 /*
987 * XXX: Gotta unload the ring, removing old descriptors!
988 * *Can* there be outstanding reads with a close issued!?
989 */
990
991 bus_dmamap_unload(sc->sc_dmat, ring->ec_dma);
992 bus_dmamap_destroy(sc->sc_dmat, ring->ec_dma);
993 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) ring->ec_descr,
994 RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr));
995 bus_dmamem_free(sc->sc_dmat, &ring->ec_dmaseg, ring->ec_dma->dm_nsegs);
996 free(ring, M_DEVBUF);
997 memset(ring_ctl, 0, sizeof(*ring_ctl));
998 sc->sc_fp_recv[ulp] = NULL;
999 sc->sc_fp_recv_index[index] = NULL;
1000
1001 sc->sc_fp_rings--;
1002 if (sc->sc_fp_rings == 0)
1003 sc->sc_flags &= ~ESH_FL_FP_RING_UP;
1004
1005 splx(s);
1006 return 0;
1007 }
1008
1009 int
1010 esh_fpread(dev, uio, ioflag)
1011 dev_t dev;
1012 struct uio *uio;
1013 int ioflag;
1014 {
1015 struct lwp *l = curlwp;
1016 struct proc *p = l->l_proc;
1017 struct iovec *iovp;
1018 struct esh_softc *sc;
1019 struct esh_fp_ring_ctl *ring;
1020 struct esh_dmainfo *di;
1021 int ulp = ESHULP(dev);
1022 int error;
1023 int i;
1024 int s;
1025
1026 #ifdef ESH_PRINTF
1027 printf("esh_fpread: dev %x\n", dev);
1028 #endif
1029
1030 sc = device_lookup(&esh_cd, ESHUNIT(dev));
1031 if (sc == NULL || ulp == HIPPI_ULP_802)
1032 return (ENXIO);
1033
1034 s = splnet();
1035
1036 ring = sc->sc_fp_recv[ulp];
1037
1038 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1039 error = ENXIO;
1040 goto fpread_done;
1041 }
1042
1043 /* Check for validity */
1044 for (i = 0; i < uio->uio_iovcnt; i++) {
1045 /* Check for valid offsets and sizes */
1046 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1047 (i < uio->uio_iovcnt - 1 &&
1048 (uio->uio_iov[i].iov_len & 3) != 0)) {
1049 error = EFAULT;
1050 goto fpread_done;
1051 }
1052 }
1053
1054 PHOLD(l); /* Lock process info into memory */
1055
1056 /* Lock down the pages */
1057 for (i = 0; i < uio->uio_iovcnt; i++) {
1058 iovp = &uio->uio_iov[i];
1059 error = uvm_vslock(p, iovp->iov_base, iovp->iov_len,
1060 VM_PROT_WRITE);
1061 if (error) {
1062 /* Unlock what we've locked so far. */
1063 for (--i; i >= 0; i--) {
1064 iovp = &uio->uio_iov[i];
1065 uvm_vsunlock(p, iovp->iov_base,
1066 iovp->iov_len);
1067 }
1068 goto fpread_done;
1069 }
1070 }
1071
1072 /*
1073 * Perform preliminary DMA mapping and throw the buffers
1074 * onto the queue to be sent.
1075 */
1076
1077 di = esh_new_dmainfo(sc);
1078 if (di == NULL) {
1079 error = ENOMEM;
1080 goto fpread_done;
1081 }
1082 di->ed_buf = NULL;
1083 di->ed_error = 0;
1084 di->ed_read_len = 0;
1085
1086 #ifdef ESH_PRINTF
1087 printf("esh_fpread: ulp %d, uio offset %qd, resid %d, iovcnt %d\n",
1088 ulp, uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1089 #endif
1090
1091 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1092 uio, BUS_DMA_READ|BUS_DMA_WAITOK);
1093 if (error) {
1094 printf("%s: esh_fpread: bus_dmamap_load_uio "
1095 "failed\terror code %d\n",
1096 sc->sc_dev.dv_xname, error);
1097 error = ENOBUFS;
1098 esh_free_dmainfo(sc, di);
1099 goto fpread_done;
1100 }
1101
1102 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1103 0, di->ed_dma->dm_mapsize,
1104 BUS_DMASYNC_PREREAD);
1105
1106 #ifdef ESH_PRINTF
1107 printf("esh_fpread: ulp %d, di %p, nsegs %d, uio len %d\n",
1108 ulp, di, di->ed_dma->dm_nsegs, uio->uio_resid);
1109 #endif
1110
1111 di->ed_flags |= ESH_DI_BUSY;
1112
1113 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1114 esh_fill_fp_ring(sc, ring);
1115
1116 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1117 error = tsleep((void *) di, PCATCH | PRIBIO, "esh_fpread", 0);
1118 #ifdef ESH_PRINTF
1119 printf("esh_fpread: ulp %d, tslept %d\n", ulp, error);
1120 #endif
1121 if (error) {
1122 /*
1123 * Remove the buffer entries from the ring; this
1124 * is gonna require a DISCARD_PKT command, and
1125 * will certainly disrupt things. This is why we
1126 * can have only one outstanding read on a ring
1127 * at a time. :-(
1128 */
1129
1130 printf("esh_fpread: was that a ^C!? error %d, ulp %d\n",
1131 error, ulp);
1132 if (error == EINTR || error == ERESTART)
1133 error = 0;
1134 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1135 esh_flush_fp_ring(sc, ring, di);
1136 error = EINTR;
1137 break;
1138 }
1139 }
1140 }
1141
1142 if (error == 0 && di->ed_error != 0)
1143 error = EIO;
1144
1145 /*
1146 * How do we let the caller know how much has been read?
1147 * Adjust the uio_resid stuff!?
1148 */
1149
1150 assert(uio->uio_resid >= di->ed_read_len);
1151
1152 uio->uio_resid -= di->ed_read_len;
1153 for (i = 0; i < uio->uio_iovcnt; i++) {
1154 iovp = &uio->uio_iov[i];
1155 uvm_vsunlock(p, iovp->iov_base, iovp->iov_len);
1156 }
1157
1158 PRELE(l); /* Release process info */
1159 esh_free_dmainfo(sc, di);
1160
1161 fpread_done:
1162 #ifdef ESH_PRINTF
1163 printf("esh_fpread: ulp %d, error %d\n", ulp, error);
1164 #endif
1165 splx(s);
1166 return error;
1167 }
1168
1169
1170 int
1171 esh_fpwrite(dev, uio, ioflag)
1172 dev_t dev;
1173 struct uio *uio;
1174 int ioflag;
1175 {
1176 struct lwp *l = curlwp;
1177 struct proc *p = l->l_proc;
1178 struct iovec *iovp;
1179 struct esh_softc *sc;
1180 struct esh_send_ring_ctl *ring;
1181 struct esh_dmainfo *di;
1182 int ulp = ESHULP(dev);
1183 int error;
1184 int len;
1185 int i;
1186 int s;
1187
1188 #ifdef ESH_PRINTF
1189 printf("esh_fpwrite: dev %x\n", dev);
1190 #endif
1191
1192 sc = device_lookup(&esh_cd, ESHUNIT(dev));
1193 if (sc == NULL || ulp == HIPPI_ULP_802)
1194 return (ENXIO);
1195
1196 s = splnet();
1197
1198 ring = &sc->sc_send;
1199
1200 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
1201 error = ENXIO;
1202 goto fpwrite_done;
1203 }
1204
1205 /* Check for validity */
1206 for (i = 0; i < uio->uio_iovcnt; i++) {
1207 if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 ||
1208 (i < uio->uio_iovcnt - 1 &&
1209 (uio->uio_iov[i].iov_len & 3) != 0)) {
1210 error = EFAULT;
1211 goto fpwrite_done;
1212 }
1213 }
1214
1215 PHOLD(l); /* Lock process info into memory */
1216
1217 /* Lock down the pages */
1218 for (i = 0; i < uio->uio_iovcnt; i++) {
1219 iovp = &uio->uio_iov[i];
1220 error = uvm_vslock(p, iovp->iov_base, iovp->iov_len,
1221 VM_PROT_READ);
1222 if (error) {
1223 /* Unlock what we've locked so far. */
1224 for (--i; i >= 0; i--) {
1225 iovp = &uio->uio_iov[i];
1226 uvm_vsunlock(p, iovp->iov_base,
1227 iovp->iov_len);
1228 }
1229 goto fpwrite_done;
1230 }
1231 }
1232
1233 /*
1234 * Perform preliminary DMA mapping and throw the buffers
1235 * onto the queue to be sent.
1236 */
1237
1238 di = esh_new_dmainfo(sc);
1239 if (di == NULL) {
1240 error = ENOMEM;
1241 goto fpwrite_done;
1242 }
1243 di->ed_buf = NULL;
1244 di->ed_error = 0;
1245
1246 #ifdef ESH_PRINTF
1247 printf("esh_fpwrite: uio offset %qd, resid %d, iovcnt %d\n",
1248 uio->uio_offset, uio->uio_resid, uio->uio_iovcnt);
1249 #endif
1250
1251 error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma,
1252 uio, BUS_DMA_WRITE|BUS_DMA_WAITOK);
1253 if (error) {
1254 printf("%s: esh_fpwrite: bus_dmamap_load_uio "
1255 "failed\terror code %d\n",
1256 sc->sc_dev.dv_xname, error);
1257 error = ENOBUFS;
1258 esh_free_dmainfo(sc, di);
1259 goto fpwrite_done;
1260 }
1261
1262 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1263 0, di->ed_dma->dm_mapsize,
1264 BUS_DMASYNC_PREWRITE);
1265
1266 #ifdef ESH_PRINTF
1267 printf("esh_fpwrite: di %p, nsegs %d, uio len %d\n",
1268 di, di->ed_dma->dm_nsegs, uio->uio_resid);
1269 #endif
1270
1271 len = di->ed_dma->dm_mapsize;
1272 di->ed_flags |= ESH_DI_BUSY;
1273
1274 TAILQ_INSERT_TAIL(&ring->ec_di_queue, di, ed_list);
1275 eshstart(&sc->sc_if);
1276
1277 while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) {
1278 error = tsleep((void *) di, PRIBIO, "esh_fpwrite", 0);
1279 #ifdef ESH_PRINTF
1280 printf("esh_fpwrite: tslept %d\n", error);
1281 #endif
1282 if (error) {
1283 printf("esh_fpwrite: was that a ^C!? Shouldn't be! Error %d\n",
1284 error);
1285 if (error == EINTR || error == ERESTART)
1286 error = 0;
1287 if ((di->ed_flags & ESH_DI_BUSY) != 0) {
1288 panic("interrupted eshwrite!");
1289 #if 0
1290 /* Better do *something* here! */
1291 esh_flush_send_ring(sc, di);
1292 #endif
1293 error = EINTR;
1294 break;
1295 }
1296 }
1297 }
1298
1299 if (error == 0 && di->ed_error != 0)
1300 error = EIO;
1301
1302 /*
1303 * How do we let the caller know how much has been written?
1304 * Adjust the uio_resid stuff!?
1305 */
1306
1307 uio->uio_resid -= len;
1308 uio->uio_offset += len;
1309
1310 for (i = 0; i < uio->uio_iovcnt; i++) {
1311 iovp = &uio->uio_iov[i];
1312 uvm_vsunlock(p, iovp->iov_base, iovp->iov_len);
1313 }
1314
1315 PRELE(l); /* Release process info */
1316 esh_free_dmainfo(sc, di);
1317
1318 fpwrite_done:
1319 #ifdef ESH_PRINTF
1320 printf("esh_fpwrite: error %d\n", error);
1321 #endif
1322 splx(s);
1323 return error;
1324 }
1325
1326 void
1327 esh_fpstrategy(bp)
1328 struct buf *bp;
1329 {
1330 struct esh_softc *sc;
1331 int ulp = ESHULP(bp->b_dev);
1332 int error = 0;
1333 int s;
1334
1335 #ifdef ESH_PRINTF
1336 printf("esh_fpstrategy: starting, bcount %ld, flags %lx, dev %x\n"
1337 "\tunit %x, ulp %d\n",
1338 bp->b_bcount, bp->b_flags, bp->b_dev, unit, ulp);
1339 #endif
1340
1341 sc = device_lookup(&esh_cd, ESHUNIT(bp->b_dev));
1342
1343 s = splnet();
1344 if (sc == NULL || ulp == HIPPI_ULP_802) {
1345 bp->b_error = ENXIO;
1346 bp->b_flags |= B_ERROR;
1347 goto done;
1348 }
1349
1350 if (bp->b_bcount == 0)
1351 goto done;
1352
1353 #define UP_FLAGS (ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
1354
1355 if ((sc->sc_flags & UP_FLAGS) != UP_FLAGS) {
1356 bp->b_error = EBUSY;
1357 bp->b_flags |= B_ERROR;
1358 goto done;
1359 }
1360 #undef UP_FLAGS
1361
1362 if (bp->b_flags & B_READ) {
1363 /*
1364 * Perform preliminary DMA mapping and throw the buffers
1365 * onto the queue to be sent.
1366 */
1367
1368 struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[ulp];
1369 struct esh_dmainfo *di = esh_new_dmainfo(sc);
1370
1371 if (di == NULL) {
1372 bp->b_error = ENOMEM;
1373 bp->b_flags |= B_ERROR;
1374 goto done;
1375 }
1376 di->ed_buf = bp;
1377 error = bus_dmamap_load(sc->sc_dmat, di->ed_dma,
1378 bp->b_data, bp->b_bcount,
1379 bp->b_proc,
1380 BUS_DMA_READ|BUS_DMA_WAITOK);
1381 if (error) {
1382 printf("%s: esh_fpstrategy: "
1383 "bus_dmamap_load "
1384 "failed\terror code %d\n",
1385 sc->sc_dev.dv_xname, error);
1386 bp->b_error = ENOBUFS;
1387 bp->b_flags |= B_ERROR;
1388 esh_free_dmainfo(sc, di);
1389 goto done;
1390 }
1391
1392 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
1393 0, di->ed_dma->dm_mapsize,
1394 BUS_DMASYNC_PREREAD);
1395
1396 #ifdef ESH_PRINTF
1397 printf("fpstrategy: di %p\n", di);
1398 #endif
1399
1400 TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list);
1401 esh_fill_fp_ring(sc, ring);
1402 } else {
1403 /*
1404 * Queue up the buffer for future sending. If the card
1405 * isn't already transmitting, give it a kick.
1406 */
1407
1408 struct esh_send_ring_ctl *ring = &sc->sc_send;
1409 BUFQ_PUT(&ring->ec_buf_queue, bp);
1410 #ifdef ESH_PRINTF
1411 printf("esh_fpstrategy: ready to call eshstart to write!\n");
1412 #endif
1413 eshstart(&sc->sc_if);
1414 }
1415 splx(s);
1416 return;
1417
1418 done:
1419 splx(s);
1420 #ifdef ESH_PRINTF
1421 printf("esh_fpstrategy: failing, bp->b_error %d!\n",
1422 bp->b_error);
1423 #endif
1424 biodone(bp);
1425 }
1426
/*
 * Handle interrupts.  This is basically event handling code; version two
 * firmware tries to speed things up by just telling us the location
 * of the producer and consumer indices, rather than sending us an event.
 */
1432
/*
 * Interrupt handler.  Drains the shared event ring, dispatching each
 * event code, then (for version-2 firmware) processes the consumer
 * indices the RunCode reports directly, and finally acknowledges the
 * interrupt by writing back our consumer offsets.
 * Returns nonzero if the interrupt was ours.
 */
int
eshintr(arg)
	void *arg;
{
	struct esh_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rc_offsets;
	u_int32_t misc_host_ctl;
	int rc_send_consumer = 0;	/* shut up compiler */
	int rc_snap_ring_consumer = 0;	/* ditto */
	u_int8_t fp_ring_consumer[RR_MAX_RECV_RING];
	int start_consumer;
	int ret = 0;

	/* Debug-trace state; buf/t only reach printf under ESH_PRINTF. */
	int okay = 0;
	int blah = 0;
	char buf[100];
	char t[100];


	/* Check to see if this is our interrupt. */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	if ((misc_host_ctl & RR_MH_INTERRUPT) == 0)
		return 0;

	/* If we can't do anything with the interrupt, just drop it */

	if (sc->sc_flags == 0)
		return 1;

	rc_offsets = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_event_producer = rc_offsets & 0xff;
	if (sc->sc_version == 2) {
		int i;

		/*
		 * Version-2 RunCode packs send/SNAP consumer indices
		 * into the producer register and exposes the per-ring
		 * FP consumers in a separate register block.
		 */
		buf[0] = '\0';
		strcat(buf, "rc: ");
		rc_send_consumer = (rc_offsets >> 8) & 0xff;
		rc_snap_ring_consumer = (rc_offsets >> 16) & 0xff;
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			rc_offsets =
				bus_space_read_4(iot, ioh,
						 RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(rc_offsets);
			*((u_int32_t *) &fp_ring_consumer[i]) = rc_offsets;
			sprintf(t, "%.8x|", rc_offsets);
			strcat(buf, t);
		}
	}
	start_consumer = sc->sc_event_consumer;

	/* Take care of synchronizing DMA with entries we read... */

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Consume every event the card has produced since last time. */
	while (sc->sc_event_consumer != sc->sc_event_producer) {
		struct rr_event *event =
			&sc->sc_event_ring[sc->sc_event_consumer];

#ifdef ESH_PRINTF
		if (event->re_code != RR_EC_WATCHDOG &&
		    event->re_code != RR_EC_STATS_UPDATE &&
		    event->re_code != RR_EC_SET_CMD_CONSUMER) {
			printf("%s: event code %x, ring %d, index %d\n",
			       sc->sc_dev.dv_xname, event->re_code,
			       event->re_ring, event->re_index);
			if (okay == 0)
				printf("%s\n", buf);
			okay = 1;
		}
#endif
		ret = 1;	/* some action was taken by card */

		switch(event->re_code) {
		case RR_EC_RUNCODE_UP:
			printf("%s: firmware up\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_RUNCODE_UP;
			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			if ((ifp->if_flags & IFF_UP) != 0)
				esh_init_snap_ring(sc);
			if (sc->sc_fp_rings > 0)
				esh_init_fp_rings(sc);

			/*
			 * XXX: crank up FP rings that might be
			 * in use after a reset!
			 */
			wakeup((void *) sc);
			break;

		case RR_EC_WATCHDOG:
			/*
			 * Record the watchdog event.
			 * This is checked by eshwatchdog
			 */

			sc->sc_watchdog = 1;
			break;

		case RR_EC_SET_CMD_CONSUMER:
			sc->sc_cmd_consumer = event->re_index;
			break;

		case RR_EC_LINK_ON:
			printf("%s: link up\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_LINK_UP;

			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
			if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
				/*
				 * Interface is now `running', with no
				 * output active.
				 */
				ifp->if_flags |= IFF_RUNNING;
				ifp->if_flags &= ~IFF_OACTIVE;

				/* Attempt to start output, if any. */
			}
			eshstart(ifp);
			break;

		case RR_EC_LINK_OFF:
			sc->sc_flags &= ~ESH_FL_LINK_UP;
			printf("%s: link down\n", sc->sc_dev.dv_xname);
			break;

		/*
		 * These are all unexpected. We need to handle all
		 * of them, though.
		 */

		case RR_EC_INVALID_CMD:
		case RR_EC_INTERNAL_ERROR:
		case RR2_EC_INTERNAL_ERROR:
		case RR_EC_BAD_SEND_RING:
		case RR_EC_BAD_SEND_BUF:
		case RR_EC_BAD_SEND_DESC:
		case RR_EC_RECV_RING_FLUSH:
		case RR_EC_RECV_ERROR_INFO:
		case RR_EC_BAD_RECV_BUF:
		case RR_EC_BAD_RECV_DESC:
		case RR_EC_BAD_RECV_RING:
		case RR_EC_UNIMPLEMENTED:
			printf("%s: unexpected event %x;"
			       "shutting down interface\n",
			       sc->sc_dev.dv_xname, event->re_code);
			ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
			sc->sc_flags = ESH_FL_CRASHED;
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			break;

/* Log-and-continue events:  each expands to a case that just prints. */
#define CALLOUT(a) case a:						\
	printf("%s: Event " #a " received -- "				\
	       "ring %d index %d timestamp %x\n",			\
	       sc->sc_dev.dv_xname, event->re_ring, event->re_index,	\
	       event->re_timestamp);					\
	break;

		CALLOUT(RR_EC_NO_RING_FOR_ULP);
		CALLOUT(RR_EC_REJECTING);	/* dropping packets */
#undef CALLOUT

			/* Send events */

		case RR_EC_PACKET_SENT:		/* not used in firmware 2.x */
			ifp->if_opackets++;
			/* FALLTHROUGH */

		case RR_EC_SET_SND_CONSUMER:
			assert(sc->sc_version == 1);
			/* FALLTHROUGH */

		case RR_EC_SEND_RING_LOW:
			eshstart_cleanup(sc, event->re_index, 0);
			break;


		case RR_EC_CONN_REJECT:
		case RR_EC_CAMPON_TIMEOUT:
		case RR_EC_CONN_TIMEOUT:
		case RR_EC_DISCONN_ERR:
		case RR_EC_INTERNAL_PARITY:
		case RR_EC_TX_IDLE:
		case RR_EC_SEND_LINK_OFF:
			/* Send-side errors:  pass the code on for cleanup. */
			eshstart_cleanup(sc, event->re_index, event->re_code);
			break;

			/* Receive events */

		case RR_EC_RING_ENABLED:
			if (event->re_ring == HIPPI_ULP_802) {
				rc_snap_ring_consumer = 0;	/* prevent read */
				sc->sc_flags |= ESH_FL_SNAP_RING_UP;
				esh_fill_snap_ring(sc);

				if (sc->sc_flags & ESH_FL_LINK_UP) {
					/*
					 * Interface is now `running', with no
					 * output active.
					 */
					ifp->if_flags |= IFF_RUNNING;
					ifp->if_flags &= ~IFF_OACTIVE;

					/* Attempt to start output, if any. */

					eshstart(ifp);
				}
#ifdef ESH_PRINTF
				if (event->re_index != 0)
					printf("ENABLE snap ring -- index %d instead of 0!\n",
					       event->re_index);
#endif
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];

				sc->sc_flags |= ESH_FL_FP_RING_UP;
#ifdef ESH_PRINTF
				printf("eshintr: FP ring %d up\n",
				       event->re_ring);
#endif

				/* Record the card-assigned ring index and
				 * wake the opener sleeping in esh_fpopen. */
				sc->sc_fp_recv_index[event->re_index] = ring;
				ring->ec_index = event->re_index;
				wakeup((void *) &ring->ec_ulp);
			}
			break;

		case RR_EC_RING_DISABLED:
#ifdef ESH_PRINTF
			printf("eshintr: disabling ring %d\n",
			       event->re_ring);
#endif
			if (event->re_ring == HIPPI_ULP_802) {
				struct rr_ring_ctl *ring =
					sc->sc_recv_ring_table + HIPPI_ULP_802;
				memset(ring, 0, sizeof(*ring));
				sc->sc_flags &= ~ESH_FL_CLOSING_SNAP;
				sc->sc_flags &= ~ESH_FL_SNAP_RING_UP;
				/* Free every mbuf still parked on the
				 * SNAP receive ring. */
				while (sc->sc_snap_recv.ec_consumer
				       != sc->sc_snap_recv.ec_producer) {
					struct mbuf *m0;
					u_int16_t offset = sc->sc_snap_recv.ec_consumer;

					bus_dmamap_unload(sc->sc_dmat,
							  sc->sc_snap_recv.ec_dma[offset]);
					MFREE(sc->sc_snap_recv.ec_m[offset], m0);
					sc->sc_snap_recv.ec_m[offset] = NULL;
					sc->sc_snap_recv.ec_consumer =
						NEXT_RECV(sc->sc_snap_recv.ec_consumer);
				}
				sc->sc_snap_recv.ec_consumer =
					rc_snap_ring_consumer;
				sc->sc_snap_recv.ec_producer =
					rc_snap_ring_consumer;
				wakeup((void *) &sc->sc_snap_recv);
			} else {
				struct esh_fp_ring_ctl *recv =
					sc->sc_fp_recv[event->re_ring];
				assert(recv != NULL);
				recv->ec_consumer = recv->ec_producer =
					fp_ring_consumer[recv->ec_index];
				/* ec_index == -1 tells esh_fpclose the
				 * ring really went down. */
				recv->ec_index = -1;
				wakeup((void *) &recv->ec_index);
			}
			break;

		case RR_EC_RING_ENABLE_ERR:
			if (event->re_ring == HIPPI_ULP_802) {
				printf("%s: unable to enable SNAP ring!?\n\t"
				       "shutting down interface\n",
				       sc->sc_dev.dv_xname);
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef ESH_PRINTF
				eshstatus(sc);
#endif
			} else {
				/*
				 * If we just leave the ring index as-is,
				 * the driver will figure out that
				 * we failed to open the ring.
				 */
				wakeup((void *) &(sc->sc_fp_recv[event->re_ring]->ec_ulp));
			}
			break;

		case RR_EC_PACKET_DISCARDED:
			/*
			 * Determine the dmainfo for the current packet
			 * we just discarded and wake up the waiting
			 * process.
			 *
			 * This should never happen on the network ring!
			 */

			if (event->re_ring == HIPPI_ULP_802) {
				printf("%s: discard on SNAP ring!?\n\t"
				       "shutting down interface\n",
				       sc->sc_dev.dv_xname);
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
				sc->sc_flags = ESH_FL_CRASHED;
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];
				struct esh_dmainfo *di =
					ring->ec_cur_dmainfo;

				if (di == NULL)
					di = ring->ec_dmainfo[ring->ec_producer];
				printf("eshintr: DISCARD: index %d,"
				       "ring prod %d, di %p, ring[index] %p\n",
				       event->re_index, ring->ec_producer, di,
				       ring->ec_dmainfo[event->re_index]);

				if (di == NULL)
					di = ring->ec_dmainfo[event->re_index];

				if (di == NULL) {
					printf("eshintr: DISCARD: NULL di, skipping...\n");
					break;
				}

				di->ed_flags &=
					~(ESH_DI_READING | ESH_DI_BUSY);
				wakeup((void *) &di->ed_flags);
			}
			break;

		case RR_EC_OUT_OF_BUF:
		case RR_EC_RECV_RING_OUT:
		case RR_EC_RECV_RING_LOW:
			break;

		case RR_EC_SET_RECV_CONSUMER:
		case RR_EC_PACKET_RECVED:
			if (event->re_ring == HIPPI_ULP_802)
				esh_read_snap_ring(sc, event->re_index, 0);
			else if (sc->sc_fp_recv[event->re_ring] != NULL)
				esh_read_fp_ring(sc, event->re_index, 0,
						 event->re_ring);
			break;

		case RR_EC_RECV_IDLE:
		case RR_EC_PARITY_ERR:
		case RR_EC_LLRC_ERR:
		case RR_EC_PKT_LENGTH_ERR:
		case RR_EC_IP_HDR_CKSUM_ERR:
		case RR_EC_DATA_CKSUM_ERR:
		case RR_EC_SHORT_BURST_ERR:
		case RR_EC_RECV_LINK_OFF:
		case RR_EC_FLAG_SYNC_ERR:
		case RR_EC_FRAME_ERR:
		case RR_EC_STATE_TRANS_ERR:
		case RR_EC_NO_READY_PULSE:
			/* Receive-side errors:  SNAP ring reads with the
			 * error code; FP rings just latch it. */
			if (event->re_ring == HIPPI_ULP_802) {
				esh_read_snap_ring(sc, event->re_index,
						   event->re_code);
			} else {
				struct esh_fp_ring_ctl *r;

				r = sc->sc_fp_recv[event->re_ring];
				if (r)
					r->ec_error = event->re_code;
			}
			break;

		/*
		 * Statistics events can be ignored for now. They might become
		 * necessary if we have to deliver stats on demand, rather than
		 * just returning the statistics block of memory.
		 */

		case RR_EC_STATS_UPDATE:
		case RR_EC_STATS_RETRIEVED:
		case RR_EC_TRACE:
			break;

		default:
			printf("%s: Bogus event code %x, "
			       "ring %d, index %d, timestamp %x\n",
			       sc->sc_dev.dv_xname, event->re_code,
			       event->re_ring, event->re_index,
			       event->re_timestamp);
			break;
		}

		sc->sc_event_consumer = NEXT_EVENT(sc->sc_event_consumer);
	}

	/* Do the receive and send ring processing for version 2 RunCode */

	if (sc->sc_version == 2) {
		int i;
		if (sc->sc_send.ec_consumer != rc_send_consumer) {
			eshstart_cleanup(sc, rc_send_consumer, 0);
			ret = 1;
			blah++;
		}
		if (sc->sc_snap_recv.ec_consumer != rc_snap_ring_consumer &&
		    (sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
			esh_read_snap_ring(sc, rc_snap_ring_consumer, 0);
			ret = 1;
			blah++;
		}
		for (i = 0; i < RR_MAX_RECV_RING; i++) {
			struct esh_fp_ring_ctl *r = sc->sc_fp_recv_index[i];

			if (r != NULL &&
			    r->ec_consumer != fp_ring_consumer[i]) {
#ifdef ESH_PRINTF
				printf("eshintr: performed read on ring %d, index %d\n",
				       r->ec_ulp, i);
#endif
				blah++;
				esh_read_fp_ring(sc, fp_ring_consumer[i],
						 0, r->ec_ulp);
				fp_ring_consumer[i] = r->ec_consumer;
			}
		}
		if (blah != 0 && okay == 0) {
			okay = 1;
#ifdef ESH_PRINTF
			printf("%s\n", buf);
#endif
		}
		/* Repack our consumer offsets for the ack write below. */
		rc_offsets = (sc->sc_snap_recv.ec_consumer << 16) |
			(sc->sc_send.ec_consumer << 8) | sc->sc_event_consumer;
	} else {
		rc_offsets = sc->sc_event_consumer;
	}

	/* NOTE(review): this second sync uses POSTREAD|POSTWRITE although
	 * the ring is being handed back to the device -- looks like it was
	 * intended to be PREREAD|PREWRITE; confirm before changing. */
	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Write out new values for the FP segments... */

	if (sc->sc_version == 2) {
		int i;
		u_int32_t u;

		buf[0] = '\0';
		strcat(buf, "drv: ");
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			/* XXX: should do this right! */
			u = *((u_int32_t *) &fp_ring_consumer[i]);
			sprintf(t, "%.8x|", u);
			strcat(buf, t);
			NTOHL(u);
			bus_space_write_4(iot, ioh,
					  RR_DRIVER_RECV_CONS + i, u);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", buf);
#endif

		buf[0] = '\0';
		strcat(buf, "rcn: ");
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			u = bus_space_read_4(iot, ioh,
					     RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(u);
			sprintf(t, "%.8x|", u);
			strcat(buf, t);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n", buf);
#endif
	}

	/* Clear interrupt */
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, rc_offsets);

	return (ret);
}
1926
1927
1928 /*
1929 * Start output on the interface. Always called at splnet().
1930 * Check to see if there are any mbufs that didn't get sent the
1931 * last time this was called. If there are none, get more mbufs
1932 * and send 'em.
1933 *
1934 * For now, we only send one packet at a time.
1935 */
1936
void
eshstart(ifp)
	struct ifnet *ifp;
{
	struct esh_softc *sc = ifp->if_softc;
	struct esh_send_ring_ctl *send = &sc->sc_send;
	struct mbuf *m = NULL;
	int error;

	/* Don't transmit if interface is busy or not running */

#ifdef ESH_PRINTF
	printf("eshstart: ready to look; flags %x\n", sc->sc_flags);
#endif

#define LINK_UP_FLAGS (ESH_FL_LINK_UP | ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
	if ((sc->sc_flags & LINK_UP_FLAGS) != LINK_UP_FLAGS)
		return;
#undef LINK_UP_FLAGS

#ifdef ESH_PRINTF
	if (esh_check(sc))
		return;
#endif

	/*
	 * Packet source 1 (highest priority):  network mbufs from the
	 * interface send queue.  Only one packet may be "current" at a
	 * time:  ec_cur_mbuf, ec_cur_buf and ec_cur_dmainfo are mutually
	 * exclusive, and all NULL means nothing is being transmitted.
	 */

	/* If we have sent the current packet, get another */

	while ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0 &&
	       (m = send->ec_cur_mbuf) == NULL && send->ec_cur_buf == NULL &&
	       send->ec_cur_dmainfo == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)		/* not really needed */
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf) {
			/*
			 * On output, the raw packet has a eight-byte CCI
			 * field prepended.  On input, there is no such field.
			 * The bpf expects the packet to look the same in both
			 * places, so we temporarily lop off the prepended CCI
			 * field here, then replace it.  Ugh.
			 *
			 * XXX:  Need to use standard mbuf manipulation
			 *       functions, first mbuf may be less than
			 *       8 bytes long.
			 */

			m->m_len -= 8;
			m->m_data += 8;
			m->m_pkthdr.len -= 8;
			bpf_mtap(ifp->if_bpf, m);
			m->m_len += 8;
			m->m_data -= 8;
			m->m_pkthdr.len += 8;
		}
#endif

		send->ec_len = m->m_pkthdr.len;
		/* esh_adjust_mbufs may drop the chain (returns NULL). */
		m = send->ec_cur_mbuf = esh_adjust_mbufs(sc, m);
		if (m == NULL)
			continue;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, send->ec_dma,
					     m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error)
			panic("%s: eshstart: "
			      "bus_dmamap_load_mbuf failed err %d\n",
			      sc->sc_dev.dv_xname, error);
		send->ec_offset = 0;
	}

	/*
	 * If there are no network packets to send, see if there
	 * are any FP packets to send.
	 *
	 * XXX: Some users may disagree with these priorities;
	 * this reduces network latency by increasing FP latency...
	 * Note that it also means that FP packets can get
	 * locked out so that they *never* get sent, if the
	 * network constantly fills up the pipe. Not good!
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL &&
	    BUFQ_PEEK(&send->ec_buf_queue) != NULL) {
		struct buf *bp;

#ifdef ESH_PRINTF
		printf("eshstart: getting a buf from send->ec_queue %p\n",
		       send->ec_queue);
#endif

		/* Packet source 2:  strategy-routine bufs. */
		bp = send->ec_cur_buf = BUFQ_GET(&send->ec_buf_queue);
		send->ec_offset = 0;
		send->ec_len = bp->b_bcount;

		/*
		 * Determine the DMA mapping for the buffer.
		 * If this is too large, what do we do!?
		 */

		error = bus_dmamap_load(sc->sc_dmat, send->ec_dma,
					bp->b_data, bp->b_bcount, bp->b_proc,
					BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (error)
			panic("%s: eshstart: "
			      "bus_dmamap_load failed err %d\n",
			      sc->sc_dev.dv_xname, error);
	}

	/*
	 * If there are no packets from strategy to send, see if there
	 * are any FP packets to send from fpwrite.
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL) {
		struct esh_dmainfo *di;

		/* Packet source 3 (lowest priority):  fpwrite dmainfos,
		 * already DMA-mapped by esh_fpwrite. */
		di = TAILQ_FIRST(&send->ec_di_queue);
		if (di == NULL)
			return;
		TAILQ_REMOVE(&send->ec_di_queue, di, ed_list);

#ifdef ESH_PRINTF
		printf("eshstart: getting a di from send->ec_di_queue %p\n",
		       &send->ec_di_queue);
#endif

		send->ec_cur_dmainfo = di;
		send->ec_offset = 0;
		send->ec_len = di->ed_dma->dm_mapsize;
	}

	/* Nothing to do at all?  Bail out before touching the ring. */
	if (send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL)
		return;

	assert(send->ec_len);
	assert(send->ec_dma->dm_nsegs ||
	       send->ec_cur_dmainfo->ed_dma->dm_nsegs);
	assert(send->ec_cur_mbuf || send->ec_cur_buf || send->ec_cur_dmainfo);

	/* Push the mapped segments into the send ring descriptors. */
	esh_send(sc);
	return;
}
2087
2088
2089 /*
2090 * Put the buffers from the send dmamap into the descriptors and
2091 * send 'em off...
2092 */
2093
static void
esh_send(sc)
	struct esh_softc *sc;
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	u_int start_producer = send->ec_producer;
	bus_dmamap_t dma;

	/*
	 * The data lives either in a user-supplied dmainfo (fpwrite
	 * path) or in the ring's own dmamap (mbuf/buf paths); pick
	 * whichever is active.
	 */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("esh_send: producer %x consumer %x nsegs %d\n",
	       send->ec_producer, send->ec_consumer, dma->dm_nsegs);
#endif

	esh_dma_sync(sc, send->ec_descr, send->ec_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Copy DMA segments into descriptors until we run out of
	 * segments or out of ring slots (producer must never catch
	 * the consumer, hence NEXT_SEND in the condition).  ec_offset
	 * remembers how far through the map we've gotten, so a packet
	 * that doesn't fit can be continued on a later call.
	 */
	while (NEXT_SEND(send->ec_producer) != send->ec_consumer &&
	       send->ec_offset < dma->dm_nsegs) {
		int offset = send->ec_producer;

		send->ec_descr[offset].rd_buffer_addr =
			dma->dm_segs[send->ec_offset].ds_addr;
		send->ec_descr[offset].rd_length =
			dma->dm_segs[send->ec_offset].ds_len;
		send->ec_descr[offset].rd_control = 0;

		if (send->ec_offset == 0) {
			/* Start of the dmamap... */
			send->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		if (send->ec_offset + 1 == dma->dm_nsegs) {
			send->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
		}

		send->ec_offset++;
		send->ec_producer = NEXT_SEND(send->ec_producer);
	}

	/*
	 * XXX: we could optimize the dmamap_sync to just get what we've
	 * just set up, rather than the whole buffer...
	 */

	bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
			BUS_DMASYNC_PREWRITE);
	esh_dma_sync(sc, send->ec_descr,
		     start_producer, send->ec_consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef ESH_PRINTF
	if (send->ec_offset != dma->dm_nsegs)
		printf("eshstart: couldn't fit packet in send ring!\n");
#endif

	/*
	 * Tell the NIC the new producer index:  version-1 firmware
	 * takes a command, version 2 has a register for it.
	 */
	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_SEND_PRODUCER,
			     0, send->ec_producer);
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_SEND_PRODUCER, send->ec_producer);
	}
	return;
}
2166
2167
2168 /*
2169 * Cleanup for the send routine. When the NIC sends us an event to
2170 * let us know that it has consumed our buffers, we need to free the
2171 * buffers, and possibly send another packet.
2172 */
2173
static void
eshstart_cleanup(sc, consumer, error)
	struct esh_softc *sc;
	u_int16_t consumer;
	int error;
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	int start_consumer = send->ec_consumer;
	bus_dmamap_t dma;

	/* Pick the dmamap matching the packet currently in flight. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("eshstart_cleanup: consumer %x, send->consumer %x\n",
	       consumer, send->ec_consumer);
#endif

	esh_dma_sync(sc, send->ec_descr,
		     send->ec_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Walk forward to the consumer index the card reported,
	 * retiring the in-flight packet when we pass its last
	 * (RR_CT_PACKET_END) descriptor.
	 */
	while (send->ec_consumer != consumer) {
		assert(dma->dm_nsegs);
		assert(send->ec_cur_mbuf || send->ec_cur_buf ||
		       send->ec_cur_dmainfo);

		if (send->ec_descr[send->ec_consumer].rd_control &
		    RR_CT_PACKET_END) {
#ifdef ESH_PRINT
			printf("eshstart_cleanup: dmamap_sync mapsize %d\n",
			       send->ec_dma->dm_mapsize);
#endif
			bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma);
			/* Exactly one of the three sources owns the packet;
			 * release it the way it arrived. */
			if (send->ec_cur_mbuf) {
				m_freem(send->ec_cur_mbuf);
				send->ec_cur_mbuf = NULL;
			} else if (send->ec_cur_dmainfo) {
				send->ec_cur_dmainfo->ed_flags &= ~ESH_DI_BUSY;
				send->ec_cur_dmainfo->ed_error =
					(send->ec_error ? send->ec_error : error);
				send->ec_error = 0;
				wakeup((void *) send->ec_cur_dmainfo);
				send->ec_cur_dmainfo = NULL;
			} else if (send->ec_cur_buf) {
				biodone(send->ec_cur_buf);
				send->ec_cur_buf = NULL;
			} else {
				panic("%s: eshstart_cleanup: "
				      "no current mbuf, buf, or dmainfo!\n",
				      sc->sc_dev.dv_xname);
			}

			/*
			 * Version 1 of the firmware sent an event each
			 * time it sent out a packet. Later versions do not
			 * (which results in a considerable speedup), so we
			 * have to keep track here.
			 */

			if (sc->sc_version != 1)
				sc->sc_if.if_opackets++;
		}
		/* Latch the first error so it reaches the packet's owner. */
		if (error != 0)
			send->ec_error = error;

		send->ec_consumer = NEXT_SEND(send->ec_consumer);
	}

	esh_dma_sync(sc, send->ec_descr,
		     start_consumer, consumer,
		     RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Ring space was freed; try to start the next packet. */
	eshstart(&sc->sc_if);
}
2255
2256
2257 /*
2258 * XXX: Ouch: The NIC can only send word-aligned buffers, and only
2259 * the last buffer in the packet can have a length that is not
2260 * a multiple of four!
2261 *
2262 * Here we traverse the packet, pick out the bogus mbufs, and fix 'em
2263 * if possible. The fix is amazingly expensive, so we sure hope that
2264 * this is a rare occurance (it seems to be).
2265 */
2266
static struct mbuf *
esh_adjust_mbufs(sc, m)
	struct esh_softc *sc;
	struct mbuf *m;
{
	struct mbuf *m0, *n, *n0;
	u_int32_t write_len;

	write_len = m->m_pkthdr.len;
#ifdef DIAGNOSTIC
	/* Track the largest write seen, for debugging. */
	if (write_len > max_write_len)
		max_write_len = write_len;
#endif

	/* n walks the chain; n0 trails one mbuf behind it. */
	for (n0 = n = m; n; n = n->m_next) {
		/* Drop zero-length mbufs from the chain entirely. */
		while (n && n->m_len == 0) {
			MFREE(n, m0);
			if (n == m)
				n = n0 = m = m0;
			else
				n = n0->m_next = m0;
		}
		if (n == NULL)
			break;

		/*
		 * A buffer is unacceptable to the NIC if it is not
		 * word-aligned, or if its length is not a multiple of
		 * four and it is not the last mbuf of the packet.
		 */
		if (mtod(n, long) & 3 || (n->m_next && n->m_len & 3)) {
			/* Gotta clean it up */
			struct mbuf *o;
			u_int32_t len;

			sc->sc_misaligned_bufs++;
			MGETHDR(o, M_DONTWAIT, MT_DATA);
			if (!o)
				goto bogosity;

			MCLGET(o, M_DONTWAIT);
			if (!(o->m_flags & M_EXT)) {
				MFREE(o, m0);
				goto bogosity;
			}

			/*
			 * XXX: Copy as much as we can into the
			 * cluster. For now we can't have more
			 * than a cluster in there. May change.
			 * I'd prefer not to get this
			 * down-n-dirty, but we have to be able
			 * to do this kind of funky copy.
			 */

			len = min(MCLBYTES, write_len);
#ifdef DIAGNOSTIC
			assert(n->m_len <= len);
			assert(len <= MCLBYTES);
#endif

			/*
			 * Copy into the aligned cluster, trim the copied
			 * bytes off the front of the old chain, and
			 * splice the cluster in ahead of the remainder.
			 */
			m_copydata(n, 0, len, mtod(o, void *));
			o->m_pkthdr.len = len;
			m_adj(n, len);
			o->m_len = len;
			o->m_next = n;

			if (n == m)
				m = o;
			else
				n0->m_next = o;
			n = o;
		}
		n0 = n;
		write_len -= n->m_len;
	}
	return m;

bogosity:
	/* Cluster allocation failed; toss the whole packet. */
	printf("%s: esh_adjust_mbuf: unable to allocate cluster for "
	       "mbuf %p, len %x\n",
	       sc->sc_dev.dv_xname, mtod(m, void *), m->m_len);
	m_freem(m);
	return NULL;
}
2347
2348
2349 /*
2350 * Read in the current valid entries from the ring and forward
2351 * them to the upper layer protocols. It is possible that we
2352 * haven't received the whole packet yet, in which case we just
2353 * add each of the buffers into the packet until we have the whole
2354 * thing.
2355 */
2356
2357 static void
2358 esh_read_snap_ring(sc, consumer, error)
2359 struct esh_softc *sc;
2360 u_int16_t consumer;
2361 int error;
2362 {
2363 struct ifnet *ifp = &sc->sc_if;
2364 struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
2365 int start_consumer = recv->ec_consumer;
2366 u_int16_t control;
2367
2368 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2369 return;
2370
2371 if (error)
2372 recv->ec_error = error;
2373
2374 esh_dma_sync(sc, recv->ec_descr,
2375 start_consumer, consumer,
2376 RR_SNAP_RECV_RING_SIZE,
2377 sizeof(struct rr_descr), 0,
2378 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2379
2380 while (recv->ec_consumer != consumer) {
2381 u_int16_t offset = recv->ec_consumer;
2382 struct mbuf *m;
2383
2384 m = recv->ec_m[offset];
2385 m->m_len = recv->ec_descr[offset].rd_length;
2386 control = recv->ec_descr[offset].rd_control;
2387 bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, m->m_len,
2388 BUS_DMASYNC_POSTREAD);
2389 bus_dmamap_unload(sc->sc_dmat, recv->ec_dma[offset]);
2390
2391 #ifdef ESH_PRINTF
2392 printf("esh_read_snap_ring: offset %x addr %p len %x flags %x\n",
2393 offset, mtod(m, void *), m->m_len, control);
2394 #endif
2395 if (control & RR_CT_PACKET_START || !recv->ec_cur_mbuf) {
2396 if (recv->ec_cur_pkt) {
2397 m_freem(recv->ec_cur_pkt);
2398 recv->ec_cur_pkt = NULL;
2399 printf("%s: possible skipped packet!\n",
2400 sc->sc_dev.dv_xname);
2401 }
2402 recv->ec_cur_pkt = recv->ec_cur_mbuf = m;
2403 /* allocated buffers all have pkthdrs... */
2404 m->m_pkthdr.rcvif = ifp;
2405 m->m_pkthdr.len = m->m_len;
2406 } else {
2407 if (!recv->ec_cur_pkt)
2408 panic("esh_read_snap_ring: no cur_pkt");
2409
2410 recv->ec_cur_mbuf->m_next = m;
2411 recv->ec_cur_mbuf = m;
2412 recv->ec_cur_pkt->m_pkthdr.len += m->m_len;
2413 }
2414
2415 recv->ec_m[offset] = NULL;
2416 recv->ec_descr[offset].rd_length = 0;
2417 recv->ec_descr[offset].rd_buffer_addr = 0;
2418
2419 /* Note that we can START and END on the same buffer */
2420
2421 if (control & RR_CT_PACKET_END) { /* XXX: RR2_ matches */
2422 m = recv->ec_cur_pkt;
2423 if (!error && !recv->ec_error) {
2424 /*
2425 * We have a complete packet, send it up
2426 * the stack...
2427 */
2428 ifp->if_ipackets++;
2429
2430 #if NBPFILTER > 0
2431 /*
2432 * Check if there's a BPF listener on this
2433 * interface. If so, hand off the raw packet
2434 * to BPF.
2435 */
2436 if (ifp->if_bpf) {
2437 /*
2438 * Incoming packets start with the FP
2439 * data, so no alignment problems
2440 * here...
2441 */
2442 bpf_mtap(ifp->if_bpf, m);
2443 }
2444 #endif
2445 if ((ifp->if_flags & IFF_RUNNING) == 0) {
2446 m_freem(m);
2447 } else {
2448 m = m_pullup(m,
2449 sizeof(struct hippi_header));
2450 (*ifp->if_input)(ifp, m);
2451 }
2452 } else {
2453 ifp->if_ierrors++;
2454 recv->ec_error = 0;
2455 m_freem(m);
2456 }
2457 recv->ec_cur_pkt = recv->ec_cur_mbuf = NULL;
2458 }
2459
2460 recv->ec_descr[offset].rd_control = 0;
2461 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2462 }
2463
2464 esh_dma_sync(sc, recv->ec_descr,
2465 start_consumer, consumer,
2466 RR_SNAP_RECV_RING_SIZE,
2467 sizeof(struct rr_descr), 0,
2468 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2469
2470 esh_fill_snap_ring(sc);
2471 }
2472
2473
2474 /*
2475 * Add the SNAP (IEEE 802) receive ring to the NIC. It is possible
2476 * that we are doing this after resetting the card, in which case
2477 * the structures have already been filled in and we may need to
2478 * resume sending data.
2479 */
2480
2481 static void
2482 esh_init_snap_ring(sc)
2483 struct esh_softc *sc;
2484 {
2485 struct rr_ring_ctl *ring = sc->sc_recv_ring_table + HIPPI_ULP_802;
2486
2487 if ((sc->sc_flags & ESH_FL_CLOSING_SNAP) != 0) {
2488 printf("%s: can't reopen SNAP ring until ring disable is completed\n", sc->sc_dev.dv_xname);
2489 return;
2490 }
2491
2492 if (ring->rr_entry_size == 0) {
2493 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2494 (caddr_t) ring - (caddr_t) sc->sc_dma_addr,
2495 sizeof(*ring),
2496 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2497
2498 ring->rr_ring_addr = sc->sc_snap_recv_ring_dma;
2499 ring->rr_free_bufs = RR_SNAP_RECV_RING_SIZE / 4;
2500 ring->rr_entries = RR_SNAP_RECV_RING_SIZE;
2501 ring->rr_entry_size = sizeof(struct rr_descr);
2502 ring->rr_prod_index = 0;
2503 sc->sc_snap_recv.ec_producer = 0;
2504 sc->sc_snap_recv.ec_consumer = 0;
2505 ring->rr_mode = RR_RR_IP;
2506
2507 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
2508 (caddr_t) ring - (caddr_t) sc->sc_dma_addr,
2509 sizeof(ring),
2510 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2511 esh_send_cmd(sc, RR_CC_ENABLE_RING, HIPPI_ULP_802,
2512 sc->sc_snap_recv.ec_producer);
2513 } else {
2514 printf("%s: snap receive ring already initialized!\n",
2515 sc->sc_dev.dv_xname);
2516 }
2517 }
2518
2519 static void
2520 esh_close_snap_ring(sc)
2521 struct esh_softc *sc;
2522 {
2523 #ifdef ESH_PRINTF
2524 printf("esh_close_snap_ring: starting\n");
2525 #endif
2526
2527 if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0)
2528 return;
2529
2530 sc->sc_flags |= ESH_FL_CLOSING_SNAP;
2531 esh_send_cmd(sc, RR_CC_DISABLE_RING, HIPPI_ULP_802, 0);
2532
2533 /* Disable event will trigger the rest of the cleanup. */
2534 }
2535
2536 /*
2537 * Fill in the snap ring with more mbuf buffers so that we can
2538 * receive traffic.
2539 */
2540
static void
esh_fill_snap_ring(sc)
	struct esh_softc *sc;
{
	struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
	int start_producer = recv->ec_producer;
	int error;

	/* Take ownership of the descriptors we are about to fill. */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Fill every free slot; one slot is always left empty so that
	 * producer == consumer unambiguously means "ring empty".
	 */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;
		struct mbuf *m, *m0;

		/*
		 * One cluster mbuf per slot; on allocation failure just
		 * stop -- a later fill will retry.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (!m)
			break;
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			MFREE(m, m0);
			break;
		}

		error = bus_dmamap_load(sc->sc_dmat, recv->ec_dma[offset],
					mtod(m, void *), MCLBYTES,
					NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: esh_fill_recv_ring: bus_dmamap_load "
			       "failed\toffset %x, error code %d\n",
			       sc->sc_dev.dv_xname, offset, error);
			MFREE(m, m0);
			break;
		}

		/*
		 * In this implementation, we should only see one segment
		 * per DMA.
		 */

		assert(recv->ec_dma[offset]->dm_nsegs == 1);

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring =
			(sc->sc_version == 1) ? HIPPI_ULP_802 : 0;
		recv->ec_descr[offset].rd_buffer_addr =
			recv->ec_dma[offset]->dm_segs->ds_addr;
		recv->ec_descr[offset].rd_length =
			recv->ec_dma[offset]->dm_segs->ds_len;
		recv->ec_descr[offset].rd_control = 0;

		bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, MCLBYTES,
				BUS_DMASYNC_PREREAD);

		recv->ec_m[offset] = m;

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

	/* Hand the freshly filled descriptors over to the NIC. */
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Publish the new producer index: version 1 firmware takes it
	 * via command, later firmware via a register write.
	 */
	if (sc->sc_version == 1)
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, HIPPI_ULP_802,
			     recv->ec_producer);
	else
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_SNAP_RECV_PRODUCER, recv->ec_producer);
}
2619
2620 static void
2621 esh_init_fp_rings(sc)
2622 struct esh_softc *sc;
2623 {
2624 struct esh_fp_ring_ctl *recv;
2625 struct rr_ring_ctl *ring_ctl;
2626 int ulp;
2627
2628 for (ulp = 0; ulp < RR_ULP_COUNT; ulp++) {
2629 ring_ctl = &sc->sc_recv_ring_table[ulp];
2630 recv = sc->sc_fp_recv[ulp];
2631
2632 if (recv == NULL)
2633 continue;
2634
2635 ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr;
2636 ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4;
2637 ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE;
2638 ring_ctl->rr_entry_size = sizeof(struct rr_descr);
2639 ring_ctl->rr_prod_index = 0;
2640 ring_ctl->rr_mode = RR_RR_CHARACTER;
2641 recv->ec_producer = 0;
2642 recv->ec_consumer = 0;
2643 recv->ec_index = -1;
2644
2645 esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer);
2646 }
2647 }
2648
2649 static void
2650 esh_read_fp_ring(sc, consumer, error, ulp)
2651 struct esh_softc *sc;
2652 u_int16_t consumer;
2653 int error;
2654 int ulp;
2655 {
2656 struct esh_fp_ring_ctl *recv = sc->sc_fp_recv[ulp];
2657 int start_consumer = recv->ec_consumer;
2658 u_int16_t control;
2659
2660 #ifdef ESH_PRINTF
2661 printf("esh_read_fp_ring: ulp %d, consumer %d, producer %d, old consumer %d\n",
2662 recv->ec_ulp, consumer, recv->ec_producer, recv->ec_consumer);
2663 #endif
2664 if ((sc->sc_flags & ESH_FL_FP_RING_UP) == 0)
2665 return;
2666
2667 if (error != 0)
2668 recv->ec_error = error;
2669
2670 esh_dma_sync(sc, recv->ec_descr,
2671 start_consumer, consumer,
2672 RR_FP_RECV_RING_SIZE,
2673 sizeof(struct rr_descr), 0,
2674 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2675
2676 while (recv->ec_consumer != consumer) {
2677 u_int16_t offset = recv->ec_consumer;
2678
2679 control = recv->ec_descr[offset].rd_control;
2680
2681 if (control & RR_CT_PACKET_START) {
2682 if (recv->ec_read_len) {
2683 recv->ec_error = 0;
2684 printf("%s: ulp %d: possible skipped FP packet!\n",
2685 sc->sc_dev.dv_xname, recv->ec_ulp);
2686 }
2687 recv->ec_seen_end = 0;
2688 recv->ec_read_len = 0;
2689 }
2690 if (recv->ec_seen_end == 0)
2691 recv->ec_read_len += recv->ec_descr[offset].rd_length;
2692
2693 #if NOT_LAME
2694 recv->ec_descr[offset].rd_length = 0;
2695 recv->ec_descr[offset].rd_buffer_addr = 0;
2696 #endif
2697
2698 #ifdef ESH_PRINTF
2699 printf("esh_read_fp_ring: offset %d addr %d len %d flags %x, total %d\n",
2700 offset, recv->ec_descr[offset].rd_buffer_addr,
2701 recv->ec_descr[offset].rd_length, control, recv->ec_read_len);
2702 #endif
2703 /* Note that we can START and END on the same buffer */
2704
2705 if ((control & RR_CT_PACKET_END) == RR_CT_PACKET_END) {
2706 if (recv->ec_dmainfo[offset] != NULL) {
2707 struct esh_dmainfo *di =
2708 recv->ec_dmainfo[offset];
2709
2710 recv->ec_dmainfo[offset] = NULL;
2711 bus_dmamap_sync(sc->sc_dmat, di->ed_dma,
2712 0, recv->ec_read_len,
2713 BUS_DMASYNC_POSTREAD);
2714 bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
2715
2716 if (!error && !recv->ec_error) {
2717 /*
2718 * XXX: we oughta do this right, with full
2719 * BPF support and the rest...
2720 */
2721 if (di->ed_buf != NULL) {
2722 di->ed_buf->b_resid =
2723 di->ed_buf->b_bcount -
2724 recv->ec_read_len;
2725 } else {
2726 di->ed_read_len =
2727 recv->ec_read_len;
2728 }
2729 } else {
2730 if (di->ed_buf != NULL) {
2731 di->ed_buf->b_resid =
2732 di->ed_buf->b_bcount;
2733 di->ed_buf->b_error = EIO;
2734 di->ed_buf->b_flags |= B_ERROR;
2735 } else {
2736 di->ed_error = EIO;
2737 recv->ec_error = 0;
2738 }
2739 }
2740
2741 #ifdef ESH_PRINTF
2742 printf("esh_read_fp_ring: ulp %d, read %d, resid %ld\n",
2743 recv->ec_ulp, recv->ec_read_len, (di->ed_buf ? di->ed_buf->b_resid : di->ed_read_len));
2744 #endif
2745 di->ed_flags &=
2746 ~(ESH_DI_BUSY | ESH_DI_READING);
2747 if (di->ed_buf != NULL)
2748 biodone(di->ed_buf);
2749 else
2750 wakeup((void *) di);
2751 recv->ec_read_len = 0;
2752 } else {
2753 #ifdef ESH_PRINTF
2754 printf("esh_read_fp_ring: ulp %d, seen end at %d\n",
2755 recv->ec_ulp, offset);
2756 #endif
2757 recv->ec_seen_end = 1;
2758 }
2759 }
2760
2761 #if NOT_LAME
2762 recv->ec_descr[offset].rd_control = 0;
2763 #endif
2764 recv->ec_consumer = NEXT_RECV(recv->ec_consumer);
2765 }
2766
2767 esh_dma_sync(sc, recv->ec_descr,
2768 start_consumer, consumer,
2769 RR_SNAP_RECV_RING_SIZE,
2770 sizeof(struct rr_descr), 0,
2771 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2772
2773 esh_fill_fp_ring(sc, recv);
2774 }
2775
2776
static void
esh_fill_fp_ring(sc, recv)
	struct esh_softc *sc;
	struct esh_fp_ring_ctl *recv;
{
	struct esh_dmainfo *di = recv->ec_cur_dmainfo;
	int start_producer = recv->ec_producer;

#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, di %p, producer %d\n",
		recv->ec_ulp, di, start_producer);
#endif

	/*
	 * NOTE(review): this is an FP ring, but both syncs in this
	 * function are sized with RR_SNAP_RECV_RING_SIZE rather than
	 * RR_FP_RECV_RING_SIZE -- confirm the two ring sizes match,
	 * otherwise this syncs the wrong extent.
	 */
	esh_dma_sync(sc, recv->ec_descr,
		     recv->ec_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Fill free slots from the queue of pending read requests. */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;

		if (di == NULL) {
			/*
			 * Must allow only one reader at a time; see
			 * esh_flush_fp_ring().
			 */

			if (offset != start_producer)
				goto fp_fill_done;

			di = TAILQ_FIRST(&recv->ec_queue);
			if (di == NULL)
				goto fp_fill_done;
			TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = di;
			di->ed_flags |= ESH_DI_READING;
#ifdef ESH_PRINTF
			printf("\toffset %d nsegs %d\n",
			       recv->ec_offset, di->ed_dma->dm_nsegs);
#endif
		}

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring = 0;
		recv->ec_descr[offset].rd_buffer_addr =
			di->ed_dma->dm_segs[recv->ec_offset].ds_addr;
		recv->ec_descr[offset].rd_length =
			di->ed_dma->dm_segs[recv->ec_offset].ds_len;
		recv->ec_descr[offset].rd_control = 0;
		recv->ec_dmainfo[offset] = NULL;

		if (recv->ec_offset == 0) {
			/* Start of the dmamap... */
			recv->ec_descr[offset].rd_control |=
				RR_CT_PACKET_START;
		}

		assert(recv->ec_offset < di->ed_dma->dm_nsegs);

		recv->ec_offset++;
		if (recv->ec_offset == di->ed_dma->dm_nsegs) {
			/*
			 * Last segment of this reader's map: mark the
			 * END and stash the dmainfo so esh_read_fp_ring
			 * can complete the request at this slot.
			 */
			recv->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
			recv->ec_dmainfo[offset] = di;
			di = NULL;
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = NULL;
		}

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

fp_fill_done:
	/* See the ring-size NOTE(review) above; same question here. */
	esh_dma_sync(sc, recv->ec_descr,
		     start_producer, recv->ec_consumer,
		     RR_SNAP_RECV_RING_SIZE,
		     sizeof(struct rr_descr), 1,
		     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);


	/*
	 * Publish the new producer index.  Post-v1 firmware packs four
	 * ring producer indices into each 32-bit register, so update
	 * the whole group of four this ring belongs to.
	 */
	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, recv->ec_ulp,
			     recv->ec_producer);
	} else {
		union {
			u_int32_t producer;
			u_int8_t indices[4];
		} v;
		int which;
		int i;
		struct esh_fp_ring_ctl *r;

		which = (recv->ec_index / 4) * 4;
#if BAD_PRODUCER
		v.producer = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					      RR_RECVS_PRODUCER + which);
		NTOHL(v.producer);
#endif
		for (i = 0; i < 4; i++) {
			r = sc->sc_fp_recv_index[i + which];
			if (r != NULL)
				v.indices[i] = r->ec_producer;
			else
				v.indices[i] = 0;
		}
#ifdef ESH_PRINTF
		printf("esh_fill_fp_ring:  ulp %d, updating producer %d:  %.8x\n",
			recv->ec_ulp, which, v.producer);
#endif
		HTONL(v.producer);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_RECVS_PRODUCER + which, v.producer);
	}
#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring:  ulp %d, final producer %d\n",
		recv->ec_ulp, recv->ec_producer);
#endif
}
2899
2900 /*
2901 * When a read is interrupted, we need to flush the buffers out of
2902 * the ring; otherwise, a driver error could lock a process up,
2903 * with no way to exit.
2904 */
2905
2906 static void
2907 esh_flush_fp_ring(sc, recv, di)
2908 struct esh_softc *sc;
2909 struct esh_fp_ring_ctl *recv;
2910 struct esh_dmainfo *di;
2911 {
2912 int error = 0;
2913
2914 /*
2915 * If the read request hasn't yet made it to the top of the queue,
2916 * just remove it from the queue, and return.
2917 */
2918
2919 if ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING) {
2920 TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
2921 return;
2922 }
2923
2924 #ifdef ESH_PRINTF
2925 printf("esh_flush_fp_ring: di->ed_flags %x, ulp %d, producer %x\n",
2926 di->ed_flags, recv->ec_ulp, recv->ec_producer);
2927 #endif
2928
2929 /* Now we gotta get tough. Issue a discard packet command */
2930
2931 esh_send_cmd(sc, RR_CC_DISCARD_PKT, recv->ec_ulp,
2932 recv->ec_producer - 1);
2933
2934 /* Wait for it to finish */
2935
2936 while ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING &&
2937 error == 0) {
2938 error = tsleep((void *) &di->ed_flags, PRIBIO,
2939 "esh_flush_fp_ring", hz);
2940 printf("esh_flush_fp_ring: di->ed_flags %x, error %d\n",
2941 di->ed_flags, error);
2942 /*
2943 * What do I do if this times out or gets interrupted?
2944 * Reset the card? I could get an interrupt before
2945 * giving it a chance to check. Perhaps I oughta wait
2946 * awhile? What about not giving the user a chance
2947 * to interrupt, and just expecting a quick answer?
2948 * That way I could reset the card if it doesn't
2949 * come back right away!
2950 */
2951 if (error != 0) {
2952 eshreset(sc);
2953 break;
2954 }
2955 }
2956
2957 /* XXX: Do we need to clear out the dmainfo pointers */
2958 }
2959
2960
2961 int
2962 eshioctl(ifp, cmd, data)
2963 struct ifnet *ifp;
2964 u_long cmd;
2965 caddr_t data;
2966 {
2967 int error = 0;
2968 struct esh_softc *sc = ifp->if_softc;
2969 struct ifaddr *ifa = (struct ifaddr *)data;
2970 struct ifdrv *ifd = (struct ifdrv *) data;
2971 u_long len;
2972 int s;
2973
2974 s = splnet();
2975
2976 while (sc->sc_flags & ESH_FL_EEPROM_BUSY) {
2977 error = tsleep((void *)&sc->sc_flags, PCATCH | PRIBIO,
2978 "esheeprom", 0);
2979 if (error != 0)
2980 goto ioctl_done;
2981 }
2982
2983 switch (cmd) {
2984
2985 case SIOCSIFADDR:
2986 ifp->if_flags |= IFF_UP;
2987 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
2988 eshinit(sc);
2989 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
2990 error = EIO;
2991 goto ioctl_done;
2992 }
2993 }
2994
2995 if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP))
2996 == ESH_FL_RUNCODE_UP) {
2997 while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
2998 error = tsleep((void *) &sc->sc_snap_recv,
2999 PRIBIO, "esh_closing_fp_ring",
3000 hz);
3001 if (error != 0)
3002 goto ioctl_done;
3003 }
3004 esh_init_snap_ring(sc);
3005 }
3006
3007 switch (ifa->ifa_addr->sa_family) {
3008 #ifdef INET
3009 case AF_INET:
3010 /* The driver doesn't really care about IP addresses */
3011 break;
3012 #endif
3013 #ifdef NS
3014 case AF_NS:
3015 {
3016 struct ns_addr *ina =
3017 &IA_SNS(ifa)->sns_addr;
3018
3019 if (ns_nullhost(*ina))
3020 ina->x_host = *(union ns_host *)
3021 LLADDR(ifp->if_sadl);
3022 else
3023 memcpy(LLADDR(ifp->if_sadl),
3024 ina->x_host.c_host, ifp->if_addrlen);
3025 /* Set new address. */
3026 eshinit(sc);
3027 break;
3028 }
3029 #endif
3030 default:
3031 break;
3032 }
3033 break;
3034
3035 case SIOCSIFFLAGS:
3036 if ((ifp->if_flags & IFF_UP) == 0 &&
3037 (ifp->if_flags & IFF_RUNNING) != 0) {
3038 /*
3039 * If interface is marked down and it is running, then
3040 * stop it.
3041 */
3042
3043 ifp->if_flags &= ~IFF_RUNNING;
3044 esh_close_snap_ring(sc);
3045 while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
3046 error = tsleep((void *) &sc->sc_snap_recv,
3047 PRIBIO, "esh_closing_fp_ring",
3048 hz);
3049 if (error != 0)
3050 goto ioctl_done;
3051 }
3052
3053 } else if ((ifp->if_flags & IFF_UP) != 0 &&
3054 (ifp->if_flags & IFF_RUNNING) == 0) {
3055
3056 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
3057 eshinit(sc);
3058 if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) {
3059 error = EIO;
3060 goto ioctl_done;
3061 }
3062 }
3063
3064 if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) == ESH_FL_RUNCODE_UP) {
3065 while (sc->sc_flags & ESH_FL_CLOSING_SNAP) {
3066 error = tsleep((void *) &sc->sc_snap_recv, PRIBIO, "esh_closing_fp_ring", hz);
3067 if (error != 0)
3068 goto ioctl_done;
3069 }
3070 esh_init_snap_ring(sc);
3071 }
3072 }
3073 break;
3074
3075 case SIOCSDRVSPEC: /* Driver-specific configuration calls */
3076 cmd = ifd->ifd_cmd;
3077 len = ifd->ifd_len;
3078 data = ifd->ifd_data;
3079
3080 esh_generic_ioctl(sc, cmd, data, len, NULL);
3081 break;
3082
3083 default:
3084 error = EINVAL;
3085 break;
3086 }
3087
3088 ioctl_done:
3089 splx(s);
3090 return (error);
3091 }
3092
3093
static int
esh_generic_ioctl(struct esh_softc *sc, u_long cmd, caddr_t data,
		  u_long len, struct proc *p)
{
	struct ifnet *ifp = &sc->sc_if;
	struct rr_eeprom rr_eeprom;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t address;
	u_int32_t value;
	u_int32_t offset;
	u_int32_t length;
	int error = 0;
	int i;

	/*
	 * If we have a proc pointer, check to make sure that the
	 * user is privileged before performing any destruction operations.
	 */

	if (p != NULL) {
		switch (cmd) {
		case EIOCGTUNE:
		case EIOCGEEPROM:
		case EIOCGSTATS:
			/* Read-only operations are unprivileged. */
			break;

		default:
			error = suser(p->p_ucred, &p->p_acflag);
			if (error)
				return (error);
		}
	}

	switch (cmd) {
	case EIOCGTUNE:
		/* Get tuning parameters. */
		if (len != sizeof(struct rr_tuning))
			error = EMSGSIZE;
		else {
			error = copyout((caddr_t) &sc->sc_tune, data,
					sizeof(struct rr_tuning));
		}
		break;

	case EIOCSTUNE:
		/* Set tuning parameters; only while the NIC is down. */
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (len != sizeof(struct rr_tuning)) {
				error = EMSGSIZE;
			} else {
				error = copyin(data, (caddr_t) &sc->sc_tune,
					       sizeof(struct rr_tuning));
			}
		} else {
			error = EBUSY;
		}
		break;

	case EIOCGSTATS:
		/* Copy out the NIC's statistics block. */
		if (len != sizeof(struct rr_stats))
			error = EMSGSIZE;
		else
			error = copyout((caddr_t) &sc->sc_gen_info->ri_stats,
					data, sizeof(struct rr_stats));
		break;

	case EIOCGEEPROM:
	case EIOCSEEPROM:
		/* EEPROM access requires the interface to be down. */
		if ((ifp->if_flags & IFF_UP) != 0) {
			error = EBUSY;
			break;
		}

		if (len != sizeof(struct rr_eeprom)) {
			error = EMSGSIZE;
			break;
		}

		error = copyin(data, (caddr_t) &rr_eeprom, sizeof(rr_eeprom));
		if (error != 0)
			break;

		offset = rr_eeprom.ifr_offset;
		length = rr_eeprom.ifr_length;

		/* Validate the requested range and alignment. */
		if (length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFBIG;
			break;
		}

		if (offset + length > RR_EE_MAX_LEN * sizeof(u_int32_t)) {
			error = EFAULT;
			break;
		}

		if (offset % 4 || length % 4) {
			error = EIO;
			break;
		}

		/* Halt the processor (preserve NO_SWAP, if set) */

		misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
		bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
				  (misc_host_ctl & RR_MH_NO_SWAP) |
				  RR_MH_HALT_PROC);

		/* Make the EEPROM accessible */

		misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
		value = misc_local_ctl &
			~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | RR_LC_PARITY_ON);
		if (cmd == EIOCSEEPROM)   /* make writable! */
			value |= RR_LC_WRITE_PROM;
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, value);

		if (cmd == EIOCSEEPROM) {
			printf("%s:  writing EEPROM\n", sc->sc_dev.dv_xname);
			sc->sc_flags |= ESH_FL_EEPROM_BUSY;
		}

		/* Do that EEPROM voodoo that you do so well... */

		address = offset * RR_EE_BYTE_LEN;
		for (i = 0; i < length; i += 4) {
			if (cmd == EIOCGEEPROM) {
				value = esh_read_eeprom(sc, address);
				address += RR_EE_WORD_LEN;
				if (copyout(&value,
					    (caddr_t) rr_eeprom.ifr_buffer + i,
					    sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
			} else {
				if (copyin((caddr_t) rr_eeprom.ifr_buffer + i,
					   &value, sizeof(u_int32_t)) != 0) {
					error = EFAULT;
					break;
				}
				if (esh_write_eeprom(sc, address,
						     value) != 0) {
					error = EIO;
					break;
				}

				/*
				 * Have to give up control now and
				 * then, so sleep for a clock tick.
				 * Might be good to figure out how
				 * long a tick is, so that we could
				 * intelligently chose the frequency
				 * of these pauses.
				 */

				if (i % 40 == 0) {
					tsleep((void *)&sc->sc_flags,
					       PRIBIO, "eshweeprom", 1);
				}

				address += RR_EE_WORD_LEN;
			}
		}

		/* Restore the NIC's register state and wake any waiters. */
		bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);
		if (cmd == EIOCSEEPROM) {
			sc->sc_flags &= ~ESH_FL_EEPROM_BUSY;
			wakeup((void *)&sc->sc_flags);
			printf("%s:  done writing EEPROM\n",
			       sc->sc_dev.dv_xname);
		}
		break;

	case EIOCRESET:
		eshreset(sc);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
3279
3280
/*
 * Full reset:  stop the NIC and reinitialize it, with the network
 * interrupt level blocked for the duration.
 */
void
eshreset(struct esh_softc *sc)
{
	int spl;

	spl = splnet();
	eshstop(sc);
	eshinit(sc);
	splx(spl);
}
3292
3293 /*
3294 * The NIC expects a watchdog command every 10 seconds. If it doesn't
3295 * get the watchdog, it figures the host is dead and stops. When it does
3296 * get the command, it'll generate a watchdog event to let the host know
3297 * that it is still alive. We watch for this.
3298 */
3299
3300 void
3301 eshwatchdog(ifp)
3302 struct ifnet *ifp;
3303 {
3304 struct esh_softc *sc = ifp->if_softc;
3305
3306 if (!sc->sc_watchdog) {
3307 printf("%s: watchdog timer expired. "
3308 "Should reset interface!\n",
3309 sc->sc_dev.dv_xname);
3310 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3311 eshstatus(sc);
3312 #if 0
3313 eshstop(sc); /* DON'T DO THIS, it'll clear data we
3314 could use to debug it! */
3315 #endif
3316 } else {
3317 sc->sc_watchdog = 0;
3318
3319 esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
3320 ifp->if_timer = 5;
3321 }
3322 }
3323
3324
3325 /*
3326 * Stop the NIC and throw away packets that have started to be sent,
3327 * but didn't make it all the way. Re-adjust the various queue
3328 * pointers to account for this.
3329 */
3330
void
eshstop(sc)
	struct esh_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	int i;

	/* Nothing to stop if the card was never brought up. */
	if (!(sc->sc_flags & ESH_FL_INITIALIZED))
		return;

	/* Just shut it all down.  This isn't pretty, but it works */

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the on-board processor (preserve NO_SWAP, if set). */
	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
	sc->sc_flags = 0;
	ifp->if_timer = 0;  /* turn off watchdog timer */

	/* Free every mbuf still parked in the SNAP receive ring. */
	while (sc->sc_snap_recv.ec_consumer
	       != sc->sc_snap_recv.ec_producer) {
		struct mbuf *m0;
		u_int16_t offset = sc->sc_snap_recv.ec_consumer;

		bus_dmamap_unload(sc->sc_dmat,
				  sc->sc_snap_recv.ec_dma[offset]);
		MFREE(sc->sc_snap_recv.ec_m[offset], m0);
		sc->sc_snap_recv.ec_m[offset] = NULL;
		sc->sc_snap_recv.ec_consumer =
			NEXT_RECV(sc->sc_snap_recv.ec_consumer);
		wakeup((void *) &sc->sc_snap_recv);
	}

	/* Handle FP rings */

	for (i = 0; i < RR_ULP_COUNT; i++) {
		struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[i];
		struct esh_dmainfo *di = NULL;

		if (ring == NULL)
			continue;

		/* Get rid of outstanding buffers */

		esh_dma_sync(sc, ring->ec_descr,
			     ring->ec_consumer, ring->ec_producer,
			     RR_FP_RECV_RING_SIZE, sizeof(struct rr_descr), 0,
			     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Find the dmainfo of the read in progress, if any. */
		while (ring->ec_consumer != ring->ec_producer) {
			di = ring->ec_dmainfo[ring->ec_consumer];
			if (di != NULL)
				break;
			ring->ec_consumer = NEXT_RECV(ring->ec_consumer);
		}
		if (di == NULL)
			di = ring->ec_cur_dmainfo;

		/* Fail the pending read and wake its sleeper(s). */
		if (di != NULL) {
			bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
			di->ed_error = EIO;
			di->ed_flags = 0;
			wakeup((void *) &di->ed_flags);	/* packet discard */
			wakeup((void *) di);		/* wait on read */
		}
		wakeup((void *) &ring->ec_ulp);		/* ring create */
		wakeup((void *) &ring->ec_index);	/* ring disable */
	}

	/* XXX: doesn't clear bufs being sent */

	/* Fail whatever transmit was in flight, by its backing type. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_send.ec_dma);
	if (sc->sc_send.ec_cur_mbuf) {
		m_freem(sc->sc_send.ec_cur_mbuf);
	} else if (sc->sc_send.ec_cur_buf) {
		struct buf *bp = sc->sc_send.ec_cur_buf;

		bp->b_resid = bp->b_bcount;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
	} else if (sc->sc_send.ec_cur_dmainfo) {
		struct esh_dmainfo *di = sc->sc_send.ec_cur_dmainfo;

		di->ed_flags &= ~ESH_DI_BUSY;
		di->ed_error = EIO;
		wakeup((void *) di);
	}
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;
	sc->sc_send.ec_cur_dmainfo = NULL;

	/*
	 * Clear out the index values, since they'll be useless
	 * when we restart.
	 */

	memset(sc->sc_fp_recv_index, 0,
	       sizeof(struct esh_fp_ring_ctl *) * RR_MAX_RECV_RING);

	/* Be sure to wake up any other processes waiting on driver action. */

	wakeup((void *) sc);		/* Wait on initialization */
	wakeup((void *) &sc->sc_flags);	/* Wait on EEPROM write */

	/*
	 * XXX: I have to come up with a way to avoid handling interrupts
	 *      received before this shuts down the card, but processed
	 *      afterwards!
	 */
}
3447
3448 /*
3449 * Read a value from the eeprom. This expects that the NIC has already
3450 * been tweaked to put it into the right state for reading from the
3451 * EEPROM -- the HALT bit is set in the MISC_HOST_CTL register,
3452 * and the FAST_PROM, ADD_SRAM, and PARITY flags have been cleared
3453 * in the MISC_LOCAL_CTL register.
3454 *
3455 * The EEPROM layout is a little weird. There is a valid byte every
3456 * eight bytes. Words are then smeared out over 32 bytes.
3457 * All addresses listed here are the actual starting addresses.
3458 */
3459
3460 static u_int32_t
3461 esh_read_eeprom(sc, addr)
3462 struct esh_softc *sc;
3463 u_int32_t addr;
3464 {
3465 int i;
3466 u_int32_t tmp;
3467 u_int32_t value = 0;
3468
3469 /* If the offset hasn't been added, add it. Otherwise pass through */
3470
3471 if (!(addr & RR_EE_OFFSET))
3472 addr += RR_EE_OFFSET;
3473
3474 for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
3475 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
3476 RR_WINDOW_BASE, addr);
3477 tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
3478 RR_WINDOW_DATA);
3479 value = (value << 8) | ((tmp >> 24) & 0xff);
3480 }
3481 return value;
3482 }
3483
3484
3485 /*
3486 * Write a value to the eeprom. Just like esh_read_eeprom, this routine
3487 * expects that the NIC has already been tweaked to put it into the right
3488 * state for reading from the EEPROM. Things are further complicated
3489 * in that we need to read each byte after we write it to ensure that
3490 * the new value has been successfully written. It can take as long
3491 * as 1ms (!) to write a byte.
3492 */
3493
static int
esh_write_eeprom(sc, addr, value)
	struct esh_softc *sc;
	u_int32_t addr;
	u_int32_t value;
{
	int i, j;
	u_int32_t shifted_value, tmp = 0;

	/* If the offset hasn't been added, add it.  Otherwise pass through */

	if (!(addr & RR_EE_OFFSET))
		addr += RR_EE_OFFSET;

	/* Write one byte per EEPROM word, most significant byte first. */

	for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    RR_WINDOW_BASE, addr);

		/*
		 * Get the byte out of value, starting with the top, and
		 * put it into the top byte of the word to write.
		 */

		shifted_value = ((value >> ((3 - i) * 8)) & 0xff) << 24;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, RR_WINDOW_DATA,
		    shifted_value);

		/*
		 * Poll until the read-back matches what we wrote, to
		 * confirm the EEPROM actually committed the byte.
		 */

		for (j = 0; j < 50; j++) {
			tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			    RR_WINDOW_DATA);
			if (tmp == shifted_value)
				break;
			delay(500);	/* 500us poll * 50 tries = 25ms max per byte */
		}

		/* Read-back never matched:  the write failed. */
		if (tmp != shifted_value)
			return -1;
	}

	return 0;
}
3533
3534
3535 /*
3536 * Send a command to the NIC. If there is no room in the command ring,
3537 * panic.
3538 */
3539
static void
esh_send_cmd(sc, cmd, ring, index)
	struct esh_softc *sc;
	u_int8_t cmd;
	u_int8_t ring;
	u_int8_t index;
{
	union rr_cmd c;

	/*
	 * Advance the producer downward through the 16-entry command
	 * ring:  (i + 0x10 - 1) & 0x0f == (i - 1) mod 16.
	 */
#define NEXT_CMD(i) (((i) + 0x10 - 1) & 0x0f)

	/* Pack command code, ring number, and ring index into one word. */

	c.l = 0;
	c.b.rc_code = cmd;
	c.b.rc_ring = ring;
	c.b.rc_index = index;

	/* Post the command in the producer's slot of the on-NIC ring. */

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    RR_COMMAND_RING + sizeof(c) * sc->sc_cmd_producer,
	    c.l);

#ifdef ESH_PRINTF
	/* avoid annoying messages when possible */
	if (cmd != RR_CC_WATCHDOG)
		printf("esh_send_cmd: cmd %x ring %d index %d slot %x\n",
		    cmd, ring, index, sc->sc_cmd_producer);
#endif

	/*
	 * NOTE(review): despite the header comment, there is no
	 * ring-full check (and no panic) here -- confirm the hardware
	 * consumes commands fast enough that overrun cannot happen.
	 */

	sc->sc_cmd_producer = NEXT_CMD(sc->sc_cmd_producer);
}
3569
3570
3571 /*
3572 * Write an address to the device.
3573 * XXX: This belongs in bus-dependent land!
3574 */
3575
3576 static void
3577 esh_write_addr(iot, ioh, addr, value)
3578 bus_space_tag_t iot;
3579 bus_space_handle_t ioh;
3580 bus_addr_t addr;
3581 bus_addr_t value;
3582 {
3583 bus_space_write_4(iot, ioh, addr, 0);
3584 bus_space_write_4(iot, ioh, addr + sizeof(u_int32_t), value);
3585 }
3586
3587
3588 /* Copy the RunCode from EEPROM to SRAM. Ughly. */
3589
3590 static void
3591 esh_reset_runcode(sc)
3592 struct esh_softc *sc;
3593 {
3594 bus_space_tag_t iot = sc->sc_iot;
3595 bus_space_handle_t ioh = sc->sc_ioh;
3596 u_int32_t value;
3597 u_int32_t len;
3598 u_int32_t i;
3599 u_int32_t segments;
3600 u_int32_t ee_addr;
3601 u_int32_t rc_addr;
3602 u_int32_t sram_addr;
3603
3604 /* Zero the SRAM */
3605
3606 for (i = 0; i < sc->sc_sram_size; i += 4) {
3607 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, i);
3608 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, 0);
3609 }
3610
3611 /* Find the address of the segment description section */
3612
3613 rc_addr = esh_read_eeprom(sc, RR_EE_RUNCODE_SEGMENTS);
3614 segments = esh_read_eeprom(sc, rc_addr);
3615
3616 for (i = 0; i < segments; i++) {
3617 rc_addr += RR_EE_WORD_LEN;
3618 sram_addr = esh_read_eeprom(sc, rc_addr);
3619 rc_addr += RR_EE_WORD_LEN;
3620 len = esh_read_eeprom(sc, rc_addr);
3621 rc_addr += RR_EE_WORD_LEN;
3622 ee_addr = esh_read_eeprom(sc, rc_addr);
3623
3624 while (len--) {
3625 value = esh_read_eeprom(sc, ee_addr);
3626 bus_space_write_4(iot, ioh, RR_WINDOW_BASE, sram_addr);
3627 bus_space_write_4(iot, ioh, RR_WINDOW_DATA, value);
3628
3629 ee_addr += RR_EE_WORD_LEN;
3630 sram_addr += 4;
3631 }
3632 }
3633 }
3634
3635
3636 /*
3637 * Perform bus DMA syncing operations on various rings.
3638 * We have to worry about our relative position in the ring,
3639 * and whether the ring has wrapped. All of this code should take
3640 * care of those worries.
3641 */
3642
3643 static void
3644 esh_dma_sync(sc, mem, start, end, entries, size, do_equal, ops)
3645 struct esh_softc *sc;
3646 void *mem;
3647 int start;
3648 int end;
3649 int size;
3650 int do_equal;
3651 int ops;
3652 {
3653 int offset = (char *)mem - (char *)sc->sc_dma_addr;
3654
3655 if (start < end) {
3656 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3657 offset + start * size,
3658 (end - start) * size, ops);
3659 } else if (do_equal || start != end) {
3660 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3661 offset,
3662 end * size, ops);
3663 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
3664 offset + start * size,
3665 (entries - start) * size, ops);
3666 }
3667 }
3668
3669
3670 static struct esh_dmainfo *
3671 esh_new_dmainfo(sc)
3672 struct esh_softc *sc;
3673 {
3674 struct esh_dmainfo *di;
3675 int s;
3676
3677 s = splnet();
3678
3679 di = TAILQ_FIRST(&sc->sc_dmainfo_freelist);
3680 if (di != NULL) {
3681 TAILQ_REMOVE(&sc->sc_dmainfo_freelist, di, ed_list);
3682 sc->sc_dmainfo_freelist_count--;
3683 splx(s);
3684 return di;
3685 }
3686
3687 /* None sitting around, so build one now... */
3688
3689 di = (struct esh_dmainfo *) malloc(sizeof(*di), M_DEVBUF,
3690 M_WAITOK|M_ZERO);
3691 assert(di != NULL);
3692
3693 if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX,
3694 ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY,
3695 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3696 &di->ed_dma)) {
3697 printf("%s: failed dmainfo bus_dmamap_create\n",
3698 sc->sc_dev.dv_xname);
3699 free(di, M_DEVBUF);
3700 di = NULL;
3701 }
3702
3703 splx(s);
3704 return di;
3705 }
3706
3707 static void
3708 esh_free_dmainfo(sc, di)
3709 struct esh_softc *sc;
3710 struct esh_dmainfo *di;
3711 {
3712 int s = splnet();
3713
3714 assert(di != NULL);
3715 di->ed_buf = NULL;
3716 TAILQ_INSERT_TAIL(&sc->sc_dmainfo_freelist, di, ed_list);
3717 sc->sc_dmainfo_freelist_count++;
3718 #ifdef ESH_PRINTF
3719 printf("esh_free_dmainfo: freelist count %d\n", sc->sc_dmainfo_freelist_count);
3720 #endif
3721
3722 splx(s);
3723 }
3724
3725
3726 /* ------------------------- debugging functions --------------------------- */
3727
3728 /*
3729 * Print out status information about the NIC and the driver.
3730 */
3731
3732 static int
3733 eshstatus(sc)
3734 struct esh_softc *sc;
3735 {
3736 bus_space_tag_t iot = sc->sc_iot;
3737 bus_space_handle_t ioh = sc->sc_ioh;
3738 int i;
3739
3740 /* XXX: This looks pathetic, and should be improved! */
3741
3742 printf("%s: status -- fail1 %x fail2 %x\n",
3743 sc->sc_dev.dv_xname,
3744 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL1),
3745 bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL2));
3746 printf("\tmisc host ctl %x misc local ctl %x\n",
3747 bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL),
3748 bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL));
3749 printf("\toperating mode %x event producer %x\n",
3750 bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS),
3751 bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER));
3752 printf("\tPC %x max rings %x\n",
3753 bus_space_read_4(iot, ioh, RR_PROC_PC),
3754 bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS));
3755 printf("\tHIPPI tx state %x rx state %x\n",
3756 bus_space_read_4(iot, ioh, RR_TX_STATE),
3757 bus_space_read_4(iot, ioh, RR_RX_STATE));
3758 printf("\tDMA write state %x read state %x\n",
3759 bus_space_read_4(iot, ioh, RR_DMA_WRITE_STATE),
3760 bus_space_read_4(iot, ioh, RR_DMA_READ_STATE));
3761 printf("\tDMA write addr %x%x read addr %x%x\n",
3762 bus_space_read_4(iot, ioh, RR_WRITE_HOST),
3763 bus_space_read_4(iot, ioh, RR_WRITE_HOST + 4),
3764 bus_space_read_4(iot, ioh, RR_READ_HOST),
3765 bus_space_read_4(iot, ioh, RR_READ_HOST + 4));
3766
3767 for (i = 0; i < 64; i++)
3768 if (sc->sc_gen_info->ri_stats.rs_stats[i])
3769 printf("stat %x is %x\n", i * 4,
3770 sc->sc_gen_info->ri_stats.rs_stats[i]);
3771
3772 return 0;
3773 }
3774
3775
3776 #ifdef ESH_PRINTF
3777
3778 /* Check to make sure that the NIC is still running */
3779
3780 static int
3781 esh_check(sc)
3782 struct esh_softc *sc;
3783 {
3784 bus_space_tag_t iot = sc->sc_iot;
3785 bus_space_handle_t ioh = sc->sc_ioh;
3786
3787 if (bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL) & RR_MH_HALT_PROC) {
3788 printf("esh_check: NIC stopped\n");
3789 eshstatus(sc);
3790 return 1;
3791 } else {
3792 return 0;
3793 }
3794 }
3795 #endif
3796
Cache object: e46bfeb25edf46aa9ba898aa8de33014
|