FreeBSD/Linux Kernel Cross Reference
sys/dev/pdq/pdq.c
1 /* $NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $ */
2
3 /*-
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
27 *
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/5.2/sys/dev/pdq/pdq.c 119418 2003-08-24 17:55:58Z obrien $");
32
33 /*
34 * DEC PDQ FDDI Controller O/S independent code
35 *
36 * This module should work any on PDQ based board. Note that changes for
37 * MIPS and Alpha architectures (or any other architecture which requires
38 * a flushing of memory or write buffers and/or has incoherent caches)
39 * have yet to be made.
40 *
41 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
42 * flushing of the write buffers.
43 */
44
45 #ifdef __NetBSD__
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $");
48 #endif
49
50 #define PDQ_HWSUPPORT /* for pdq.h */
51
52 #if defined(__FreeBSD__)
53 /*
54 * What a botch having to specific includes for FreeBSD!
55 */
56 #include <dev/pdq/pdq_freebsd.h>
57 #include <dev/pdq/pdqreg.h>
58 #else
59 #include "pdqvar.h"
60 #include "pdqreg.h"
61 #endif
62
/* Round n up to the next multiple of x; x must be a power of two. */
#define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
/* Alignment granule for command request/response buffers. */
#define PDQ_CMD_RX_ALIGNMENT 16

/* Debug printing: compiled in only for test/verbose builds, else a no-op. */
#if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
#define PDQ_PRINTF(x) printf x
#else
#define PDQ_PRINTF(x) do { } while (0)
#endif
71
/*
 * Printable-name tables for adapter halt codes, states, and unsolicited
 * events.  Indexed by the corresponding numeric codes reported by the
 * adapter, so entry order must not change.
 *
 * Fixed two spelling errors in diagnostic strings:
 * "Processer" -> "Processor" and "Elasticy" -> "Elasticity".
 */
static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processor Error"
};

static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Unknown Event #0",
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

/* NULL entries are reserved/unused event codes. */
static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticity Buffer Error",
    "Link Confidence Test Reject"
};

static const char * const pdq_phy_arguments[] = {
    "Direction"
};

/* Per-entity argument/event name tables, indexed by pdq_entities order. */
static const char * const * const pdq_event_arguments[] = {
    pdq_station_arguments,
    pdq_link_arguments,
    pdq_phy_arguments
};

static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

static const char pdq_phy_types[] = "ABSM";

/* PMD names: codes 0..3 and 100..103 (selected by pmd_type / 100). */
static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};
180
181 static void
182 pdq_print_fddi_chars(
183 pdq_t *pdq,
184 const pdq_response_status_chars_get_t *rsp)
185 {
186 const char hexchars[] = "0123456789abcdef";
187
188 printf(
189 #if !defined(__bsdi__) && !defined(__NetBSD__)
190 PDQ_OS_PREFIX
191 #else
192 ": "
193 #endif
194 "DEC %s FDDI %s Controller\n",
195 #if !defined(__bsdi__) && !defined(__NetBSD__)
196 PDQ_OS_PREFIX_ARGS,
197 #endif
198 pdq_descriptions[pdq->pdq_type],
199 pdq_station_types[rsp->status_chars_get.station_type]);
200
201 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
202 PDQ_OS_PREFIX_ARGS,
203 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
204 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
205 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
206 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
207 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
208 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
209 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
210 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
211 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
212 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
213 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
214 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
215 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
216 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
217 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
218
219 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
220 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
221 }
222
223 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
224 PDQ_OS_PREFIX_ARGS,
225 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
226 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
227 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
228
229 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
230 printf(", FDDI Port[B] = %c (PMD = %s)",
231 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
232 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
233
234 printf("\n");
235
236 pdq_os_update_status(pdq, rsp);
237 }
238
239 static void
240 pdq_init_csrs(
241 pdq_csrs_t *csrs,
242 pdq_bus_t bus,
243 pdq_bus_memaddr_t csr_base,
244 size_t csrsize)
245 {
246 csrs->csr_bus = bus;
247 csrs->csr_base = csr_base;
248 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
249 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
250 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
251 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
252 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
253 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
254 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
255 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
256 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
257 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
258 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
259 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
260 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
261 }
262
263 static void
264 pdq_init_pci_csrs(
265 pdq_pci_csrs_t *csrs,
266 pdq_bus_t bus,
267 pdq_bus_memaddr_t csr_base,
268 size_t csrsize)
269 {
270 csrs->csr_bus = bus;
271 csrs->csr_base = csr_base;
272 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
273 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
274 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
275 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
276 }
277
278 static void
279 pdq_flush_databuf_queue(
280 pdq_t *pdq,
281 pdq_databuf_queue_t *q)
282 {
283 PDQ_OS_DATABUF_T *pdu;
284 for (;;) {
285 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
286 if (pdu == NULL)
287 return;
288 PDQ_OS_DATABUF_FREE(pdq, pdu);
289 }
290 }
291
292 static pdq_boolean_t
293 pdq_do_port_control(
294 const pdq_csrs_t * const csrs,
295 pdq_uint32_t cmd)
296 {
297 int cnt = 0;
298 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
299 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
300 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
301 cnt++;
302 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
303 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
304 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
305 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
306 }
307 /* adapter failure */
308 PDQ_ASSERT(0);
309 return PDQ_FALSE;
310 }
311
312 static void
313 pdq_read_mla(
314 const pdq_csrs_t * const csrs,
315 pdq_lanaddr_t *hwaddr)
316 {
317 pdq_uint32_t data;
318
319 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
320 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
321 data = PDQ_CSR_READ(csrs, csr_host_data);
322
323 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
324 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
325 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
326 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
327
328 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
329 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
330 data = PDQ_CSR_READ(csrs, csr_host_data);
331
332 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
333 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
334 }
335
336 static void
337 pdq_read_fwrev(
338 const pdq_csrs_t * const csrs,
339 pdq_fwrev_t *fwrev)
340 {
341 pdq_uint32_t data;
342
343 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
344 data = PDQ_CSR_READ(csrs, csr_host_data);
345
346 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
347 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
348 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
349 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
350 }
351
/*
 * Copy the adapter's error log into *log_entry, one 32-bit word at a
 * time, stopping when the adapter stops producing words or the buffer
 * is full.  Returns PDQ_TRUE if at least one word was read, PDQ_FALSE
 * if the adapter produced nothing.
 */
static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    /* The response structure is filled as a flat array of 32-bit words. */
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    /*
     * NOTE(review): PDQ_PCTL_FW_REV_READ is reused here to step through
     * the log; each successful port-control leaves the next log word in
     * csr_host_data.  Presumably this port command means "read next host
     * data word" in this context -- confirm against the PDQ port spec.
     */
    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
	*ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
	if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
	    break;
    }
    /* ptr unchanged means no words were read at all. */
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}
369
370 static pdq_chip_rev_t
371 pdq_read_chiprev(
372 const pdq_csrs_t * const csrs)
373 {
374 pdq_uint32_t data;
375
376 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
377 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
378 data = PDQ_CSR_READ(csrs, csr_host_data);
379
380 return (pdq_chip_rev_t) data;
381 }
382
/*
 * Per-command request/response buffer sizes and printable names,
 * indexed by pdq_cmd_code_t (see the numbered comments).  Slots must
 * stay in command-code order; unimplemented commands keep zero-length
 * placeholder entries so the indexing remains correct.
 */
static const struct {
    size_t cmd_len;      /* size of the request structure sent to the adapter */
    size_t rsp_len;      /* size of the expected response structure */
    const char *cmd_name; /* human-readable name for diagnostics */
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },				/* 15 - unused command code */
    { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
    { 0, 0, "Bogus CMD" },
};
464
/*
 * Queue the next pending host command to the adapter.
 *
 * Pending commands are recorded as bits in ci->ci_pending_commands;
 * only one command is outstanding at a time (gated by
 * ci->ci_command_active).  The highest pending command code is
 * selected, its request buffer is built in the pre-allocated command
 * area, and the request/response producer indices are written to the
 * adapter's CSRs to start execution.  Completion is handled by
 * pdq_process_command_responses().
 */
static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
	return;

    /*
     * Determine which command needs to be queued: scan downward from
     * the highest command code for the first bit set in the pending
     * mask.
     */
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
	op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
	cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    txd->txd_seg_len = cmdlen;

    /*
     * Clear the command area, set the opcode, and the command from the pending
     * mask.
     */

    ci->ci_queued_commands[ci->ci_request_producer] = op;
#if defined(PDQVERBOSE)
    /* Poison the response opcode so a stale response is detectable. */
    ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
#endif
    PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
	case PDQC_FILTER_SET: {
	    /* Translate driver flags into the adapter's filter items. */
	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
	    filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    /* Terminate the item list. */
	    filter_set->filter_set_items[idx].item_code = PDQI_EOL;
	    break;
	}
	case PDQC_ADDR_FILTER_SET: {
	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
	    /* First slot: the all-ones (broadcast) address. */
	    addr->lanaddr_bytes[0] = 0xFF;
	    addr->lanaddr_bytes[1] = 0xFF;
	    addr->lanaddr_bytes[2] = 0xFF;
	    addr->lanaddr_bytes[3] = 0xFF;
	    addr->lanaddr_bytes[4] = 0xFF;
	    addr->lanaddr_bytes[5] = 0xFF;
	    addr++;
	    /*
	     * Remaining slots come from the OS multicast list; 61 is
	     * presumably the remaining capacity of the command's address
	     * table -- TODO confirm against pdqreg.h.
	     */
	    pdq_os_addr_fill(pdq, addr, 61);
	    break;
	}
	case PDQC_SNMP_SET: {
	    pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    /* item_value 1 = enable full duplex, 2 = disable. */
	    snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
	    snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
	    snmp_set->snmp_set_items[idx].item_port = 0;
	    idx++;
	    snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
	    break;
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }


    /*
     * Sync the command request buffer and descriptor, then advance
     * the request producer index.
     */
    PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
    PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Sync the command response buffer and advance the response
     * producer index (descriptor is already pre-initialized)
     */
    PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    /*
     * At this point the command is done.  All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
	pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}
598
/*
 * Retire the currently active command once the adapter has consumed
 * its response.  Validates the response, reacts to responses carrying
 * state the driver cares about (status characteristics, full-duplex
 * MIB), advances the completion indices, and either queues the next
 * pending command or re-arms the producer CSRs.
 */
static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed.  If it has been
     * consumed then the command must have been as well.
     */

    if (cbp->pdqcb_command_response == ci->ci_response_completion)
	return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
    /* The response must match the command we queued for this slot. */
    PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
	pdq_cmd_info[rspgen->generic_op].cmd_name,
	rspgen->generic_status, rspgen->generic_status));

    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
	/* One-shot: print the adapter characteristics, then clear the flag. */
	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
	/* Track whether full-duplex is actually operational. */
	pdq->pdq_flags &= ~PDQ_IS_FDX;
	if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
	    pdq->pdq_flags |= PDQ_IS_FDX;
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
	/* More work queued: start the next command immediately. */
	pdq_queue_commands(pdq);
    } else {
	/* Nothing pending: just update the producer/completion CSRs. */
	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
	    ci->ci_response_producer | (ci->ci_response_completion << 8));
	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
	    ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}
650
/*
 * This following routine processes unsolicited events.
 * In addition, it also fills the unsolicited queue with
 * event buffers so it can be used to initialize the queue
 * as well.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;

    /*
     * Process each unsolicited event (if any).
     */

    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
	const pdq_unsolicited_event_t *event;
	event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
	PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);

	switch (event->event_type) {
	    case PDQ_UNSOLICITED_EVENT: {
		/*
		 * Validate the event code against the per-entity table
		 * bounds before using it to index the name tables.
		 */
		int bad_event = 0;
		switch (event->event_entity) {
		    case PDQ_ENTITY_STATION: {
			bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
			break;
		    }
		    case PDQ_ENTITY_LINK: {
			bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
			break;
		    }
		    case PDQ_ENTITY_PHY_PORT: {
			bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
			break;
		    }
		    default: {
			bad_event = 1;
			break;
		    }
		}
		if (bad_event) {
		    break;
		}
		printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
		    PDQ_OS_PREFIX_ARGS,
		    pdq_entities[event->event_entity],
		    pdq_event_codes[event->event_entity][event->event_code.value]);
		if (event->event_entity == PDQ_ENTITY_PHY_PORT)
		    printf("[%d]", event->event_index);
		printf("\n");
		break;
	    }
	    case PDQ_UNSOLICITED_COUNTERS: {
		/* Counter updates are currently ignored. */
		break;
	    }
	}
	PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
	PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
	ui->ui_free++;
    }

    /*
     * Now give the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
	ui->ui_producer | (ui->ui_completion << 8));
}
726
/*
 * Process frames the adapter has completed receiving on one receive
 * ring (normal or SMT), up to completion_goal.
 *
 * Each frame occupies PDQ_RX_SEGCNT ring slots.  Good frames have
 * their buffer chain handed to the OS via pdq_os_receive_pdu() and the
 * consumed slots replaced with fresh buffers; bad or unreplaceable
 * frames are recycled in place onto the ring.  Afterwards the ring is
 * topped up toward rx->rx_target with newly allocated buffers.
 *
 * rx              - per-ring bookkeeping (producer/completion/free counts)
 * receives        - the ring's descriptor array
 * completion_goal - the adapter's current completion index
 * ring_mask       - ring size minus one (power-of-two ring)
 */
static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

    while (completion != completion_goal) {
	PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
	pdq_uint8_t *dataptr;
	pdq_uint32_t fc, datalen, pdulen, segcnt;
	pdq_rxstatus_t status;

	fpdu = lpdu = buffers[completion];
	PDQ_ASSERT(fpdu != NULL);
	/* The adapter writes the receive status word at the buffer head. */
	PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
	dataptr = PDQ_OS_DATABUF_PTR(fpdu);
	status = *(pdq_rxstatus_t *) dataptr;
	if (status.rxs_rcc_badpdu == 0) {
	    datalen = status.rxs_len;
	    PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
				  PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
	    /* Sanity-check the FDDI frame-control byte and the length. */
	    fc = dataptr[PDQ_RX_FC_OFFSET];
	    switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
		case PDQ_FDDI_LLC_ASYNC:
		case PDQ_FDDI_LLC_SYNC:
		case PDQ_FDDI_IMP_ASYNC:
		case PDQ_FDDI_IMP_SYNC: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
			PDQ_PRINTF(("discard: bad length %d\n", datalen));
			goto discard_frame;
		    }
		    break;
		}
		case PDQ_FDDI_SMT: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
			goto discard_frame;
		    break;
		}
		default: {
		    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
		    goto discard_frame;
		}
	    }
	    /*
	     * Update the lengths of the data buffers now that we know
	     * the real length.
	     */
	    pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
	    segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
	    /*
	     * Replace every buffer being handed up with a fresh one; if
	     * allocation fails, keep the originals and discard the frame.
	     */
	    PDQ_OS_DATABUF_ALLOC(pdq, npdu);
	    if (npdu == NULL) {
		PDQ_PRINTF(("discard: no databuf #0\n"));
		goto discard_frame;
	    }
	    buffers[completion] = npdu;
	    for (idx = 1; idx < segcnt; idx++) {
		PDQ_OS_DATABUF_ALLOC(pdq, npdu);
		if (npdu == NULL) {
		    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
		    PDQ_OS_DATABUF_FREE(pdq, fpdu);
		    goto discard_frame;
		}
		/* Chain the received segments together as we go. */
		PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
		lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
		buffers[(completion + idx) & ring_mask] = npdu;
	    }
	    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
	    /* Slide the replacement buffers into the producer slots. */
	    for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
		buffers[(producer + idx) & ring_mask] =
		    buffers[(completion + idx) & ring_mask];
		buffers[(completion + idx) & ring_mask] = NULL;
	    }
	    /* Strip the hardware header and set the chain's final length. */
	    PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
	    if (segcnt == 1) {
		PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
	    } else {
		PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
	    }
	    /*
	     * Do not pass to protocol if packet was received promiscuously
	     */
	    pdq_os_receive_pdu(pdq, fpdu, pdulen,
			       status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
	    rx->rx_free += PDQ_RX_SEGCNT;
	    PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
	    PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
	    continue;
	} else {
	    PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
			status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
			status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
	    if (status.rxs_rcc_reason == 7)
		goto discard_frame;
	    if (status.rxs_rcc_reason != 0) {
		/* hardware fault */
		if (status.rxs_rcc_badcrc) {
		    printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
			   PDQ_OS_PREFIX_ARGS,
			   dataptr[PDQ_RX_FC_OFFSET+1],
			   dataptr[PDQ_RX_FC_OFFSET+2],
			   dataptr[PDQ_RX_FC_OFFSET+3],
			   dataptr[PDQ_RX_FC_OFFSET+4],
			   dataptr[PDQ_RX_FC_OFFSET+5],
			   dataptr[PDQ_RX_FC_OFFSET+6]);
		    /* rx->rx_badcrc++; */
		} else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
		    /* rx->rx_frame_status_errors++; */
		} else {
		    /* hardware fault */
		}
	    }
	}
      discard_frame:
	/*
	 * Discarded frames go right back on the queue; therefore
	 * ring entries were freed.
	 *
	 * NOTE(review): this loop indexes buffers[] with the unmasked
	 * local producer/completion but descriptors with rx->rx_producer;
	 * presumably the local and stored producers stay in lockstep here
	 * so the indices agree -- verify before touching this code.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    buffers[producer] = buffers[completion];
	    buffers[completion] = NULL;
	    rxd = &receives[rx->rx_producer];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
	    PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
	    PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
	    PDQ_ADVANCE(producer, 1, ring_mask);
	    PDQ_ADVANCE(completion, 1, ring_mask);
	}
    }
    rx->rx_completion = completion;

    /* Refill the ring toward rx_target, one PDQ_RX_SEGCNT group at a time. */
    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
	PDQ_OS_DATABUF_T *pdu;
	/*
	 * Allocate the needed number of data buffers.
	 * Try to obtain them from our free queue before
	 * asking the system for more.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
		PDQ_OS_DATABUF_ALLOC(pdq, pdu);
		if (pdu == NULL)
		    break;
		buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
	    }
	    rxd = &receives[(rx->rx_producer + idx) & ring_mask];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
	    PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
	}
	if (idx < PDQ_RX_SEGCNT) {
	    /*
	     * We didn't get all databufs required to complete a new
	     * receive buffer.  Keep the ones we got and retry a bit
	     * later for the rest.
	     */
	    break;
	}
	PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
	rx->rx_free -= PDQ_RX_SEGCNT;
    }
}
911
912 static void pdq_process_transmitted_data(pdq_t *pdq);
913
/*
 * Queue one PDU (packet) for transmission on the adapter's transmit ring.
 *
 * Returns PDQ_TRUE if the whole PDU was placed on the ring (ownership of
 * the PDU passes to tx->tx_txq until pdq_process_transmitted_data reclaims
 * it); returns PDQ_FALSE if the ring is too full, in which case the TX
 * interrupt is enabled so the caller can requeue the PDU and be notified
 * when space frees up.
 */
914 pdq_boolean_t
915 pdq_queue_transmit_data(
916     pdq_t *pdq,
917     PDQ_OS_DATABUF_T *pdu)
918 {
919     pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
920     pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
921     pdq_uint32_t producer = tx->tx_producer;
922     pdq_txdesc_t *eop = NULL;
923     PDQ_OS_DATABUF_T *pdu0;
924     pdq_uint32_t freecnt;
925 #if defined(PDQ_BUS_DMA)
926     bus_dmamap_t map;
927 #endif
928 
929   again:
    /*
     * When the frame-control byte sits at the header offset, a separate
     * pre-built header descriptor (tx->tx_hdrdesc) is prepended below,
     * so reserve one descriptor for it up front.
     */
930     if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
931 	freecnt = tx->tx_free - 1;
932     } else {
933 	freecnt = tx->tx_free;
934     }
935     /*
936      * Need 2 or more descriptors to be able to send.
937      */
938     if (freecnt == 0) {
	/* Ring full: arm the TX interrupt and let the caller requeue. */
939 	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
940 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
941 	return PDQ_FALSE;
942     }
943 
944     if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
	/* Emit the canned FDDI header descriptor (carries the SOP bit). */
945 	dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
946 	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
947 	PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
948     }
949 
950 #if defined(PDQ_BUS_DMA)
    /*
     * bus_dma path: the PDU was already loaded into a DMA map; emit one
     * descriptor per DMA segment, but only if they all fit at once.
     */
951     map = M_GETCTX(pdu, bus_dmamap_t);
952     if (freecnt >= map->dm_nsegs) {
953 	int idx;
954 	for (idx = 0; idx < map->dm_nsegs; idx++) {
955 	    /*
956 	     * Initialize the transmit descriptor
957 	     */
958 	    eop = &dbp->pdqdb_transmits[producer];
959 	    eop->txd_seg_len = map->dm_segs[idx].ds_len;
960 	    eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
961 	    eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
962 	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
963 	    freecnt--;
964 	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
965 	}
966 	pdu0 = NULL;
967     } else {
968 	pdu0 = pdu;
969     }
970 #else
    /*
     * Non-bus_dma path: walk the databuf chain, splitting each buffer at
     * physical page boundaries (one descriptor per physically contiguous
     * fragment).
     *
     * NOTE(review): freecnt is recomputed here as tx_free - 1 regardless
     * of whether a header descriptor was reserved above — presumably to
     * always keep one descriptor spare; verify against the header case.
     */
971     for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
972 	pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
973 	const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
974 
975 	/*
976 	 * The first segment is limited to the space remaining in
977 	 * page.  All segments after that can be up to a full page
978 	 * in size.
979 	 */
980 	fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
981 	while (datalen > 0 && freecnt > 0) {
982 	    pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
983 
984 	    /*
985 	     * Initialize the transmit descriptor
986 	     */
987 	    eop = &dbp->pdqdb_transmits[producer];
988 	    eop->txd_seg_len = seglen;
989 	    eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
990 	    eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
991 	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
992 	    datalen -= seglen;
993 	    dataptr += seglen;
994 	    fraglen = PDQ_OS_PAGESIZE;
995 	    freecnt--;
996 	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
997 	}
998 	pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
999     }
1000 #endif /* defined(PDQ_BUS_DMA) */
    /*
     * pdu0 != NULL means the PDU did not fit.  Try to reclaim completed
     * transmit descriptors; if any were reclaimed, rewind and retry the
     * whole fill from scratch (the partially written descriptors are
     * simply overwritten since the producer index was never committed).
     */
1001     if (pdu0 != NULL) {
1002 	unsigned completion = tx->tx_completion;
1003 	PDQ_ASSERT(freecnt == 0);
1004 	PDQ_OS_CONSUMER_POSTSYNC(pdq);
1005 	pdq_process_transmitted_data(pdq);
1006 	if (completion != tx->tx_completion) {
1007 	    producer = tx->tx_producer;
1008 	    eop = NULL;
1009 	    goto again;
1010 	}
1011 	/*
1012 	 * If we still have data to process then the ring was too full
1013 	 * to store the PDU.  Return FALSE so the caller will requeue
1014 	 * the PDU for later.
1015 	 */
1016 	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
1017 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1018 	return PDQ_FALSE;
1019     }
1020     /*
1021      * Everything went fine.  Finish it up.
1022      */
    /*
     * Record how many descriptors this PDU consumed (used at reclaim
     * time), mark SOP on the first descriptor when no header descriptor
     * supplied it, mark EOP on the last, then commit the new producer
     * index to the adapter.
     */
1023     tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
1024     if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
1025 	dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
1026 	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1027 			    sizeof(pdq_txdesc_t));
1028     }
1029     eop->txd_eop = 1;
1030     PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
1031     PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1032     tx->tx_producer = producer;
1033     tx->tx_free = freecnt;
1034     PDQ_DO_TYPE2_PRODUCER(pdq);
1035     return PDQ_TRUE;
1036 }
1037
/*
 * Reclaim transmit descriptors that the adapter has finished with.
 *
 * The adapter advances cbp->pdqcb_transmits (in the DMA-shared consumer
 * block) as it completes transmits; for each completed PDU we hand the
 * buffer back to the OS via pdq_os_transmit_done() and return its
 * descriptors to the free pool.  If any progress was made, the TX
 * interrupt (armed by pdq_queue_transmit_data on a full ring) is masked
 * off again and the OS transmit queue is restarted.
 *
 * Callers are expected to have done PDQ_OS_CONSUMER_POSTSYNC before
 * calling so that the consumer-block read below is coherent (see the
 * call sites in pdq_queue_transmit_data and pdq_interrupt).
 */
1038 static void
1039 pdq_process_transmitted_data(
1040     pdq_t *pdq)
1041 {
1042     pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1043     volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1044     pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1045     pdq_uint32_t completion = tx->tx_completion;
1046     int reclaimed = 0;
1047 
1048     while (completion != cbp->pdqcb_transmits) {
1049 	PDQ_OS_DATABUF_T *pdu;
	/* Descriptor count recorded per-PDU at queue time. */
1050 	pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
1051 	PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1052 	PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
	/* PDUs complete in FIFO order, so the queue head matches. */
1053 	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1054 	pdq_os_transmit_done(pdq, pdu);
1055 	tx->tx_free += descriptor_count;
1056 	reclaimed = 1;
1057 	PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1058     }
1059     if (tx->tx_completion != completion) {
1060 	tx->tx_completion = completion;
	/* Space freed up: no longer need TX-done interrupts. */
1061 	pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1062 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1063 	pdq_os_restart_transmitter(pdq);
1064     }
1065     if (reclaimed)
1066 	PDQ_DO_TYPE2_PRODUCER(pdq);
1067 }
1068
/*
 * Discard every PDU still queued for transmission.
 *
 * The buffers are freed outright (no pdq_os_transmit_done callback,
 * since the packets never went out on the wire), then the transmit ring
 * is reset to empty: completion and the adapter's consumer index are
 * both set equal to the producer index, the consumer block is synced
 * out, and the producer doorbell is rung to inform the adapter.
 */
1069 void
1070 pdq_flush_transmitter(
1071     pdq_t *pdq)
1072 {
1073     volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1074     pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1075 
1076     for (;;) {
1077 	PDQ_OS_DATABUF_T *pdu;
1078 	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1079 	if (pdu == NULL)
1080 	    break;
1081 	/*
1082 	 * Don't call transmit done since the packet never made it
1083 	 * out on the wire.
1084 	 */
1085 	PDQ_OS_DATABUF_FREE(pdq, pdu);
1086     }
1087 
    /* Ring empty again: all descriptors free, indices in agreement. */
1088     tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1089     cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
1090     PDQ_OS_CONSUMER_PRESYNC(pdq);
1091 
1092     PDQ_DO_TYPE2_PRODUCER(pdq);
1093 }
1094
/*
 * Hard-reset the adapter and wait for it to reach DMA_UNAVAILABLE.
 *
 * A no-op if the adapter is already in DMA_UNAVAILABLE.  The self-test
 * is skipped except when the adapter is halted and is not a DEFTA
 * (TURBOchannel) board.  After pulsing the reset line we poll the port
 * status roughly once per millisecond, up to 100000 iterations, for the
 * DMA_UNAVAILABLE state.
 */
1095 void
1096 pdq_hwreset(
1097     pdq_t *pdq)
1098 {
1099     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1100     pdq_state_t state;
1101     int cnt;
1102 
1103     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1104     if (state == PDQS_DMA_UNAVAILABLE)
1105 	return;
    /* Data A selects reset options; 0 means run the full self-test. */
1106     PDQ_CSR_WRITE(csrs, csr_port_data_a,
1107 		  (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
    /* Pulse the reset line (held for 100us). */
1108     PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1109     PDQ_OS_USEC_DELAY(100);
1110     PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
    /*
     * Poll for DMA_UNAVAILABLE; cnt is left at its value at the moment
     * of the break so the diagnostics below can report the spin count.
     */
1111     for (cnt = 100000;;cnt--) {
1112 	PDQ_OS_USEC_DELAY(1000);
1113 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1114 	if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1115 	    break;
1116     }
1117     PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1118     PDQ_OS_USEC_DELAY(10000);
1119     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1120     PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1121     PDQ_ASSERT(cnt > 0);
1122 }
1123
1124 /*
1125 * The following routine brings the PDQ from whatever state it is
1126 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
1127 */
/*
 * Bring the adapter from any state to DMA_AVAILABLE, reinitializing all
 * host-side ring state along the way.  Returns the final adapter state
 * (PDQS_DMA_AVAILABLE on success; PDQS_HALTED if the adapter halted on
 * both attempts).  Phases:
 *   1. hardware reset to DMA_UNAVAILABLE if needed,
 *   2. read hardware address / firmware rev / chip rev,
 *   3. free all receive databufs and zero every producer, completion
 *      and consumer index,
 *   4. enable DMA (DEFPA), set DMA burst size, program the consumer
 *      and descriptor block physical addresses,
 *   5. poll for DMA_AVAILABLE (one full retry via `restart' if the
 *      adapter halts), then queue the initial commands.
 */
1128 pdq_state_t
1129 pdq_stop(
1130     pdq_t *pdq)
1131 {
1132     pdq_state_t state;
1133     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1134     int cnt, pass = 0, idx;
1135     PDQ_OS_DATABUF_T **buffers;
1136 
1137   restart:
1138     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1139     if (state != PDQS_DMA_UNAVAILABLE) {
1140 	pdq_hwreset(pdq);
1141 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1142 	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1143     }
1144 #if 0
1145     switch (state) {
1146 	case PDQS_RING_MEMBER:
1147 	case PDQS_LINK_UNAVAILABLE:
1148 	case PDQS_LINK_AVAILABLE: {
1149 	    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1150 	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1151 	    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1152 	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1153 	    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1154 	    /* FALLTHROUGH */
1155 	}
1156 	case PDQS_DMA_AVAILABLE: {
1157 	    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1158 	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1159 	    pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1160 	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1161 	    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1162 	    /* FALLTHROUGH */
1163 	}
1164 	case PDQS_DMA_UNAVAILABLE: {
1165 	    break;
1166 	}
1167     }
1168 #endif
1169     /*
1170      * Now we should be in DMA_UNAVAILABLE.  So bring the PDQ into
1171      * DMA_AVAILABLE.
1172      */
1173 
1174     /*
1175      * Obtain the hardware address and firmware revisions
1176      * (MLA = my long address which is FDDI speak for hardware address)
1177      */
1178     pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1179     pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1180     pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1181 
1182     if (pdq->pdq_type == PDQ_DEFPA) {
1183 	/*
1184 	 * Disable interrupts and DMA.
1185 	 */
1186 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
	/* 0x10 clears the latched PFI status bits (write-one-to-clear). */
1187 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1188     }
1189 
1190     /*
1191      * Flush all the databuf queues.
1192      */
1193     pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1194     pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
    /* Release every buffer still held by the normal receive ring ... */
1195     buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1196     for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1197 	if (buffers[idx] != NULL) {
1198 	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1199 	    buffers[idx] = NULL;
1200 	}
1201     }
1202     pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
    /* ... and by the host SMT receive ring. */
1203     buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1204     for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1205 	if (buffers[idx] != NULL) {
1206 	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1207 	    buffers[idx] = NULL;
1208 	}
1209     }
1210     pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1211 
1212     /*
1213      * Reset the consumer indexes to 0.
1214      */
1215     pdq->pdq_cbp->pdqcb_receives = 0;
1216     pdq->pdq_cbp->pdqcb_transmits = 0;
1217     pdq->pdq_cbp->pdqcb_host_smt = 0;
1218     pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1219     pdq->pdq_cbp->pdqcb_command_response = 0;
1220     pdq->pdq_cbp->pdqcb_command_request = 0;
1221     PDQ_OS_CONSUMER_PRESYNC(pdq);
1222 
1223     /*
1224      * Reset the producer and completion indexes to 0.
1225      */
1226     pdq->pdq_command_info.ci_request_producer = 0;
1227     pdq->pdq_command_info.ci_response_producer = 0;
1228     pdq->pdq_command_info.ci_request_completion = 0;
1229     pdq->pdq_command_info.ci_response_completion = 0;
1230     pdq->pdq_unsolicited_info.ui_producer = 0;
1231     pdq->pdq_unsolicited_info.ui_completion = 0;
1232     pdq->pdq_rx_info.rx_producer = 0;
1233     pdq->pdq_rx_info.rx_completion = 0;
1234     pdq->pdq_tx_info.tx_producer = 0;
1235     pdq->pdq_tx_info.tx_completion = 0;
1236     pdq->pdq_host_smt_info.rx_producer = 0;
1237     pdq->pdq_host_smt_info.rx_completion = 0;
1238 
1239     pdq->pdq_command_info.ci_command_active = 0;
1240     pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1241     pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1242 
1243     /*
1244      * Allow the DEFPA to do DMA.  Then program the physical
1245      * addresses of the consumer and descriptor blocks.
1246      */
1247     if (pdq->pdq_type == PDQ_DEFPA) {
1248 #ifdef PDQTEST
1249 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1250 		      PDQ_PFI_MODE_DMA_ENABLE);
1251 #else
1252 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1253 		      PDQ_PFI_MODE_DMA_ENABLE
1254 	/*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1255 #endif
1256     }
1257 
1258     /*
1259      * Make sure the unsolicited queue has events ...
1260      */
1261     pdq_process_unsolicited_events(pdq);
1262 
    /* Larger DMA bursts for DEFTA and rev-E DEFEA boards. */
1263     if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1264 	    || pdq->pdq_type == PDQ_DEFTA)
1265 	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1266     else
1267 	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1268     PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1269     pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1270 
1271     /*
1272      * Make sure there isn't stale information in the caches before
1273      * tell the adapter about the blocks it's going to use.
1274      */
1275     PDQ_OS_CONSUMER_PRESYNC(pdq);
1276 
1277     PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1278     PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1279     pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1280 
1281     PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    /* Request data byte-swapping from the adapter to match host order. */
1282 #if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
1283     PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1284 #else
1285     PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
1286 #endif
1287     pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1288 
    /*
     * Wait up to ~1s for DMA_AVAILABLE.  If the adapter halts instead,
     * retry the whole sequence once from `restart' before giving up.
     */
1289     for (cnt = 0; cnt < 1000; cnt++) {
1290 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1291 	if (state == PDQS_HALTED) {
1292 	    if (pass > 0)
1293 		return PDQS_HALTED;
1294 	    pass = 1;
1295 	    goto restart;
1296 	}
1297 	if (state == PDQS_DMA_AVAILABLE) {
1298 	    PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1299 	    break;
1300 	}
1301 	PDQ_OS_USEC_DELAY(1000);
1302     }
1303     PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1304 
    /* Ack any stale type-0 interrupts; leave all interrupts masked. */
1305     PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1306     pdq->pdq_intrmask = 0;
1307   /* PDQ_HOST_INT_STATE_CHANGE
1308 	|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1309 	|PDQ_HOST_INT_UNSOL_ENABLE */;
1310     PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1311 
1312     /*
1313      * Any other command but START should be valid.
1314      */
1315     pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1316     if (pdq->pdq_flags & PDQ_PRINTCHARS)
1317 	pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1318     pdq_queue_commands(pdq);
1319 
1320     if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1321 	/*
1322 	 * Now wait (up to 100ms) for the command(s) to finish.
1323 	 */
1324 	for (cnt = 0; cnt < 1000; cnt++) {
1325 	    PDQ_OS_CONSUMER_POSTSYNC(pdq);
1326 	    pdq_process_command_responses(pdq);
1327 	    if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1328 		break;
1329 	    PDQ_OS_USEC_DELAY(1000);
1330 	}
1331 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1332     }
1333 
1334     return state;
1335 }
1336
/*
 * Start (or restart) normal operation of an initialized adapter.
 *
 * Expects the adapter to be in DMA_AVAILABLE (fresh from pdq_stop) or
 * already LINK_AVAILABLE/LINK_UNAVAILABLE; the asserts reject all other
 * states.  From DMA_AVAILABLE this programs the full interrupt mask,
 * primes the receive (and optionally host SMT) rings, and queues the
 * filter-set/address-filter/SNMP/START commands.  From a LINK state the
 * START command is omitted since the adapter is already on the ring.
 */
1337 void
1338 pdq_run(
1339     pdq_t *pdq)
1340 {
1341     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1342     pdq_state_t state;
1343 
1344     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1345     PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1346     PDQ_ASSERT(state != PDQS_RESET);
1347     PDQ_ASSERT(state != PDQS_HALTED);
1348     PDQ_ASSERT(state != PDQS_UPGRADE);
1349     PDQ_ASSERT(state != PDQS_RING_MEMBER);
1350     switch (state) {
1351 	case PDQS_DMA_AVAILABLE: {
1352 	    /*
1353 	     * The PDQ after being reset screws up some of its state.
1354 	     * So we need to clear all the errors/interrupts so the real
1355 	     * ones will get through.
1356 	     */
1357 	    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1358 	    pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1359 		|PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1360 		|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1361 		|PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1362 	    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1363 	    /*
1364 	     * Set the MAC and address filters and start up the PDQ.
1365 	     */
1366 	    pdq_process_unsolicited_events(pdq);
	    /* Fill the receive ring with fresh buffers and publish it. */
1367 	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1368 				      pdq->pdq_dbp->pdqdb_receives,
1369 				      pdq->pdq_cbp->pdqcb_receives,
1370 				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1371 	    PDQ_DO_TYPE2_PRODUCER(pdq);
1372 	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		/* Likewise for the host SMT ring when SMT pass-through is on. */
1373 		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1374 					  pdq->pdq_dbp->pdqdb_host_smt,
1375 					  pdq->pdq_cbp->pdqcb_host_smt,
1376 					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1377 		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1378 			      pdq->pdq_host_smt_info.rx_producer
1379 			          | (pdq->pdq_host_smt_info.rx_completion << 8));
1380 	    }
	    /* START makes the adapter attempt to join the FDDI ring. */
1381 	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1382 		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1383 		| PDQ_BITMASK(PDQC_SNMP_SET)
1384 		| PDQ_BITMASK(PDQC_START);
1385 	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
1386 		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1387 	    pdq_queue_commands(pdq);
1388 	    break;
1389 	}
1390 	case PDQS_LINK_UNAVAILABLE:
1391 	case PDQS_LINK_AVAILABLE: {
	    /* Already started: just refresh filters/settings (no START). */
1392 	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1393 		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1394 		| PDQ_BITMASK(PDQC_SNMP_SET);
1395 	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
1396 		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1397 	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
1398 		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1399 					  pdq->pdq_dbp->pdqdb_host_smt,
1400 					  pdq->pdq_cbp->pdqcb_host_smt,
1401 					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1402 		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1403 			      pdq->pdq_host_smt_info.rx_producer
1404 			          | (pdq->pdq_host_smt_info.rx_completion << 8));
1405 	    }
1406 	    pdq_process_unsolicited_events(pdq);
1407 	    pdq_queue_commands(pdq);
1408 	    break;
1409 	}
1410 	case PDQS_RING_MEMBER: {
1411 	}
1412 	default: {	/* to make gcc happy */
1413 	    break;
1414 	}
1415     }
1416 }
1417
/*
 * Interrupt service routine.  Loops while the port status shows any
 * pending interrupt, dispatching to the receive, host SMT, transmit,
 * unsolicited-event and command-response processors, and handling
 * type-0 events (adapter state changes, fatal errors, transmit flush
 * requests).  Returns nonzero if any interrupt was serviced (so shared
 * interrupt dispatchers can tell whether this device asserted the line).
 *
 * On an adapter HALT or fatal error the adapter is reinitialized via
 * pdq_stop() (and pdq_run() if it was running) and the ISR returns
 * immediately without re-reading the port status.
 */
1418 int
1419 pdq_interrupt(
1420     pdq_t *pdq)
1421 {
1422     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1423     pdq_uint32_t data;
1424     int progress = 0;
1425 
    /* 0x18 acks the latched PFI interrupt status on DEFPA (PCI) boards. */
1426     if (pdq->pdq_type == PDQ_DEFPA)
1427 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1428 
1429     while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1430 	progress = 1;
1431 	PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
	/* Pull in the adapter's consumer-block updates before acting. */
1432 	PDQ_OS_CONSUMER_POSTSYNC(pdq);
1433 	if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1434 	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1435 				      pdq->pdq_dbp->pdqdb_receives,
1436 				      pdq->pdq_cbp->pdqcb_receives,
1437 				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1438 	    PDQ_DO_TYPE2_PRODUCER(pdq);
1439 	}
1440 	if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1441 	    pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1442 				      pdq->pdq_dbp->pdqdb_host_smt,
1443 				      pdq->pdq_cbp->pdqcb_host_smt,
1444 				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1445 	    PDQ_DO_HOST_SMT_PRODUCER(pdq);
1446 	}
	/* Transmit completions are reclaimed unconditionally. */
1447 	/* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
1448 	    pdq_process_transmitted_data(pdq);
1449 	if (data & PDQ_PSTS_UNSOL_PENDING)
1450 	    pdq_process_unsolicited_events(pdq);
1451 	if (data & PDQ_PSTS_CMD_RSP_PENDING)
1452 	    pdq_process_command_responses(pdq);
1453 	if (data & PDQ_PSTS_TYPE_0_PENDING) {
	    /* NB: `data' is reused here for the type-0 interrupt bits. */
1454 	    data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1455 	    if (data & PDQ_HOST_INT_STATE_CHANGE) {
1456 		pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1457 		printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
1458 		if (state == PDQS_LINK_UNAVAILABLE) {
1459 		    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1460 		} else if (state == PDQS_LINK_AVAILABLE) {
		    /* Query extended MIB to learn whether FDX came up. */
1461 		    if (pdq->pdq_flags & PDQ_WANT_FDX) {
1462 			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
1463 			pdq_queue_commands(pdq);
1464 		    }
1465 		    pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
1466 		    pdq_os_restart_transmitter(pdq);
1467 		} else if (state == PDQS_HALTED) {
		    /* Adapter halted: log diagnostics, then full restart. */
1468 		    pdq_response_error_log_get_t log_entry;
1469 		    pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1470 		    printf(": halt code = %d (%s)\n",
1471 			   halt_code, pdq_halt_codes[halt_code]);
1472 		    if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1473 			PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1474 			       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1475 			       data & PDQ_HOST_INT_FATAL_ERROR));
1476 		    }
1477 		    PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
1478 		    if (pdq_read_error_log(pdq, &log_entry)) {
1479 			PDQ_PRINTF(("  Error log Entry:\n"));
1480 			PDQ_PRINTF(("    CMD Status           = %d (0x%x)\n",
1481 				    log_entry.error_log_get_status,
1482 				    log_entry.error_log_get_status));
1483 			PDQ_PRINTF(("    Event Status         = %d (0x%x)\n",
1484 				    log_entry.error_log_get_event_status,
1485 				    log_entry.error_log_get_event_status));
1486 			PDQ_PRINTF(("    Caller Id            = %d (0x%x)\n",
1487 				    log_entry.error_log_get_caller_id,
1488 				    log_entry.error_log_get_caller_id));
1489 			PDQ_PRINTF(("    Write Count          = %d (0x%x)\n",
1490 				    log_entry.error_log_get_write_count,
1491 				    log_entry.error_log_get_write_count));
1492 			PDQ_PRINTF(("    FRU Implication Mask = %d (0x%x)\n",
1493 				    log_entry.error_log_get_fru_implication_mask,
1494 				    log_entry.error_log_get_fru_implication_mask));
1495 			PDQ_PRINTF(("    Test ID              = %d (0x%x)\n",
1496 				    log_entry.error_log_get_test_id,
1497 				    log_entry.error_log_get_test_id));
1498 		    }
1499 		    pdq_stop(pdq);
1500 		    if (pdq->pdq_flags & PDQ_RUNNING)
1501 			pdq_run(pdq);
1502 		    return 1;
1503 		}
1504 		printf("\n");
		/* Ack the state-change interrupt (write-one-to-clear). */
1505 		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1506 	    }
1507 	    if (data & PDQ_HOST_INT_FATAL_ERROR) {
1508 		pdq_stop(pdq);
1509 		if (pdq->pdq_flags & PDQ_RUNNING)
1510 		    pdq_run(pdq);
1511 		return 1;
1512 	    }
1513 	    if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
		/* Adapter asked us to drop all queued transmits. */
1514 		printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1515 		pdq->pdq_flags &= ~PDQ_TXOK;
1516 		pdq_flush_transmitter(pdq);
1517 		pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1518 		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
1519 	    }
1520 	}
1521 	if (pdq->pdq_type == PDQ_DEFPA)
1522 	    PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1523     }
1524     return progress;
1525 }
1526
1527 pdq_t *
1528 pdq_initialize(
1529 pdq_bus_t bus,
1530 pdq_bus_memaddr_t csr_base,
1531 const char *name,
1532 int unit,
1533 void *ctx,
1534 pdq_type_t type)
1535 {
1536 pdq_t *pdq;
1537 pdq_state_t state;
1538 pdq_descriptor_block_t *dbp;
1539 #if !defined(PDQ_BUS_DMA)
1540 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1541 pdq_uint8_t *p;
1542 #endif
1543 int idx;
1544
1545 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1546 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1547 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1548 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1549 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1550 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1551 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1552 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1553 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1554
1555 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1556 if (pdq == NULL) {
1557 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1558 return NULL;
1559 }
1560 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1561 pdq->pdq_type = type;
1562 pdq->pdq_unit = unit;
1563 pdq->pdq_os_ctx = (void *) ctx;
1564 pdq->pdq_os_name = name;
1565 pdq->pdq_flags = PDQ_PRINTCHARS;
1566 /*
1567 * Allocate the additional data structures required by
1568 * the PDQ driver. Allocate a contiguous region of memory
1569 * for the descriptor block. We need to allocated enough
1570 * to guarantee that we will a get 8KB block of memory aligned
1571 * on a 8KB boundary. This turns to require that we allocate
1572 * (N*2 - 1 page) pages of memory. On machine with less than
1573 * a 8KB page size, it mean we will allocate more memory than
1574 * we need. The extra will be used for the unsolicited event
1575 * buffers (though on machines with 8KB pages we will to allocate
1576 * them separately since there will be nothing left overs.)
1577 */
1578 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1579 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1580
1581 if (p == NULL)
1582 printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __FUNCTION__);
1583
1584 if (p != NULL) {
1585 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1586 /*
1587 * Assert that we really got contiguous memory. This isn't really
1588 * needed on systems that actually have physical contiguous allocation
1589 * routines, but on those systems that don't ...
1590 */
1591 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1592 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1593 goto cleanup_and_return;
1594 }
1595 if (physaddr & 0x1FFF) {
1596 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1597 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1598 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1599 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1600 } else {
1601 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1602 pdq->pdq_pa_descriptor_block = physaddr;
1603 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1604 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1605 }
1606 }
1607 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1608 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1609 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1610 pdq->pdq_unsolicited_info.ui_events =
1611 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1612 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1613 }
1614 #else
1615 if (pdq_os_memalloc_contig(pdq))
1616 goto cleanup_and_return;
1617 #endif
1618
1619 /*
1620 * Make sure everything got allocated. If not, free what did
1621 * get allocated and return.
1622 */
1623 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1624 cleanup_and_return:
1625 #ifdef PDQ_OS_MEMFREE_CONTIG
1626 if (p /* pdq->pdq_dbp */ != NULL)
1627 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1628 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1629 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1630 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1631 #endif
1632 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1633 return NULL;
1634 }
1635 dbp = pdq->pdq_dbp;
1636
1637 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1638 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1639 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1640 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1641 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1642 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1643 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1644
1645 /*
1646 * Zero out the descriptor block. Not really required but
1647 * it pays to be neat. This will also zero out the consumer
1648 * block, command pool, and buffer pointers for the receive
1649 * host_smt rings.
1650 */
1651 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1652
1653 /*
1654 * Initialize the CSR references.
1655 * the DEFAA (FutureBus+) skips a longword between registers
1656 */
1657 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1658 if (pdq->pdq_type == PDQ_DEFPA)
1659 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1660
1661 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1662 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1663 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1664 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1665 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1666 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1667 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1668 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1669 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1670 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1671 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1672 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1673 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1674 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1675 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1676 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1677 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1678 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1679 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1680 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1681 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1682 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1683 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1684 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1685 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1686 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1687 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1688
1689 /*
1690 * Initialize the command information block
1691 */
1692 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1693 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1694 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1695 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1696 pdq->pdq_command_info.ci_request_bufstart,
1697 pdq->pdq_command_info.ci_pa_request_bufstart));
1698 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1699 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1700
1701 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1702 txd->txd_eop = txd->txd_sop = 1;
1703 txd->txd_pa_hi = 0;
1704 }
1705 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1706 sizeof(dbp->pdqdb_command_requests));
1707
1708 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1709 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1710 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1711 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1712 pdq->pdq_command_info.ci_response_bufstart,
1713 pdq->pdq_command_info.ci_pa_response_bufstart));
1714 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1715 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1716
1717 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1718 rxd->rxd_sop = 1;
1719 rxd->rxd_seg_cnt = 0;
1720 rxd->rxd_seg_len_lo = 0;
1721 rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1722 }
1723 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1724 sizeof(dbp->pdqdb_command_responses));
1725
1726 /*
1727 * Initialize the unsolicited event information block
1728 */
1729 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1730 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1731 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1732 pdq->pdq_unsolicited_info.ui_events,
1733 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1734 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1735 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1736 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1737
1738 rxd->rxd_sop = 1;
1739 rxd->rxd_seg_cnt = 0;
1740 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1741 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1742 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1743 rxd->rxd_pa_hi = 0;
1744 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1745 }
1746 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1747 sizeof(dbp->pdqdb_unsolicited_events));
1748
1749 /*
1750 * Initialize the receive information blocks (normal and SMT).
1751 */
1752 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1753 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1754 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1755 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1756
1757 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1758 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1759 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1760 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1761
1762 /*
1763 * Initialize the transmit information block.
1764 */
1765 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1766 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1767 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1768 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1769 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1770 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1771 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1772 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1773
1774 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1775 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1776
1777 /*
1778 * Stop the PDQ if it is running and put it into a known state.
1779 */
1780 state = pdq_stop(pdq);
1781
1782 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1783 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1784 /*
1785 * If the adapter is not the state we expect, then the initialization
1786 * failed. Cleanup and exit.
1787 */
1788 #if defined(PDQVERBOSE)
1789 if (state == PDQS_HALTED) {
1790 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1791 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1792 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1793 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1794 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1795 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1796 }
1797 #endif
1798 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1799 goto cleanup_and_return;
1800
1801 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1802 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1803 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1804 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1805 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1806 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1807 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1808 PDQ_PRINTF(("PDQ Chip Revision = "));
1809 switch (pdq->pdq_chip_rev) {
1810 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1811 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1812 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1813 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
1814 }
1815 PDQ_PRINTF(("\n"));
1816
1817 return pdq;
1818 }
Cache object: f6678b52be4f0514e0033cbb0cb15209
|