/* FreeBSD/Linux Kernel Cross Reference: sys/dev/pdq/pdq.c */
1 /* $NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $ */
2
3 /*-
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
27 * $FreeBSD: releng/5.0/sys/dev/pdq/pdq.c 105507 2002-10-20 08:46:56Z phk $
28 *
29 */
30
31 /*
32 * DEC PDQ FDDI Controller O/S independent code
33 *
34 * This module should work any on PDQ based board. Note that changes for
35 * MIPS and Alpha architectures (or any other architecture which requires
36 * a flushing of memory or write buffers and/or has incoherent caches)
37 * have yet to be made.
38 *
39 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
40 * flushing of the write buffers.
41 */
42
43 #ifdef __NetBSD__
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $");
46 #endif
47
48 #define PDQ_HWSUPPORT /* for pdq.h */
49
50 #if defined(__FreeBSD__)
51 /*
52 * What a botch having to specific includes for FreeBSD!
53 */
54 #include <dev/pdq/pdq_freebsd.h>
55 #include <dev/pdq/pdqreg.h>
56 #else
57 #include "pdqvar.h"
58 #include "pdqreg.h"
59 #endif
60
61 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
62 #define PDQ_CMD_RX_ALIGNMENT 16
63
64 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
65 #define PDQ_PRINTF(x) printf x
66 #else
67 #define PDQ_PRINTF(x) do { } while (0)
68 #endif
69
/*
 * Diagnostic string tables, indexed by the codes the PDQ firmware
 * reports.  Used only for console messages.
 *
 * Fixed typos in two diagnostic strings:
 * "Processer" -> "Processor", "Elasticy" -> "Elasticity".
 */
static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processor Error"
};

static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Unknown Event #0",
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticity Buffer Error",
    "Link Confidence Test Reject"
};

static const char * const pdq_phy_arguments[] = {
    "Direction"
};

/* Event-argument name tables, indexed by entity (station/link/phy). */
static const char * const * const pdq_event_arguments[] = {
    pdq_station_arguments,
    pdq_link_arguments,
    pdq_phy_arguments
};

/* Event-name tables, indexed by entity (station/link/phy). */
static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

/* One letter per PHY type code: A, B, S, M ports. */
static const char pdq_phy_types[] = "ABSM";

/* PMD type names: codes 0..3 and 100..103 (see pdq_pmd_types below). */
static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

/* Indexed by pmd_type / 100, then pmd_type % 100. */
static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

/* Board descriptions, indexed by pdq->pdq_type. */
static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};
178
179 static void
180 pdq_print_fddi_chars(
181 pdq_t *pdq,
182 const pdq_response_status_chars_get_t *rsp)
183 {
184 const char hexchars[] = "0123456789abcdef";
185
186 printf(
187 #if !defined(__bsdi__) && !defined(__NetBSD__)
188 PDQ_OS_PREFIX
189 #else
190 ": "
191 #endif
192 "DEC %s FDDI %s Controller\n",
193 #if !defined(__bsdi__) && !defined(__NetBSD__)
194 PDQ_OS_PREFIX_ARGS,
195 #endif
196 pdq_descriptions[pdq->pdq_type],
197 pdq_station_types[rsp->status_chars_get.station_type]);
198
199 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
200 PDQ_OS_PREFIX_ARGS,
201 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
202 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
203 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
204 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
205 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
206 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
207 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
208 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
209 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
210 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
211 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
212 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
213 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
214 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
215 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
216
217 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
218 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
219 }
220
221 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
222 PDQ_OS_PREFIX_ARGS,
223 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
224 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
225 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
226
227 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
228 printf(", FDDI Port[B] = %c (PMD = %s)",
229 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
230 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
231
232 printf("\n");
233
234 pdq_os_update_status(pdq, rsp);
235 }
236
/*
 * Map the standard PDQ control/status registers.
 *
 * The registers are laid out at consecutive multiples of csrsize (the
 * bus-specific register stride) from csr_base.  Note that slot 9 is
 * skipped (it jumps from csr_type_2_producer at 8 to
 * csr_cmd_response_producer at 10); only the registers this driver
 * uses are mapped.
 */
static void
pdq_init_csrs(
    pdq_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
    csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
    csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
    csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
    csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
    csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
    csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
    csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
    csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
    csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
    csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
    csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
    csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
}
260
/*
 * Map the PCI-specific (PFI) registers, which live at register slots
 * 16..19 past csr_base using the same stride as the standard CSRs.
 * Only used for DEFPA (PCI) boards.
 */
static void
pdq_init_pci_csrs(
    pdq_pci_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
    csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
    csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
    csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
}
275
276 static void
277 pdq_flush_databuf_queue(
278 pdq_t *pdq,
279 pdq_databuf_queue_t *q)
280 {
281 PDQ_OS_DATABUF_T *pdu;
282 for (;;) {
283 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
284 if (pdu == NULL)
285 return;
286 PDQ_OS_DATABUF_FREE(pdq, pdu);
287 }
288 }
289
290 static pdq_boolean_t
291 pdq_do_port_control(
292 const pdq_csrs_t * const csrs,
293 pdq_uint32_t cmd)
294 {
295 int cnt = 0;
296 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
297 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
298 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
299 cnt++;
300 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
301 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
302 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
303 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
304 }
305 /* adapter failure */
306 PDQ_ASSERT(0);
307 return PDQ_FALSE;
308 }
309
310 static void
311 pdq_read_mla(
312 const pdq_csrs_t * const csrs,
313 pdq_lanaddr_t *hwaddr)
314 {
315 pdq_uint32_t data;
316
317 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
318 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
319 data = PDQ_CSR_READ(csrs, csr_host_data);
320
321 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
322 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
323 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
324 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
325
326 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
327 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
328 data = PDQ_CSR_READ(csrs, csr_host_data);
329
330 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
331 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
332 }
333
334 static void
335 pdq_read_fwrev(
336 const pdq_csrs_t * const csrs,
337 pdq_fwrev_t *fwrev)
338 {
339 pdq_uint32_t data;
340
341 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
342 data = PDQ_CSR_READ(csrs, csr_host_data);
343
344 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
345 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
346 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
347 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
348 }
349
/*
 * Drain the adapter's error log into *log_entry, one 32-bit word per
 * port-control transaction, stopping when the adapter has no more
 * words or the response structure is full.
 *
 * Returns PDQ_TRUE if at least one word was read, PDQ_FALSE if the
 * log was empty.
 *
 * NOTE(review): each successive word is fetched with
 * PDQ_PCTL_FW_REV_READ after PDQ_PCTL_ERROR_LOG_START.  This matches
 * the historical DEC driver sources but looks surprising -- confirm
 * against the PDQ port specification before changing it.
 */
static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
        *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
        /* Stop once log_entry is completely filled. */
        if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
            break;
    }
    /* ptr unmoved means the adapter produced no log words at all. */
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}
367
368 static pdq_chip_rev_t
369 pdq_read_chiprev(
370 const pdq_csrs_t * const csrs)
371 {
372 pdq_uint32_t data;
373
374 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
375 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
376 data = PDQ_CSR_READ(csrs, csr_host_data);
377
378 return (pdq_chip_rev_t) data;
379 }
380
/*
 * Per-opcode command metadata, indexed by pdq_cmd_code_t (PDQC_*):
 * the request length, expected response length, and a printable name
 * used only for diagnostics.  Zero-length entries are commands this
 * driver never issues (or, for slot 15, an unused opcode).
 */
static const struct {
    size_t cmd_len;       /* request buffer size in bytes */
    size_t rsp_len;       /* response buffer size in bytes */
    const char *cmd_name; /* human-readable name for PDQ_PRINTF */
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),            /* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),         /* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),          /* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),            /* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),       /* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    /* Counters commands are not issued by this driver. */
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),    /* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),            /* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),            /* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },                        /* 15 - unused opcode */
    { sizeof(pdq_cmd_generic_t),            /* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),            /* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
    { 0, 0, "Bogus CMD" },                  /* sentinel for bad opcodes */
};
462
/*
 * Queue the highest-numbered pending command (one at a time) to the
 * adapter's command request/response rings.
 *
 * Pending commands are tracked as a bitmask in ci_pending_commands;
 * this routine picks one, builds the request in the pre-allocated
 * command buffer, bumps the request/response producer indices, and
 * tells the adapter via the producer CSRs.  Completion is handled
 * later by pdq_process_command_responses().
 */
static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
        return;

    /*
     * Determine which command needs to be queued.  Scan the pending
     * bitmask downward from the highest opcode (PDQC_SMT_MIB_SET).
     */
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
        op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
        cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    txd->txd_seg_len = cmdlen;

    /*
     * Clear the command area, set the opcode, and the command from the pending
     * mask.
     */

    ci->ci_queued_commands[ci->ci_request_producer] = op;
#if defined(PDQVERBOSE)
    /* Seed the response opcode so a missed completion is detectable. */
    ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
#endif
    PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
        case PDQC_FILTER_SET: {
            /* Program promiscuous/multicast/SMT filters from pdq_flags. */
            pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
            unsigned idx = 0;
            filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
            filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
            filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
            idx++;
            filter_set->filter_set_items[idx].item_code = PDQI_EOL;
            break;
        }
        case PDQC_ADDR_FILTER_SET: {
            /* First entry is the broadcast address; the OS layer fills
             * in up to 61 more multicast addresses after it. */
            pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
            pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
            addr->lanaddr_bytes[0] = 0xFF;
            addr->lanaddr_bytes[1] = 0xFF;
            addr->lanaddr_bytes[2] = 0xFF;
            addr->lanaddr_bytes[3] = 0xFF;
            addr->lanaddr_bytes[4] = 0xFF;
            addr->lanaddr_bytes[5] = 0xFF;
            addr++;
            pdq_os_addr_fill(pdq, addr, 61);
            break;
        }
        case PDQC_SNMP_SET: {
            /* item_value 1 = enable full duplex, 2 = disable. */
            pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
            unsigned idx = 0;
            snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
            snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
            snmp_set->snmp_set_items[idx].item_port = 0;
            idx++;
            snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
            break;
        }
        default: {      /* to make gcc happy */
            break;
        }
    }


    /*
     * Sync the command request buffer and descriptor, then advance
     * the request producer index.
     */
    PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
    PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Sync the command response buffer and advance the response
     * producer index (descriptor is already pre-initialized)
     */
    PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    /*
     * At this point the command is done. All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
        pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
    /* Producer CSR format: producer index in the low byte, completion
     * index in the next byte. */
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}
596
/*
 * Consume one completed command/response pair from the adapter.
 *
 * Called when the consumer block shows the adapter has advanced the
 * command-response consumer index past our completion index.  Verifies
 * the response matches the queued command, performs any per-command
 * post-processing (banner printing, FDX status), advances the
 * completion indices, and either queues the next pending command or
 * acknowledges the producer CSRs.
 */
static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed. If it has been
     * consumed then the command must have been as well.
     */

    if (cbp->pdqcb_command_response == ci->ci_response_completion)
        return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
    PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
        pdq_cmd_info[rspgen->generic_op].cmd_name,
        rspgen->generic_status, rspgen->generic_status));

    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
        /* One-shot banner print requested at attach time. */
        pdq->pdq_flags &= ~PDQ_PRINTCHARS;
        pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
        /* Track whether full-duplex is actually operational. */
        pdq->pdq_flags &= ~PDQ_IS_FDX;
        if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
            pdq->pdq_flags |= PDQ_IS_FDX;
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
        /* More work queued: issue the next command immediately. */
        pdq_queue_commands(pdq);
    } else {
        /* Nothing pending: just acknowledge the completion indices. */
        PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
            ci->ci_response_producer | (ci->ci_response_completion << 8));
        PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
            ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}
648
/*
 * This following routine processes unsolicited events.
 * In addition, it also fills the unsolicited queue with
 * event buffers so it can be used to initialize the queue
 * as well.
 *
 * Known event entities are Station, Link, and Phy Port; events with
 * an unknown entity or out-of-range event code are silently dropped.
 * Counter events are received but ignored.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;

    /*
     * Process each unsolicited event (if any).
     */

    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
        const pdq_unsolicited_event_t *event;
        event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
        PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);

        /* NOTE(review): event_type values other than EVENT/COUNTERS
         * fall through this switch unhandled (no default case). */
        switch (event->event_type) {
            case PDQ_UNSOLICITED_EVENT: {
                int bad_event = 0;
                /* Range-check the event code against its entity's table. */
                switch (event->event_entity) {
                    case PDQ_ENTITY_STATION: {
                        bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
                        break;
                    }
                    case PDQ_ENTITY_LINK: {
                        bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
                        break;
                    }
                    case PDQ_ENTITY_PHY_PORT: {
                        bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
                        break;
                    }
                    default: {
                        bad_event = 1;
                        break;
                    }
                }
                if (bad_event) {
                    break;
                }
                printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
                    PDQ_OS_PREFIX_ARGS,
                    pdq_entities[event->event_entity],
                    pdq_event_codes[event->event_entity][event->event_code.value]);
                if (event->event_entity == PDQ_ENTITY_PHY_PORT)
                    printf("[%d]", event->event_index);
                printf("\n");
                break;
            }
            case PDQ_UNSOLICITED_COUNTERS: {
                break;
            }
        }
        PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
        PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
        ui->ui_free++;
    }

    /*
     * Now give back the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    /* Producer CSR format: producer index low byte, completion next byte. */
    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
        ui->ui_producer | (ui->ui_completion << 8));
}
724
/*
 * Process completed receive descriptors up to completion_goal.
 *
 * Each received PDU occupies PDQ_RX_SEGCNT ring buffers.  Good frames
 * are chained into a databuf list, replacement buffers are allocated
 * into the ring, and the PDU is handed to the OS via
 * pdq_os_receive_pdu().  Bad or unallocatable frames are "discarded"
 * by recycling their ring buffers in place.  Finally the ring is
 * refilled with fresh buffers toward rx_target.
 *
 * receives   -- the hardware receive descriptor ring
 * ring_mask  -- ring size minus one (indices are wrapped with it)
 */
static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

    while (completion != completion_goal) {
        PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;   /* first/last/new buffer */
        pdq_uint8_t *dataptr;
        pdq_uint32_t fc, datalen, pdulen, segcnt;
        pdq_rxstatus_t status;

        fpdu = lpdu = buffers[completion];
        PDQ_ASSERT(fpdu != NULL);
        /* The adapter writes the receive status word at offset 0. */
        PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
        dataptr = PDQ_OS_DATABUF_PTR(fpdu);
        status = *(pdq_rxstatus_t *) dataptr;
        if (status.rxs_rcc_badpdu == 0) {
            datalen = status.rxs_len;
            PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
                                  PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
            /* Validate the FDDI frame-control byte and frame length. */
            fc = dataptr[PDQ_RX_FC_OFFSET];
            switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
                case PDQ_FDDI_LLC_ASYNC:
                case PDQ_FDDI_LLC_SYNC:
                case PDQ_FDDI_IMP_ASYNC:
                case PDQ_FDDI_IMP_SYNC: {
                    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
                        PDQ_PRINTF(("discard: bad length %d\n", datalen));
                        goto discard_frame;
                    }
                    break;
                }
                case PDQ_FDDI_SMT: {
                    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
                        goto discard_frame;
                    break;
                }
                default: {
                    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
                    goto discard_frame;
                }
            }
            /*
             * Update the lengths of the data buffers now that we know
             * the real length.
             */
            pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
            segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
            /* Allocate a replacement buffer for each segment consumed;
             * if any allocation fails, recycle the frame instead. */
            PDQ_OS_DATABUF_ALLOC(pdq, npdu);
            if (npdu == NULL) {
                PDQ_PRINTF(("discard: no databuf #0\n"));
                goto discard_frame;
            }
            buffers[completion] = npdu;
            for (idx = 1; idx < segcnt; idx++) {
                PDQ_OS_DATABUF_ALLOC(pdq, npdu);
                if (npdu == NULL) {
                    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
                    PDQ_OS_DATABUF_FREE(pdq, fpdu);
                    goto discard_frame;
                }
                /* Chain the received segment and swap in the new buffer. */
                PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
                lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
                buffers[(completion + idx) & ring_mask] = npdu;
            }
            PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
            /* Move the replacement buffers to the producer slots. */
            for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
                buffers[(producer + idx) & ring_mask] =
                    buffers[(completion + idx) & ring_mask];
                buffers[(completion + idx) & ring_mask] = NULL;
            }
            /* Strip the status/header prefix from the first segment. */
            PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
            if (segcnt == 1) {
                PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
            } else {
                /* Last segment holds whatever doesn't fill a full buffer. */
                PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
            }
            /*
             * Do not pass to protocol if packet was received promiscuously
             */
            pdq_os_receive_pdu(pdq, fpdu, pdulen,
                               status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
            rx->rx_free += PDQ_RX_SEGCNT;
            PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
            PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
            continue;
        } else {
            PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
                status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
                status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
            if (status.rxs_rcc_reason == 7)
                goto discard_frame;
            if (status.rxs_rcc_reason != 0) {
                /* hardware fault */
                if (status.rxs_rcc_badcrc) {
                    printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
                        PDQ_OS_PREFIX_ARGS,
                        dataptr[PDQ_RX_FC_OFFSET+1],
                        dataptr[PDQ_RX_FC_OFFSET+2],
                        dataptr[PDQ_RX_FC_OFFSET+3],
                        dataptr[PDQ_RX_FC_OFFSET+4],
                        dataptr[PDQ_RX_FC_OFFSET+5],
                        dataptr[PDQ_RX_FC_OFFSET+6]);
                    /* rx->rx_badcrc++; */
                } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
                    /* rx->rx_frame_status_errors++; */
                } else {
                    /* hardware fault */
                }
            }
        }
      discard_frame:
        /*
         * Discarded frames go right back on the queue; therefore
         * ring entries were freed.
         */
        for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
            buffers[producer] = buffers[completion];
            buffers[completion] = NULL;
            rxd = &receives[rx->rx_producer];
            if (idx == 0) {
                rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
            } else {
                rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
            }
            rxd->rxd_pa_hi = 0;
            rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
            rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
            PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
            PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
            PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
            PDQ_ADVANCE(producer, 1, ring_mask);
            PDQ_ADVANCE(completion, 1, ring_mask);
        }
    }
    rx->rx_completion = completion;

    /* Refill the ring with fresh buffers, PDQ_RX_SEGCNT at a time,
     * until we are at (or below) the configured free target. */
    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
        PDQ_OS_DATABUF_T *pdu;
        /*
         * Allocate the needed number of data buffers.
         * Try to obtain them from our free queue before
         * asking the system for more.
         */
        for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
            if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
                PDQ_OS_DATABUF_ALLOC(pdq, pdu);
                if (pdu == NULL)
                    break;
                buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
            }
            rxd = &receives[(rx->rx_producer + idx) & ring_mask];
            if (idx == 0) {
                rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
            } else {
                rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
            }
            rxd->rxd_pa_hi = 0;
            rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
            rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
            PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
            PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
        }
        if (idx < PDQ_RX_SEGCNT) {
            /*
             * We didn't get all databufs required to complete a new
             * receive buffer. Keep the ones we got and retry a bit
             * later for the rest.
             */
            break;
        }
        PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
        rx->rx_free -= PDQ_RX_SEGCNT;
    }
}
909
910 static void pdq_process_transmitted_data(pdq_t *pdq);
911
/*
 * pdq_queue_transmit_data - queue one PDU (packet chain) on the transmit
 * descriptor ring.
 *
 * Returns PDQ_TRUE when the PDU was fully described on the ring and handed
 * to the adapter; returns PDQ_FALSE when the ring lacks descriptors, after
 * enabling the TX interrupt so the caller can requeue the PDU and be told
 * when space frees up.  On success the PDU is enqueued on tx_txq so
 * pdq_process_transmitted_data() can release it on completion.
 */
912 pdq_boolean_t
913 pdq_queue_transmit_data(
914 pdq_t *pdq,
915 PDQ_OS_DATABUF_T *pdu)
916 {
917 pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
918 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
919 pdq_uint32_t producer = tx->tx_producer;
920 pdq_txdesc_t *eop = NULL;
921 PDQ_OS_DATABUF_T *pdu0;
922 pdq_uint32_t freecnt;
923 #if defined(PDQ_BUS_DMA)
924 bus_dmamap_t map;
925 #endif
926
927 again:
/*
 * When the frame-control byte is at the header offset, one extra
 * descriptor (the prebuilt FDDI header descriptor, tx_hdrdesc) is
 * consumed per PDU, so reserve it up front.
 */
928 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
929 freecnt = tx->tx_free - 1;
930 } else {
931 freecnt = tx->tx_free;
932 }
933 /*
934 * Need 2 or more descriptors to be able to send.
935 */
/* freecnt already accounts for the header reservation above. */
936 if (freecnt == 0) {
/* Ring full: arm the TX interrupt so we learn when space frees up. */
937 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
938 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
939 return PDQ_FALSE;
940 }
941
/* Emit the SOP header descriptor first (it carries the FDDI PHY header). */
942 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
943 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
944 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
945 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
946 }
947
948 #if defined(PDQ_BUS_DMA)
/*
 * bus_dma path: the PDU was already mapped; one descriptor per DMA
 * segment.  All-or-nothing: if the ring can't hold every segment,
 * leave pdu0 != NULL so the retry/fail path below runs.
 */
949 map = M_GETCTX(pdu, bus_dmamap_t);
950 if (freecnt >= map->dm_nsegs) {
951 int idx;
952 for (idx = 0; idx < map->dm_nsegs; idx++) {
953 /*
954 * Initialize the transmit descriptor
955 */
956 eop = &dbp->pdqdb_transmits[producer];
957 eop->txd_seg_len = map->dm_segs[idx].ds_len;
958 eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
959 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
960 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
961 freecnt--;
962 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
963 }
964 pdu0 = NULL;
965 } else {
966 pdu0 = pdu;
967 }
968 #else
/*
 * Manual path: walk the databuf chain, splitting each buffer at page
 * boundaries since a descriptor apparently may not cross a page.
 * pdu0 is left non-NULL if we run out of descriptors mid-chain.
 */
969 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
970 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
971 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
972
973 /*
974 * The first segment is limited to the space remaining in
975 * page. All segments after that can be up to a full page
976 * in size.
977 */
/*
 * NOTE(review): (dataptr - (pdq_uint8_t *) NULL) is a
 * pointer-to-offset idiom (page-offset of dataptr); technically UB
 * by strict C rules but conventional in this era of kernel code.
 */
978 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
979 while (datalen > 0 && freecnt > 0) {
980 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
981
982 /*
983 * Initialize the transmit descriptor
984 */
985 eop = &dbp->pdqdb_transmits[producer];
986 eop->txd_seg_len = seglen;
987 eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
988 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
989 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
990 datalen -= seglen;
991 dataptr += seglen;
992 fraglen = PDQ_OS_PAGESIZE;
993 freecnt--;
994 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
995 }
996 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
997 }
998 #endif /* defined(PDQ_BUS_DMA) */
999 if (pdu0 != NULL) {
/*
 * Ran out of descriptors.  Try to reclaim completed ones; if the
 * completion index moved, restart descriptor construction from the
 * (unchanged) committed producer index.
 */
1000 unsigned completion = tx->tx_completion;
1001 PDQ_ASSERT(freecnt == 0);
1002 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1003 pdq_process_transmitted_data(pdq);
1004 if (completion != tx->tx_completion) {
1005 producer = tx->tx_producer;
1006 eop = NULL;
1007 goto again;
1008 }
1009 /*
1010 * If we still have data to process then the ring was too full
1011 * to store the PDU. Return FALSE so the caller will requeue
1012 * the PDU for later.
1013 */
1014 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
1015 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1016 return PDQ_FALSE;
1017 }
1018 /*
1019 * Everything went fine. Finish it up.
1020 */
/* Record how many descriptors this PDU used, keyed by its SOP index,
 * so the completion path can advance past the whole PDU at once. */
1021 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
1022 if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
/* No header descriptor was emitted; mark the first data descriptor SOP. */
1023 dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
1024 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1025 sizeof(pdq_txdesc_t));
1026 }
/* Mark the last descriptor written as end-of-packet and flush it. */
1027 eop->txd_eop = 1;
1028 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
1029 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1030 tx->tx_producer = producer;
1031 tx->tx_free = freecnt;
/* Tell the adapter about the new producer index. */
1032 PDQ_DO_TYPE2_PRODUCER(pdq);
1033 return PDQ_TRUE;
1034 }
1035
/*
 * pdq_process_transmitted_data - reclaim transmit descriptors the adapter
 * has consumed.
 *
 * Walks from the driver's completion index up to the adapter's consumer
 * index (pdqcb_transmits), releasing one queued PDU per completed packet
 * via pdq_os_transmit_done() and returning its descriptors to tx_free.
 * On progress, the TX-full interrupt is disarmed and the OS transmitter
 * is restarted.
 */
1036 static void
1037 pdq_process_transmitted_data(
1038 pdq_t *pdq)
1039 {
1040 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1041 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1042 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1043 pdq_uint32_t completion = tx->tx_completion;
1044 int reclaimed = 0;
1045
1046 while (completion != cbp->pdqcb_transmits) {
1047 PDQ_OS_DATABUF_T *pdu;
/* tx_descriptor_count[] was recorded at the PDU's SOP index by
 * pdq_queue_transmit_data(); it spans the whole PDU. */
1048 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
/* Sanity: the completed span must start on SOP and end on EOP. */
1049 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1050 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
/* PDUs complete in FIFO order, so the head of tx_txq is this one. */
1051 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1052 pdq_os_transmit_done(pdq, pdu);
1053 tx->tx_free += descriptor_count;
1054 reclaimed = 1;
1055 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1056 }
/* Note: this condition and `reclaimed` are set by the same loop body,
 * so they are equivalent progress indicators. */
1057 if (tx->tx_completion != completion) {
1058 tx->tx_completion = completion;
/* Space freed up: no need for the TX-full interrupt any longer. */
1059 pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1060 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1061 pdq_os_restart_transmitter(pdq);
1062 }
/* Publish the updated completion index to the adapter. */
1063 if (reclaimed)
1064 PDQ_DO_TYPE2_PRODUCER(pdq);
1065 }
1066
/*
 * pdq_flush_transmitter - discard every PDU still queued for transmit.
 *
 * Frees each queued PDU without invoking the transmit-done callback
 * (nothing was sent), then resets the transmit ring to an empty state:
 * all descriptors free, and consumer == completion == producer.
 */
1067 void
1068 pdq_flush_transmitter(
1069 pdq_t *pdq)
1070 {
1071 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1072 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1073
1074 for (;;) {
1075 PDQ_OS_DATABUF_T *pdu;
1076 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1077 if (pdu == NULL)
1078 break;
1079 /*
1080 * Don't call transmit done since the packet never made it
1081 * out on the wire.
1082 */
1083 PDQ_OS_DATABUF_FREE(pdq, pdu);
1084 }
1085
/* Ring capacity is ring size - 1 (producer may not catch consumer). */
1086 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
/* Align all three indexes; flush the consumer block before the
 * producer update makes the change visible to the adapter. */
1087 cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
1088 PDQ_OS_CONSUMER_PRESYNC(pdq);
1089
1090 PDQ_DO_TYPE2_PRODUCER(pdq);
1091 }
1092
/*
 * pdq_hwreset - hard-reset the adapter and wait for it to reach the
 * DMA_UNAVAILABLE state.
 *
 * No-op if the adapter is already in DMA_UNAVAILABLE.  The self-test is
 * skipped except when recovering a halted non-DEFTA adapter (a halted
 * board gets the full self-test).  Spins up to ~100 seconds (100000 x
 * 1ms) for the state transition.
 */
1093 void
1094 pdq_hwreset(
1095 pdq_t *pdq)
1096 {
1097 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1098 pdq_state_t state;
1099 int cnt;
1100
1101 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1102 if (state == PDQS_DMA_UNAVAILABLE)
1103 return;
/* data_a selects reset flavor: 0 = full self-test, else skip it. */
1104 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1105 (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
/* Pulse the reset line: assert, hold 100us, deassert. */
1106 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1107 PDQ_OS_USEC_DELAY(100);
1108 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1109 for (cnt = 100000;;cnt--) {
1110 PDQ_OS_USEC_DELAY(1000);
1111 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1112 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1113 break;
1114 }
1115 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
/* Extra settle time before trusting the reported state. */
1116 PDQ_OS_USEC_DELAY(10000);
1117 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1118 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
/* NOTE(review): if the state transition happened exactly on the last
 * iteration (cnt == 0), this assert fires despite a successful reset;
 * debug-build-only corner case inherited from the original driver. */
1119 PDQ_ASSERT(cnt > 0);
1120 }
1121
1122 /*
1123 * The following routine brings the PDQ from whatever state it is
1124 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
1125 */
/*
 * pdq_stop - take the adapter to a known quiescent state and then bring
 * it up to DMA_AVAILABLE, ready for pdq_run().
 *
 * Sequence: hard-reset to DMA_UNAVAILABLE if needed; read hardware
 * address / firmware / chip revision; free all queued and posted
 * buffers; zero all consumer, producer and completion indexes; program
 * DMA burst size and the consumer/descriptor block bus addresses; then
 * poll (up to ~1s) for the DMA_AVAILABLE transition, retrying the whole
 * sequence once if the adapter halts.  Returns the final adapter state
 * (PDQS_DMA_AVAILABLE on success, PDQS_HALTED on repeated failure).
 */
1126 pdq_state_t
1127 pdq_stop(
1128 pdq_t *pdq)
1129 {
1130 pdq_state_t state;
1131 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1132 int cnt, pass = 0, idx;
1133 PDQ_OS_DATABUF_T **buffers;
1134
1135 restart:
1136 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1137 if (state != PDQS_DMA_UNAVAILABLE) {
1138 pdq_hwreset(pdq);
1139 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1140 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1141 }
/* Disabled alternative: step down through the state machine with port
 * commands instead of a hard reset.  Kept for reference. */
1142 #if 0
1143 switch (state) {
1144 case PDQS_RING_MEMBER:
1145 case PDQS_LINK_UNAVAILABLE:
1146 case PDQS_LINK_AVAILABLE: {
1147 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1148 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1149 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1150 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1151 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1152 /* FALLTHROUGH */
1153 }
1154 case PDQS_DMA_AVAILABLE: {
1155 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1156 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1157 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1158 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1159 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1160 /* FALLTHROUGH */
1161 }
1162 case PDQS_DMA_UNAVAILABLE: {
1163 break;
1164 }
1165 }
1166 #endif
1167 /*
1168 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1169 * DMA_AVAILABLE.
1170 */
1171
1172 /*
1173 * Obtain the hardware address and firmware revisions
1174 * (MLA = my long address which is FDDI speak for hardware address)
1175 */
1176 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1177 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1178 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1179
1180 if (pdq->pdq_type == PDQ_DEFPA) {
1181 /*
1182 * Disable interrupts and DMA.
1183 */
/* 0x10 appears to be a write-one-to-clear status bit on the PFI;
 * TODO confirm against the DEFPA PFI register documentation. */
1184 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1185 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1186 }
1187
1188 /*
1189 * Flush all the databuf queues.
1190 */
1191 pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1192 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
/* Release every receive buffer still posted to the rx ring ... */
1193 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1194 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1195 if (buffers[idx] != NULL) {
1196 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1197 buffers[idx] = NULL;
1198 }
1199 }
1200 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
/* ... and the same for the host SMT ring. */
1201 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1202 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1203 if (buffers[idx] != NULL) {
1204 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1205 buffers[idx] = NULL;
1206 }
1207 }
1208 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1209
1210 /*
1211 * Reset the consumer indexes to 0.
1212 */
1213 pdq->pdq_cbp->pdqcb_receives = 0;
1214 pdq->pdq_cbp->pdqcb_transmits = 0;
1215 pdq->pdq_cbp->pdqcb_host_smt = 0;
1216 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1217 pdq->pdq_cbp->pdqcb_command_response = 0;
1218 pdq->pdq_cbp->pdqcb_command_request = 0;
1219 PDQ_OS_CONSUMER_PRESYNC(pdq);
1220
1221 /*
1222 * Reset the producer and completion indexes to 0.
1223 */
1224 pdq->pdq_command_info.ci_request_producer = 0;
1225 pdq->pdq_command_info.ci_response_producer = 0;
1226 pdq->pdq_command_info.ci_request_completion = 0;
1227 pdq->pdq_command_info.ci_response_completion = 0;
1228 pdq->pdq_unsolicited_info.ui_producer = 0;
1229 pdq->pdq_unsolicited_info.ui_completion = 0;
1230 pdq->pdq_rx_info.rx_producer = 0;
1231 pdq->pdq_rx_info.rx_completion = 0;
1232 pdq->pdq_tx_info.tx_producer = 0;
1233 pdq->pdq_tx_info.tx_completion = 0;
1234 pdq->pdq_host_smt_info.rx_producer = 0;
1235 pdq->pdq_host_smt_info.rx_completion = 0;
1236
1237 pdq->pdq_command_info.ci_command_active = 0;
1238 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1239 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1240
1241 /*
1242 * Allow the DEFPA to do DMA. Then program the physical
1243 * addresses of the consumer and descriptor blocks.
1244 */
1245 if (pdq->pdq_type == PDQ_DEFPA) {
1246 #ifdef PDQTEST
1247 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1248 PDQ_PFI_MODE_DMA_ENABLE);
1249 #else
1250 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1251 PDQ_PFI_MODE_DMA_ENABLE
1252 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1253 #endif
1254 }
1255
1256 /*
1257 * Make sure the unsolicited queue has events ...
1258 */
1259 pdq_process_unsolicited_events(pdq);
1260
/* Rev-E DEFEA and DEFTA boards can use the larger 16-longword burst. */
1261 if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1262 || pdq->pdq_type == PDQ_DEFTA)
1263 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1264 else
1265 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1266 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1267 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1268
1269 /*
1270 * Make sure there isn't stale information in the caches before
1271 * tell the adapter about the blocks it's going to use.
1272 */
1273 PDQ_OS_CONSUMER_PRESYNC(pdq);
1274
/* Hand the adapter the bus address of the consumer block ... */
1275 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1276 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1277 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1278
/* ... and of the descriptor block, with byte-swap flags matching the
 * host's endianness. */
1279 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1280 #if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
1281 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1282 #else
1283 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
1284 #endif
1285 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1286
/* Poll up to ~1s for DMA_AVAILABLE; a halt gets one full retry
 * (goto restart), a second halt is returned to the caller. */
1287 for (cnt = 0; cnt < 1000; cnt++) {
1288 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1289 if (state == PDQS_HALTED) {
1290 if (pass > 0)
1291 return PDQS_HALTED;
1292 pass = 1;
1293 goto restart;
1294 }
1295 if (state == PDQS_DMA_AVAILABLE) {
1296 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1297 break;
1298 }
1299 PDQ_OS_USEC_DELAY(1000);
1300 }
1301 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1302
/* Ack all type-0 interrupts and mask everything; pdq_run() enables
 * the real interrupt set later. */
1303 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1304 pdq->pdq_intrmask = 0;
1305 /* PDQ_HOST_INT_STATE_CHANGE
1306 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1307 |PDQ_HOST_INT_UNSOL_ENABLE */;
1308 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1309
1310 /*
1311 * Any other command but START should be valid.
1312 */
1313 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1314 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1315 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1316 pdq_queue_commands(pdq);
1317
1318 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1319 /*
1320 * Now wait (up to 100ms) for the command(s) to finish.
1321 */
/* NOTE(review): the loop bound is 1000 x 1ms, i.e. up to ~1s, not
 * the 100ms the original comment claims. */
1322 for (cnt = 0; cnt < 1000; cnt++) {
1323 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1324 pdq_process_command_responses(pdq);
1325 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1326 break;
1327 PDQ_OS_USEC_DELAY(1000);
1328 }
1329 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1330 }
1331
1332 return state;
1333 }
1334
/*
 * pdq_run - bring an adapter left in DMA_AVAILABLE (by pdq_stop) onto
 * the FDDI ring.
 *
 * Enables the normal interrupt set, primes the receive and host-SMT
 * rings, and queues the filter/address/SNMP configuration commands
 * (plus START when coming from DMA_AVAILABLE).  The asserts document
 * that callers must not invoke this in reset/halted/upgrade states.
 */
1335 void
1336 pdq_run(
1337 pdq_t *pdq)
1338 {
1339 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1340 pdq_state_t state;
1341
1342 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1343 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1344 PDQ_ASSERT(state != PDQS_RESET);
1345 PDQ_ASSERT(state != PDQS_HALTED);
1346 PDQ_ASSERT(state != PDQS_UPGRADE);
1347 PDQ_ASSERT(state != PDQS_RING_MEMBER);
1348 switch (state) {
1349 case PDQS_DMA_AVAILABLE: {
1350 /*
1351 * The PDQ after being reset screws up some of its state.
1352 * So we need to clear all the errors/interrupts so the real
1353 * ones will get through.
1354 */
1355 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1356 pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1357 |PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1358 |PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1359 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1360 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1361 /*
1362 * Set the MAC and address filters and start up the PDQ.
1363 */
1364 pdq_process_unsolicited_events(pdq);
/* Called here to (re)populate the empty rx ring with buffers. */
1365 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1366 pdq->pdq_dbp->pdqdb_receives,
1367 pdq->pdq_cbp->pdqcb_receives,
1368 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1369 PDQ_DO_TYPE2_PRODUCER(pdq);
1370 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1371 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1372 pdq->pdq_dbp->pdqdb_host_smt,
1373 pdq->pdq_cbp->pdqcb_host_smt,
1374 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
/* Host-SMT producer register packs producer (low byte) and
 * completion (next byte). */
1375 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1376 pdq->pdq_host_smt_info.rx_producer
1377 | (pdq->pdq_host_smt_info.rx_completion << 8));
1378 }
1379 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1380 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1381 | PDQ_BITMASK(PDQC_SNMP_SET)
1382 | PDQ_BITMASK(PDQC_START);
1383 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1384 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1385 pdq_queue_commands(pdq);
1386 break;
1387 }
1388 case PDQS_LINK_UNAVAILABLE:
1389 case PDQS_LINK_AVAILABLE: {
/* Already started: just refresh filters/SNMP; no START command. */
1390 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1391 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1392 | PDQ_BITMASK(PDQC_SNMP_SET);
1393 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1394 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1395 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1396 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1397 pdq->pdq_dbp->pdqdb_host_smt,
1398 pdq->pdq_cbp->pdqcb_host_smt,
1399 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1400 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1401 pdq->pdq_host_smt_info.rx_producer
1402 | (pdq->pdq_host_smt_info.rx_completion << 8));
1403 }
1404 pdq_process_unsolicited_events(pdq);
1405 pdq_queue_commands(pdq);
1406 break;
1407 }
/* Excluded by the assert above; empty case falls through to default. */
1408 case PDQS_RING_MEMBER: {
1409 }
1410 default: { /* to make gcc happy */
1411 break;
1412 }
1413 }
1414 }
1415
/*
 * pdq_interrupt - adapter interrupt service routine.
 *
 * Loops while the port status shows a pending interrupt, dispatching in
 * turn: received data, host-SMT data, transmit completions, unsolicited
 * events, command responses, and type-0 events (state changes, fatal
 * errors, transmit-flush requests).  A transition to HALTED or a fatal
 * error triggers a full pdq_stop()/pdq_run() recovery.  Returns nonzero
 * if any work was done (so shared-interrupt callers can claim/disclaim).
 */
1416 int
1417 pdq_interrupt(
1418 pdq_t *pdq)
1419 {
1420 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1421 pdq_uint32_t data;
1422 int progress = 0;
1423
/* 0x18 appears to ack/clear PFI interrupt status bits on the DEFPA;
 * TODO confirm against the DEFPA PFI register documentation. */
1424 if (pdq->pdq_type == PDQ_DEFPA)
1425 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1426
1427 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1428 progress = 1;
1429 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
/* Pull the adapter's consumer-block updates into host view first. */
1430 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1431 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1432 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1433 pdq->pdq_dbp->pdqdb_receives,
1434 pdq->pdq_cbp->pdqcb_receives,
1435 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1436 PDQ_DO_TYPE2_PRODUCER(pdq);
1437 }
1438 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1439 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1440 pdq->pdq_dbp->pdqdb_host_smt,
1441 pdq->pdq_cbp->pdqcb_host_smt,
1442 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1443 PDQ_DO_HOST_SMT_PRODUCER(pdq);
1444 }
/* Deliberately unconditional: reclaim tx descriptors every pass. */
1445 /* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
1446 pdq_process_transmitted_data(pdq);
1447 if (data & PDQ_PSTS_UNSOL_PENDING)
1448 pdq_process_unsolicited_events(pdq);
1449 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1450 pdq_process_command_responses(pdq);
1451 if (data & PDQ_PSTS_TYPE_0_PENDING) {
/* `data` is reused here to hold the type-0 interrupt bits. */
1452 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1453 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1454 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1455 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
1456 if (state == PDQS_LINK_UNAVAILABLE) {
1457 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1458 } else if (state == PDQS_LINK_AVAILABLE) {
/* Query the extended MIB to learn whether full duplex came up. */
1459 if (pdq->pdq_flags & PDQ_WANT_FDX) {
1460 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
1461 pdq_queue_commands(pdq);
1462 }
1463 pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
1464 pdq_os_restart_transmitter(pdq);
1465 } else if (state == PDQS_HALTED) {
/* Adapter halted: log the halt code and error log, then do a
 * full stop/run recovery and return early. */
1466 pdq_response_error_log_get_t log_entry;
1467 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1468 printf(": halt code = %d (%s)\n",
1469 halt_code, pdq_halt_codes[halt_code]);
1470 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1471 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1472 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1473 data & PDQ_HOST_INT_FATAL_ERROR));
1474 }
1475 PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
1476 if (pdq_read_error_log(pdq, &log_entry)) {
1477 PDQ_PRINTF((" Error log Entry:\n"));
1478 PDQ_PRINTF((" CMD Status = %d (0x%x)\n",
1479 log_entry.error_log_get_status,
1480 log_entry.error_log_get_status));
1481 PDQ_PRINTF((" Event Status = %d (0x%x)\n",
1482 log_entry.error_log_get_event_status,
1483 log_entry.error_log_get_event_status));
1484 PDQ_PRINTF((" Caller Id = %d (0x%x)\n",
1485 log_entry.error_log_get_caller_id,
1486 log_entry.error_log_get_caller_id));
1487 PDQ_PRINTF((" Write Count = %d (0x%x)\n",
1488 log_entry.error_log_get_write_count,
1489 log_entry.error_log_get_write_count));
1490 PDQ_PRINTF((" FRU Implication Mask = %d (0x%x)\n",
1491 log_entry.error_log_get_fru_implication_mask,
1492 log_entry.error_log_get_fru_implication_mask));
1493 PDQ_PRINTF((" Test ID = %d (0x%x)\n",
1494 log_entry.error_log_get_test_id,
1495 log_entry.error_log_get_test_id));
1496 }
1497 pdq_stop(pdq);
1498 if (pdq->pdq_flags & PDQ_RUNNING)
1499 pdq_run(pdq);
1500 return 1;
1501 }
1502 printf("\n");
/* Ack the state-change bit (write-one-to-clear). */
1503 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1504 }
1505 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1506 pdq_stop(pdq);
1507 if (pdq->pdq_flags & PDQ_RUNNING)
1508 pdq_run(pdq);
1509 return 1;
1510 }
1511 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
/* Adapter asked us to drop everything queued for transmit. */
1512 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1513 pdq->pdq_flags &= ~PDQ_TXOK;
1514 pdq_flush_transmitter(pdq);
1515 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1516 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
1517 }
1518 }
/* Re-ack the PFI status before polling port status again. */
1519 if (pdq->pdq_type == PDQ_DEFPA)
1520 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1521 }
1522 return progress;
1523 }
1524
1525 pdq_t *
1526 pdq_initialize(
1527 pdq_bus_t bus,
1528 pdq_bus_memaddr_t csr_base,
1529 const char *name,
1530 int unit,
1531 void *ctx,
1532 pdq_type_t type)
1533 {
1534 pdq_t *pdq;
1535 pdq_state_t state;
1536 pdq_descriptor_block_t *dbp;
1537 #if !defined(PDQ_BUS_DMA)
1538 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1539 pdq_uint8_t *p;
1540 #endif
1541 int idx;
1542
1543 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1544 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1545 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1546 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1547 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1548 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1549 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1550 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1551 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1552
1553 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1554 if (pdq == NULL) {
1555 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1556 return NULL;
1557 }
1558 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1559 pdq->pdq_type = type;
1560 pdq->pdq_unit = unit;
1561 pdq->pdq_os_ctx = (void *) ctx;
1562 pdq->pdq_os_name = name;
1563 pdq->pdq_flags = PDQ_PRINTCHARS;
1564 /*
1565 * Allocate the additional data structures required by
1566 * the PDQ driver. Allocate a contiguous region of memory
1567 * for the descriptor block. We need to allocated enough
1568 * to guarantee that we will a get 8KB block of memory aligned
1569 * on a 8KB boundary. This turns to require that we allocate
1570 * (N*2 - 1 page) pages of memory. On machine with less than
1571 * a 8KB page size, it mean we will allocate more memory than
1572 * we need. The extra will be used for the unsolicited event
1573 * buffers (though on machines with 8KB pages we will to allocate
1574 * them separately since there will be nothing left overs.)
1575 */
1576 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1577 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1578
1579 if (p == NULL)
1580 printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __FUNCTION__);
1581
1582 if (p != NULL) {
1583 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1584 /*
1585 * Assert that we really got contiguous memory. This isn't really
1586 * needed on systems that actually have physical contiguous allocation
1587 * routines, but on those systems that don't ...
1588 */
1589 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1590 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1591 goto cleanup_and_return;
1592 }
1593 if (physaddr & 0x1FFF) {
1594 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1595 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1596 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1597 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1598 } else {
1599 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1600 pdq->pdq_pa_descriptor_block = physaddr;
1601 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1602 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1603 }
1604 }
1605 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1606 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1607 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1608 pdq->pdq_unsolicited_info.ui_events =
1609 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1610 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1611 }
1612 #else
1613 if (pdq_os_memalloc_contig(pdq))
1614 goto cleanup_and_return;
1615 #endif
1616
1617 /*
1618 * Make sure everything got allocated. If not, free what did
1619 * get allocated and return.
1620 */
1621 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1622 cleanup_and_return:
1623 #ifdef PDQ_OS_MEMFREE_CONTIG
1624 if (p /* pdq->pdq_dbp */ != NULL)
1625 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1626 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1627 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1628 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1629 #endif
1630 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1631 return NULL;
1632 }
1633 dbp = pdq->pdq_dbp;
1634
1635 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1636 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1637 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1638 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1639 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1640 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1641 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1642
1643 /*
1644 * Zero out the descriptor block. Not really required but
1645 * it pays to be neat. This will also zero out the consumer
1646 * block, command pool, and buffer pointers for the receive
1647 * host_smt rings.
1648 */
1649 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1650
1651 /*
1652 * Initialize the CSR references.
1653 * the DEFAA (FutureBus+) skips a longword between registers
1654 */
1655 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1656 if (pdq->pdq_type == PDQ_DEFPA)
1657 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1658
1659 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1660 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1661 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1662 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1663 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1664 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1665 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1666 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1667 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1668 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1669 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1670 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1671 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1672 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1673 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1674 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1675 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1676 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1677 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1678 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1679 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1680 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1681 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1682 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1683 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1684 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1685 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1686
1687 /*
1688 * Initialize the command information block
1689 */
1690 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1691 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1692 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1693 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1694 pdq->pdq_command_info.ci_request_bufstart,
1695 pdq->pdq_command_info.ci_pa_request_bufstart));
1696 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1697 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1698
1699 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1700 txd->txd_eop = txd->txd_sop = 1;
1701 txd->txd_pa_hi = 0;
1702 }
1703 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1704 sizeof(dbp->pdqdb_command_requests));
1705
1706 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1707 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1708 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1709 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1710 pdq->pdq_command_info.ci_response_bufstart,
1711 pdq->pdq_command_info.ci_pa_response_bufstart));
1712 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1713 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1714
1715 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1716 rxd->rxd_sop = 1;
1717 rxd->rxd_seg_cnt = 0;
1718 rxd->rxd_seg_len_lo = 0;
1719 rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1720 }
1721 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1722 sizeof(dbp->pdqdb_command_responses));
1723
1724 /*
1725 * Initialize the unsolicited event information block
1726 */
1727 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1728 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1729 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1730 pdq->pdq_unsolicited_info.ui_events,
1731 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1732 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1733 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1734 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1735
1736 rxd->rxd_sop = 1;
1737 rxd->rxd_seg_cnt = 0;
1738 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1739 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1740 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1741 rxd->rxd_pa_hi = 0;
1742 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1743 }
1744 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1745 sizeof(dbp->pdqdb_unsolicited_events));
1746
1747 /*
1748 * Initialize the receive information blocks (normal and SMT).
1749 */
1750 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1751 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1752 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1753 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1754
1755 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1756 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1757 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1758 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1759
1760 /*
1761 * Initialize the transmit information block.
1762 */
1763 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1764 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1765 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1766 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1767 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1768 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1769 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1770 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1771
1772 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1773 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1774
1775 /*
1776 * Stop the PDQ if it is running and put it into a known state.
1777 */
1778 state = pdq_stop(pdq);
1779
1780 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1781 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1782 /*
1783 * If the adapter is not the state we expect, then the initialization
1784 * failed. Cleanup and exit.
1785 */
1786 #if defined(PDQVERBOSE)
1787 if (state == PDQS_HALTED) {
1788 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1789 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1790 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1791 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1792 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1793 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1794 }
1795 #endif
1796 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1797 goto cleanup_and_return;
1798
1799 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1800 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1801 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1802 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1803 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1804 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1805 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1806 PDQ_PRINTF(("PDQ Chip Revision = "));
1807 switch (pdq->pdq_chip_rev) {
1808 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1809 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1810 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1811 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
1812 }
1813 PDQ_PRINTF(("\n"));
1814
1815 return pdq;
1816 }
/* Cache object: 00af74ebc9e56cb7b2c3b258c7eeb8cc — web-archive extraction residue, not part of the original source file */