FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/pdq.c
1 /* $NetBSD: pdq.c,v 1.37 2005/02/27 00:27:02 perry Exp $ */
2
3 /*-
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
27 *
28 */
29
30 /*
31 * DEC PDQ FDDI Controller O/S independent code
32 *
33 * This module should work any on PDQ based board. Note that changes for
34 * MIPS and Alpha architectures (or any other architecture which requires
35 * a flushing of memory or write buffers and/or has incoherent caches)
36 * have yet to be made.
37 *
38 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
39 * flushing of the write buffers.
40 */
41
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: pdq.c,v 1.37 2005/02/27 00:27:02 perry Exp $");
44
45 #define PDQ_HWSUPPORT /* for pdq.h */
46
47 #if defined(__FreeBSD__)
48 /*
49 * What a botch having to specific includes for FreeBSD!
50 */
51 #include <dev/pdq/pdqvar.h>
52 #include <dev/pdq/pdqreg.h>
53 #else
54 #include "pdqvar.h"
55 #include "pdqreg.h"
56 #endif
57
58 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
59 #define PDQ_CMD_RX_ALIGNMENT 16
60
61 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
62 #define PDQ_PRINTF(x) printf x
63 #else
64 #define PDQ_PRINTF(x) do { } while (0)
65 #endif
66
/*
 * Reasons reported by the adapter when it halts, indexed by the halt
 * code from the port status register.  Printed verbatim in diagnostics.
 * (Fixed typo: "Processer" -> "Processor".)
 */
static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processor Error"
};
72
/*
 * Printable adapter state names, indexed by the adapter state value
 * reported by the PDQ.
 */
static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
/* Entity names, indexed by the event's entity field (see pdq_event_codes). */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

/* Station-entity event names; slot 0 has no meaningful event. */
static const char * const pdq_station_events[] = {
    "Unknown Event #0",
    "Trace Received"
};

/*
 * Link-entity event names, indexed by event code.  NULL slots are
 * event codes for which no text exists; callers must not print them.
 */
static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};
112
113 #if 0
114 static const char * const pdq_station_arguments[] = {
115 "Reason"
116 };
117
118 static const char * const pdq_link_arguments[] = {
119 "Reason",
120 "Data Link Header",
121 "Source",
122 "Upstream Neighbor"
123 };
124
125 static const char * const pdq_phy_arguments[] = {
126 "Direction"
127 };
128
129 static const char * const * const pdq_event_arguments[] = {
130 pdq_station_arguments,
131 pdq_link_arguments,
132 pdq_phy_arguments
133 };
134
135 #endif
136
137
/*
 * PHY-port-entity event names, indexed by event code.
 * (Fixed typo: "Elasticy" -> "Elasticity", the FDDI PHY term.)
 */
static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticity Buffer Error",
    "Link Confidence Test Reject"
};
143
/*
 * Per-entity event name tables: pdq_event_codes[entity][code].
 * Row order must match the entity numbering used by pdq_entities.
 */
static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

/* Station type names, indexed by the station_type status value. */
static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

/* SMT version strings, indexed by smt_version_id (0 prints empty). */
static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

/* One character per PHY type: A, B, S(lave), M(aster). */
static const char pdq_phy_types[] = "ABSM";

/* PMD names for type codes 0..3. */
static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

/* PMD names for type codes 100..103. */
static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

/*
 * PMD name lookup: indexed as pdq_pmd_types[type / 100][type % 100],
 * so codes 0-3 use row 0 and codes 100-103 use row 1.
 */
static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

/* Board description strings, indexed by pdq->pdq_type. */
static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};
179
180 static void
181 pdq_print_fddi_chars(
182 pdq_t *pdq,
183 const pdq_response_status_chars_get_t *rsp)
184 {
185 static const char hexchars[] = "0123456789abcdef";
186 pdq_uint32_t phy_type;
187 pdq_uint32_t pmd_type;
188 pdq_uint32_t smt_version_id;
189 pdq_station_type_t station_type;
190
191 printf(
192 #if !defined(__bsdi__) && !defined(__NetBSD__)
193 PDQ_OS_PREFIX
194 #else
195 ": "
196 #endif
197 "DEC %s FDDI %s Controller\n",
198 #if !defined(__bsdi__) && !defined(__NetBSD__)
199 PDQ_OS_PREFIX_ARGS,
200 #endif
201 pdq_descriptions[pdq->pdq_type],
202 pdq_station_types[le32toh(rsp->status_chars_get.station_type)]);
203
204 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
205 PDQ_OS_PREFIX_ARGS,
206 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
207 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
208 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
209 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
210 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
211 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
212 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
213 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
214 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
215 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
216 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
217 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
218 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
219 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
220 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
221
222 phy_type = le32toh(rsp->status_chars_get.phy_type[0]);
223 pmd_type = le32toh(rsp->status_chars_get.pmd_type[0]);
224 station_type = le32toh(rsp->status_chars_get.station_type);
225 smt_version_id = le32toh(rsp->status_chars_get.smt_version_id);
226
227 if (smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions))
228 printf(", SMT %s\n", pdq_smt_versions[smt_version_id]);
229
230 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
231 PDQ_OS_PREFIX_ARGS,
232 le32toh(station_type) == PDQ_STATION_TYPE_DAS ? "[A]" : "",
233 pdq_phy_types[phy_type],
234 pdq_pmd_types[pmd_type / 100][pmd_type % 100]);
235
236 if (station_type == PDQ_STATION_TYPE_DAS) {
237 phy_type = le32toh(rsp->status_chars_get.phy_type[1]);
238 pmd_type = le32toh(rsp->status_chars_get.pmd_type[1]);
239 printf(", FDDI Port[B] = %c (PMD = %s)",
240 pdq_phy_types[phy_type],
241 pdq_pmd_types[pmd_type / 100][pmd_type % 100]);
242 }
243
244 printf("\n");
245
246 pdq_os_update_status(pdq, rsp);
247 }
248
/*
 * Set up the host-visible CSR map.  The PDQ registers sit at
 * consecutive multiples of `csrsize' from `csr_base' (the stride
 * differs per bus attachment).  Indices 9, 14 and 15 are not mapped
 * here; 16-19 are the PCI PFI registers (see pdq_init_pci_csrs).
 */
static void
pdq_init_csrs(
    pdq_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
    csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
    csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
    csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
    csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
    csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
    csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
    csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
    csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
    csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
    csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
    csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
    csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
}
272
/*
 * Set up the PCI-specific (PFI) CSR map; these registers follow the
 * common block at indices 16-19.
 */
static void
pdq_init_pci_csrs(
    pdq_pci_csrs_t *csrs,
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    size_t csrsize)
{
    csrs->csr_bus = bus;
    csrs->csr_base = csr_base;
    csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
    csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
    csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
    csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
}
287
288 static void
289 pdq_flush_databuf_queue(
290 pdq_t *pdq,
291 pdq_databuf_queue_t *q)
292 {
293 PDQ_OS_DATABUF_T *pdu;
294 for (;;) {
295 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
296 if (pdu == NULL)
297 return;
298 PDQ_OS_DATABUF_FREE(pdq, pdu);
299 }
300 }
301
/*
 * Issue a port-control command to the adapter and busy-wait for the
 * CMD_DONE interrupt status bit.  Returns PDQ_TRUE on success,
 * PDQ_FALSE if the adapter reported CMD_ERROR or never completed
 * (treated as adapter failure).
 */
static pdq_boolean_t
pdq_do_port_control(
    const pdq_csrs_t * const csrs,
    pdq_uint32_t cmd)
{
    int cnt = 0;
    /* Acknowledge any stale CMD_DONE indication before issuing. */
    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
    /*
     * The error bit is written along with the command; if it is
     * still set once CMD_DONE asserts, the command failed.
     */
    PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
    /* Bounded spin (~33M reads) so a dead adapter cannot hang us. */
    while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
	cnt++;
    PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
    if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
	/* Ack CMD_DONE, then report success/failure from CMD_ERROR. */
	PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
	return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
    }
    /* adapter failure */
    PDQ_ASSERT(0);
    return PDQ_FALSE;
}
321
322 static void
323 pdq_read_mla(
324 const pdq_csrs_t * const csrs,
325 pdq_lanaddr_t *hwaddr)
326 {
327 pdq_uint32_t data;
328
329 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
330 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
331 data = PDQ_CSR_READ(csrs, csr_host_data);
332
333 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
334 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
335 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
336 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
337
338 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
339 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
340 data = PDQ_CSR_READ(csrs, csr_host_data);
341
342 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
343 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
344 }
345
346 static void
347 pdq_read_fwrev(
348 const pdq_csrs_t * const csrs,
349 pdq_fwrev_t *fwrev)
350 {
351 pdq_uint32_t data;
352
353 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
354 data = PDQ_CSR_READ(csrs, csr_host_data);
355
356 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
357 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
358 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
359 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
360 }
361
/*
 * Read the adapter's error log into *log_entry, one 32-bit word per
 * port-control operation, stopping at the end of the structure or
 * when the adapter stops supplying words.  Returns PDQ_TRUE if at
 * least one word was read, PDQ_FALSE otherwise.
 */
static pdq_boolean_t
pdq_read_error_log(
    pdq_t *pdq,
    pdq_response_error_log_get_t *log_entry)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;

    pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);

    /*
     * NOTE(review): PDQ_PCTL_FW_REV_READ is reused here to step
     * through successive log words -- presumably the READ op doubles
     * as "fetch next word" after ERROR_LOG_START; confirm against the
     * PDQ port specification.
     */
    while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
	*ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
	if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
	    break;
    }
    return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
}
379
380 static pdq_chip_rev_t
381 pdq_read_chiprev(
382 const pdq_csrs_t * const csrs)
383 {
384 pdq_uint32_t data;
385
386 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
387 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
388 data = PDQ_CSR_READ(csrs, csr_host_data);
389
390 return (pdq_chip_rev_t) data;
391 }
392
/*
 * Per-opcode command metadata, indexed by pdq_cmd_code_t: the request
 * and response buffer sizes and a printable name used in diagnostics.
 * Entries with 0/0 sizes are opcodes this driver never issues.
 */
static const struct {
    size_t cmd_len;      /* bytes in the request buffer */
    size_t rsp_len;      /* bytes expected in the response buffer */
    const char *cmd_name; /* printable name for diagnostics */
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },				/* 15 - unused opcode */
    { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
    { 0, 0, "Bogus CMD" },
};
474
/*
 * Queue the highest-numbered pending command (from the pending-command
 * bitmask) to the adapter.  Only one command is ever outstanding at a
 * time; the matching response is handled by
 * pdq_process_command_responses(), which re-invokes this routine while
 * commands remain pending.
 */
static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
	return;

    /*
     * Determine which command needs to be queued.  Scan downward from
     * PDQC_SMT_MIB_SET, so the highest-numbered pending bit wins.
     */
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
	op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
	cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    txd->txd_pa_hi =
	htole32(PDQ_TXDESC_SEG_LEN(cmdlen)|PDQ_TXDESC_EOP|PDQ_TXDESC_SOP);

    /*
     * Clear the command area, set the opcode, and the command from the pending
     * mask.
     */

    ci->ci_queued_commands[ci->ci_request_producer] = op;
#if defined(PDQVERBOSE)
    /* Poison the response op so a stale response is detectable. */
    ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op =
	htole32(PDQC_BOGUS_CMD);
#endif
    PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
    *(pdq_uint32_t *) ci->ci_request_bufstart = htole32(op);
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
	case PDQC_FILTER_SET: {
	    /* Program promiscuous / multicast / SMT frame filters. */
	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    filter_set->filter_set_items[idx].item_code =
		htole32(PDQI_IND_GROUP_PROM);
	    filter_set->filter_set_items[idx].filter_state =
		htole32(pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code =
		htole32(PDQI_GROUP_PROM);
	    filter_set->filter_set_items[idx].filter_state =
		htole32(pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code =
		htole32(PDQI_SMT_PROM);
	    filter_set->filter_set_items[idx].filter_state =
		htole32((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code =
		htole32(PDQI_SMT_USER);
	    filter_set->filter_set_items[idx].filter_state =
		htole32((pdq->pdq_flags & PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    /* Terminate the item list. */
	    filter_set->filter_set_items[idx].item_code =
		htole32(PDQI_EOL);
	    break;
	}
	case PDQC_ADDR_FILTER_SET: {
	    /* First slot is broadcast; the OS fills up to 61 more. */
	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
	    addr->lanaddr_bytes[0] = 0xFF;
	    addr->lanaddr_bytes[1] = 0xFF;
	    addr->lanaddr_bytes[2] = 0xFF;
	    addr->lanaddr_bytes[3] = 0xFF;
	    addr->lanaddr_bytes[4] = 0xFF;
	    addr->lanaddr_bytes[5] = 0xFF;
	    addr++;
	    pdq_os_addr_fill(pdq, addr, 61);
	    break;
	}
	case PDQC_SNMP_SET: {
	    /* Enable/disable full duplex: item value 1 = on, 2 = off. */
	    pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
	    unsigned idx = 0;
	    snmp_set->snmp_set_items[idx].item_code = htole32(PDQSNMP_FULL_DUPLEX_ENABLE);
	    snmp_set->snmp_set_items[idx].item_value = htole32(pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
	    snmp_set->snmp_set_items[idx].item_port = 0;
	    idx++;
	    snmp_set->snmp_set_items[idx].item_code = htole32(PDQSNMP_EOL);
	    break;
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }


    /*
     * Sync the command request buffer and descriptor, then advance
     * the request producer index.
     */
    PDQ_OS_CMDRQST_PRESYNC(pdq, cmdlen);
    PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Sync the command response buffer and advance the response
     * producer index (descriptor is already pre-initialized)
     */
    PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    /*
     * At this point the command is done.  All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
	pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}
619
/*
 * Consume the response to the currently outstanding command, perform
 * any response-specific post-processing, then either queue the next
 * pending command or update the producer CSRs to acknowledge the
 * completed one.
 */
static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;
    pdq_cmd_code_t op;
    pdq_response_code_t status;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed.  If it has been
     * consumed then the command must have been as well.
     */

    if (le32toh(cbp->pdqcb_command_response) == ci->ci_response_completion)
	return;

    PDQ_ASSERT(le32toh(cbp->pdqcb_command_request) != ci->ci_request_completion);

    PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
    rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
    op = le32toh(rspgen->generic_op);
    status = le32toh(rspgen->generic_status);
    PDQ_ASSERT(op == ci->ci_queued_commands[ci->ci_request_completion]);
    PDQ_ASSERT(status == PDQR_SUCCESS);
    /*
     * NOTE(review): htole32(status) re-swaps the already host-order
     * value, so the raw wire value is printed on big-endian hosts.
     * Debug output only -- confirm whether that is intended.
     */
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
	pdq_cmd_info[op].cmd_name,
	htole32(status),
	htole32(status)));

    if (op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
	/* One-shot banner print requested at attach time. */
	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    } else if (op == PDQC_DEC_EXT_MIB_GET) {
	/* Track whether full duplex is actually operational. */
	pdq->pdq_flags &= ~PDQ_IS_FDX;
	if (le32toh(((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational))
	    pdq->pdq_flags |= PDQ_IS_FDX;
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
	pdq_queue_commands(pdq);
    } else {
	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
	    ci->ci_response_producer | (ci->ci_response_completion << 8));
	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
	    ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}
676
677 /*
678 * This following routine processes unsolicited events.
679 * In addition, it also fills the unsolicited queue with
680 * event buffers so it can be used to initialize the queue
681 * as well.
682 */
683 static void
684 pdq_process_unsolicited_events(
685 pdq_t *pdq)
686 {
687 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
688 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
689 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
690 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
691
692 /*
693 * Process each unsolicited event (if any).
694 */
695
696 while (le32toh(cbp->pdqcb_unsolicited_event) != ui->ui_completion) {
697 const pdq_unsolicited_event_t *event;
698 pdq_entity_t entity;
699 uint32_t value;
700 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
701 PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);
702
703 switch (event->event_type) {
704 case PDQ_UNSOLICITED_EVENT: {
705 int bad_event = 0;
706 entity = le32toh(event->event_entity);
707 value = le32toh(event->event_code.value);
708 switch (entity) {
709 case PDQ_ENTITY_STATION: {
710 bad_event = value >= PDQ_STATION_EVENT_MAX;
711 break;
712 }
713 case PDQ_ENTITY_LINK: {
714 bad_event = value >= PDQ_LINK_EVENT_MAX;
715 break;
716 }
717 case PDQ_ENTITY_PHY_PORT: {
718 bad_event = value >= PDQ_PHY_EVENT_MAX;
719 break;
720 }
721 default: {
722 bad_event = 1;
723 break;
724 }
725 }
726 if (bad_event) {
727 break;
728 }
729 printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
730 PDQ_OS_PREFIX_ARGS,
731 pdq_entities[entity],
732 pdq_event_codes[entity][value]);
733 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
734 printf("[%d]", le32toh(event->event_index));
735 printf("\n");
736 break;
737 }
738 case PDQ_UNSOLICITED_COUNTERS: {
739 break;
740 }
741 }
742 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
743 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
744 ui->ui_free++;
745 }
746
747 /*
748 * Now give back the event buffers back to the PDQ.
749 */
750 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
751 ui->ui_free = 0;
752
753 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
754 ui->ui_producer | (ui->ui_completion << 8));
755 }
756
757 static void
758 pdq_process_received_data(
759 pdq_t *pdq,
760 pdq_rx_info_t *rx,
761 pdq_rxdesc_t *receives,
762 pdq_uint32_t completion_goal,
763 pdq_uint32_t ring_mask)
764 {
765 pdq_uint32_t completion = rx->rx_completion;
766 pdq_uint32_t producer = rx->rx_producer;
767 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
768 pdq_rxdesc_t *rxd;
769 pdq_uint32_t idx;
770
771 while (completion != completion_goal) {
772 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
773 pdq_uint8_t *dataptr;
774 pdq_uint32_t fc, datalen, pdulen, segcnt;
775 pdq_uint32_t status;
776
777 fpdu = lpdu = buffers[completion];
778 PDQ_ASSERT(fpdu != NULL);
779 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
780 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
781 status = le32toh(*(pdq_uint32_t *) dataptr);
782 if (PDQ_RXS_RCC_BADPDU(status) == 0) {
783 datalen = PDQ_RXS_LEN(status);
784 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
785 PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
786 fc = dataptr[PDQ_RX_FC_OFFSET];
787 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
788 case PDQ_FDDI_LLC_ASYNC:
789 case PDQ_FDDI_LLC_SYNC:
790 case PDQ_FDDI_IMP_ASYNC:
791 case PDQ_FDDI_IMP_SYNC: {
792 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
793 PDQ_PRINTF(("discard: bad length %d\n", datalen));
794 goto discard_frame;
795 }
796 break;
797 }
798 case PDQ_FDDI_SMT: {
799 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
800 goto discard_frame;
801 break;
802 }
803 default: {
804 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
805 goto discard_frame;
806 }
807 }
808 /*
809 * Update the lengths of the data buffers now that we know
810 * the real length.
811 */
812 pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
813 segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
814 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
815 if (npdu == NULL) {
816 PDQ_PRINTF(("discard: no databuf #0\n"));
817 goto discard_frame;
818 }
819 buffers[completion] = npdu;
820 for (idx = 1; idx < segcnt; idx++) {
821 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
822 if (npdu == NULL) {
823 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
824 PDQ_OS_DATABUF_FREE(pdq, fpdu);
825 goto discard_frame;
826 }
827 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
828 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
829 buffers[(completion + idx) & ring_mask] = npdu;
830 }
831 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
832 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
833 buffers[(producer + idx) & ring_mask] =
834 buffers[(completion + idx) & ring_mask];
835 buffers[(completion + idx) & ring_mask] = NULL;
836 }
837 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
838 if (segcnt == 1) {
839 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
840 } else {
841 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
842 }
843 /*
844 * Do not pass to protocol if packet was received promiscuously
845 */
846 pdq_os_receive_pdu(pdq, fpdu, pdulen,
847 PDQ_RXS_RCC_DD(status) < PDQ_RXS_RCC_DD_CAM_MATCH);
848 rx->rx_free += PDQ_RX_SEGCNT;
849 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
850 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
851 continue;
852 } else {
853 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
854 PDQ_RXS_RCC_BADPDU(status), PDQ_RXS_RCC_BADCRC(status),
855 PDQ_RXS_RCC_REASON(status), PDQ_RXS_FSC(status),
856 PDQ_RXS_FSB_E(status)));
857 if (PDQ_RXS_RCC_REASON(status) == 7)
858 goto discard_frame;
859 if (PDQ_RXS_RCC_REASON(status) != 0) {
860 /* hardware fault */
861 if (PDQ_RXS_RCC_BADCRC(status)) {
862 printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
863 PDQ_OS_PREFIX_ARGS,
864 dataptr[PDQ_RX_FC_OFFSET+1],
865 dataptr[PDQ_RX_FC_OFFSET+2],
866 dataptr[PDQ_RX_FC_OFFSET+3],
867 dataptr[PDQ_RX_FC_OFFSET+4],
868 dataptr[PDQ_RX_FC_OFFSET+5],
869 dataptr[PDQ_RX_FC_OFFSET+6]);
870 /* rx->rx_badcrc++; */
871 } else if (PDQ_RXS_FSC(status) == 0 || PDQ_RXS_FSB_E(status) == 1) {
872 /* rx->rx_frame_status_errors++; */
873 } else {
874 /* hardware fault */
875 }
876 }
877 }
878 discard_frame:
879 /*
880 * Discarded frames go right back on the queue; therefore
881 * ring entries were freed.
882 */
883 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
884 buffers[producer] = buffers[completion];
885 buffers[completion] = NULL;
886 rxd = &receives[rx->rx_producer];
887 if (idx == 0) {
888 rxd->rxd_pa_hi = htole32(
889 PDQ_RXDESC_SOP |
890 PDQ_RXDESC_SEG_CNT(PDQ_RX_SEGCNT - 1) |
891 PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
892 } else {
893 rxd->rxd_pa_hi =
894 htole32(PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
895 }
896 rxd->rxd_pa_lo = htole32(PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]));
897 PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
898 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
899 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
900 PDQ_ADVANCE(producer, 1, ring_mask);
901 PDQ_ADVANCE(completion, 1, ring_mask);
902 }
903 }
904 rx->rx_completion = completion;
905
906 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
907 PDQ_OS_DATABUF_T *pdu;
908 /*
909 * Allocate the needed number of data buffers.
910 * Try to obtain them from our free queue before
911 * asking the system for more.
912 */
913 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
914 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
915 PDQ_OS_DATABUF_ALLOC(pdq, pdu);
916 if (pdu == NULL)
917 break;
918 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
919 }
920 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
921 if (idx == 0) {
922 rxd->rxd_pa_hi = htole32(
923 PDQ_RXDESC_SOP|
924 PDQ_RXDESC_SEG_CNT(PDQ_RX_SEGCNT - 1)|
925 PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
926 } else {
927 rxd->rxd_pa_hi =
928 htole32(PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
929 }
930 rxd->rxd_pa_lo = htole32(PDQ_OS_DATABUF_BUSPA(pdq, pdu));
931 PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
932 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
933 }
934 if (idx < PDQ_RX_SEGCNT) {
935 /*
936 * We didn't get all databufs required to complete a new
937 * receive buffer. Keep the ones we got and retry a bit
938 * later for the rest.
939 */
940 break;
941 }
942 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
943 rx->rx_free -= PDQ_RX_SEGCNT;
944 }
945 }
946
947 static void pdq_process_transmitted_data(pdq_t *pdq);
948
/*
 * Queue a single PDU (a chain of data buffers) for transmission.
 *
 * Builds one transmit descriptor per segment in the descriptor ring,
 * provides start-of-packet via either the canned FDDI header descriptor
 * or the SOP bit, marks the final descriptor with EOP, and then bumps
 * the TYPE2 producer so the adapter begins DMA.
 *
 * Returns PDQ_TRUE on success.  If the ring lacks enough free
 * descriptors, the TX-done interrupt is enabled (so space will be
 * reclaimed) and PDQ_FALSE is returned; the caller must requeue the
 * PDU and retry later.
 */
pdq_boolean_t
pdq_queue_transmit_data(
    pdq_t *pdq,
    PDQ_OS_DATABUF_T *pdu)
{
    pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_uint32_t producer = tx->tx_producer;	/* local copy; committed only on success */
    pdq_txdesc_t *eop = NULL;			/* last descriptor written (receives EOP) */
    PDQ_OS_DATABUF_T *pdu0;			/* non-NULL => segments left unqueued */
    pdq_uint32_t freecnt;
#if defined(PDQ_BUS_DMA)
    bus_dmamap_t map;
#endif

  again:
    /*
     * When the frame-control byte sits at the OS header offset, a
     * pre-built header descriptor is prepended to every frame, so one
     * descriptor must be reserved for it up front.
     */
    if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
	freecnt = tx->tx_free - 1;
    } else {
	freecnt = tx->tx_free;
    }
    /*
     * Need 2 or more descriptors to be able to send.
     */
    if (freecnt == 0) {
	/* Ring full: request a TX interrupt so we learn when space frees up. */
	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
	return PDQ_FALSE;
    }

    if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
	/* Prepend the canned FDDI header descriptor (it carries SOP). */
	dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
	PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }

#if defined(PDQ_BUS_DMA)
    map = M_GETCTX(pdu, bus_dmamap_t);
    if (freecnt >= map->dm_nsegs) {
	int idx;
	for (idx = 0; idx < map->dm_nsegs; idx++) {
	    /*
	     * Initialize the transmit descriptor
	     */
	    eop = &dbp->pdqdb_transmits[producer];
	    eop->txd_pa_hi =
		htole32(PDQ_TXDESC_SEG_LEN(map->dm_segs[idx].ds_len));
	    eop->txd_pa_lo = htole32(map->dm_segs[idx].ds_addr);
	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
	    freecnt--;
	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
	}
	pdu0 = NULL;
    } else {
	pdu0 = pdu;
    }
#else
    /*
     * NOTE(review): this path recomputes freecnt as tx_free - 1
     * unconditionally, mirroring the header-descriptor reservation above
     * even when PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET — presumably just
     * conservative; confirm intent against the PDQ_BUS_DMA path.
     */
    for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
	pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
	const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);

	/*
	 * The first segment is limited to the space remaining in
	 * page.  All segments after that can be up to a full page
	 * in size.
	 */
	fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
	while (datalen > 0 && freecnt > 0) {
	    pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);

	    /*
	     * Initialize the transmit descriptor
	     */
	    eop = &dbp->pdqdb_transmits[producer];
	    eop->txd_pa_hi = htole32(PDQ_TXDESC_SEG_LEN(seglen));
	    eop->txd_pa_lo = htole32(PDQ_OS_VA_TO_BUSPA(pdq, dataptr));
	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
	    datalen -= seglen;
	    dataptr += seglen;
	    fraglen = PDQ_OS_PAGESIZE;
	    freecnt--;
	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
	}
	pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
    }
#endif /* defined(PDQ_BUS_DMA) */
    if (pdu0 != NULL) {
	unsigned completion = tx->tx_completion;
	PDQ_ASSERT(freecnt == 0);
	PDQ_OS_CONSUMER_POSTSYNC(pdq);
	/* Try to reclaim completed descriptors; on progress, rebuild from scratch. */
	pdq_process_transmitted_data(pdq);
	if (completion != tx->tx_completion) {
	    producer = tx->tx_producer;
	    eop = NULL;
	    goto again;
	}
	/*
	 * If we still have data to process then the ring was too full
	 * to store the PDU.  Return FALSE so the caller will requeue
	 * the PDU for later.
	 */
	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
	return PDQ_FALSE;
    }
    /*
     * Everything went fine.  Finish it up.
     */
    tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
    if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
	/*
	 * No header descriptor was prepended; mark the first data
	 * descriptor as start-of-packet instead.
	 */
	dbp->pdqdb_transmits[tx->tx_producer].txd_pa_hi |=
	    htole32(PDQ_TXDESC_SOP);
	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
	    sizeof(pdq_txdesc_t));
    }
    eop->txd_pa_hi |= htole32(PDQ_TXDESC_EOP);
    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
    /* Remember the PDU so it can be released when the adapter finishes it. */
    PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
    tx->tx_producer = producer;
    tx->tx_free = freecnt;
    PDQ_DO_TYPE2_PRODUCER(pdq);
    return PDQ_TRUE;
}
1072
1073 static void
1074 pdq_process_transmitted_data(
1075 pdq_t *pdq)
1076 {
1077 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1078 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1079 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1080 pdq_uint32_t completion = tx->tx_completion;
1081 int reclaimed = 0;
1082
1083 while (completion != le16toh(cbp->pdqcb_transmits)) {
1084 PDQ_OS_DATABUF_T *pdu;
1085 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
1086 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1087 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
1088 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1089 pdq_os_transmit_done(pdq, pdu);
1090 tx->tx_free += descriptor_count;
1091 reclaimed = 1;
1092 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1093 }
1094 if (tx->tx_completion != completion) {
1095 tx->tx_completion = completion;
1096 pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1097 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1098 pdq_os_restart_transmitter(pdq);
1099 }
1100 if (reclaimed)
1101 PDQ_DO_TYPE2_PRODUCER(pdq);
1102 }
1103
1104 void
1105 pdq_flush_transmitter(
1106 pdq_t *pdq)
1107 {
1108 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1109 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1110
1111 for (;;) {
1112 PDQ_OS_DATABUF_T *pdu;
1113 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1114 if (pdu == NULL)
1115 break;
1116 /*
1117 * Don't call transmit done since the packet never made it
1118 * out on the wire.
1119 */
1120 PDQ_OS_DATABUF_FREE(pdq, pdu);
1121 }
1122
1123 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1124 tx->tx_completion = tx->tx_producer;
1125 cbp->pdqcb_transmits = htole16(tx->tx_completion);
1126 PDQ_OS_CONSUMER_PRESYNC(pdq);
1127
1128 PDQ_DO_TYPE2_PRODUCER(pdq);
1129 }
1130
1131 void
1132 pdq_hwreset(
1133 pdq_t *pdq)
1134 {
1135 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1136 pdq_state_t state;
1137 int cnt;
1138
1139 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1140 if (state == PDQS_DMA_UNAVAILABLE)
1141 return;
1142 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1143 (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
1144 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1145 PDQ_OS_USEC_DELAY(100);
1146 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1147 for (cnt = 100000;;cnt--) {
1148 PDQ_OS_USEC_DELAY(1000);
1149 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1150 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1151 break;
1152 }
1153 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1154 PDQ_OS_USEC_DELAY(10000);
1155 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1156 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1157 PDQ_ASSERT(cnt > 0);
1158 }
1159
1160 /*
1161 * The following routine brings the PDQ from whatever state it is
1162 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
1163 */
/*
 * Reset the adapter to DMA_UNAVAILABLE, tear down all host-side ring
 * state (freeing every buffered PDU), then bring it back up to
 * DMA_AVAILABLE: re-read identity registers, program the consumer and
 * descriptor block addresses, and queue the initial commands.
 *
 * Returns the final adapter state — normally PDQS_DMA_AVAILABLE; if
 * the adapter halts during bring-up, one full retry is made before
 * PDQS_HALTED is returned.
 */
pdq_state_t
pdq_stop(
    pdq_t *pdq)
{
    pdq_state_t state;
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    int cnt, pass = 0, idx;
    PDQ_OS_DATABUF_T **buffers;

  restart:
    /* Force a hardware reset unless we are already post-reset. */
    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    if (state != PDQS_DMA_UNAVAILABLE) {
	pdq_hwreset(pdq);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
    }
#if 0
    /* Dead code: graceful (non-reset) teardown via port-control sub-commands. */
    switch (state) {
	case PDQS_RING_MEMBER:
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
	    /* FALL THROUGH */
	}
	case PDQS_DMA_AVAILABLE: {
	    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
	    pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
	    /* FALL THROUGH */
	}
	case PDQS_DMA_UNAVAILABLE: {
	    break;
	}
    }
#endif
    /*
     * Now we should be in DMA_UNAVAILABLE.  So bring the PDQ into
     * DMA_AVAILABLE.
     */

    /*
     * Obtain the hardware address and firmware revisions
     * (MLA = my long address which is FDDI speak for hardware address)
     */
    pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
    pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
    pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);

    if (pdq->pdq_type == PDQ_DEFPA) {
	/*
	 * Disable interrupts and DMA.
	 */
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
    }

    /*
     * Flush all the databuf queues.
     */
    pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
    /* Release every receive buffer still attached to the ring. */
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
    /* Likewise for the host SMT receive ring. */
    buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
    for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
	if (buffers[idx] != NULL) {
	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
	    buffers[idx] = NULL;
	}
    }
    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);

    /*
     * Reset the consumer indexes to 0.
     */
    pdq->pdq_cbp->pdqcb_receives = 0;
    pdq->pdq_cbp->pdqcb_transmits = 0;
    pdq->pdq_cbp->pdqcb_host_smt = 0;
    pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
    pdq->pdq_cbp->pdqcb_command_response = 0;
    pdq->pdq_cbp->pdqcb_command_request = 0;
    PDQ_OS_CONSUMER_PRESYNC(pdq);

    /*
     * Reset the producer and completion indexes to 0.
     */
    pdq->pdq_command_info.ci_request_producer = 0;
    pdq->pdq_command_info.ci_response_producer = 0;
    pdq->pdq_command_info.ci_request_completion = 0;
    pdq->pdq_command_info.ci_response_completion = 0;
    pdq->pdq_unsolicited_info.ui_producer = 0;
    pdq->pdq_unsolicited_info.ui_completion = 0;
    pdq->pdq_rx_info.rx_producer = 0;
    pdq->pdq_rx_info.rx_completion = 0;
    pdq->pdq_tx_info.tx_producer = 0;
    pdq->pdq_tx_info.tx_completion = 0;
    pdq->pdq_host_smt_info.rx_producer = 0;
    pdq->pdq_host_smt_info.rx_completion = 0;

    pdq->pdq_command_info.ci_command_active = 0;
    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);

    /*
     * Allow the DEFPA to do DMA.  Then program the physical
     * addresses of the consumer and descriptor blocks.
     */
    if (pdq->pdq_type == PDQ_DEFPA) {
#ifdef PDQTEST
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE);
#else
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
		      PDQ_PFI_MODE_DMA_ENABLE
	    /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
#endif
    }

    /*
     * Make sure the unsolicited queue has events ...
     */
    pdq_process_unsolicited_events(pdq);

    /* Set the DMA burst size appropriate for this board revision. */
    if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
	    || pdq->pdq_type == PDQ_DEFTA)
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
    else
	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);

    /*
     * Make sure there isn't stale information in the caches before
     * tell the adapter about the blocks it's going to use.
     */
    PDQ_OS_CONSUMER_PRESYNC(pdq);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
    pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);

    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
    PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
    pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);

    /*
     * Wait (up to 1 second) for DMA_AVAILABLE.  A HALTED state gets
     * one full retry from the top before being reported to the caller.
     */
    for (cnt = 0; cnt < 1000; cnt++) {
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_HALTED) {
	    if (pass > 0)
		return PDQS_HALTED;
	    pass = 1;
	    goto restart;
	}
	if (state == PDQS_DMA_AVAILABLE) {
	    PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
	    break;
	}
	PDQ_OS_USEC_DELAY(1000);
    }
    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);

    /* Ack all type-0 interrupts; leave the interrupt mask empty for now. */
    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
    pdq->pdq_intrmask = 0;
   /* PDQ_HOST_INT_STATE_CHANGE
	|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
	|PDQ_HOST_INT_UNSOL_ENABLE */;
    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);

    /*
     * Any other command but START should be valid.
     */
    pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
    if (pdq->pdq_flags & PDQ_PRINTCHARS)
	pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
    pdq_queue_commands(pdq);

    if (pdq->pdq_flags & PDQ_PRINTCHARS) {
	/*
	 * Now wait (up to 100ms) for the command(s) to finish.
	 */
	for (cnt = 0; cnt < 1000; cnt++) {
	    PDQ_OS_CONSUMER_POSTSYNC(pdq);
	    pdq_process_command_responses(pdq);
	    if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
		break;
	    PDQ_OS_USEC_DELAY(1000);
	}
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    }

    return state;
}
1368
/*
 * Start (or resume) the adapter after pdq_stop() has brought it to
 * DMA_AVAILABLE: enable the working interrupt set, prime the receive
 * and host-SMT rings, and queue the filter/SNMP/START commands.  If
 * the link is already up (LINK_AVAILABLE / LINK_UNAVAILABLE), only the
 * filter commands are re-queued.
 */
void
pdq_run(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_state_t state;

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
    PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
    PDQ_ASSERT(state != PDQS_RESET);
    PDQ_ASSERT(state != PDQS_HALTED);
    PDQ_ASSERT(state != PDQS_UPGRADE);
    PDQ_ASSERT(state != PDQS_RING_MEMBER);
    switch (state) {
	case PDQS_DMA_AVAILABLE: {
	    /*
	     * The PDQ after being reset screws up some of its state.
	     * So we need to clear all the errors/interrupts so the real
	     * ones will get through.
	     */
	    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
	    pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
		|PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
		|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
		|PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
	    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
	    /*
	     * Set the MAC and address filters and start up the PDQ.
	     */
	    pdq_process_unsolicited_events(pdq);
	    /* Prime the receive ring with fresh buffers. */
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      le16toh(pdq->pdq_cbp->pdqcb_receives),
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		/*
		 * NOTE(review): host_smt is read with le32toh here while
		 * pdqcb_receives uses le16toh above — presumably matches the
		 * consumer-block field widths; confirm against the layout.
		 */
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  le32toh(pdq->pdq_cbp->pdqcb_host_smt),
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
				  | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
		| PDQ_BITMASK(PDQC_SNMP_SET)
		| PDQ_BITMASK(PDQC_START);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
	    /* Already started; just refresh filters (no START command). */
	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
		| PDQ_BITMASK(PDQC_SNMP_SET);
	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
					  pdq->pdq_dbp->pdqdb_host_smt,
					  le32toh(pdq->pdq_cbp->pdqcb_host_smt),
					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			      pdq->pdq_host_smt_info.rx_producer
				  | (pdq->pdq_host_smt_info.rx_completion << 8));
	    }
	    pdq_process_unsolicited_events(pdq);
	    pdq_queue_commands(pdq);
	    break;
	}
	case PDQS_RING_MEMBER: {
	    /* Asserted unreachable above. */
	    /* FALLTHROUGH */
	}
	default: {	/* to make gcc happy */
	    break;
	}
    }
}
1449
/*
 * Service the adapter interrupt.  Loops while the port status shows a
 * pending condition, dispatching to the receive, host-SMT, transmit,
 * unsolicited-event, and command-response handlers, and processing
 * type-0 interrupts (state change, fatal error, transmit flush).
 *
 * Returns non-zero if any work was performed, 0 if the interrupt was
 * not ours.
 */
int
pdq_interrupt(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_uint32_t data;
    int progress = 0;

    if (pdq->pdq_type == PDQ_DEFPA)
	/* Ack the PFI (PCI interface) interrupt bits on the DEFPA. */
	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);

    while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
	progress = 1;
	PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
	PDQ_OS_CONSUMER_POSTSYNC(pdq);
	if (data & PDQ_PSTS_RCV_DATA_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
				      pdq->pdq_dbp->pdqdb_receives,
				      le16toh(pdq->pdq_cbp->pdqcb_receives),
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
	    PDQ_DO_TYPE2_PRODUCER(pdq);
	}
	if (data & PDQ_PSTS_HOST_SMT_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
				      pdq->pdq_dbp->pdqdb_host_smt,
				      le32toh(pdq->pdq_cbp->pdqcb_host_smt),
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
	    PDQ_DO_HOST_SMT_PRODUCER(pdq);
	}
	/* Always reclaim transmits, regardless of the pending bit. */
	/*	if (data & PDQ_PSTS_XMT_DATA_PENDING) */
	    pdq_process_transmitted_data(pdq);
	if (data & PDQ_PSTS_UNSOL_PENDING)
	    pdq_process_unsolicited_events(pdq);
	if (data & PDQ_PSTS_CMD_RSP_PENDING)
	    pdq_process_command_responses(pdq);
	if (data & PDQ_PSTS_TYPE_0_PENDING) {
	    /* Note: 'data' is reused here for the type-0 interrupt bits. */
	    data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
	    if (data & PDQ_HOST_INT_STATE_CHANGE) {
		pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
		printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
		if (state == PDQS_LINK_UNAVAILABLE) {
		    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
		} else if (state == PDQS_LINK_AVAILABLE) {
		    if (pdq->pdq_flags & PDQ_WANT_FDX) {
			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
			pdq_queue_commands(pdq);
		    }
		    pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
		    pdq_os_restart_transmitter(pdq);
		} else if (state == PDQS_HALTED) {
		    /* Dump diagnostics, then reinitialize the adapter. */
		    pdq_response_error_log_get_t log_entry;
		    pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
		    printf(": halt code = %d (%s)\n",
			   halt_code, pdq_halt_codes[halt_code]);
		    if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
			PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
			       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
			       data & PDQ_HOST_INT_FATAL_ERROR));
		    }
		    PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
		    if (pdq_read_error_log(pdq, &log_entry)) {
			PDQ_PRINTF(("  Error log Entry:\n"));
			PDQ_PRINTF(("    CMD Status           = %d (0x%x)\n",
				    log_entry.error_log_get_status,
				    log_entry.error_log_get_status));
			PDQ_PRINTF(("    Event Status         = %d (0x%x)\n",
				    log_entry.error_log_get_event_status,
				    log_entry.error_log_get_event_status));
			PDQ_PRINTF(("    Caller Id            = %d (0x%x)\n",
				    log_entry.error_log_get_caller_id,
				    log_entry.error_log_get_caller_id));
			PDQ_PRINTF(("    Write Count          = %d (0x%x)\n",
				    log_entry.error_log_get_write_count,
				    log_entry.error_log_get_write_count));
			PDQ_PRINTF(("    FRU Implication Mask = %d (0x%x)\n",
				    log_entry.error_log_get_fru_implication_mask,
				    log_entry.error_log_get_fru_implication_mask));
			PDQ_PRINTF(("    Test ID              = %d (0x%x)\n",
				    log_entry.error_log_get_test_id,
				    log_entry.error_log_get_test_id));
		    }
		    pdq_stop(pdq);
		    if (pdq->pdq_flags & PDQ_RUNNING)
			pdq_run(pdq);
		    return 1;
		}
		printf("\n");
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
	    }
	    if (data & PDQ_HOST_INT_FATAL_ERROR) {
		/* Fatal error: full reinitialization is the only recovery. */
		pdq_stop(pdq);
		if (pdq->pdq_flags & PDQ_RUNNING)
		    pdq_run(pdq);
		return 1;
	    }
	    if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
		printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
		pdq->pdq_flags &= ~PDQ_TXOK;
		pdq_flush_transmitter(pdq);
		pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
	    }
	}
	if (pdq->pdq_type == PDQ_DEFPA)
	    PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
    }
    return progress;
}
1558
1559 pdq_t *
1560 pdq_initialize(
1561 pdq_bus_t bus,
1562 pdq_bus_memaddr_t csr_base,
1563 const char *name,
1564 int unit,
1565 void *ctx,
1566 pdq_type_t type)
1567 {
1568 pdq_t *pdq;
1569 pdq_state_t state;
1570 pdq_descriptor_block_t *dbp;
1571 #if !defined(PDQ_BUS_DMA)
1572 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1573 pdq_uint8_t *p;
1574 #endif
1575 int idx;
1576
1577 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1578 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1579 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1580 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1581 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1582 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1583 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1584 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1585 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1586
1587 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1588 if (pdq == NULL) {
1589 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1590 return NULL;
1591 }
1592 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1593 pdq->pdq_type = type;
1594 pdq->pdq_unit = unit;
1595 pdq->pdq_os_ctx = (void *) ctx;
1596 pdq->pdq_os_name = name;
1597 pdq->pdq_flags = PDQ_PRINTCHARS;
1598 /*
1599 * Allocate the additional data structures required by
1600 * the PDQ driver. Allocate a contiguous region of memory
1601 * for the descriptor block. We need to allocated enough
1602 * to guarantee that we will a get 8KB block of memory aligned
1603 * on a 8KB boundary. This turns to require that we allocate
1604 * (N*2 - 1 page) pages of memory. On machine with less than
1605 * a 8KB page size, it mean we will allocate more memory than
1606 * we need. The extra will be used for the unsolicited event
1607 * buffers (though on machines with 8KB pages we will to allocate
1608 * them separately since there will be nothing left overs.)
1609 */
1610 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1611 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1612 if (p != NULL) {
1613 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1614 /*
1615 * Assert that we really got contiguous memory. This isn't really
1616 * needed on systems that actually have physical contiguous allocation
1617 * routines, but on those systems that don't ...
1618 */
1619 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1620 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1621 goto cleanup_and_return;
1622 }
1623 if (physaddr & 0x1FFF) {
1624 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1625 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1626 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1627 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1628 } else {
1629 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1630 pdq->pdq_pa_descriptor_block = physaddr;
1631 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1632 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1633 }
1634 }
1635 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1636 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1637 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1638 pdq->pdq_unsolicited_info.ui_events =
1639 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1640 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1641 }
1642 #else
1643 if (pdq_os_memalloc_contig(pdq))
1644 goto cleanup_and_return;
1645 #endif
1646
1647 /*
1648 * Make sure everything got allocated. If not, free what did
1649 * get allocated and return.
1650 */
1651 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1652 cleanup_and_return:
1653 #ifdef PDQ_OS_MEMFREE_CONTIG
1654 if (p /* pdq->pdq_dbp */ != NULL)
1655 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1656 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1657 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1658 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1659 #endif
1660 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1661 return NULL;
1662 }
1663 dbp = pdq->pdq_dbp;
1664
1665 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1666 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1667 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1668 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1669 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1670 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1671 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1672
1673 /*
1674 * Zero out the descriptor block. Not really required but
1675 * it pays to be neat. This will also zero out the consumer
1676 * block, command pool, and buffer pointers for the receive
1677 * host_smt rings.
1678 */
1679 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1680
1681 /*
1682 * Initialize the CSR references.
1683 * the DEFAA (FutureBus+) skips a longword between registers
1684 */
1685 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1686 if (pdq->pdq_type == PDQ_DEFPA)
1687 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1688
1689 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1690 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1691 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1692 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1693 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1694 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1695 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1696 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1697 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1698 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1699 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1700 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1701 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1702 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1703 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1704 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1705 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1706 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1707 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1708 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1709 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1710 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1711 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1712 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1713 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1714 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1715 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1716
1717 /*
1718 * Initialize the command information block
1719 */
1720 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1721 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1722 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1723 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1724 pdq->pdq_command_info.ci_request_bufstart,
1725 pdq->pdq_command_info.ci_pa_request_bufstart));
1726 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1727 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1728
1729 txd->txd_pa_lo = htole32(pdq->pdq_command_info.ci_pa_request_bufstart);
1730 txd->txd_pa_hi = htole32(PDQ_TXDESC_SOP | PDQ_TXDESC_EOP);
1731 }
1732 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1733 sizeof(dbp->pdqdb_command_requests));
1734
1735 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1736 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1737 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1738 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1739 pdq->pdq_command_info.ci_response_bufstart,
1740 pdq->pdq_command_info.ci_pa_response_bufstart));
1741 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1742 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1743
1744 rxd->rxd_pa_hi = htole32(PDQ_RXDESC_SOP |
1745 PDQ_RXDESC_SEG_LEN(PDQ_SIZE_COMMAND_RESPONSE));
1746 rxd->rxd_pa_lo = htole32(pdq->pdq_command_info.ci_pa_response_bufstart);
1747 }
1748 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1749 sizeof(dbp->pdqdb_command_responses));
1750
1751 /*
1752 * Initialize the unsolicited event information block
1753 */
1754 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1755 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1756 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1757 pdq->pdq_unsolicited_info.ui_events,
1758 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1759 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1760 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1761 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1762
1763 rxd->rxd_pa_hi = htole32(PDQ_RXDESC_SOP |
1764 PDQ_RXDESC_SEG_LEN(sizeof(pdq_unsolicited_event_t)));
1765 rxd->rxd_pa_lo = htole32(pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1766 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events);
1767 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1768 }
1769 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1770 sizeof(dbp->pdqdb_unsolicited_events));
1771
1772 /*
1773 * Initialize the receive information blocks (normal and SMT).
1774 */
1775 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1776 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1777 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1778 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1779
1780 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1781 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1782 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1783 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1784
1785 /*
1786 * Initialize the transmit information block.
1787 */
1788 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1789 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1790 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1791 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1792 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_hi = htole32(PDQ_TXDESC_SOP|PDQ_TXDESC_SEG_LEN(3));
1793 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = htole32(PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr));
1794 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1795
1796 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1797 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1798
1799 /*
1800 * Stop the PDQ if it is running and put it into a known state.
1801 */
1802 state = pdq_stop(pdq);
1803
1804 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1805 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1806 /*
1807 * If the adapter is not the state we expect, then the initialization
1808 * failed. Cleanup and exit.
1809 */
1810 #if defined(PDQVERBOSE)
1811 if (state == PDQS_HALTED) {
1812 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1813 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1814 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1815 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1816 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1817 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1818 }
1819 #endif
1820 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1821 goto cleanup_and_return;
1822
1823 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1824 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1825 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1826 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1827 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1828 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1829 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1830 PDQ_PRINTF(("PDQ Chip Revision = "));
1831 switch (pdq->pdq_chip_rev) {
1832 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1833 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1834 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1835 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
1836 }
1837 PDQ_PRINTF(("\n"));
1838
1839 return pdq;
1840 }
Cache object: 93dafc85833d549e359e07b285b891ab
|