FreeBSD/Linux Kernel Cross Reference
sys/dev/dpt/dpt_scsi.c
1 /**
2 * Copyright (c) 1997 by Simon Shapiro
3 * All Rights Reserved
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /**
 * dpt_scsi.c: SCSI dependent code for the DPT driver
32 *
33 * credits: Assisted by Mike Neuffer in the early low level DPT code
34 * Thanx to Mark Salyzyn of DPT for his assistance.
35 * Special thanx to Justin Gibbs for invaluable help in
36 * making this driver look and work like a FreeBSD component.
37 * Last but not least, many thanx to UCB and the FreeBSD
38 * team for creating and maintaining such a wonderful O/S.
39 *
40 * TODO: * Add EISA and ISA probe code.
41 * * Add driver-level RSID-0. This will allow interoperability with
42 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc. in recognizing RAID
43 * arrays that span controllers (Wow!).
44 */
45
46 /**
47 * IMPORTANT:
48 * There are two critical section "levels" used in this driver:
49 * splcam() and splsoftcam(). Splcam() protects us from re-entrancy
50 * from both our software and hardware interrupt handler. Splsoftcam()
51 * protects us only from our software interrupt handler. The two
52 * main data structures that need protection are the submitted and
53 * completed queue.
54 *
55 * There are three places where the submitted queue is accessed:
56 *
57 * 1. dpt_run_queue inserts into the queue
58 * 2. dpt_intr removes from the queue
 * 3. dpt_handle_timeouts potentially removes from the queue.
 *
 * There are three places where the completed queue is accessed:
62 * 1. dpt_intr() inserts into the queue
63 * 2. dpt_sintr() removes from the queue
64 * 3. dpt_handle_timeouts potentially inserts into the queue
65 */
66
67 #ident "$FreeBSD: src/sys/dev/dpt/dpt_scsi.c,v 1.4.2.8 1999/09/05 08:09:12 peter Exp $"
68 #define _DPT_C_
69
70 #include "opt_dpt.h"
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/buf.h>
75 #include <sys/kernel.h>
76
77 #include <machine/ipl.h>
78 #include <scsi/scsiconf.h>
79 #include <scsi/scsi_disk.h>
80
81 #include <machine/clock.h>
82
83 #include <vm/vm.h>
84 #include <vm/pmap.h>
85
86 #include <sys/dpt.h>
87
88 #ifdef INLINE
89 #undef INLINE
90 #endif
91
92 #define INLINE __inline
93 #define INLINE_Q
94
95 /* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
96
97 int dpt_controllers_present = 0;
98
99 /* Function Prototypes */
100
101 static INLINE u_int32_t dpt_inl(dpt_softc_t * dpt, u_int32_t offset);
102 static INLINE u_int8_t dpt_inb(dpt_softc_t * dpt, u_int32_t offset);
103 static INLINE void
104 dpt_outb(dpt_softc_t * dpt, u_int32_t offset,
105 u_int8_t value);
106 static INLINE void
107 dpt_outl(dpt_softc_t * dpt, u_int32_t offset,
108 u_int32_t value);
109 static INLINE_Q void dpt_Qpush_free(dpt_softc_t * dpt, dpt_ccb_t * ccb);
110 static INLINE_Q dpt_ccb_t *dpt_Qpop_free(dpt_softc_t * dpt);
111 static INLINE_Q void dpt_Qadd_waiting(dpt_softc_t * dpt, dpt_ccb_t * ccb);
112 static INLINE_Q void dpt_Qpush_waiting(dpt_softc_t * dpt, dpt_ccb_t * ccb);
113 static INLINE_Q void
114 dpt_Qremove_waiting(dpt_softc_t * dpt,
115 dpt_ccb_t * ccb);
116 static INLINE_Q void
117 dpt_Qadd_submitted(dpt_softc_t * dpt,
118 dpt_ccb_t * ccb);
119 static INLINE_Q void
120 dpt_Qremove_submitted(dpt_softc_t * dpt,
121 dpt_ccb_t * ccb);
122 static INLINE_Q void
123 dpt_Qadd_completed(dpt_softc_t * dpt,
124 dpt_ccb_t * ccb);
125 static INLINE_Q void
126 dpt_Qremove_completed(dpt_softc_t * dpt,
127 dpt_ccb_t * ccb);
128 static int
129 dpt_send_eata_command(dpt_softc_t * dpt,
130 eata_ccb_t * cmd_block,
131 u_int8_t command,
132 int32_t retries,
133 u_int8_t ifc, u_int8_t code,
134 u_int8_t code2);
135 static INLINE int
136 dpt_send_immediate(dpt_softc_t * dpt,
137 eata_ccb_t * cmd_block,
138 u_int8_t ifc, u_int8_t code,
139 u_int8_t code2);
140 static INLINE int dpt_just_reset(dpt_softc_t * dpt);
141 static INLINE int dpt_raid_busy(dpt_softc_t * dpt);
142 static INLINE void dpt_sched_queue(dpt_softc_t * dpt);
143
144 #ifdef DPT_MEASURE_PERFORMANCE
145 static void
146 dpt_IObySize(dpt_softc_t * dpt, dpt_ccb_t * ccb,
147 int op, int index);
148 #endif
149
150 static void dpt_swi_register(void *);
151
152 #ifdef DPT_HANDLE_TIMEOUTS
153 static void dpt_handle_timeouts(dpt_softc_t * dpt);
154 static void dpt_timeout(void *dpt);
155 #endif
156
157 #ifdef DPT_LOST_IRQ
158 static void dpt_irq_timeout(void *dpt);
159 #endif
160
161 typedef struct scsi_inquiry_data s_inq_data_t;
162
163
164 static int
165 dpt_scatter_gather(dpt_softc_t * dpt, dpt_ccb_t * ccb,
166 u_int32_t data_length,
167 caddr_t data);
168 static int dpt_alloc_freelist(dpt_softc_t * dpt);
169 static void dpt_run_queue(dpt_softc_t * dpt, int requests);
170 static void dpt_complete(dpt_softc_t * dpt);
171 static int
172 dpt_process_completion(dpt_softc_t * dpt,
173 dpt_ccb_t * ccb);
174 static void
175 dpt_set_target(int redo, dpt_softc_t * dpt,
176 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
177 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb);
178 static void
179 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
180 dpt_ccb_t * ccb, int mode, u_int8_t command,
181 u_int16_t length, u_int16_t offset);
182 static void dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb);
183 static void dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb);
184
185
186
187 u_int8_t dpt_blinking_led(dpt_softc_t * dpt);
188 int
189 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
190 caddr_t cmdarg, int minor_no);
191 void dpt_detect_cache(dpt_softc_t * dpt);
192 void dpt_shutdown(int howto, void *dpt);
193 static void hex_dump(u_int8_t * data, int length, char *name, int no);
194 char *i2bin(unsigned int no, int length);
195 dpt_conf_t *
196 dpt_get_conf(dpt_softc_t * dpt, u_int8_t page, u_int8_t target,
197 u_int8_t size, int extent);
198 static dpt_inq_t *dpt_get_board_data(dpt_softc_t * dpt, u_int32_t target_id);
199 int dpt_setup(dpt_softc_t * dpt, dpt_conf_t * conf);
200 int dpt_attach(dpt_softc_t * dpt);
201 static int32_t dpt_scsi_cmd(struct scsi_xfer * xs);
202 static void dptminphys(struct buf * bp);
203 static void dpt_sintr(void);
204 void dpt_intr(void *arg);
205 static char *scsi_cmd_name(u_int8_t cmd);
206
207 dpt_rb_t
208 dpt_register_buffer(int unit,
209 u_int8_t channel,
210 u_int8_t target,
211 u_int8_t lun,
212 u_int8_t mode,
213 u_int16_t length,
214 u_int16_t offset,
215 dpt_rec_buff callback,
216 dpt_rb_op_t op);
217 int
218 dpt_send_buffer(int unit,
219 u_int8_t channel,
220 u_int8_t target,
221 u_int8_t lun,
222 u_int8_t mode,
223 u_int16_t length,
224 u_int16_t offset,
225 void *data,
226 buff_wr_done callback);
227
228 extern void (*ihandlers[32]) __P((void));
229
230 u_long dpt_unit; /* This one is kernel-related, do not touch! */
231
232 /* The linked list of softc structures */
233 TAILQ_HEAD(, dpt_softc) dpt_softc_list = TAILQ_HEAD_INITIALIZER(dpt_softc_list);
234
/*
 * These will have to be setup by parameters passed at boot/load time. For
 * performance reasons, we make them constants for the time being.
 */
239 #define dpt_min_segs DPT_MAX_SEGS
240 #define dpt_max_segs DPT_MAX_SEGS
241
242 static struct scsi_adapter dpt_switch =
243 {
244 dpt_scsi_cmd,
245 dptminphys,
246 NULL,
247 NULL,
248 NULL,
249 "dpt",
250 {0, 0}
251 };
252
253 static struct scsi_device dpt_dev =
254 {
255 NULL, /* Use default error handler */
256 NULL, /* have a queue, served by this */
257 NULL, /* have no async handler */
258 NULL, /* Use default 'done' routine */
259 "dpt",
260 0,
261 {0, 0}
262 };
263
264 /* Software Interrupt Vector */
265
266 static void
267 dpt_swi_register(void *unused)
268 {
269 ihandlers[SWI_CAMBIO] = dpt_sintr;
270 }
271
272 SYSINIT(dpt_camswi, SI_SUB_DRIVERS, SI_ORDER_FIRST, dpt_swi_register, NULL)
273 /* These functions allows us to do memory mapped I/O, if hardware supported. */
274
275 static INLINE u_int8_t
276 dpt_inb(dpt_softc_t * dpt, u_int32_t offset)
277 {
278 u_int8_t result;
279
280 if (dpt->v_membase != NULL) {
281 result = dpt->v_membase[offset];
282 } else {
283 result = inb(dpt->io_base + offset);
284 }
285 return (result);
286 }
287
288 static INLINE u_int32_t
289 dpt_inl(dpt_softc_t * dpt, u_int32_t offset)
290 {
291 u_int32_t result;
292
293 if (dpt->v_membase != NULL) {
294 result = *(volatile u_int32_t *) (&dpt->v_membase[offset]);
295 } else {
296 result = inl(dpt->io_base + offset);
297 }
298 return (result);
299 }
300
301 static INLINE void
302 dpt_outb(dpt_softc_t * dpt, u_int32_t offset, u_int8_t value)
303 {
304 if (dpt->v_membase != NULL) {
305 dpt->v_membase[offset] = value;
306 } else {
307 outb(dpt->io_base + offset, value);
308 }
309 }
310
311 static INLINE void
312 dpt_outl(dpt_softc_t * dpt, u_int32_t offset, u_int32_t value)
313 {
314 if (dpt->v_membase != NULL) {
315 *(volatile u_int32_t *) (&dpt->v_membase[offset]) = value;
316 } else {
317 outl(dpt->io_base + offset, value);
318 }
319 }
320
321 static INLINE void
322 dpt_sched_queue(dpt_softc_t * dpt)
323 {
324 if (dpt->state & DPT_HA_QUIET) {
325 printf("dpt%d: Under Quiet Busses Condition. "
326 "No Commands are submitted\n", dpt->unit);
327 return;
328 }
329 setsoftcambio();
330 }
331
332 static INLINE int
333 dpt_wait(dpt_softc_t * dpt, u_int8_t bits, u_int8_t state)
334 {
335 int i;
336 u_int8_t c;
337
338 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
339 c = dpt_inb(dpt, HA_RSTATUS) & bits;
340 if (c == state)
341 return (0);
342 else
343 DELAY(50);
344 }
345 return (-1);
346 }
347
348 static INLINE int
349 dpt_just_reset(dpt_softc_t * dpt)
350 {
351 if ((dpt_inb(dpt, 2) == 'D')
352 && (dpt_inb(dpt, 3) == 'P')
353 && (dpt_inb(dpt, 4) == 'T')
354 && (dpt_inb(dpt, 5) == 'H'))
355 return (1);
356 else
357 return (0);
358 }
359
360 static INLINE int
361 dpt_raid_busy(dpt_softc_t * dpt)
362 {
363 if ((dpt_inb(dpt, 0) == 'D')
364 && (dpt_inb(dpt, 1) == 'P')
365 && (dpt_inb(dpt, 2) == 'T'))
366 return (1);
367 else
368 return (0);
369 }
370
371 /**
372 * Build a Command Block for target mode READ/WRITE BUFFER,
373 * with the ``sync'' bit ON.
374 *
375 * Although the length and offset are 24 bit fields in the command, they cannot
376 * exceed 8192 bytes, so we take them as short integers andcheck their range.
377 * If they are sensless, we round them to zero offset, maximum length and complain.
378 */
379
380 static void
381 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
382 dpt_ccb_t * ccb, int mode, u_int8_t command,
383 u_int16_t length, u_int16_t offset)
384 {
385 eata_ccb_t *cp;
386 int ospl;
387
388 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
389 printf("dpt%d: Length of %d, and offset of %d are wrong\n",
390 dpt->unit, length, offset);
391 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
392 offset = 0;
393 }
394 ccb->xs = NULL;
395 ccb->flags = 0;
396 ccb->state = DPT_CCB_STATE_NEW;
397 ccb->std_callback = (ccb_callback) dpt_target_done;
398 ccb->wrbuff_callback = NULL;
399
400 cp = &ccb->eata_ccb;
401 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
402 cp->SCSI_Reset = 0;
403 cp->HBA_Init = 0;
404 cp->Auto_Req_Sen = 1;
405 cp->cp_id = target;
406 cp->DataIn = 1;
407 cp->DataOut = 0;
408 cp->Interpret = 0;
409 cp->reqlen = htonl(sizeof(struct scsi_sense_data));
410 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
411 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
412 cp->cp_viraddr = (u_int32_t) & ccb;
413
414 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
415
416 cp->cp_scsi_cmd = command;
417 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
418 cp->cp_lun = lun; /* Order is important here! */
419 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */
420 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */
421 cp->cp_cdb[4] = (length >> 8) & 0xFF;
422 cp->cp_cdb[5] = length & 0xFF;
423 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */
424 cp->cp_cdb[7] = (length >> 8) & 0xFF;
425 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */
426 cp->cp_cdb[9] = 0; /* No sync, no match bits */
427
428 /**
429 * This could be optimized to live in dpt_register_buffer.
430 * We keep it here, just in case the kernel decides to reallocate pages
431 */
432 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
433 dpt->rw_buffer[bus][target][lun])) {
434 printf("dpt%d: Failed to setup Scatter/Gather for Target-Mode buffer\n",
435 dpt->unit);
436 }
437 }
438
439 /* Setup a target mode READ command */
440
441 #define cmd_ct dpt->performance.command_count[(int)ccb->eata_ccb.cp_scsi_cmd];
442
443 static void
444 dpt_set_target(int redo, dpt_softc_t * dpt,
445 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
446 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
447 {
448 int ospl;
449
450 #ifdef DPT_MEASURE_PERFORMANCE
451 struct timeval now;
452 #endif
453
454 if (dpt->target_mode_enabled) {
455 ospl = splcam();
456
457 if (!redo)
458 dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
459 SCSI_TM_READ_BUFFER, length, offset);
460
461 ccb->transaction_id = ++dpt->commands_processed;
462
463 #ifdef DPT_MEASURE_PERFORMANCE
464 ++cmd_ct;
465 microtime(&now);
466 ccb->command_started = now;
467 #endif
468 dpt_Qadd_waiting(dpt, ccb);
469 dpt_sched_queue(dpt);
470
471 splx(ospl);
472 } else {
473 printf("dpt%d: Target Mode Request, but Target Mode is OFF\n",
474 dpt->unit);
475 }
476 }
477
478 /**
479 * Schedule a buffer to be sent to another target.
480 * The work will be scheduled and the callback provided will be called when the work is
481 * actually done.
482 *
483 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients get notified
484 of receipt of buffers.
485 */
486
487 int
488 dpt_send_buffer(int unit,
489 u_int8_t channel,
490 u_int8_t target,
491 u_int8_t lun,
492 u_int8_t mode,
493 u_int16_t length,
494 u_int16_t offset,
495 void *data,
496 buff_wr_done callback)
497 {
498 dpt_softc_t *dpt;
499 dpt_ccb_t *ccb = NULL;
500 int ospl;
501 #ifdef DPT_MEASURE_PERFORMANCE
502 struct timeval now;
503 #endif
504
505 /* This is an external call. Be a bit paranoid */
506 for (dpt = TAILQ_FIRST(&dpt_softc_list);
507 dpt != NULL;
508 dpt = TAILQ_NEXT(dpt, links)) {
509 if (dpt->unit == unit)
510 goto valid_unit;
511 }
512
513 return (INVALID_UNIT);
514
515 valid_unit:
516
517 if (dpt->target_mode_enabled) {
518 if ((channel >= dpt->channels) || (target > dpt->max_id) ||
519 (lun > dpt->max_lun)) {
520 return (INVALID_SENDER);
521 }
522 if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
523 (dpt->buffer_receiver[channel][target][lun] == NULL))
524 return (NOT_REGISTERED);
525
526 ospl = splsoftcam();
527 /* Process the free list */
528 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
529 printf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
530 " Please try later\n",
531 dpt->unit);
532 splx(ospl);
533 return (NO_RESOURCES);
534 }
535 /* Now grab the newest CCB */
536 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
537 splx(ospl);
538 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
539 }
540 splx(ospl);
541
542 bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
543 dpt_target_ccb(dpt, channel, target, lun, ccb, mode, SCSI_TM_WRITE_BUFFER,
544 length, offset);
545 ccb->std_callback = (ccb_callback) callback; /* A hack. Potential
546 * trouble */
547
548 ospl = splcam();
549 ccb->transaction_id = ++dpt->commands_processed;
550
551 #ifdef DPT_MEASURE_PERFORMANCE
552 ++cmd_ct;
553 microtime(&now);
554 ccb->command_started = now;
555 #endif
556 dpt_Qadd_waiting(dpt, ccb);
557 dpt_sched_queue(dpt);
558
559 splx(ospl);
560 return (0);
561 }
562 return (DRIVER_DOWN);
563 }
564
565 static void
566 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
567 {
568 int ospl;
569 eata_ccb_t *cp;
570
571 cp = &ccb->eata_ccb;
572
573 /*
574 * Remove the CCB from the waiting queue.
575 * We do NOT put it back on the free, etc., queues as it is a special
576 * ccb, owned by the dpt_softc of this unit.
577 */
578 ospl = splsoftcam();
579 dpt_Qremove_completed(dpt, ccb);
580 splx(ospl);
581
582 #define br_channel (ccb->eata_ccb.cp_channel)
583 #define br_target (ccb->eata_ccb.cp_id)
584 #define br_lun (ccb->eata_ccb.cp_LUN)
585 #define br_index [br_channel][br_target][br_lun]
586 #define read_buffer_callback (dpt->buffer_receiver br_index )
587 #define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun])
588 #define cb(offset) (ccb->eata_ccb.cp_cdb[offset])
589 #define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5))
590 #define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8))
591
592 /* Different reasons for being here, you know... */
593 switch (ccb->eata_ccb.cp_scsi_cmd) {
594 case SCSI_TM_READ_BUFFER:
595 if (read_buffer_callback != NULL) {
596 /* This is a buffer generated by a kernel process */
597 read_buffer_callback(dpt->unit, br_channel,
598 br_target, br_lun,
599 read_buffer,
600 br_offset, br_length);
601 } else {
602 /*
603 * This is a buffer waited for by a user (sleeping)
604 * command
605 */
606 wakeup(ccb);
607 }
608
609 /* We ALWAYS re-issue the same command; args are don't-care */
610 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
611 break;
612
613 case SCSI_TM_WRITE_BUFFER:
614 (ccb->wrbuff_callback) (dpt->unit, br_channel, br_target,
615 br_offset, br_length,
616 br_lun, ccb->status_packet.hba_stat);
617 break;
618 default:
619 printf("dpt%d: %s is an unsupported command for target mode\n",
620 dpt->unit, scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
621 }
622 ospl = splsoftcam();
623 dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
624 dpt_Qpush_free(dpt, ccb);
625 splx(ospl);
626
627 }
628
629
630 /**
631 * Use this function to register a client for a buffer read target operation.
632 * The function you register will be called every time a buffer is received
633 * by the target mode code.
634 */
635
636 dpt_rb_t
637 dpt_register_buffer(int unit,
638 u_int8_t channel,
639 u_int8_t target,
640 u_int8_t lun,
641 u_int8_t mode,
642 u_int16_t length,
643 u_int16_t offset,
644 dpt_rec_buff callback,
645 dpt_rb_op_t op)
646 {
647 dpt_softc_t *dpt;
648 dpt_ccb_t *ccb = NULL;
649 int ospl;
650
651 for (dpt = TAILQ_FIRST(&dpt_softc_list);
652 dpt != NULL;
653 dpt = TAILQ_NEXT(dpt, links)) {
654 if (dpt->unit == unit)
655 goto valid_unit;
656 }
657
658 return (INVALID_UNIT);
659
660 valid_unit:
661
662 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
663 return (DRIVER_DOWN);
664
665 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
666 (lun > (dpt->max_lun - 1)))
667 return (INVALID_SENDER);
668
669 if (dpt->buffer_receiver[channel][target][lun] == NULL) {
670 if (op == REGISTER_BUFFER) {
671 /* Assign the requested callback */
672 dpt->buffer_receiver[channel][target][lun] = callback;
673 /* Get a CCB */
674 ospl = splsoftcam();
675
676 /* Process the free list */
677 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
678 printf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
679 " Please try later\n",
680 dpt->unit);
681 splx(ospl);
682 return (NO_RESOURCES);
683 }
684 /* Now grab the newest CCB */
685 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
686 splx(ospl);
687 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
688 }
689 splx(ospl);
690
691 /* Clean up the leftover of the previous tenant */
692 ccb->status = DPT_CCB_STATE_NEW;
693 dpt->target_ccb[channel][target][lun] = ccb;
694
695 dpt->rw_buffer[channel][target][lun] = malloc(DPT_RW_BUFFER_SIZE,
696 M_DEVBUF, M_NOWAIT);
697 if (dpt->rw_buffer[channel][target][lun] == NULL) {
698 printf("dpt%d: Failed to allocate Target-Mode buffer\n",
699 dpt->unit);
700 ospl = splsoftcam();
701 dpt_Qpush_free(dpt, ccb);
702 splx(ospl);
703 return (NO_RESOURCES);
704 }
705 dpt_set_target(0, dpt, channel, target, lun, mode, length,
706 offset, ccb);
707 return (SUCCESSFULLY_REGISTERED);
708 } else
709 return (NOT_REGISTERED);
710 } else {
711 if (op == REGISTER_BUFFER) {
712 if (dpt->buffer_receiver[channel][target][lun] == callback)
713 return (ALREADY_REGISTERED);
714 else
715 return (REGISTERED_TO_ANOTHER);
716 } else {
717 if (dpt->buffer_receiver[channel][target][lun] == callback) {
718 dpt->buffer_receiver[channel][target][lun] = NULL;
719 ospl = splsoftcam();
720 dpt_Qpush_free(dpt, ccb);
721 splx(ospl);
722 free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
723 return (SUCCESSFULLY_REGISTERED);
724 } else
725 return (INVALID_CALLBACK);
726 }
727
728 }
729 }
730
731 /**
732 * This routine will try to send an EATA command to the DPT HBA.
733 * It will, by default, try AHZ times, waiting 10ms between tries.
734 * It returns 0 on success and 1 on failure.
735 * It assumes the caller protects it with splbio() or some such.
736 *
737 * IMPORTANT: We do NOT protect the ports from multiple access in here.
738 * You are expected to do it in the calling routine.
739 * Here, we cannot have any clue as to the scope of your work.
740 */
741
742 static int
743 dpt_send_eata_command(dpt_softc_t * dpt, eata_ccb_t * cmd_block,
744 u_int8_t command, int32_t retries,
745 u_int8_t ifc, u_int8_t code, u_int8_t code2)
746 {
747 int32_t loop;
748 u_int8_t result;
749 u_int32_t test;
750 u_int32_t swapped_cmdaddr;
751
752 if (!retries)
753 retries = 1000;
754
755 /*
756 * I hate this polling nonsense. Wish there was a way to tell the DPT
757 * to go get commands at its own pace, or to interrupt when ready.
758 * In the mean time we will measure how many itterations it really
759 * takes.
760 */
761 for (loop = 0; loop < retries; loop++) {
762 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
763 break;
764 else
765 DELAY(50);
766 }
767
768 if (loop < retries) {
769 #ifdef DPT_MEASURE_PERFORMANCE
770 if (loop > dpt->performance.max_eata_tries)
771 dpt->performance.max_eata_tries = loop;
772
773 if (loop < dpt->performance.min_eata_tries)
774 dpt->performance.min_eata_tries = loop;
775 #endif
776 } else {
777 #ifdef DPT_MEASURE_PERFORMANCE
778 ++dpt->performance.command_too_busy;
779 #endif
780 return (1);
781 }
782
783 if (cmd_block != NULL) {
784 swapped_cmdaddr = vtophys(cmd_block);
785
786 #if (BYTE_ORDER == BIG_ENDIAN)
787 swapped_cmdaddr = ((swapped_cmdaddr >> 24) & 0xFF)
788 | ((swapped_cmdaddr >> 16) & 0xFF)
789 | ((swapped_cmdaddr >> 8) & 0xFF)
790 | (swapped_cmdaddr & 0xFF);
791 #endif
792 } else {
793 swapped_cmdaddr = 0;
794 }
795 /* And now the address */
796 dpt_outl(dpt, HA_WDMAADDR, swapped_cmdaddr);
797
798 if (command == EATA_CMD_IMMEDIATE) {
799 if (cmd_block == NULL) {
800 dpt_outb(dpt, HA_WCODE2, code2);
801 dpt_outb(dpt, HA_WCODE, code);
802 }
803 dpt_outb(dpt, HA_WIFC, ifc);
804 }
805 dpt_outb(dpt, HA_WCOMMAND, command);
806
807 return (0);
808 }
809
810 /**
811 * Send a command for immediate execution by the DPT
812 * See above function for IMPORTANT notes.
813 */
814
815 static INLINE int
816 dpt_send_immediate(dpt_softc_t * dpt, eata_ccb_t * cmd_block,
817 u_int8_t ifc, u_int8_t code, u_int8_t code2)
818 {
819 return (dpt_send_eata_command(dpt, cmd_block, EATA_CMD_IMMEDIATE,
820 /* retries */ 1000000, ifc, code, code2));
821 }
822
823 /* Return the state of the blinking DPT LED's */
824 u_int8_t
825 dpt_blinking_led(dpt_softc_t * dpt)
826 {
827 int ndx;
828 int ospl;
829 u_int32_t state;
830 u_int32_t previous;
831 u_int8_t result;
832
833 ospl = splcam();
834
835 result = 0;
836
837 for (ndx = 0, state = 0, previous = 0;
838 (ndx < 10) && (state != previous);
839 ndx++) {
840 previous = state;
841 state = dpt_inl(dpt, 1);
842 }
843
844 if ((state == previous) && (state == DPT_BLINK_INDICATOR))
845 result = dpt_inb(dpt, 5);
846
847 splx(ospl);
848 return (result);
849 }
850
851 /**
852 * Execute a command which did not come from the kernel's SCSI layer.
853 * The only way to map user commands to bus and target is to comply with the
854 * standard DPT wire-down scheme:
855 */
856
857 int
858 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
859 caddr_t cmdarg, int minor_no)
860 {
861 int channel, target, lun;
862 int huh;
863 int result;
864 int ospl;
865 int submitted;
866 dpt_ccb_t *ccb;
867 void *data;
868 struct timeval now;
869
870 data = NULL;
871 channel = minor2hba(minor_no);
872 target = minor2target(minor_no);
873 lun = minor2lun(minor_no);
874
875 if ((channel > (dpt->channels - 1))
876 || (target > dpt->max_id)
877 || (lun > dpt->max_lun))
878 return (ENXIO);
879
880 if (target == dpt->sc_scsi_link[channel].adapter_targ) {
881 /* This one is for the controller itself */
882 if ((user_cmd->eataID[0] != 'E')
883 || (user_cmd->eataID[1] != 'A')
884 || (user_cmd->eataID[2] != 'T')
885 || (user_cmd->eataID[3] != 'A')) {
886 return (ENXIO);
887 }
888 }
889 /* Get a DPT CCB, so we can prepare a command */
890 ospl = splsoftcam();
891
892 /* Process the free list */
893 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
894 printf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
895 " Please try later\n",
896 dpt->unit);
897 splx(ospl);
898 return (EFAULT);
899 }
900 /* Now grab the newest CCB */
901 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
902 splx(ospl);
903 panic("dpt%d: Got a NULL CCB from pop_free()\n", dpt->unit);
904 } else {
905 splx(ospl);
906 /* Clean up the leftover of the previous tenant */
907 ccb->status = DPT_CCB_STATE_NEW;
908 }
909
910 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
911 sizeof(eata_ccb_t));
912
913 /* We do not want to do user specified scatter/gather. Why?? */
914 if (ccb->eata_ccb.scatter == 1)
915 return (EINVAL);
916
917 ccb->eata_ccb.Auto_Req_Sen = 1;
918 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
919 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
920 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
921 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
922 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
923 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
924
925 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
926 /* Data I/O is involved in this command. Alocate buffer */
927 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
928 data = contigmalloc(ccb->eata_ccb.cp_datalen,
929 M_TEMP, M_WAITOK, 0, ~0,
930 ccb->eata_ccb.cp_datalen,
931 0x10000);
932 } else {
933 data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
934 M_WAITOK);
935 }
936
937 if (data == NULL) {
938 printf("dpt%d: Cannot allocate %d bytes "
939 "for EATA command\n", dpt->unit,
940 ccb->eata_ccb.cp_datalen);
941 return (EFAULT);
942 }
943 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
944 if (ccb->eata_ccb.DataIn == 1) {
945 if (copyin(usr_cmd_DMA,
946 data, ccb->eata_ccb.cp_datalen) == -1)
947 return (EFAULT);
948 }
949 } else {
950 /* No data I/O involved here. Make sure the DPT knows that */
951 ccb->eata_ccb.cp_datalen = 0;
952 data = NULL;
953 }
954
955 if (ccb->eata_ccb.FWNEST == 1)
956 ccb->eata_ccb.FWNEST = 0;
957
958 if (ccb->eata_ccb.cp_datalen != 0) {
959 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
960 data) != 0) {
961 if (data != NULL)
962 free(data, M_TEMP);
963 return (EFAULT);
964 }
965 }
966 /**
967 * We are required to quiet a SCSI bus.
968 * since we do not queue comands on a bus basis,
969 * we wait for ALL commands on a controller to complete.
970 * In the mean time, sched_queue() will not schedule new commands.
971 */
972 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
973 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
974 /* We wait for ALL traffic for this HBa to subside */
975 ospl = splsoftcam();
976 dpt->state |= DPT_HA_QUIET;
977 splx(ospl);
978
979 while ((submitted = dpt->submitted_ccbs_count) != 0) {
980 huh = tsleep((void *) dpt, PCATCH | PRIBIO, "dptqt",
981 100 * hz);
982 switch (huh) {
983 case 0:
984 /* Wakeup call received */
985 break;
986 case EWOULDBLOCK:
987 /* Timer Expired */
988 break;
989 default:
990 /* anything else */
991 break;
992 }
993 }
994 }
995 /* Resume normal operation */
996 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
997 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
998 ospl = splsoftcam();
999 dpt->state &= ~DPT_HA_QUIET;
1000 splx(ospl);
1001 }
1002 /**
1003 * Schedule the command and submit it.
1004 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
1005 */
1006 ccb->xs = NULL;
1007 ccb->flags = 0;
1008 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
1009
1010 ccb->transaction_id = ++dpt->commands_processed;
1011 ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
1012 ccb->result = (u_int32_t) & cmdarg;
1013 ccb->data = data;
1014
1015 #ifdef DPT_MEASURE_PERFORMANCE
1016 ++dpt->performance.command_count[(int) ccb->eata_ccb.cp_scsi_cmd];
1017 microtime(&now);
1018 ccb->command_started = now;
1019 #endif
1020 ospl = splcam();
1021 dpt_Qadd_waiting(dpt, ccb);
1022 splx(ospl);
1023
1024 dpt_sched_queue(dpt);
1025
1026 /* Wait for the command to complete */
1027 (void) tsleep((void *) ccb, PCATCH | PRIBIO, "dptucw", 100 * hz);
1028
1029 /* Free allocated memory */
1030 if (data != NULL)
1031 free(data, M_TEMP);
1032
1033 return (0);
1034 }
1035
1036 static void
1037 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
1038 {
1039 int ospl = splsoftcam();
1040 u_int32_t result;
1041 caddr_t cmd_arg;
1042
1043 /**
1044 * If Auto Request Sense is on, copyout the sense struct
1045 */
1046 #define usr_pckt_DMA (caddr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
1047 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
1048 if (ccb->eata_ccb.Auto_Req_Sen == 1) {
1049 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
1050 sizeof(struct scsi_sense_data))) {
1051 ccb->result = EFAULT;
1052 dpt_Qpush_free(dpt, ccb);
1053 splx(ospl);
1054 wakeup(ccb);
1055 return;
1056 }
1057 }
1058 /* If DataIn is on, copyout the data */
1059 if ((ccb->eata_ccb.DataIn == 1)
1060 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
1061 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
1062 dpt_Qpush_free(dpt, ccb);
1063 ccb->result = EFAULT;
1064
1065 splx(ospl);
1066 wakeup(ccb);
1067 return;
1068 }
1069 }
1070 /* Copyout the status */
1071 result = ccb->status_packet.hba_stat;
1072 cmd_arg = (caddr_t) ccb->result;
1073
1074 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
1075 dpt_Qpush_free(dpt, ccb);
1076 ccb->result = EFAULT;
1077 splx(ospl);
1078 wakeup(ccb);
1079 return;
1080 }
1081 /* Put the CCB back in the freelist */
1082 ccb->state |= DPT_CCB_STATE_COMPLETED;
1083 dpt_Qpush_free(dpt, ccb);
1084
1085 /* Free allocated memory */
1086 splx(ospl);
1087 return;
1088 }
1089
1090 /* Detect Cache parameters and size */
1091
1092 void
1093 dpt_detect_cache(dpt_softc_t * dpt)
1094 {
1095 int size;
1096 int bytes;
1097 int result;
1098 int ospl;
1099 int ndx;
1100 u_int8_t status;
1101 char name[64];
1102 char *param;
1103 char *buff;
1104 eata_ccb_t cp;
1105
1106 dpt_sp_t sp;
1107 struct scsi_sense_data snp;
1108
1109 /**
1110 * We lock out the hardware early, so that we can either complete the
1111 * operation or bust out right away.
1112 */
1113
1114 sprintf(name, "FreeBSD DPT Driver, version %d.%d.%d",
1115 DPT_RELEASE, DPT_VERSION, DPT_PATCH);
1116
1117 /**
1118 * Default setting, for best perfromance..
1119 * This is what virtually all cards default to..
1120 */
1121 dpt->cache_type = DPT_CACHE_WRITEBACK;
1122 dpt->cache_size = 0;
1123
1124 if ((buff = malloc(512, M_DEVBUF, M_NOWAIT)) == NULL) {
1125 printf("dpt%d: Failed to allocate %d bytes for a work "
1126 "buffer\n",
1127 dpt->unit, 512);
1128 return;
1129 }
1130 bzero(&cp, sizeof(eata_ccb_t));
1131 bzero((int8_t *) & sp, sizeof(dpt_sp_t));
1132 bzero((int8_t *) & snp, sizeof(struct scsi_sense_data));
1133 bzero(buff, 512);
1134
1135 /* Setup the command structure */
1136 cp.Interpret = 1;
1137 cp.DataIn = 1;
1138 cp.Auto_Req_Sen = 1;
1139 cp.reqlen = (u_int8_t) sizeof(struct scsi_sense_data);
1140
1141 cp.cp_id = 0; /* who cares? The HBA will interpret.. */
1142 cp.cp_LUN = 0; /* In the EATA packet */
1143 cp.cp_lun = 0; /* In the SCSI command */
1144 cp.cp_channel = 0;
1145
1146 cp.cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
1147 cp.cp_len = 56;
1148 cp.cp_dataDMA = htonl(vtophys(buff));
1149 cp.cp_statDMA = htonl(vtophys(&sp));
1150 cp.cp_reqDMA = htonl(vtophys(&snp));
1151
1152 cp.cp_identify = 1;
1153 cp.cp_dispri = 1;
1154
1155 /**
1156 * Build the EATA Command Packet structure
1157 * for a Log Sense Command.
1158 */
1159
1160 cp.cp_cdb[0] = 0x4d;
1161 cp.cp_cdb[1] = 0x0;
1162 cp.cp_cdb[2] = 0x40 | 0x33;
1163 cp.cp_cdb[7] = 1;
1164
1165 cp.cp_datalen = htonl(512);
1166
1167 ospl = splcam();
1168 result = dpt_send_eata_command(dpt, &cp, EATA_CMD_DMA_SEND_CP,
1169 10000, 0, 0, 0);
1170 if (result != 0) {
1171 printf("dpt%d WARNING: detect_cache() failed (%d) to send "
1172 "EATA_CMD_DMA_SEND_CP\n", dpt->unit, result);
1173 free(buff, M_TEMP);
1174 splx(ospl);
1175 return;
1176 }
1177 /* Wait for two seconds for a response. This can be slow... */
1178 for (ndx = 0;
1179 (ndx < 20000) &&
1180 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
1181 ndx++) {
1182 DELAY(50);
1183 }
1184
1185 /* Grab the status and clear interrupts */
1186 status = dpt_inb(dpt, HA_RSTATUS);
1187 splx(ospl);
1188
1189 /**
1190 * Sanity check
1191 */
1192 if (buff[0] != 0x33) {
1193 return;
1194 }
1195 bytes = DPT_HCP_LENGTH(buff);
1196 param = DPT_HCP_FIRST(buff);
1197
1198 if (DPT_HCP_CODE(param) != 1) {
1199 /**
1200 * DPT Log Page layout error
1201 */
1202 printf("dpt%d: NOTICE: Log Page (1) layout error\n",
1203 dpt->unit);
1204 return;
1205 }
1206 if (!(param[4] & 0x4)) {
1207 dpt->cache_type = DPT_NO_CACHE;
1208 return;
1209 }
1210 while (DPT_HCP_CODE(param) != 6) {
1211 param = DPT_HCP_NEXT(param);
1212 if ((param < buff)
1213 || (param >= &buff[bytes])) {
1214 return;
1215 }
1216 }
1217
1218 if (param[4] & 0x2) {
1219 /**
1220 * Cache disabled
1221 */
1222 dpt->cache_type = DPT_NO_CACHE;
1223 return;
1224 }
1225 if (param[4] & 0x4) {
1226 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
1227 return;
1228 }
1229 dpt->cache_size = param[5]
1230 | (param[6] < 8)
1231 | (param[7] << 16)
1232 | (param[8] << 24);
1233
1234 return;
1235 }
1236
1237 /**
1238 * Initializes the softc structure and allocate all sorts of storage.
1239 * Returns 0 on good luck, 1-n otherwise (error condition sensitive).
1240 */
1241
1242 int
1243 dpt_setup(dpt_softc_t * dpt, dpt_conf_t * conf)
1244 {
1245 dpt_inq_t *board_data;
1246 u_long rev;
1247 int ndx;
1248 int ospl;
1249 dpt_ccb_t *ccb;
1250
1251 board_data = dpt_get_board_data(dpt, conf->scsi_id0);
1252 if (board_data == NULL) {
1253 printf("dpt%d ERROR: Get_board_data() failure. "
1254 "Setup ignored!\n", dpt->unit);
1255 return (1);
1256 }
1257 dpt->total_ccbs_count = 0;
1258 dpt->free_ccbs_count = 0;
1259 dpt->waiting_ccbs_count = 0;
1260 dpt->submitted_ccbs_count = 0;
1261 dpt->completed_ccbs_count = 0;
1262
1263 switch (ntohl(conf->splen)) {
1264 case DPT_EATA_REVA:
1265 dpt->EATA_revision = 'a';
1266 break;
1267 case DPT_EATA_REVB:
1268 dpt->EATA_revision = 'b';
1269 break;
1270 case DPT_EATA_REVC:
1271 dpt->EATA_revision = 'c';
1272 break;
1273 case DPT_EATA_REVZ:
1274 dpt->EATA_revision = 'z';
1275 break;
1276 default:
1277 dpt->EATA_revision = '?';
1278 }
1279
1280 (void) memcpy(&dpt->board_data, board_data, sizeof(dpt_inq_t));
1281
1282 dpt->bustype = IS_PCI; /* We only support and operate on PCI devices */
1283 dpt->channels = conf->MAX_CHAN + 1;
1284 dpt->max_id = conf->MAX_ID;
1285 dpt->max_lun = conf->MAX_LUN;
1286 dpt->state |= DPT_HA_OK;
1287
1288 if (conf->SECOND)
1289 dpt->primary = FALSE;
1290 else
1291 dpt->primary = TRUE;
1292
1293 dpt->more_support = conf->MORE_support;
1294
1295 if (board_data == NULL) {
1296 rev = ('?' << 24)
1297 | ('-' << 16)
1298 | ('?' << 8)
1299 | '-';
1300 } else {
1301 /* Convert from network byte order to a "string" */
1302 rev = (dpt->board_data.firmware[0] << 24)
1303 | (dpt->board_data.firmware[1] << 16)
1304 | (dpt->board_data.firmware[2] << 8)
1305 | dpt->board_data.firmware[3];
1306 }
1307
1308 if (rev >= (('' << 24) + ('7' << 16) + ('G' << 8) + ''))
1309 dpt->immediate_support = 1;
1310 else
1311 dpt->immediate_support = 0;
1312
1313 dpt->broken_INQUIRY = FALSE;
1314
1315 for (ndx = 0; ndx < MAX_CHANNELS; ndx++)
1316 dpt->resetlevel[ndx] = DPT_HA_OK;
1317
1318 dpt->cplen = ntohl(conf->cplen);
1319 dpt->cppadlen = ntohs(conf->cppadlen);
1320 dpt->queuesize = ntohs(conf->queuesiz);
1321
1322 dpt->hostid[0] = conf->scsi_id0;
1323 dpt->hostid[1] = conf->scsi_id1;
1324 dpt->hostid[2] = conf->scsi_id2;
1325
1326 if (conf->SG_64K) {
1327 dpt->sgsize = SG_SIZE_BIG;
1328 } else if ((ntohs(conf->SGsiz) < 1)
1329 || (ntohs(conf->SGsiz) > SG_SIZE)) {
1330 /* Just a sanity check */
1331 dpt->sgsize = SG_SIZE;
1332 } else {
1333 dpt->sgsize = ntohs(conf->SGsiz);
1334 }
1335
1336 if (dpt->sgsize > dpt_max_segs)
1337 dpt->sgsize = dpt_max_segs;
1338
1339 if (dpt_alloc_freelist(dpt) != 0) {
1340 return (2);
1341 }
1342 /* Prepare for Target Mode */
1343 ospl = splsoftcam();
1344 dpt->target_mode_enabled = 1;
1345 splx(ospl);
1346
1347 return (0);
1348 }
1349
/*
 * The following function returns a pointer to a buffer which MUST be freed
 * by the caller, a la ``free(result, M_TEMP);'' -- dpt_get_conf() allocates
 * it with M_TEMP, not M_DEVBUF.  Returns NULL on failure.
 *
 * This function (and its like) assumes it is only running during system
 * initialization!
 */
static dpt_inq_t *
dpt_get_board_data(dpt_softc_t * dpt, u_int32_t target_id)
{
	/* get_conf returns 512 bytes, most of which are zeros... */
	return ((dpt_inq_t *) dpt_get_conf(dpt, 0, target_id,
					   sizeof(dpt_inq_t), 0));
}
1364
/*
 * The following function returns a pointer to a buffer which MUST be freed by
 * the caller, a la ``free(result, M_TEMP);''
 *
 * Issues an interpreted INQUIRY (with the requested page/extent) to the HBA
 * and returns the result buffer, or NULL on any failure.  The status packet
 * and sense buffer are stack variables handed to the HBA by physical
 * address, so this must only run in contexts where this frame stays live
 * for the duration of the command (system initialization).
 */
dpt_conf_t *
dpt_get_conf(dpt_softc_t * dpt, u_int8_t page, u_int8_t target,
	     u_int8_t size, int extent)
{
	dpt_sp_t sp;		/* status packet; HBA DMAs into this stack var */
	eata_ccb_t cp;

	/* Get_conf returns 512 bytes, most of which are zeros... */
	dpt_conf_t *config;

	u_short *ip;		/* NOTE(review): unused */
	u_int8_t status, sig1, sig2, sig3;	/* sig1..3 NOTE(review): unused */

	int ndx;
	int ospl;
	int result;

	struct scsi_sense_data snp;	/* sense buffer, also DMA target */
	/* NOTE(review): with M_WAITOK this malloc should not return NULL;
	 * the check below is harmless belt-and-braces */
	if ((config = (dpt_conf_t *) malloc(512, M_TEMP, M_WAITOK)) == NULL)
		return (NULL);

	bzero(&cp, sizeof(eata_ccb_t));
	bzero((int8_t *) & sp, sizeof(dpt_sp_t));
	bzero(config, size);

	/* Build an interpreted INQUIRY with auto request sense */
	cp.Interpret = 1;
	cp.DataIn = 1;
	cp.Auto_Req_Sen = 1;
	cp.reqlen = sizeof(struct scsi_sense_data);

	cp.cp_id = target;
	cp.cp_LUN = 0;		/* In the EATA packet */
	cp.cp_lun = 0;		/* In the SCSI command */

	cp.cp_scsi_cmd = INQUIRY;
	cp.cp_len = size;

	cp.cp_extent = extent;

	cp.cp_page = page;
	cp.cp_channel = 0;	/* DNC, Interpret mode is set */
	cp.cp_identify = 1;
	cp.cp_datalen = htonl(size);
	cp.cp_dataDMA = htonl(vtophys(config));
	cp.cp_statDMA = htonl(vtophys(&sp));
	cp.cp_reqDMA = htonl(vtophys(&snp));
	cp.cp_viraddr = (u_int32_t) & cp;

	ospl = splcam();

#ifdef DPT_RESET_BOARD
	printf("dpt%d: get_conf() resetting HBA at %x.\n",
	       dpt->unit, BaseRegister(dpt));
	dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
	DELAY(750000);
#endif

	/*
	 * This could be a simple for loop, but we suspected the compiler To
	 * have optimized it a bit too much.  Wait for the controller to
	 * become ready
	 */
	while ((((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC))
		&& (status != (HA_SREADY | HA_SSC | HA_SERROR))
		&&		/* This results from the `wd' probe at our
				 * addresses */
		(status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
	       || (dpt_wait(dpt, HA_SBUSY, 0))) {
		/*
		 * RAID Drives still Spinning up?  (This should only occur if
		 * the DPT controller is in a NON PC (PCI?) platform).
		 */
		if (dpt_raid_busy(dpt)) {
			printf("dpt%d WARNING: Get_conf() RSUS failed for "
			       "HBA at %x\n", dpt->unit, BaseRegister(dpt));
			free(config, M_TEMP);
			splx(ospl);
			return (NULL);
		}
	}

	DptStat_Reset_BUSY(&sp);

	/*
	 * XXXX We might want to do something more clever than aborting at
	 * this point, like resetting (rebooting) the controller and trying
	 * again.
	 */
	if ((result = dpt_send_eata_command(dpt, &cp, EATA_CMD_DMA_SEND_CP,
					    10000, 0, 0, 0)) != 0) {
		/* NOTE(review): message says DMA_READ_CONFIG but the command
		 * actually sent is EATA_CMD_DMA_SEND_CP */
		printf("dpt%d WARNING: Get_conf() failed (%d) to send "
		       "EATA_CMD_DMA_READ_CONFIG\n",
		       dpt->unit, result);
		free(config, M_TEMP);
		splx(ospl);
		return (NULL);
	}
	/* Wait for two seconds for a response.  This can be slow */
	for (ndx = 0;
	     (ndx < 20000)
	     && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
	     ndx++) {
		DELAY(50);
	}

	/* Grab the status and clear interrupts */
	status = dpt_inb(dpt, HA_RSTATUS);

	splx(ospl);

	/*
	 * Check the status carefully.  Return only if the
	 * command was successful.
	 */
	if (((status & HA_SERROR) == 0)
	    && (sp.hba_stat == 0)
	    && (sp.scsi_stat == 0)
	    && (sp.residue_len == 0)) {
		return (config);
	}
	free(config, M_TEMP);
	return (NULL);
}
1492
1493 /* This gets called once per SCSI bus defined in config! */
1494
1495 int
1496 dpt_attach(dpt_softc_t * dpt)
1497 {
1498 struct scsibus_data *scbus;
1499
1500 int ndx;
1501 int idx;
1502 int channel;
1503 int target;
1504 int lun;
1505
1506 struct scsi_inquiry_data *inq;
1507
1508 for (ndx = 0; ndx < dpt->channels; ndx++) {
1509 /**
1510 * We do not setup target nor lun on the assumption that
1511 * these are being set for individual devices that will be
1512 * attached to the bus later.
1513 */
1514 dpt->sc_scsi_link[ndx].adapter_unit = dpt->unit;
1515 dpt->sc_scsi_link[ndx].adapter_targ = dpt->hostid[ndx];
1516 dpt->sc_scsi_link[ndx].fordriver = 0;
1517 dpt->sc_scsi_link[ndx].adapter_softc = dpt;
1518 dpt->sc_scsi_link[ndx].adapter = &dpt_switch;
1519
1520 /*
1521 * These appear to be the # of openings per that DEVICE, not
1522 * the DPT!
1523 */
1524 dpt->sc_scsi_link[ndx].opennings = dpt->queuesize;
1525 dpt->sc_scsi_link[ndx].device = &dpt_dev;
1526 dpt->sc_scsi_link[ndx].adapter_bus = ndx;
1527
1528 /**
1529 * Prepare the scsibus_data area for the upperlevel scsi
1530 * code.
1531 */
1532 if ((scbus = scsi_alloc_bus()) == NULL)
1533 return 0;
1534
1535 dpt->sc_scsi_link[ndx].scsibus = ndx;
1536 scbus->maxtarg = dpt->max_id;
1537 scbus->adapter_link = &dpt->sc_scsi_link[ndx];
1538
1539 /*
1540 * Invite the SCSI control layer to probe the busses.
1541 */
1542
1543 dpt->handle_interrupts = 1; /* Now we are ready to work */
1544 scsi_attachdevs(scbus);
1545 scbus = (struct scsibus_data *) NULL;
1546 }
1547
1548 return (1);
1549 }
1550
1551 /**
1552 * Allocate another chunk of CCB's. Return 0 on success, 1 otherwise.
1553 * If the free list is empty, we allocate a block of entries and add them
1554 * to the list. We obtain, at most, DPT_FREE_LIST_INCREMENT CCB's at a time.
1555 * If we cannot, we will try fewer entries until we succeed.
1556 * For every CCB, we allocate a maximal Scatter/Gather list.
1557 * This routine also initializes all the static data that pertains to this CCB.
1558 */
1559
/*
 * XXX JGibbs - How big are your SG lists?  Remember that the kernel malloc
 *              uses buckets and mallocs in powers of two.  So, if your
 *              SG list is not a power of two (up to PAGESIZE), you might
 *              waste a lot of memory.  This was the reason the ahc driver
 *              allocates multiple SG lists at a time up to a PAGESIZE.
 *              Just something to keep in mind.
 * YYY Simon - Up to 8192 entries, each entry is two ulongs, comes to 64K.
 *             In reality they are much smaller, so you are right.
 */
1570 static int
1571 dpt_alloc_freelist(dpt_softc_t * dpt)
1572 {
1573 dpt_ccb_t *nccbp;
1574 dpt_sg_t *sg;
1575 u_int8_t *buff;
1576 int ospl;
1577 int incr;
1578 int ndx;
1579 int ccb_count;
1580
1581 ccb_count = DPT_FREE_LIST_INCREMENT;
1582
1583 #ifdef DPT_RESTRICTED_FREELIST
1584 if (dpt->total_ccbs_count != 0) {
1585 printf("dpt%d: Restricted FreeList, No more than %d entries "
1586 "allowed\n", dpt->unit, dpt->total_ccbs_count);
1587 return (-1);
1588 }
1589 #endif
1590
1591 /**
1592 * Allocate a group of dpt_ccb's. Work on the CCB's, one at a time
1593 */
1594 ospl = splsoftcam();
1595 for (ndx = 0; ndx < ccb_count; ndx++) {
1596 size_t alloc_size;
1597 dpt_sg_t *sgbuff;
1598
1599 alloc_size = sizeof(dpt_ccb_t); /* About 200 bytes */
1600
1601 if (alloc_size > PAGE_SIZE) {
1602 /*
1603 * Does not fit in a page. we try to fit in a
1604 * contigious block of memory. If not, we will, later
1605 * try to allocate smaller, and smaller chunks. There
1606 * is a tradeof between memory and performance here.
1607 * We know.this (crude) algorithm works well on
1608 * machines with plenty of memory. We have seen it
1609 * allocate in excess of 8MB.
1610 */
1611 nccbp = (dpt_ccb_t *) contigmalloc(alloc_size,
1612 M_DEVBUF, M_NOWAIT,
1613 0, ~0,
1614 PAGE_SIZE,
1615 0x10000);
1616 } else {
1617 /* fits all in one page */
1618 nccbp = (dpt_ccb_t *) malloc(alloc_size, M_DEVBUF,
1619 M_NOWAIT);
1620 }
1621
1622 if (nccbp == (dpt_ccb_t *) NULL) {
1623 printf("dpt%d ERROR: Alloc_free_list() failed to "
1624 "allocate %d\n",
1625 dpt->unit, ndx);
1626 splx(ospl);
1627 return (-1);
1628 }
1629 alloc_size = sizeof(dpt_sg_t) * dpt->sgsize;
1630
1631 if (alloc_size > PAGE_SIZE) {
1632 /* Does not fit in a page */
1633 sgbuff = (dpt_sg_t *) contigmalloc(alloc_size,
1634 M_DEVBUF, M_NOWAIT,
1635 0, ~0,
1636 PAGE_SIZE,
1637 0x10000);
1638 } else {
1639 /* fits all in one page */
1640 sgbuff = (dpt_sg_t *) malloc(alloc_size, M_DEVBUF,
1641 M_NOWAIT);
1642 }
1643
1644 /**
1645 * If we cannot allocate sg lists, we do not want the entire
1646 * list
1647 */
1648 if (sgbuff == (dpt_sg_t *) NULL) {
1649 free(nccbp, M_DEVBUF);
1650 --ndx;
1651 break;
1652 }
1653 /* Clean up the mailboxes */
1654 bzero(sgbuff, alloc_size);
1655 bzero(nccbp, sizeof(dpt_ccb_t));
1656 /*
1657 * this line is nullified by the one below.
1658 * nccbp->eata_ccb.cp_dataDMA = (u_int32_t) sgbuff; Thanx,
1659 * Mike!
1660 */
1661 nccbp->sg_list = sgbuff;
1662
1663 /**
1664 * Now that we have a new block of free CCB's, put them into
1665 * the free list. We always add to the head of the list and
1666 * always take form the head of the list (LIFO). Each ccb
1667 * has its own Scatter/Gather list. They are all of the same
1668 * size, Regardless of how much is used.
1669 *
1670 * While looping through all the new CCB's, we initialize them
1671 * properly. These items NEVER change; They are mostly
1672 * self-pointers, relative to the CCB itself.
1673 */
1674 dpt_Qpush_free(dpt, nccbp);
1675 ++dpt->total_ccbs_count;
1676
1677 nccbp->eata_ccb.cp_dataDMA = htonl(vtophys(nccbp->sg_list));
1678 nccbp->eata_ccb.cp_viraddr = (u_int32_t) nccbp; /* Unique */
1679 nccbp->eata_ccb.cp_statDMA = htonl(vtophys(&dpt->sp));
1680
1681 /**
1682 * See dpt_intr for why we make ALL CCB's ``have the same''
1683 * Status Packet
1684 */
1685 nccbp->eata_ccb.cp_reqDMA = htonl(vtophys(&nccbp->sense_data));
1686 }
1687
1688 splx(ospl);
1689
1690 return (0);
1691 }
1692
1693 /**
1694 * Prepare the data area for DMA.
1695 */
1696 static int
1697 dpt_scatter_gather(dpt_softc_t * dpt, dpt_ccb_t * ccb, u_int32_t data_length,
1698 caddr_t data)
1699 {
1700 int seg;
1701 int thiskv;
1702 int bytes_this_seg;
1703 int bytes_this_page;
1704 u_int32_t datalen;
1705 vm_offset_t vaddr;
1706 u_int32_t paddr;
1707 u_int32_t nextpaddr;
1708 dpt_sg_t *sg;
1709
1710 /* we start with Scatter/Gather OFF */
1711 ccb->eata_ccb.scatter = 0;
1712
1713 if (data_length) {
1714 if (ccb->flags & SCSI_DATA_IN) {
1715 ccb->eata_ccb.DataIn = 1;
1716 }
1717 if (ccb->flags & SCSI_DATA_OUT) {
1718 ccb->eata_ccb.DataOut = 1;
1719 }
1720 seg = 0;
1721 datalen = data_length;
1722 vaddr = (vm_offset_t) data;
1723 paddr = vtophys(vaddr);
1724 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->sg_list));
1725 sg = ccb->sg_list;
1726
1727 while ((datalen > 0) && (seg < dpt->sgsize)) {
1728 /* put in the base address and length */
1729 sg->seg_addr = paddr;
1730 sg->seg_len = 0;
1731
1732 /* do it at least once */
1733 nextpaddr = paddr;
1734
1735 while ((datalen > 0) && (paddr == nextpaddr)) {
1736 u_int32_t size;
1737
1738 /**
1739 * This page is contiguous (physically) with
1740 * the the last, just extend the length
1741 */
1742
1743 /* how far to the end of the page */
1744 nextpaddr = trunc_page(paddr) + PAGE_SIZE;
1745
1746 /* Compute the maximum size */
1747
1748 size = nextpaddr - paddr;
1749 if (size > datalen)
1750 size = datalen;
1751
1752 sg->seg_len += size;
1753 vaddr += size;
1754 datalen -= size;
1755 if (datalen > 0)
1756 paddr = vtophys(vaddr);
1757 }
1758
1759 /* Next page isn't contiguous, finish the seg */
1760 sg->seg_addr = htonl(sg->seg_addr);
1761 sg->seg_len = htonl(sg->seg_len);
1762 seg++;
1763 sg++;
1764 }
1765
1766 if (datalen) {
1767 /* There's still data, must have run out of segs! */
1768 printf("dpt%d: scsi_cmd() Too Many (%d) DMA segs "
1769 "(%d bytes left)\n",
1770 dpt->unit, dpt->sgsize, datalen);
1771 return (1);
1772 }
1773 if (seg == 1) {
1774 /**
1775 * After going through all this trouble, we
1776 * still have only one segment. As an
1777 * optimization measure, we will do the
1778 * I/O as a single, non-S/G operation.
1779 */
1780 ccb->eata_ccb.cp_dataDMA = ccb->sg_list[0].seg_addr;
1781 ccb->eata_ccb.cp_datalen = ccb->sg_list[0].seg_len;
1782 } else {
1783 /**
1784 * There is more than one segment. Use S/G.
1785 */
1786 ccb->eata_ccb.scatter = 1;
1787 ccb->eata_ccb.cp_datalen =
1788 htonl(seg * sizeof(dpt_sg_t));
1789 }
1790 } else { /* datalen == 0 */
1791 /* No data xfer */
1792 ccb->eata_ccb.cp_datalen = 0;
1793 ccb->eata_ccb.cp_dataDMA = 0;
1794 }
1795
1796 return (0);
1797 }
1798
1799 /**
1800 * This function obtains a CCB for a command and attempts to queue it to the
1801 * Controller.
1802 *
1803 * CCB Obtaining: Is done by getting the first entry in the free list for the
1804 * HBA. If we fail to get an scb, we send a TRY_LATER to the caller.
1805 *
1806 * XXX - JGibbs: XS_DRIVER_STUFFUP is equivalent to failing the I/O in the
1807 * current SCSI layer.
1808 *
1809 * Command Queuing: Is done by putting the command at the end of the waiting
1810 * queue. This assures fair chance for all commands to be processed.
1811 * If the queue was empty (has only this, current command in it, we try to
1812 * submit it to the HBA. Otherwise we return SUCCESSFULLY_QUEUED.
1813 */
1814
1815 static int32_t
1816 dpt_scsi_cmd(struct scsi_xfer * xs)
1817 {
1818 dpt_softc_t *dpt;
1819 int incr;
1820 int ndx;
1821 int ospl;
1822 int huh;
1823
1824 u_int32_t flags;
1825 dpt_ccb_t *ccb;
1826 u_int8_t status;
1827 u_int32_t aux_status = 0; /* Initialized to shut GCC up */
1828 int result;
1829
1830 int channel, target, lun;
1831
1832 struct scsi_inquiry_data *inq;
1833
1834 dpt = (dpt_softc_t *) xs->sc_link->adapter_softc;
1835
1836 flags = xs->flags;
1837 channel = xs->sc_link->adapter_bus;
1838 target = xs->sc_link->target;
1839 lun = xs->sc_link->lun;
1840
1841 #ifdef DPT_HANDLE_TIMEOUTS
1842 ospl = splsoftcam();
1843 if ((dpt->state & DPT_HA_TIMEOUTS_SET) == 0) {
1844 dpt->state |= DPT_HA_TIMEOUTS_SET;
1845 timeout(dpt_timeout, dpt, hz * 10);
1846 }
1847 splx(ospl);
1848 #endif
1849
1850 #ifdef DPT_LOST_IRQ
1851 ospl = splcam();
1852 if ((dpt->state & DPT_LOST_IRQ_SET) == 0) {
1853 printf("dpt%d: Initializing Lost IRQ Timer\n", dpt->unit);
1854 dpt->state |= DPT_LOST_IRQ_SET;
1855 timeout(dpt_irq_timeout, dpt, hz);
1856 }
1857 splx(ospl);
1858 #endif
1859
1860 /**
1861 * Examine the command flags and handle properly. XXXX We are not
1862 * handling external resets right now. Needs to be added. We do not
1863 * care about the SCSI_NOSLEEP flag as we do not sleep here. We have
1864 * to observe the SCSI_NOMASK flag, though.
1865 */
1866 if (xs->flags & SCSI_RESET) {
1867 printf("dpt%d: Unsupported option...\n"
1868 " I refuse to Reset b%dt%du%d...!\n",
1869 __FILE__, __LINE__, channel, target, lun);
1870 xs->error = XS_DRIVER_STUFFUP;
1871 return (COMPLETE);
1872 }
1873 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
1874 printf("dpt%d ERROR: Command \"%s\" recieved for b%dt%du%d\n"
1875 " but controller is shutdown; Aborting...\n",
1876 dpt->unit,
1877 scsi_cmd_name(xs->cmd->opcode),
1878 channel, target, lun);
1879 xs->error = XS_DRIVER_STUFFUP;
1880 return (COMPLETE);
1881 }
1882 if (flags & ITSDONE) {
1883 printf("dpt%d WARNING: scsi_cmd(%s) already done on "
1884 "b%dt%du%d?!\n",
1885 dpt->unit, scsi_cmd_name(xs->cmd->opcode),
1886 channel, target, lun);
1887 xs->flags &= ~ITSDONE;
1888 }
1889 if (!(flags & INUSE)) {
1890 printf("dpt%d WARNING: Unit not in use in scsi_cmd(%s) "
1891 "on b%dt%du%d?!\n",
1892 dpt->unit, scsi_cmd_name(xs->cmd->opcode), channel,
1893 target, lun);
1894 xs->flags |= INUSE;
1895 }
1896 /**
1897 * We do not want to be disrupted when doing this, or another caller
1898 * may do the same thing.
1899 */
1900 ospl = splsoftcam();
1901
1902 /* Process the free list */
1903 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
1904 printf("dpt%d ERROR: Cannot allocate any more free CCB's.\n"
1905 " Will try later\n",
1906 dpt->unit);
1907 xs->error = XS_DRIVER_STUFFUP;
1908 splx(ospl);
1909 return (COMPLETE);
1910 }
1911 /* Now grab the newest CCB */
1912 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
1913 /*
1914 * No need to panic here. We can continue with only as many
1915 * CCBs as we have.
1916 */
1917 printf("dpt%d ERROR: Got a NULL CCB from pop_free()\n",
1918 dpt->unit);
1919 xs->error = XS_DRIVER_STUFFUP;
1920 splx(ospl);
1921 return (COMPLETE);
1922 }
1923 #ifdef DPT_HANDLE_TIMEOUTS
1924 ccb->status &= ~(DPT_CCB_STATE_ABORTED | DPT_CCB_STATE_MARKED_LOST);
1925 #endif
1926
1927 splx(ospl);
1928 bcopy(xs->cmd, ccb->eata_ccb.cp_cdb, xs->cmdlen);
1929
1930 /* Put all the CCB population stuff below */
1931 ccb->xs = xs;
1932 ccb->flags = flags;
1933 /* We NEVER reset the bus from a command */
1934 ccb->eata_ccb.SCSI_Reset = 0;
1935 /* We NEVER re-boot the HBA from a * command */
1936 ccb->eata_ccb.HBA_Init = 0;
1937 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
1938 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
1939 ccb->std_callback = NULL;
1940 ccb->wrbuff_callback = NULL;
1941
1942 if (xs->sc_link->target == xs->sc_link->adapter_targ) {
1943 ccb->eata_ccb.Interpret = 1;
1944 } else {
1945 ccb->eata_ccb.Interpret = 0;
1946 }
1947
1948 ccb->eata_ccb.scatter = 0; /* S/G is OFF now */
1949 ccb->eata_ccb.DataIn = 0;
1950 ccb->eata_ccb.DataOut = 0;
1951
1952 /* At this time we do not deal with the RAID internals */
1953 ccb->eata_ccb.FWNEST = 0;
1954 ccb->eata_ccb.Phsunit = 0;
1955 /* We do not do SMARTROM kind of things */
1956 ccb->eata_ccb.I_AT = 0;
1957 /* We do not inhibit the cache at this time */
1958 ccb->eata_ccb.Disable_Cache = 0;
1959 ccb->eata_ccb.cp_channel = channel;
1960 ccb->eata_ccb.cp_id = target;
1961 ccb->eata_ccb.cp_LUN = lun; /**
1962 * In the EATA packet. We do not
1963 * change the SCSI command yet
1964 */
1965 /* We are currently dealing with target LUN's, not ROUTINEs */
1966 ccb->eata_ccb.cp_luntar = 0;
1967
1968 /**
1969 * XXXX - We grant the target disconnect prvileges, except in polled
1970 * mode (????).
1971 */
1972 if ((ccb->flags & SCSI_NOMASK) || !dpt->handle_interrupts) {
1973 ccb->eata_ccb.cp_dispri = 0;
1974 } else {
1975 ccb->eata_ccb.cp_dispri = 1;
1976 }
1977
1978 /* we always ask for Identify */
1979 ccb->eata_ccb.cp_identify = 1;
1980
1981 /**
1982 * These three are used for command queues and tags. How do we use
1983 * them?
1984 *
1985 * XXX - JGibbs: Most likely like so: ccb->eata_ccb.cp_msg[0] =
1986 * MSG_SIMPLEQ_TAG; ccb->eata_ccb.cp_msg[1] = tagid;
1987 * ccb->eata_ccb.cp_msg[2] = 0;
1988 *
1989 * YYY - Shimon: Thanx! We still do not do that as the current
1990 * firmware does it automatically, including on RAID arrays.
1991 */
1992
1993 ccb->eata_ccb.cp_msg[0] = 0;
1994 ccb->eata_ccb.cp_msg[1] = 0;
1995 ccb->eata_ccb.cp_msg[2] = 0;
1996
1997 /* End of CCB population */
1998
1999 if (dpt_scatter_gather(dpt, ccb, xs->datalen, xs->data) != 0) {
2000 xs->error = XS_DRIVER_STUFFUP;
2001 ospl = splsoftcam();
2002 dpt_Qpush_free(dpt, ccb);
2003 splx(ospl);
2004 return (COMPLETE);
2005 }
2006 xs->resid = 0;
2007 xs->status = 0;
2008
2009 /**
2010 * This is the polled mode section. If we are here to honor
2011 * SCSI_NOMASK, during scsi_attachdevs(), please notice that
2012 * interrupts are ENABLED in the system (2.2.1) and that the DPT
2013 * WILL generate them, unless we turn them off!
2014 */
2015
2016 /**
2017 * XXX - JGibbs: Polled mode was a botch at best. It's nice to
2018 * know that it goes completely away with the CAM code.
2019 * YYY - Simon: Take it out once the rest is stable. Be careful about
2020 * how you wait for commands to complete when you switch
2021 * to interrupt mode in the scanning code (initiated by
2022 * scsi_attachdevs).
2023 * Disabling it in 2.2 causes a hung system.
2024 */
2025
2026 if ((ccb->flags & SCSI_NOMASK) || !dpt->handle_interrupts) {
2027 /**
2028 * This is an ``immediate'' command. Poll it! We poll by
2029 * partially bypassing the queues. We first submit the
2030 * command by asking dpt_run_queue() to queue it. Then we
2031 * poll its status packet, until it completes. Then we give
2032 * it to dpt_process_completion() to analyze and then we
2033 * return.
2034 */
2035
2036 /*
2037 * Increase the number of commands queueable for a device. We
2038 * force each device to the maximum allowed for its HBA. This
2039 * appears wrong but all it will do is cause excessive
2040 * commands to sit in our queue. On the other hand, we can
2041 * burst as many commands as the DPT can take for a single
2042 * device. We do it here, so only while in polled mode (early
2043 * boot) do we waste time on it. We have no clean way
2044 * to overrule sdattach() zeal in depressing the opennings
2045 * back to one if it is more than 1.
2046 */
2047 if (xs->sc_link->opennings < dpt->queuesize) {
2048 xs->sc_link->opennings = dpt->queuesize;
2049 }
2050 /**
2051 * This test only protects us from submitting polled
2052 * commands during Non-polled times. We assumed polled
2053 * commands go in serially, one at a time. BTW, we have NOT
2054 * checked, nor verified the scope of the disaster that WILL
2055 * follow going into polled mode after being in interrupt
2056 * mode for any length of time.
2057 */
2058 if (dpt->submitted_ccbs_count < dpt->queuesize) {
2059 /**
2060 * Submit the request to the DPT. Unfortunately, ALL
2061 * this must be done as an atomic operation :-(
2062 */
2063 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2064 #define dpt_SP htonl(vtophys(&ccb->status_packet))
2065 #define dpt_sense htonl(vtophys(&ccb->sense_data))
2066 ccb->eata_ccb.cp_statDMA = dpt_SP;
2067 ccb->eata_ccb.cp_reqDMA = dpt_sense;
2068
2069 /* Try to queue a command */
2070 ospl = splcam();
2071 result = dpt_send_eata_command(dpt, &ccb->eata_ccb,
2072 EATA_CMD_DMA_SEND_CP,
2073 0, 0, 0, 0);
2074
2075 if (result != 0) {
2076 dpt_Qpush_free(dpt, ccb);
2077 xs->error = XS_DRIVER_STUFFUP;
2078 splx(ospl);
2079 return (COMPLETE);
2080 }
2081 } else {
2082 xs->error = XS_DRIVER_STUFFUP;
2083 dpt_Qpush_free(dpt, ccb);
2084 splx(ospl);
2085 return (COMPLETE);
2086 }
2087
2088 for (ndx = 0;
2089 (ndx < xs->timeout)
2090 && !((aux_status = dpt_inb(dpt, HA_RAUXSTAT))
2091 & HA_AIRQ);
2092 ndx++) {
2093 DELAY(50);
2094 }
2095
2096 /**
2097 * Get the status and clear the interrupt flag on the
2098 * controller
2099 */
2100 status = dpt_inb(dpt, HA_RSTATUS);
2101 splx(ospl);
2102
2103 ccb->status_reg = status;
2104 ccb->aux_status_reg = aux_status;
2105 /* This will setup the xs flags */
2106 dpt_process_completion(dpt, ccb);
2107
2108 ospl = splsoftcam();
2109 if ((status & HA_SERROR) || (ndx == xs->timeout)) {
2110 xs->error = XS_DRIVER_STUFFUP;
2111 }
2112
2113 dpt_Qpush_free(dpt, ccb);
2114 splx(ospl);
2115 return (COMPLETE);
2116 } else {
2117 struct timeval junk;
2118
2119 /**
2120 * Not a polled command.
2121 * The command can be queued normally.
2122 * We start a critical section PRIOR to submitting to the DPT,
2123 * and end it AFTER it moves to the submitted queue.
2124 * If not, we cal (and will!) be hit with a completion
2125 * interrupt while the command is in suspense between states.
2126 */
2127
2128 ospl = splsoftcam();
2129 ccb->transaction_id = ++dpt->commands_processed;
2130
2131 #ifdef DPT_MEASURE_PERFORMANCE
2132 #define cmd_ndx (int)ccb->eata_ccb.cp_scsi_cmd
2133 ++dpt->performance.command_count[cmd_ndx];
2134 microtime(&junk);
2135 ccb->command_started = junk;
2136 #endif
2137 dpt_Qadd_waiting(dpt, ccb);
2138 splx(ospl);
2139
2140 dpt_sched_queue(dpt);
2141 }
2142
2143 return (SUCCESSFULLY_QUEUED);
2144 }
2145
2146 /**
2147 * This function returns the transfer size in bytes,
2148 * as a function of the maximum number of Scatter/Gather
2149 * segments. It should do so for a given HBA, but right now it returns
2150 * dpt_min_segs, which is the SMALLEST number, from the ``weakest'' HBA found.
2151 */
2152
2153 static void
2154 dptminphys(struct buf * bp)
2155 {
2156 /**
2157 * This IS a performance sensitive routine.
2158 * It gets called at least once per I/O. Sometimes more
2159 */
2160
2161 if (dpt_min_segs == 0) {
2162 panic("DPT: Minphys without attach!\n");
2163 }
2164 if (bp->b_bcount > ((dpt_min_segs - 1) * PAGE_SIZE)) {
2165 #ifdef DPT_DEBUG_MINPHYS
2166 printf("DPT: Block size of %x is larger than %x. Truncating\n",
2167 bp->b_bcount, ((dpt_min_segs - 1) * PAGE_SIZE));
2168 #endif
2169 bp->b_bcount = ((dpt_min_segs - 1) * PAGE_SIZE);
2170 }
2171 }
2172
2173 /*
2174 * This function goes to the waiting queue, peels off a request, gives it to
2175 * the DPT HBA and returns. It takes care of some housekeeping details first.
2176 * The requests argument tells us how many requests to try and send to the
2177 * DPT. A requests = 0 will attempt to send as many as the controller can
2178 * take.
2179 */
2180
2181 static void
2182 dpt_run_queue(dpt_softc_t * dpt, int requests)
2183 {
2184 int req;
2185 int ospl;
2186 int ndx;
2187 int result;
2188
2189 u_int8_t status, aux_status;
2190
2191 eata_ccb_t *ccb;
2192 dpt_ccb_t *dccb;
2193
2194 if (TAILQ_EMPTY(&dpt->waiting_ccbs)) {
2195 return; /* Nothing to do if the list is empty */
2196 }
2197 if (!requests)
2198 requests = dpt->queuesize;
2199
2200 /* Main work loop */
2201 for (req = 0; (req < requests) && dpt->waiting_ccbs_count
2202 && (dpt->submitted_ccbs_count < dpt->queuesize); req++) {
2203 /**
2204 * Move the request from the waiting list to the submitted
2205 * list, and submit to the DPT.
2206 * We enter a critical section BEFORE even looking at the
2207 * queue, and exit it AFTER the ccb has moved to a
2208 * destination queue.
2209 * This is normally the submitted queue but can be the waiting
2210 * queue again, if pushing the command into the DPT failed.
2211 */
2212
2213 ospl = splsoftcam();
2214 dccb = TAILQ_FIRST(&dpt->waiting_ccbs);
2215
2216 if (dccb == NULL) {
2217 /* We have yet to see one report of this condition */
2218 panic("dpt%d ERROR: Race condition in run_queue "
2219 "(w%ds%d)\n",
2220 dpt->unit, dpt->waiting_ccbs_count,
2221 dpt->submitted_ccbs_count);
2222 splx(ospl);
2223 return;
2224 }
2225 dpt_Qremove_waiting(dpt, dccb);
2226 splx(ospl);
2227
2228 /**
2229 * Assign exact values here. We manipulate these values
2230 * indirectly elsewhere, so BE CAREFUL!
2231 */
2232 dccb->eata_ccb.cp_viraddr = (u_int32_t) dccb;
2233 dccb->eata_ccb.cp_statDMA = htonl(vtophys(&dpt->sp));
2234 dccb->eata_ccb.cp_reqDMA = htonl(vtophys(&dccb->sense_data));
2235
2236 if (dccb->xs != NULL)
2237 bzero(&dccb->xs->sense, sizeof(struct scsi_sense_data));
2238
2239 /* Try to queue a command */
2240 ospl = splcam();
2241
2242 if ((result = dpt_send_eata_command(dpt, &dccb->eata_ccb,
2243 EATA_CMD_DMA_SEND_CP, 0,
2244 0, 0, 0)) != 0) {
2245 dpt_Qpush_waiting(dpt, dccb);
2246 splx(ospl);
2247 return;
2248 }
2249 dpt_Qadd_submitted(dpt, dccb);
2250 splx(ospl);
2251 }
2252 }
2253
2254 /**
2255 * This is the interrupt handler for the DPT driver.
2256 * This routine runs at splcam (or whatever was configured for this device).
2257 */
2258
void
dpt_intr(void *arg)
{
	dpt_softc_t *dpt;
	dpt_softc_t *ldpt;	/* NOTE(review): unused in this function */

	u_int8_t status, aux_status;

	dpt_ccb_t *dccb;
	dpt_ccb_t *tccb;
	eata_ccb_t *ccb;	/* NOTE(review): unused in this function */

	dpt = (dpt_softc_t *) arg;

#ifdef DPT_INTR_DELAY
	DELAY(DPT_INTR_DELAY);
#endif

#ifdef DPT_MEASURE_PERFORMANCE
	{
		struct timeval junk;

		/* Timestamp entry so we can measure time spent in the ISR */
		microtime(&junk);
		dpt->performance.intr_started = junk;
	}
#endif

	/* First order of business is to check if this interrupt is for us */
	aux_status = dpt_inb(dpt, HA_RAUXSTAT);
	if (!(aux_status & HA_AIRQ)) {
#ifdef DPT_LOST_IRQ
		if (dpt->state & DPT_LOST_IRQ_ACTIVE) {
			/* Called from the lost-IRQ timer; nothing pending */
			dpt->state &= ~DPT_LOST_IRQ_ACTIVE;
			return;
		}
#endif
#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.spurious_interrupts;
#endif
		return;
	}
	if (!dpt->handle_interrupts) {
		/* Interrupt processing is administratively disabled */
#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.aborted_interrupts;
#endif
		status = dpt_inb(dpt, HA_RSTATUS);	/* This CLEARS
							 * interrupts */
		return;
	}
	/*
	 * What we want to do now, is to capture the status, all of it, move
	 * it where it belongs, wake up whoever sleeps waiting to process
	 * this result, and get out of here.
	 */
	dccb = dpt->sp.ccb;	/*
				 * There is a very SERIOUS and dangerous
				 * assumption here. We assume that EVERY
				 * interrupt is in response to some request we
				 * put to the DPT. IOW, we assume that the
				 * Virtual Address of CP always has a valid
				 * pointer that we put in! How will the DPT
				 * behave if it is in Target mode? How does it
				 * (and our driver) know it switches from
				 * Initiator to target? What will the SP be
				 * when a target mode interrupt is received?
				 */

#ifdef DPT_VERIFY_HINTR
	/* Poison the pointer so a completion without a fresh SP is caught */
	dpt->sp.ccb = (dpt_ccb_t *) 0x55555555;
#else
	dpt->sp.ccb = (dpt_ccb_t *) NULL;
#endif

#ifdef DPT_HANDLE_TIMEOUTS
	if (dccb->state & DPT_CCB_STATE_MARKED_LOST) {
		struct timeval now;
		u_int32_t age;
		struct scsi_xfer *xs = dccb->xs;

		microtime(&now);
		age = dpt_time_delta(dccb->command_started, now);

		/*
		 * NOTE(review): xs is NULL for driver-internal commands
		 * (see dpt_handle_timeouts); xs->timeout below would then
		 * dereference NULL -- confirm.
		 */
		printf("dpt%d: Salvaging Tx %d from the jaws of destruction "
		       "(%d/%d)\n",
		       dpt->unit, dccb->transaction_id, xs->timeout, age);
		dccb->state |= DPT_CCB_STATE_MARKED_SALVAGED;
		dccb->state &= ~DPT_CCB_STATE_MARKED_LOST;
	}
#endif

	/* Ignore status packets with EOC not set */
	if (dpt->sp.EOC == 0) {
		printf("dpt%d ERROR: Request %d recieved with clear EOC.\n"
		       " Marking as LOST.\n",
		       dpt->unit, dccb->transaction_id);
#ifdef DPT_VERIFY_HINTR
		dpt->sp.ccb = (dpt_sp_t *) 0x55555555;
#else
		dpt->sp.ccb = (dpt_sp_t *) NULL;
#endif

#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.aborted_interrupts;
#endif

#ifdef DPT_HANDLE_TIMEOUTS
		dccb->state |= DPT_CCB_STATE_MARKED_LOST;
#endif
		/* This CLEARS the interrupt! */
		status = dpt_inb(dpt, HA_RSTATUS);
		return;
	}
	dpt->sp.EOC = 0;

#ifdef DPT_VERIFY_HINTR
	/*
	 * Make SURE the CCB pointer is legitimate.  If it is not, we will
	 * find 0x55555555 here.  We see 0x00000000 or 0xffffffff when the
	 * PCI bus has DMA troubles (as when behind a PCI-PCI bridge).
	 */
	if ((dccb == NULL)
	    || (dccb == (dpt_ccb_t *) ~ 0)
	    || (dccb == (dpt_ccb_t *) 0x55555555)) {
		printf("dpt%d: BAD (%x) CCB in SP (AUX status = %s).\n",
		       dpt->unit, dccb, i2bin((unsigned long) aux_status,
					      sizeof(aux_status) * 8));
#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.aborted_interrupts;
#endif
		/* This CLEARS the interrupt! */
		status = dpt_inb(dpt, HA_RSTATUS);
		return;
	}
	/* Linear scan: verify the ccb really is on the SUBMITTED queue */
	for (tccb = TAILQ_FIRST(&dpt->submitted_ccbs);
	     (tccb != NULL) && (tccb != dccb);
	     tccb = TAILQ_NEXT(tccb, links));
	if (tccb == NULL) {
		/* Not submitted; report which queue (if any) it is on */
		printf("dpt%d: %x is not in the SUBMITTED queue\n",
		       dpt->unit, dccb);

		for (tccb = TAILQ_FIRST(&dpt->completed_ccbs);
		     (tccb != NULL) && (tccb != dccb);
		     tccb = TAILQ_NEXT(tccb, links));
		if (tccb != NULL)
			printf("dpt%d: %x is in the COMPLETED queue\n",
			       dpt->unit, dccb);

		for (tccb = TAILQ_FIRST(&dpt->waiting_ccbs);
		     (tccb != NULL) && (tccb != dccb);
		     tccb = TAILQ_NEXT(tccb, links));
		if (tccb != NULL)
			printf("dpt%d: %x is in the WAITING queue\n",
			       dpt->unit, dccb);

		for (tccb = TAILQ_FIRST(&dpt->free_ccbs);
		     (tccb != NULL) && (tccb != dccb);
		     tccb = TAILQ_NEXT(tccb, links));
		if (tccb != NULL)
			printf("dpt%d: %x is in the FREE queue\n",
			       dpt->unit, dccb);

#ifdef DPT_MEASURE_PERFORMANCE
		++dpt->performance.aborted_interrupts;
#endif
		/* This CLEARS the interrupt! */
		status = dpt_inb(dpt, HA_RSTATUS);
		return;
	}
#endif				/* DPT_VERIFY_HINTR */

	/*
	 * Copy the status packet from the general area to the dpt_ccb.
	 * According to Mark Salyzyn, we only need few pieces of it.
	 * Originally we had:
	 * bcopy((void *) &dpt->sp, (void *) &dccb->status_packet,
	 *       sizeof(dpt_sp_t));
	 */
	dccb->status_packet.hba_stat = dpt->sp.hba_stat;
	dccb->status_packet.scsi_stat = dpt->sp.scsi_stat;
	dccb->status_packet.residue_len = dpt->sp.residue_len;

	/* Make sure the EOC bit is OFF! */
	dpt->sp.EOC = 0;

	/* Clear interrupts, check for error */
	if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
		/*
		 * Error Condition. Check for magic cookie. Exit this test
		 * on earliest sign of non-reset condition
		 */

		/* Check that this is not a board reset interrupt */
		if (dpt_just_reset(dpt)) {
			/* NOTE(review): recovery is unimplemented; all
			 * in-flight transactions are effectively lost here. */
			printf("dpt%d: HBA rebooted.\n"
			       " All transactions should be "
			       "resubmitted\n",
			       dpt->unit);

			printf("dpt%d: >>---->> This is incomplete, fix me"
			       ".... <<----<<",
			       dpt->unit);
			printf(" Incomplete Code; Re-queue the lost "
			       "commands\n",
			       dpt->unit);
			Debugger("DPT Rebooted");

#ifdef DPT_MEASURE_PERFORMANCE
			++dpt->performance.aborted_interrupts;
#endif
			return;
		}
	}
	/* Record the raw registers for the completion handler */
	dccb->status_reg = status;
	dccb->aux_status_reg = aux_status;

	/* Mark BOTH queues as busy */
	dpt->queue_status |= (DPT_SUBMITTED_QUEUE_ACTIVE
			      | DPT_COMPLETED_QUEUE_ACTIVE);
	dpt_Qremove_submitted(dpt, dccb);
	dpt_Qadd_completed(dpt, dccb);
	dpt->queue_status &= ~(DPT_SUBMITTED_QUEUE_ACTIVE
			       | DPT_COMPLETED_QUEUE_ACTIVE);
	/* Let the software interrupt (dpt_sintr) do the heavy lifting */
	dpt_sched_queue(dpt);

#ifdef DPT_MEASURE_PERFORMANCE
	{
		u_int32_t result;
		struct timeval junk;

		microtime(&junk);

		result = dpt_time_delta(dpt->performance.intr_started, junk);

		if (result != ~0) {
			if (dpt->performance.max_intr_time < result)
				dpt->performance.max_intr_time = result;

			if (result < dpt->performance.min_intr_time) {
				dpt->performance.min_intr_time = result;
			}
		}
	}
#endif
}
2504
2505 /*
2506 * This function is the DPT_ISR Software Interrupt Service Routine. When the
2507 * DPT completes a SCSI command, it puts the results in a Status Packet, sets
2508 * up two 1-byte registers and generates an interrupt. We catch this
2509 * interrupt in dpt_intr and copy the whole status to the proper CCB. Once
2510 * this is done, we generate a software interrupt that calls this routine.
2511 * The routine then scans ALL the complete queues of all the DPT HBA's and
2512 * processes ALL the commands that are in the queue.
2513 *
2514 * XXXX REMEMBER: We always scan ALL the queues of all the HBA's. Always
2515 * starting with the first controller registered (dpt0). This creates
2516 * an ``unfair'' opportunity for the first controllers in being served.
2517 * Careful instrumentation may prove a need to change this policy.
2518 *
 * This function runs at splsoftcam.  Remember that.
2520 */
2521
static void
dpt_sintr(void)
{
	dpt_softc_t *dpt;
	int ospl;

	/* Find which DPT needs help */
	for (dpt = TAILQ_FIRST(&dpt_softc_list);
	     dpt != NULL;
	     dpt = TAILQ_NEXT(dpt, links)) {
		/*
		 * Drain the completed queue, to make room for new waiting
		 * requests.  We raise to splcam to block the hardware
		 * interrupt from mucking with the completed queue.
		 */
		ospl = splcam();
		if (dpt->queue_status & DPT_SINTR_ACTIVE) {
			/* Another instance is already servicing this HBA */
			splx(ospl);
			continue;
		}
		dpt->queue_status |= DPT_SINTR_ACTIVE;

		if (!TAILQ_EMPTY(&dpt->completed_ccbs)) {
			/* dpt_complete() manages its own spl level */
			splx(ospl);
			dpt_complete(dpt);
			ospl = splcam();
		}
		/* Submit as many waiting requests as the DPT can take */
		if (!TAILQ_EMPTY(&dpt->waiting_ccbs)) {
			dpt_run_queue(dpt, 0);
		}
		dpt->queue_status &= ~DPT_SINTR_ACTIVE;
		splx(ospl);
	}
}
2557
2558 /**
2559 * Scan the complete queue for a given controller and process ALL the completed
2560 * commands in the queue.
2561 */
2562
static void
dpt_complete(dpt_softc_t * dpt)
{
	dpt_ccb_t *ccb;
	int ospl;

	ospl = splcam();

	/* Only one instance may drain the completed queue at a time */
	if (dpt->queue_status & DPT_COMPLETED_QUEUE_ACTIVE) {
		splx(ospl);
		return;
	}
	dpt->queue_status |= DPT_COMPLETED_QUEUE_ACTIVE;

	while ((ccb = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
		struct scsi_xfer *xs;

		dpt_Qremove_completed(dpt, ccb);
		splx(ospl);

		/*
		 * Process this completed request.  A zero return means the
		 * transaction is finished (a non-zero return means it was
		 * re-queued for retry and must not be touched here).
		 *
		 * NOTE(review): when dpt_process_completion() returns
		 * non-zero, the loop re-tests the queue WITHOUT re-raising
		 * splcam -- looks racy against dpt_intr(); confirm.
		 */
		if (dpt_process_completion(dpt, ccb) == 0) {
			xs = ccb->xs;

			if (ccb->std_callback != NULL) {
				/* Driver-internal command; hand it back */
				(ccb->std_callback) (dpt, ccb->eata_ccb.cp_channel,
						     ccb);
			} else {
				/* Kernel command; recycle the ccb ... */
				ospl = splcam();
				dpt_Qpush_free(dpt, ccb);
				splx(ospl);

#ifdef DPT_MEASURE_PERFORMANCE
				{
					u_int32_t result;
					struct timeval junk;

					microtime(&junk);
					ccb->command_ended = junk;
#define time_delta dpt_time_delta(ccb->command_started, ccb->command_ended)
					result = time_delta;
#define maxctime dpt->performance.max_command_time[ccb->eata_ccb.cp_scsi_cmd]
#define minctime dpt->performance.min_command_time[ccb->eata_ccb.cp_scsi_cmd]

					/* ~0 flags an invalid time delta */
					if (result != ~0) {
						if (maxctime < result) {
							maxctime = result;
						}
						if ((minctime == 0)
						    || (minctime > result))
							minctime = result;
					}
				}
#endif

				/* ... and tell the SCSI layer we are done */
				scsi_done(xs);
			}
			ospl = splcam();
		}
	}
	splx(ospl);

	/*
	 * As per Justin's suggestion, we now will call the run_queue for
	 * this HBA. This is done in case there are left-over requests that
	 * were not submitted yet.
	 */
	dpt_run_queue(dpt, 0);
	ospl = splsoftcam();
	dpt->queue_status &= ~DPT_COMPLETED_QUEUE_ACTIVE;
	splx(ospl);
}
2635
2636 #ifdef DPT_MEASURE_PERFORMANCE
2637 /**
2638 * Given a dpt_ccb and a scsi_xfr structures,
2639 * this functions translates the result of a SCSI operation.
2640 * It returns values in the structures pointed by the arguments.
2641 * This function does NOT attempt to protect itself from bad influence!
2642 */
2643
2644 #define WRITE_OP 1
2645 #define READ_OP 2
2646 #define min_submitR dpt->performance.read_by_size_min_time[index]
2647 #define max_submitR dpt->performance.read_by_size_max_time[index]
2648 #define min_submitW dpt->performance.write_by_size_min_time[index]
2649 #define max_submitW dpt->performance.write_by_size_max_time[index]
2650
2651 static void
2652 dpt_IObySize(dpt_softc_t * dpt, dpt_ccb_t * ccb, int op, int index)
2653 {
2654 if (op == READ_OP) {
2655 ++dpt->performance.read_by_size_count[index];
2656 if (ccb->submitted_time < min_submitR)
2657 min_submitR = ccb->submitted_time;
2658
2659 if (ccb->submitted_time > max_submitR)
2660 max_submitR = ccb->submitted_time;
2661 } else { /* WRITE operation */
2662 ++dpt->performance.write_by_size_count[index];
2663 if (ccb->submitted_time < min_submitW)
2664 min_submitW = ccb->submitted_time;
2665
2666 if (ccb->submitted_time > max_submitW)
2667 max_submitW = ccb->submitted_time;
2668 }
2669 }
2670 #endif
2671
2672 static int
2673 dpt_process_completion(dpt_softc_t * dpt,
2674 dpt_ccb_t * ccb)
2675 {
2676 int ospl;
2677 struct scsi_xfer *xs;
2678
2679 if (ccb == NULL) {
2680 panic("dpt%d: Improper argumet to process_completion (%p%p)\n",
2681 dpt->unit, ccb);
2682 } else {
2683 xs = ccb->xs;
2684 }
2685
2686 #ifdef DPT_MEASURE_PERFORMANCE
2687 {
2688 u_int32_t size;
2689 struct scsi_rw_big *cmd;
2690 int op_type;
2691
2692 cmd = (struct scsi_rw_big *) & ccb->eata_ccb.cp_scsi_cmd;
2693
2694 switch (cmd->op_code) {
2695 case 0xa8: /* 12-byte READ */
2696 case 0x08: /* 6-byte READ */
2697 case 0x28: /* 10-byte READ */
2698 op_type = READ_OP;
2699 break;
2700 case 0x0a: /* 6-byte WRITE */
2701 case 0xaa: /* 12-byte WRITE */
2702 case 0x2a: /* 10-byte WRITE */
2703 op_type = WRITE_OP;
2704 break;
2705 default:
2706 op_type = 0;
2707 break;
2708 }
2709
2710 if (op_type != 0) {
2711
2712 size = (((u_int32_t) cmd->length2 << 8)
2713 | ((u_int32_t) cmd->length1)) << 9;
2714
2715 switch (size) {
2716 case 512:
2717 dpt_IObySize(dpt, ccb, op_type, SIZE_512);
2718 break;
2719 case 1024:
2720 dpt_IObySize(dpt, ccb, op_type, SIZE_1K);
2721 break;
2722 case 2048:
2723 dpt_IObySize(dpt, ccb, op_type, SIZE_2K);
2724 break;
2725 case 4096:
2726 dpt_IObySize(dpt, ccb, op_type, SIZE_4K);
2727 break;
2728 case 8192:
2729 dpt_IObySize(dpt, ccb, op_type, SIZE_8K);
2730 break;
2731 case 16384:
2732 dpt_IObySize(dpt, ccb, op_type, SIZE_16K);
2733 break;
2734 case 32768:
2735 dpt_IObySize(dpt, ccb, op_type, SIZE_32K);
2736 break;
2737 case 65536:
2738 dpt_IObySize(dpt, ccb, op_type, SIZE_64K);
2739 break;
2740 default:
2741 if (size > (1 << 16))
2742 dpt_IObySize(dpt, ccb, op_type,
2743 SIZE_BIGGER);
2744
2745 else
2746 dpt_IObySize(dpt, ccb, op_type,
2747 SIZE_OTHER);
2748 break;
2749 }
2750 }
2751 }
2752 #endif /* DPT_MEASURE_PERFORMANCE */
2753
2754
2755 switch ((int) ccb->status_packet.hba_stat) {
2756 case HA_NO_ERROR:
2757 if (xs != NULL) {
2758 xs->error = XS_NOERROR;
2759 xs->flags |= SCSI_ITSDONE;
2760 }
2761 break;
2762 case HA_ERR_SEL_TO:
2763 case HA_ERR_CMD_TO:
2764 if (xs != NULL) {
2765 xs->error |= XS_SELTIMEOUT;
2766 xs->flags |= SCSI_ITSDONE;
2767 }
2768 break;
2769 case HA_SCSIBUS_RESET:
2770 case HA_CP_ABORTED:
2771 case HA_CP_RESET:
2772 case HA_PCI_PARITY:
2773 case HA_PCI_MABORT:
2774 case HA_PCI_TABORT:
2775 case HA_PCI_STABORT:
2776 case HA_BUS_PARITY:
2777 case HA_UNX_MSGRJCT:
2778 if (ccb->retries++ > DPT_RETRIES) {
2779 if (xs != NULL) {
2780 xs->error |= XS_SENSE;
2781 xs->flags |= SCSI_ITSDONE;
2782 }
2783 } else {
2784 ospl = splsoftcam();
2785 dpt_Qpush_waiting(dpt, ccb);
2786 splx(ospl);
2787 dpt_sched_queue(dpt);
2788 }
2789 break;
2790 case HA_HBA_POWER_UP:
2791 case HA_UNX_BUSPHASE:
2792 case HA_UNX_BUS_FREE:
2793 case HA_SCSI_HUNG:
2794 case HA_RESET_STUCK:
2795 if (ccb->retries++ > DPT_RETRIES) {
2796 if (xs != NULL) {
2797 xs->error |= XS_SENSE;
2798 xs->flags |= SCSI_ITSDONE;
2799 }
2800 } else {
2801 ospl = splsoftcam();
2802 dpt_Qpush_waiting(dpt, ccb);
2803 splx(ospl);
2804 dpt_sched_queue(dpt);
2805 return (1);
2806 }
2807 break;
2808 case HA_RSENSE_FAIL:
2809 if (ccb->status_packet.EOC) {
2810 if (xs != NULL) {
2811 xs->error |= XS_SENSE;
2812 xs->flags |= SCSI_ITSDONE;
2813 }
2814 } else {
2815 if (ccb->retries++ > DPT_RETRIES) {
2816 if (xs != NULL) {
2817 xs->error |= XS_SENSE;
2818 xs->flags |= SCSI_ITSDONE;
2819 }
2820 } else {
2821 ospl = splsoftcam();
2822 dpt_Qpush_waiting(dpt, ccb);
2823 splx(ospl);
2824 dpt_sched_queue(dpt);
2825 return (1);
2826 }
2827 }
2828 break;
2829 case HA_PARITY_ERR:
2830 case HA_CP_ABORT_NA:
2831 case HA_CP_RESET_NA:
2832 case HA_ECC_ERR:
2833 if (xs != NULL) {
2834 xs->error |= XS_SENSE;
2835 xs->flags |= SCSI_ITSDONE;
2836 }
2837 break;
2838 default:
2839 printf("dpt%d: Undocumented Error %x",
2840 dpt->unit, ccb->status_packet.hba_stat);
2841 if (xs != NULL) {
2842 xs->error |= XS_SENSE;
2843 xs->flags |= SCSI_ITSDONE;
2844 }
2845 Debugger("Please mail this message to shimon@i-connect.net");
2846 break;
2847 }
2848
2849 if (xs != NULL) {
2850 if ((xs->error & XS_SENSE))
2851 bcopy(&ccb->sense_data, &xs->sense,
2852 sizeof(struct scsi_sense_data));
2853
2854 if (ccb->status_packet.residue_len != 0) {
2855 xs->flags |= SCSI_RESID_VALID;
2856 xs->resid = ccb->status_packet.residue_len;
2857 }
2858 }
2859 return (0);
2860 }
2861
2862 #ifdef DPT_LOST_IRQ
2863 /**
2864 * This functions handles the calling of the interrupt routine on a periodic
2865 * basis.
2866 * It is a completely ugly hack which purpose is to handle the problem of
2867 * missing interrupts on certain platforms..
2868 */
2869
2870 static void
2871 dpt_irq_timeout(void *arg)
2872 {
2873 dpt_softc_t *dpt = (dpt_softc_t *) arg;
2874 int ospl;
2875
2876
2877 if (!(dpt->state & DPT_LOST_IRQ_ACTIVE)) {
2878 ospl = splcam();
2879 dpt->state |= DPT_LOST_IRQ_ACTIVE;
2880 dpt_intr(dpt);
2881 splx(ospl);
2882 if (dpt->state & DPT_LOST_IRQ_ACTIVE) {
2883 printf("dpt %d: %d lost Interrupts Recovered\n",
2884 dpt->unit, ++dpt->lost_interrupts);
2885 }
2886 dpt->state &= ~DPT_LOST_IRQ_ACTIVE;
2887 }
2888 timeout(dpt_irq_timeout, (caddr_t) dpt, hz * 1);
2889 }
2890
2891 #endif /* DPT_LOST_IRQ */
2892
2893 #ifdef DPT_HANDLE_TIMEOUTS
2894 /**
2895 * This function walks down the SUBMITTED queue.
2896 * Every request that is too old gets aborted and marked.
2897 * Since the DPT will complete (interrupt) immediately (what does that mean?),
2898 * We just walk the list, aborting old commands and marking them as such.
2899 * The dpt_complete function will get rid of the that were interrupted in the
2900 * normal manner.
2901 *
2902 * This function needs to run at splcam(), as it interacts with the submitted
2903 * queue, as well as the completed and free queues. Just like dpt_intr() does.
2904 * To run it at any ISPL other than that of dpt_intr(), will mean that dpt_intr
2905 * willbe able to pre-empt it, grab a transaction in progress (towards
2906 * destruction) and operate on it. The state of this transaction will be not
2907 * very clear.
2908 * The only other option, is to lock it only as long as necessary but have
2909 * dpt_intr() spin-wait on it. In a UP environment this makes no sense and in
2910 * a SMP environment, the advantage is dubvious for a function that runs once
2911 * every ten seconds for few microseconds and, on systems with healthy
2912 * hardware, does not do anything anyway.
2913 */
2914
static void
dpt_handle_timeouts(dpt_softc_t * dpt)
{
	dpt_ccb_t *ccb;
	int ospl;

	ospl = splcam();

	/* Only one timeout scan at a time */
	if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) {
		printf("dpt%d WARNING: Timeout Handling Collision\n",
		       dpt->unit);
		splx(ospl);
		return;
	}
	dpt->state |= DPT_HA_TIMEOUTS_ACTIVE;

	/* Loop through the entire submitted queue, looking for lost souls */
	for (ccb = TAILQ_FIRST(&dpt->submitted_ccbs);
	     ccb != NULL;
	     ccb = TAILQ_NEXT(ccb, links)) {
		struct scsi_xfer *xs;
		struct timeval now;
		u_int32_t age, max_age;

		xs = ccb->xs;

		microtime(&now);
		age = dpt_time_delta(ccb->command_started, now);

#define TenSec	10000000

		if (xs == NULL) {	/* Local, non-kernel call */
			max_age = TenSec;
		} else {
			/*
			 * Scale the caller's timeout by queue depth (plus a
			 * fudge factor), but never below ten seconds.
			 */
			max_age = (((xs->timeout * (dpt->submitted_ccbs_count
						    + DPT_TIMEOUT_FACTOR))
				    > TenSec)
				   ? (xs->timeout * (dpt->submitted_ccbs_count
						     + DPT_TIMEOUT_FACTOR))
				   : TenSec);
		}

		/*
		 * If a transaction is marked lost and is TWICE as old as we
		 * care, then, and only then do we destroy it!
		 */
		if (ccb->state & DPT_CCB_STATE_MARKED_LOST) {
			/* Remember who is next */
			if (age > (max_age * 2)) {
				/*
				 * NOTE(review): ccb is removed (and possibly
				 * pushed free) below, yet the for-loop still
				 * advances via TAILQ_NEXT(ccb) -- the links
				 * may be stale by then; confirm.
				 */
				dpt_Qremove_submitted(dpt, ccb);
				ccb->state &= ~DPT_CCB_STATE_MARKED_LOST;
				ccb->state |= DPT_CCB_STATE_ABORTED;
#define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd)
				if (ccb->retries++ > DPT_RETRIES) {
					printf("dpt%d ERROR: Destroying stale "
					       "%d (%s)\n"
					       " on "
					       "c%db%dt%du%d (%d/%d)\n",
					       dpt->unit, ccb->transaction_id,
					       cmd_name,
					       dpt->unit,
					       ccb->eata_ccb.cp_channel,
					       ccb->eata_ccb.cp_id,
					       ccb->eata_ccb.cp_LUN, age,
					       ccb->retries);
#define send_ccb &ccb->eata_ccb
#define ESA	 EATA_SPECIFIC_ABORT
					(void) dpt_send_immediate(dpt,
								  send_ccb,
								  ESA,
								  0, 0);
					dpt_Qpush_free(dpt, ccb);

					/* The SCSI layer should re-try */
					/*
					 * NOTE(review): xs is NULL for local
					 * commands (see max_age above), and
					 * would be dereferenced here --
					 * confirm.
					 */
					xs->error |= XS_TIMEOUT;
					xs->flags |= SCSI_ITSDONE;
					scsi_done(xs);
				} else {
					printf("dpt%d ERROR: Stale %d (%s) on "
					       "c%db%dt%du%d (%d)\n"
					       "		  gets another "
					       "chance(%d/%d)\n",
					       dpt->unit, ccb->transaction_id,
					       cmd_name,
					       dpt->unit,
					       ccb->eata_ccb.cp_channel,
					       ccb->eata_ccb.cp_id,
					       ccb->eata_ccb.cp_LUN,
					       age, ccb->retries, DPT_RETRIES);

					dpt_Qpush_waiting(dpt, ccb);
					dpt_sched_queue(dpt);
				}
			}
		} else {
			/*
			 * This is a transaction that is not to be destroyed
			 * (yet) But it is too old for our liking. We wait as
			 * long as the upper layer thinks. Not really, we
			 * multiply that by the number of commands in the
			 * submitted queue + 1.
			 */
			/* (the MARKED_LOST re-test below is redundant here) */
			if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) &&
			    (age != ~0) && (age > max_age)) {
				printf("dpt%d ERROR: Marking %d (%s) on "
				       "c%db%dt%du%d \n"
				       "            as late after %dusec\n",
				       dpt->unit, ccb->transaction_id,
				       cmd_name,
				       dpt->unit, ccb->eata_ccb.cp_channel,
				       ccb->eata_ccb.cp_id,
				       ccb->eata_ccb.cp_LUN, age);
				ccb->state |= DPT_CCB_STATE_MARKED_LOST;
			}
		}
	}

	dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE;
	splx(ospl);
}
3035
3036 static void
3037 dpt_timeout(void *arg)
3038 {
3039 dpt_softc_t *dpt = (dpt_softc_t *) arg;
3040
3041 if (!(dpt->state & DPT_HA_TIMEOUTS_ACTIVE))
3042 dpt_handle_timeouts(dpt);
3043
3044 timeout(dpt_timeout, (caddr_t) dpt, hz * 10);
3045 }
3046
3047 #endif /* DPT_HANDLE_TIMEOUTS */
3048
3049 /*
3050 * Remove a ccb from the completed queue
3051 */
3052 static INLINE_Q void
3053 dpt_Qremove_completed(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3054 {
3055 #ifdef DPT_MEASURE_PERFORMANCE
3056 u_int32_t complete_time;
3057 struct timeval now;
3058
3059 microtime(&now);
3060 complete_time = dpt_time_delta(ccb->command_ended, now);
3061
3062 if (complete_time != ~0) {
3063 if (dpt->performance.max_complete_time < complete_time)
3064 dpt->performance.max_complete_time = complete_time;
3065 if (complete_time < dpt->performance.min_complete_time)
3066 dpt->performance.min_complete_time = complete_time;
3067 }
3068 #endif
3069
3070 TAILQ_REMOVE(&dpt->completed_ccbs, ccb, links);
3071 --dpt->completed_ccbs_count; /* One less completed ccb in the
3072 * queue */
3073 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3074 wakeup(&dpt);
3075 }
3076
3077 /**
3078 * Pop the most recently used ccb off the (HEAD of the) FREE ccb queue
3079 */
3080 static INLINE_Q dpt_ccb_t *
3081 dpt_Qpop_free(dpt_softc_t * dpt)
3082 {
3083 dpt_ccb_t *ccb;
3084
3085 if ((ccb = TAILQ_FIRST(&dpt->free_ccbs)) == NULL) {
3086 if (dpt_alloc_freelist(dpt))
3087 return (ccb);
3088 else
3089 return (dpt_Qpop_free(dpt));
3090 } else {
3091 TAILQ_REMOVE(&dpt->free_ccbs, ccb, links);
3092 --dpt->free_ccbs_count;
3093 }
3094
3095 return (ccb);
3096 }
3097
3098 /**
3099 * Put a (now freed) ccb back into the HEAD of the FREE ccb queue
3100 */
3101 static INLINE_Q void
3102 dpt_Qpush_free(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3103 {
3104 #ifdef DPT_FREELIST_IS_STACK
3105 TAILQ_INSERT_HEAD(&dpt->free_ccbs, ccb, links);
3106 #else
3107 TAILQ_INSERT_TAIL(&dpt->free_ccbs, ccb, links);
3108 #endif
3109
3110 ++dpt->free_ccbs_count;
3111 }
3112
3113 /**
3114 * Add a request to the TAIL of the WAITING ccb queue
3115 */
3116 static INLINE_Q void
3117 dpt_Qadd_waiting(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3118 {
3119 struct timeval junk;
3120
3121 TAILQ_INSERT_TAIL(&dpt->waiting_ccbs, ccb, links);
3122 ++dpt->waiting_ccbs_count;
3123
3124 #ifdef DPT_MEASURE_PERFORMANCE
3125 microtime(&junk);
3126 ccb->command_ended = junk;
3127 if (dpt->waiting_ccbs_count > dpt->performance.max_waiting_count)
3128 dpt->performance.max_waiting_count = dpt->waiting_ccbs_count;
3129 #endif
3130
3131 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3132 wakeup(&dpt);
3133 }
3134
3135 /**
3136 * Add a request to the HEAD of the WAITING ccb queue
3137 */
3138 static INLINE_Q void
3139 dpt_Qpush_waiting(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3140 {
3141 struct timeval junk;
3142
3143 TAILQ_INSERT_HEAD(&dpt->waiting_ccbs, ccb, links);
3144 ++dpt->waiting_ccbs_count;
3145
3146 #ifdef DPT_MEASURE_PERFORMANCE
3147 microtime(&junk);
3148 ccb->command_ended = junk;
3149
3150 if (dpt->performance.max_waiting_count < dpt->waiting_ccbs_count)
3151 dpt->performance.max_waiting_count = dpt->waiting_ccbs_count;
3152
3153 #endif
3154
3155 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3156 wakeup(&dpt);
3157 }
3158
3159 /**
3160 * Remove a ccb from the waiting queue
3161 */
3162 static INLINE_Q void
3163 dpt_Qremove_waiting(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3164 {
3165 #ifdef DPT_MEASURE_PERFORMANCE
3166 struct timeval now;
3167 u_int32_t waiting_time;
3168
3169 microtime(&now);
3170 waiting_time = dpt_time_delta(ccb->command_ended, now);
3171
3172 if (waiting_time != ~0) {
3173 if (dpt->performance.max_waiting_time < waiting_time)
3174 dpt->performance.max_waiting_time = waiting_time;
3175 if (waiting_time < dpt->performance.min_waiting_time)
3176 dpt->performance.min_waiting_time = waiting_time;
3177 }
3178 #endif
3179
3180 TAILQ_REMOVE(&dpt->waiting_ccbs, ccb, links);
3181 --dpt->waiting_ccbs_count; /* One less waiting ccb in the queue */
3182
3183 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3184 wakeup(&dpt);
3185 }
3186
3187 /**
3188 * Add a request to the TAIL of the SUBMITTED ccb queue
3189 */
3190 static INLINE_Q void
3191 dpt_Qadd_submitted(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3192 {
3193 struct timeval junk;
3194
3195 TAILQ_INSERT_TAIL(&dpt->submitted_ccbs, ccb, links);
3196 ++dpt->submitted_ccbs_count;
3197
3198 #ifdef DPT_MEASURE_PERFORMANCE
3199 microtime(&junk);
3200 ccb->command_ended = junk;
3201 if (dpt->performance.max_submit_count < dpt->submitted_ccbs_count)
3202 dpt->performance.max_submit_count = dpt->submitted_ccbs_count;
3203 #endif
3204
3205 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3206 wakeup(&dpt);
3207 }
3208
3209 /**
3210 * Add a request to the TAIL of the Completed ccb queue
3211 */
3212 static INLINE_Q void
3213 dpt_Qadd_completed(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3214 {
3215 struct timeval junk;
3216
3217 TAILQ_INSERT_TAIL(&dpt->completed_ccbs, ccb, links);
3218 ++dpt->completed_ccbs_count;
3219
3220 #ifdef DPT_MEASURE_PERFORMANCE
3221 microtime(&junk);
3222 ccb->command_ended = junk;
3223 if (dpt->performance.max_complete_count < dpt->completed_ccbs_count)
3224 dpt->performance.max_complete_count =
3225 dpt->completed_ccbs_count;
3226 #endif
3227
3228 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3229 wakeup(&dpt);
3230 }
3231
3232 /**
3233 * Remove a ccb from the submitted queue
3234 */
3235 static INLINE_Q void
3236 dpt_Qremove_submitted(dpt_softc_t * dpt, dpt_ccb_t * ccb)
3237 {
3238 #ifdef DPT_MEASURE_PERFORMANCE
3239 struct timeval now;
3240 u_int32_t submit_time;
3241
3242 microtime(&now);
3243 submit_time = dpt_time_delta(ccb->command_ended, now);
3244
3245 if (submit_time != ~0) {
3246 ccb->submitted_time = submit_time;
3247 if (dpt->performance.max_submit_time < submit_time)
3248 dpt->performance.max_submit_time = submit_time;
3249 if (submit_time < dpt->performance.min_submit_time)
3250 dpt->performance.min_submit_time = submit_time;
3251 } else {
3252 ccb->submitted_time = 0;
3253 }
3254
3255 #endif
3256
3257 TAILQ_REMOVE(&dpt->submitted_ccbs, ccb, links);
3258 --dpt->submitted_ccbs_count; /* One less submitted ccb in the
3259 * queue */
3260
3261 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE)
3262 || (dpt->state & DPT_HA_QUIET))
3263 wakeup(&dpt);
3264 }
3265
3266 /**
3267 * Handle Shutdowns.
3268 * Gets registered by the dpt_pci.c registar and called AFTER the system did
3269 * all its sync work.
3270 */
3271
3272 void
3273 dpt_shutdown(int howto, void *arg_dpt)
3274 {
3275 dpt_softc_t *ldpt;
3276 u_int8_t channel;
3277 u_int32_t target;
3278 u_int32_t lun;
3279 int waiting;
3280 int submitted;
3281 int completed;
3282 int huh;
3283 int wait_is_over;
3284 int ospl;
3285 dpt_softc_t *dpt;
3286
3287 dpt = (dpt_softc_t *) arg_dpt;
3288
3289 printf("dpt%d: Shutting down (mode %d) HBA. Please wait...",
3290 dpt->unit, howto);
3291 wait_is_over = 0;
3292
3293 ospl = splcam();
3294 dpt->state |= DPT_HA_SHUTDOWN_ACTIVE;
3295 splx(ospl);
3296
3297 while ((((waiting = dpt->waiting_ccbs_count) != 0)
3298 || ((submitted = dpt->submitted_ccbs_count) != 0)
3299 || ((completed = dpt->completed_ccbs_count) != 0))
3300 && (wait_is_over == 0)) {
3301 #ifdef DPT_DEBUG_SHUTDOWN
3302 printf("dpt%d: Waiting for queues w%ds%dc%d to deplete\n",
3303 dpt->unit, dpt->waiting_ccbs_count,
3304 dpt->submitted_ccbs_count,
3305 dpt->completed_ccbs_count);
3306 #endif
3307 huh = tsleep((void *) dpt, PCATCH | PRIBIO, "dptoff", 100 * hz);
3308 switch (huh) {
3309 case 0:
3310 /* Wakeup call received */
3311 goto checkit;
3312 break;
3313 case EWOULDBLOCK:
3314 /* Timer Expired */
3315 printf("dpt%d: Shutdown timer expired with queues at "
3316 "w%ds%dc%d\n",
3317 dpt->unit, dpt->waiting_ccbs_count,
3318 dpt->submitted_ccbs_count,
3319 dpt->completed_ccbs_count);
3320 ++wait_is_over;
3321 break;
3322 default:
3323 /* anything else */
3324 printf("dpt%d: Shutdown UNKNOWN with qeueues at "
3325 "w%ds%dc%d\n",
3326 dpt->unit, dpt->waiting_ccbs_count,
3327 dpt->submitted_ccbs_count,
3328 dpt->completed_ccbs_count);
3329 ++wait_is_over;
3330 break;
3331 }
3332 checkit:
3333
3334 }
3335
3336 /**
3337 * What we do for a shutdown, is give the DPT early power loss
3338 * warning
3339 . */
3340 (void) dpt_send_immediate(dpt, NULL, EATA_POWER_OFF_WARN, 0, 0);
3341 printf("dpt%d: Controller was warned of shutdown and is now "
3342 "disabled\n",
3343 dpt->unit);
3344
3345 return;
3346 }
3347
/* A primitive subset of isgraph.  Used by hex_dump below */
#define IsGraph(val) ((((val) >= ' ') && ((val) <= '~')))

/*
 * Dump `length' bytes starting at `data' to the console in classic
 * hex-dump layout: a relative offset, sixteen two-digit hex bytes split
 * into two groups of eight, then the printable-ASCII rendition of the
 * same sixteen bytes.  `name' and `no' only label the header line.
 */
static void
hex_dump(u_int8_t * data, int length, char *name, int no)
{
	int line, column, ndx;

	printf("Kernel Hex Dump for %s-%d at %p (%d bytes)\n",
	       name, no, data, length);

	/* Zero out all the counters and repeat for as many bytes as we have */
	for (ndx = 0, column = 0, line = 0; ndx < length; ndx++) {
		/* Print relative offset at the beginning of every line */
		if (column == 0)
			printf("%04x ", ndx);

		/* Print the byte as two hex digits, followed by a space */
		printf("%02x ", data[ndx]);

		/* Split the row of 16 bytes in half with an extra space */
		if (++column == 8) {
			printf(" ");
		}
		/* At the end of each row of 16 bytes, put a gap ... */
		if (column == 16) {
			printf(" ");

			/* ... and then print the ASCII-visible on a line. */
			for (column = 0; column < 16; column++) {
				/* Index back to the start of this row. */
				int ascii_pos = ndx - 15 + column;

				/*
				 * Non-printable and non-ASCII are just a
				 * dot. ;-(
				 */
				if (IsGraph(data[ascii_pos]))
					printf("%c", data[ascii_pos]);
				else
					printf(".");
			}

			/* Each line ends with a new line */
			printf("\n");
			column = 0;

			/*
			 * Every 256 bytes (16 lines of 16 bytes each) have
			 * an empty line, separating them from the next
			 * ``page''. Yes, I programmed on a Z-80, where a
			 * page was 256 bytes :-)
			 */
			if (++line > 15) {
				printf("\n");
				line = 0;
			}
		}
	}

	/*
	 * We are basically done.  We do want, however, to handle the ASCII
	 * translation of a fractional (short) last line.
	 */
	if ((ndx == length) && (column != 0)) {
		/* modulus = bytes ``missing'' from the short last row */
		int modulus = 16 - column, spaces = modulus * 3,
		    skip;

		/*
		 * Skip to the right, as many spaces as there are bytes
		 * ``missing'' (three output columns per missing byte) ...
		 */
		for (skip = 0; skip < spaces; skip++)
			printf(" ");

		/* ... And the gap separating the hex dump from the ASCII */
		printf(" ");

		/*
		 * Do not forget the extra space that splits the hex dump
		 * vertically
		 */
		if (column < 8)
			printf(" ");

		for (column = 0; column < (16 - modulus); column++) {
			int ascii_pos = ndx - (16 - modulus) + column;

			if (IsGraph(data[ascii_pos]))
				printf("%c", data[ascii_pos]);
			else
				printf(".");
		}
		printf("\n");
	}
}
3446
/*
 * Render an unsigned integer as a string of ones and zeros, in groups
 * of four bits separated by single spaces.  `length' selects how many
 * low-order bits (8, 16, 24, or 32) of `no' the returned string shows.
 * Returns a pointer into a static buffer (not re-entrant), or an error
 * string for an invalid `length'.
 */
static char i2bin_bitmap[48];	/* Used for binary dump of registers */

char *
i2bin(unsigned int no, int length)
{
	int ndx, rind;

	/*
	 * Walk the 32 bits MSB-first, emitting '1' or '0' for each, with
	 * a separating space after every fourth bit.
	 */
	for (ndx = 0, rind = 0; ndx < 32; ndx++, rind++) {
		i2bin_bitmap[rind] = (((no << ndx) & 0x80000000) ? '1' : '0');

		if (((ndx % 4) == 3))
			i2bin_bitmap[++rind] = ' ';
	}

	/*
	 * The loop always ends just past a trailing separator space;
	 * overwrite that space with the string terminator.  (The old
	 * post-loop test compared ndx % 4 after ndx had reached 32, so
	 * it could never strip the trailing blank.)
	 */
	i2bin_bitmap[rind - 1] = '\0';

	/*
	 * Each 4-bit group occupies 5 characters ("bbbb "), so the last
	 * `length' bits start (32 - length) / 4 * 5 characters in.
	 */
	switch (length) {
	case 8:
		return (i2bin_bitmap + 30);
	case 16:
		return (i2bin_bitmap + 20);
	case 24:
		return (i2bin_bitmap + 10);
	case 32:
		return (i2bin_bitmap);
	default:
		return ("i2bin: Invalid length Specs");
	}
}
3486
/*
 * Translate a SCSI command numeric code into a human readable string.
 * The string contains the command name, (CDB length variant), and the
 * [SCSI III documentation section] where the command is described.
 * Returns a pointer to a string literal; never NULL.
 */
static char *
scsi_cmd_name(u_int8_t cmd)
{
	switch (cmd) {
	case 0x40:
		return ("Change Definition [7.1]");
	case 0x39:
		return ("Compare [7.2]");
	case 0x18:
		return ("Copy [7.3]");
	case 0x3a:
		return ("Copy and Verify [7.4]");
	case 0x04:
		return ("Format Unit [6.1.1]");
	case 0x12:
		return ("Inquiry [7.5]");
	case 0x36:
		return ("Lock/Unlock Cache [6.1.2]");
	case 0x4c:
		return ("Log Select [7.6]");
	case 0x4d:
		return ("Log Sense [7.7]");
	case 0x15:
		return ("Mode Select (6) [7.8]");
	case 0x55:
		return ("Mode Select (10) [7.9]");
	case 0x1a:
		return ("Mode Sense (6) [7.10]");
	case 0x5a:
		return ("Mode Sense (10) [7.11]");
	case 0xa7:
		return ("Move Medium Attached [SMC]");
	case 0x5e:
		return ("Persistent Reserve In [7.12]");
	case 0x5f:
		return ("Persistent Reserve Out [7.13]");
	case 0x1e:
		return ("Prevent/Allow Medium Removal [7.14]");
	case 0x08:
		return ("Read, Receive (6) [6.1.5]");
	case 0x28:
		return ("Read (10) [6.1.5]");
	case 0xa8:
		return ("Read (12) [6.1.5]");
	case 0x3c:
		return ("Read Buffer [7.15]");
	case 0x25:
		return ("Read Capacity [6.1.6]");
	case 0x37:
		return ("Read Defect Data (10) [6.1.7]");
	case 0xb7:
		return ("Read Defect Data (12) [6.2.5]");
	case 0xb4:
		return ("Read Element Status Attached [SMC]");
	case 0x3e:
		return ("Read Long [6.1.8]");
	case 0x07:
		return ("Reassign Blocks [6.1.9]");
	case 0x81:
		return ("Rebuild [6.1.10]");
	case 0x1c:
		return ("Receive Diagnostics Result [7.16]");
	case 0x82:
		return ("Regenerate [6.1.11]");
	case 0x17:
		return ("Release(6) [7.17]");
	case 0x57:
		return ("Release(10) [7.18]");
	case 0xa0:
		return ("Report LUNs [7.19]");
	case 0x03:
		return ("Request Sense [7.20]");
	case 0x16:
		return ("Reserve (6) [7.21]");
	case 0x56:
		return ("Reserve(10) [7.22]");
	case 0x2b:
		/* Was a copy-paste of 0x56's "Reserve(10)"; 0x2b is SEEK(10). */
		return ("Seek (10) [6.1.12]");
	case 0x1d:
		return ("Send Diagnostics [7.23]");
	case 0x33:
		return ("Set Limit (10) [6.1.13]");
	case 0xb3:
		return ("Set Limit (12) [6.2.8]");
	case 0x1b:
		return ("Start/Stop Unit [6.1.14]");
	case 0x35:
		return ("Synchronize Cache [6.1.15]");
	case 0x00:
		return ("Test Unit Ready [7.24]");
	case 0x3d:
		return ("Update Block [6.2.9]");
	case 0x2f:
		return ("Verify (10) [6.1.16, 6.2.10]");
	case 0xaf:
		return ("Verify (12) [6.2.11]");
	case 0x0a:
		return ("Write, Send (6) [6.1.17, 9.2]");
	case 0x2a:
		return ("Write (10) [6.1.18]");
	case 0xaa:
		return ("Write (12) [6.2.13]");
	case 0x2e:
		return ("Write and Verify (10) [6.1.19, 6.2.14]");
	case 0xae:
		return ("Write and Verify (12) [6.1.19, 6.2.15]");
	case 0x3b:
		return ("Write Buffer [7.25]");
	case 0x3f:
		return ("Write Long [6.1.20]");
	case 0x41:
		return ("Write Same [6.1.21]");
	case 0x52:
		return ("XD Read [6.1.22]");
	case 0x50:
		return ("XD Write [6.1.22]");
	case 0x80:
		return ("XD Write Extended [6.1.22]");
	case 0x51:
		return ("XO Write [6.1.22]");
	default:
		return ("Unknown SCSI Command");
	}
}
3676
3677 /* End of the DPT driver */
3678
3679 /**
3680 * Hello emacs, these are the
3681 * Local Variables:
3682 * c-indent-level: 8
3683 * c-continued-statement-offset: 8
3684 * c-continued-brace-offset: 0
3685 * c-brace-offset: -8
3686 * c-brace-imaginary-offset: 0
3687 * c-argdecl-indent: 8
3688 * c-label-offset: -8
3689 * c++-hanging-braces: 1
3690 * c++-access-specifier-offset: -8
3691 * c++-empty-arglist-indent: 8
3692 * c++-friend-offset: 0
3693 * End:
3694 */
Cache object: b735a53094cce6a820a1ae70386921ef
|