FreeBSD/Linux Kernel Cross Reference
sys/dev/iir/iir.c
1 /* $FreeBSD: releng/5.1/sys/dev/iir/iir.c 114001 2003-04-25 05:37:04Z scottl $ */
2 /*
3 * Copyright (c) 2000-03 Intel Corporation
4 * All Rights Reserved
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification, immediately at the beginning of the file.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * iir.c: SCSI dependant code for the Intel Integrated RAID Controller driver
33 *
34 * Written by: Achim Leubner <achim.leubner@intel.com>
35 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
36 *
37 * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
38 * Mike Smith; Some driver source code.
39 * FreeBSD.ORG; Great O/S to work on and for.
40 *
41 * TODO:
42 */
43
44 #ident "$Id: iir.c 1.3 2003/03/21 16:28:32 achim Exp $"
45
46 #define _IIR_C_
47
48 /* #include "opt_iir.h" */
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/endian.h>
52 #include <sys/eventhandler.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/bus.h>
56
57 #include <machine/bus_memio.h>
58 #include <machine/bus_pio.h>
59 #include <machine/bus.h>
60 #include <machine/clock.h>
61 #include <machine/stdarg.h>
62
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_debug.h>
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70
71 #include <vm/vm.h>
72 #include <vm/pmap.h>
73
74 #include <dev/iir/iir.h>
75
76 struct gdt_softc *gdt_wait_gdt;
77 int gdt_wait_index;
78
79 #ifdef GDT_DEBUG
80 int gdt_debug = GDT_DEBUG;
81 #ifdef __SERIAL__
82 #define MAX_SERBUF 160
83 static void ser_init(void);
84 static void ser_puts(char *str);
85 static void ser_putc(int c);
86 static char strbuf[MAX_SERBUF+1];
87 #ifdef __COM2__
88 #define COM_BASE 0x2f8
89 #else
90 #define COM_BASE 0x3f8
91 #endif
92 static void ser_init()
93 {
94 unsigned port=COM_BASE;
95
96 outb(port+3, 0x80);
97 outb(port+1, 0);
98 /* 19200 Baud, if 9600: outb(12,port) */
99 outb(port, 6);
100 outb(port+3, 3);
101 outb(port+1, 0);
102 }
103
/*
 * Re-initialize the UART and emit a NUL-terminated string one
 * character at a time through ser_putc().
 */
static void ser_puts(char *str)
{
    char *p;

    ser_init();
    for (p = str; *p != '\0'; p++)
        ser_putc((int)*p);
}
112
113 static void ser_putc(int c)
114 {
115 unsigned port=COM_BASE;
116
117 while ((inb(port+5) & 0x20)==0);
118 outb(port, c);
119 if (c==0x0a)
120 {
121 while ((inb(port+5) & 0x20)==0);
122 outb(port, 0x0d);
123 }
124 }
125
126 int ser_printf(const char *fmt, ...)
127 {
128 va_list args;
129 int i;
130
131 va_start(args,fmt);
132 i = vsprintf(strbuf,fmt,args);
133 ser_puts(strbuf);
134 va_end(args);
135 return i;
136 }
137 #endif
138 #endif
139
140 /* The linked list of softc structures */
141 struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
142 /* controller cnt. */
143 int gdt_cnt = 0;
144 /* event buffer */
145 static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
146 static int elastidx, eoldidx;
147 /* statistics */
148 gdt_statist_t gdt_stat;
149
150 /* Definitions for our use of the SIM private CCB area */
151 #define ccb_sim_ptr spriv_ptr0
152 #define ccb_priority spriv_field1
153
154 static void iir_action(struct cam_sim *sim, union ccb *ccb);
155 static void iir_poll(struct cam_sim *sim);
156 static void iir_shutdown(void *arg, int howto);
157 static void iir_timeout(void *arg);
158 static void iir_watchdog(void *arg);
159
160 static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
161 int *secs);
162 static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
163 u_int8_t service, u_int16_t opcode,
164 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
165 static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
166 int timeout);
167
168 static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
169 static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
170 struct gdt_ccb *gccb);
171
172 static int gdt_sync_event(struct gdt_softc *gdt, int service,
173 u_int8_t index, struct gdt_ccb *gccb);
174 static int gdt_async_event(struct gdt_softc *gdt, int service);
175 static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
176 union ccb *ccb, int *lock);
177 static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
178 union ccb *ccb, int *lock);
179 static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
180 gdt_ucmd_t *ucmd, int *lock);
181 static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
182
183 static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
184 int nseg, int error);
185 static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
186 int nseg, int error);
187
188 int
189 iir_init(struct gdt_softc *gdt)
190 {
191 u_int16_t cdev_cnt;
192 int i, id, drv_cyls, drv_hds, drv_secs;
193 struct gdt_ccb *gccb;
194
195 GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
196
197 gdt->sc_state = GDT_POLLING;
198 gdt_clear_events();
199 bzero(&gdt_stat, sizeof(gdt_statist_t));
200
201 SLIST_INIT(&gdt->sc_free_gccb);
202 SLIST_INIT(&gdt->sc_pending_gccb);
203 TAILQ_INIT(&gdt->sc_ccb_queue);
204 TAILQ_INIT(&gdt->sc_ucmd_queue);
205 TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
206
207 /* DMA tag for mapping buffers into device visible space. */
208 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
209 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
210 /*highaddr*/BUS_SPACE_MAXADDR,
211 /*filter*/NULL, /*filterarg*/NULL,
212 /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
213 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
214 /*flags*/BUS_DMA_ALLOCNOW,
215 &gdt->sc_buffer_dmat) != 0) {
216 printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
217 gdt->sc_hanum);
218 return (1);
219 }
220 gdt->sc_init_level++;
221
222 /* DMA tag for our ccb structures */
223 if (bus_dma_tag_create(gdt->sc_parent_dmat,
224 /*alignment*/1,
225 /*boundary*/0,
226 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
227 /*highaddr*/BUS_SPACE_MAXADDR,
228 /*filter*/NULL,
229 /*filterarg*/NULL,
230 GDT_MAXCMDS * sizeof(struct gdt_ccb), /* maxsize */
231 /*nsegments*/1,
232 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
233 /*flags*/0, &gdt->sc_gccb_dmat) != 0) {
234 printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
235 gdt->sc_hanum);
236 return (1);
237 }
238 gdt->sc_init_level++;
239
240 /* Allocation for our ccbs */
241 if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
242 BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
243 printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
244 gdt->sc_hanum);
245 return (1);
246 }
247 gdt->sc_init_level++;
248
249 /* And permanently map them */
250 bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
251 gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
252 gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
253 gdt->sc_init_level++;
254
255 /* Clear them out. */
256 bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
257
258 /* Initialize the ccbs */
259 for (i = GDT_MAXCMDS-1; i >= 0; i--) {
260 gdt->sc_gccbs[i].gc_cmd_index = i + 2;
261 gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
262 gdt->sc_gccbs[i].gc_map_flag = FALSE;
263 if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
264 &gdt->sc_gccbs[i].gc_dmamap) != 0)
265 return(1);
266 gdt->sc_gccbs[i].gc_map_flag = TRUE;
267 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
268 }
269 gdt->sc_init_level++;
270
271 /* create the control device */
272 gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
273
274 /* allocate ccb for gdt_internal_cmd() */
275 gccb = gdt_get_ccb(gdt);
276 if (gccb == NULL) {
277 printf("iir%d: No free command index found\n",
278 gdt->sc_hanum);
279 return (1);
280 }
281
282 if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
283 0, 0, 0)) {
284 printf("iir%d: Screen service initialization error %d\n",
285 gdt->sc_hanum, gdt->sc_status);
286 gdt_free_ccb(gdt, gccb);
287 return (1);
288 }
289
290 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
291 GDT_LINUX_OS, 0, 0)) {
292 printf("iir%d: Cache service initialization error %d\n",
293 gdt->sc_hanum, gdt->sc_status);
294 gdt_free_ccb(gdt, gccb);
295 return (1);
296 }
297 gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
298 0, 0, 0);
299
300 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
301 0xffff, 1, 0)) {
302 printf("iir%d: Cache service mount error %d\n",
303 gdt->sc_hanum, gdt->sc_status);
304 gdt_free_ccb(gdt, gccb);
305 return (1);
306 }
307
308 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
309 GDT_LINUX_OS, 0, 0)) {
310 printf("iir%d: Cache service post-mount initialization error %d\n",
311 gdt->sc_hanum, gdt->sc_status);
312 gdt_free_ccb(gdt, gccb);
313 return (1);
314 }
315 cdev_cnt = (u_int16_t)gdt->sc_info;
316 gdt->sc_fw_vers = gdt->sc_service;
317
318 /* Detect number of buses */
319 gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
320 gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
321 gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
322 gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
323 gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
324 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
325 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
326 GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
327 gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
328 for (i = 0; i < gdt->sc_bus_cnt; i++) {
329 id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
330 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
331 gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
332 }
333 } else {
334 /* New method failed, use fallback. */
335 for (i = 0; i < GDT_MAXBUS; i++) {
336 gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
337 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
338 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
339 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
340 GDT_GETCH_SZ)) {
341 if (i == 0) {
342 printf("iir%d: Cannot get channel count, "
343 "error %d\n", gdt->sc_hanum, gdt->sc_status);
344 gdt_free_ccb(gdt, gccb);
345 return (1);
346 }
347 break;
348 }
349 gdt->sc_bus_id[i] =
350 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
351 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
352 }
353 gdt->sc_bus_cnt = i;
354 }
355 /* add one "virtual" channel for the host drives */
356 gdt->sc_virt_bus = gdt->sc_bus_cnt;
357 gdt->sc_bus_cnt++;
358
359 if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
360 0, 0, 0)) {
361 printf("iir%d: Raw service initialization error %d\n",
362 gdt->sc_hanum, gdt->sc_status);
363 gdt_free_ccb(gdt, gccb);
364 return (1);
365 }
366
367 /* Set/get features raw service (scatter/gather) */
368 gdt->sc_raw_feat = 0;
369 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
370 GDT_SCATTER_GATHER, 0, 0)) {
371 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
372 0, 0, 0)) {
373 gdt->sc_raw_feat = gdt->sc_info;
374 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
375 panic("iir%d: Scatter/Gather Raw Service "
376 "required but not supported!\n", gdt->sc_hanum);
377 gdt_free_ccb(gdt, gccb);
378 return (1);
379 }
380 }
381 }
382
383 /* Set/get features cache service (scatter/gather) */
384 gdt->sc_cache_feat = 0;
385 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
386 0, GDT_SCATTER_GATHER, 0)) {
387 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
388 0, 0, 0)) {
389 gdt->sc_cache_feat = gdt->sc_info;
390 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
391 panic("iir%d: Scatter/Gather Cache Service "
392 "required but not supported!\n", gdt->sc_hanum);
393 gdt_free_ccb(gdt, gccb);
394 return (1);
395 }
396 }
397 }
398
399 /* OEM */
400 gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
401 gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
402 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
403 GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
404 sizeof(gdt_oem_str_record_t))) {
405 strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
406 gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
407 gdt->oem_name[7]='\0';
408 } else {
409 /* Old method, based on PCI ID */
410 if (gdt->sc_vendor == INTEL_VENDOR_ID)
411 strcpy(gdt->oem_name,"Intel ");
412 else
413 strcpy(gdt->oem_name,"ICP ");
414 }
415
416 /* Scan for cache devices */
417 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
418 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
419 i, 0, 0)) {
420 gdt->sc_hdr[i].hd_present = 1;
421 gdt->sc_hdr[i].hd_size = gdt->sc_info;
422
423 /*
424 * Evaluate mapping (sectors per head, heads per cyl)
425 */
426 gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
427 if (gdt->sc_info2 == 0)
428 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
429 &drv_cyls, &drv_hds, &drv_secs);
430 else {
431 drv_hds = gdt->sc_info2 & 0xff;
432 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
433 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
434 drv_secs;
435 }
436 gdt->sc_hdr[i].hd_heads = drv_hds;
437 gdt->sc_hdr[i].hd_secs = drv_secs;
438 /* Round the size */
439 gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
440
441 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
442 GDT_DEVTYPE, i, 0, 0))
443 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
444 }
445 }
446
447 GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
448 gdt->sc_dpmembase,
449 gdt->sc_bus_cnt, cdev_cnt,
450 cdev_cnt == 1 ? "" : "s"));
451 gdt_free_ccb(gdt, gccb);
452
453 gdt_cnt++;
454 return (0);
455 }
456
/*
 * Release the resources allocated by iir_init().  sc_init_level
 * records how far initialization got; the switch deliberately falls
 * through so that each level also tears down everything built before
 * it, in reverse order of construction.
 */
void
iir_free(struct gdt_softc *gdt)
{
    int i;

    GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));

    switch (gdt->sc_init_level) {
    default:    /* fully initialized: remove the control device first */
        gdt_destroy_dev(gdt->sc_dev);
        /* FALLTHROUGH */
    case 5:     /* per-ccb buffer dmamaps + permanent gccb mapping */
        for (i = GDT_MAXCMDS-1; i >= 0; i--)
            if (gdt->sc_gccbs[i].gc_map_flag)
                bus_dmamap_destroy(gdt->sc_buffer_dmat,
                                   gdt->sc_gccbs[i].gc_dmamap);
        bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
        /* FALLTHROUGH */
    case 4:     /* gccb array memory */
        bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
        /* FALLTHROUGH */
    case 3:     /* gccb DMA tag */
        bus_dma_tag_destroy(gdt->sc_gccb_dmat);
        /* FALLTHROUGH */
    case 2:     /* data buffer DMA tag */
        bus_dma_tag_destroy(gdt->sc_buffer_dmat);
        /* FALLTHROUGH */
    case 1:     /* parent DMA tag */
        bus_dma_tag_destroy(gdt->sc_parent_dmat);
        /* FALLTHROUGH */
    case 0:
        break;
    }
    TAILQ_REMOVE(&gdt_softcs, gdt, links);
}
486
487 void
488 iir_attach(struct gdt_softc *gdt)
489 {
490 struct cam_devq *devq;
491 int i;
492
493 GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
494
495 /*
496 * Create the device queue for our SIM.
497 */
498 devq = cam_simq_alloc(GDT_MAXCMDS);
499 if (devq == NULL)
500 return;
501
502 for (i = 0; i < gdt->sc_bus_cnt; i++) {
503 /*
504 * Construct our SIM entry
505 */
506 gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
507 gdt, gdt->sc_hanum, /*untagged*/2,
508 /*tagged*/GDT_MAXCMDS, devq);
509 if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
510 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
511 break;
512 }
513
514 if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
515 cam_sim_path(gdt->sims[i]),
516 CAM_TARGET_WILDCARD,
517 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
518 xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
519 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
520 break;
521 }
522 }
523 if (i > 0)
524 EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
525 gdt, SHUTDOWN_PRI_DEFAULT);
526 /* iir_watchdog(gdt); */
527 gdt->sc_state = GDT_NORMAL;
528 }
529
530 static void
531 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
532 {
533 *cyls = size / GDT_HEADS / GDT_SECS;
534 if (*cyls < GDT_MAXCYLS) {
535 *heads = GDT_HEADS;
536 *secs = GDT_SECS;
537 } else {
538 /* Too high for 64 * 32 */
539 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
540 if (*cyls < GDT_MAXCYLS) {
541 *heads = GDT_MEDHEADS;
542 *secs = GDT_MEDSECS;
543 } else {
544 /* Too high for 127 * 63 */
545 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
546 *heads = GDT_BIGHEADS;
547 *secs = GDT_BIGSECS;
548 }
549 }
550 }
551
/*
 * Poll for completion of the command tracked by gccb.
 *
 * The interrupt handler, when it completes a command while
 * GDT_POLL_WAIT is set, records the owning softc and command index in
 * the globals gdt_wait_gdt / gdt_wait_index; we spin calling
 * iir_intr() until those match our command or `timeout` iterations
 * (1us DELAY each) elapse.
 *
 * Returns 1 if the command completed, 0 on timeout.
 */
static int
gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
         int timeout)
{
    int rv = 0;

    GDT_DPRINTF(GDT_D_INIT,
                ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));

    gdt->sc_state |= GDT_POLL_WAIT;
    do {
        iir_intr(gdt);
        if (gdt == gdt_wait_gdt &&
            gccb->gc_cmd_index == gdt_wait_index) {
            rv = 1;
            break;
        }
        DELAY(1);
    } while (--timeout);
    gdt->sc_state &= ~GDT_POLL_WAIT;

    /* Drain: wait until the controller is no longer busy. */
    while (gdt->sc_test_busy(gdt))
        DELAY(1);       /* XXX correct? */

    return (rv);
}
578
/*
 * Build and issue a single firmware command in polled mode and wait
 * for its completion; used only during initialization and shutdown.
 *
 * The command is encoded directly into the shared DPMEM command
 * buffer (gdt->sc_cmd).  The meaning of arg1..arg3 depends on
 * `service`/`opcode`:
 *   - cache service GDT_IOCTL: subfunc, channel, parameter size
 *     (parameters are passed via the gccb scratch area);
 *   - other cache service opcodes: device number, block number;
 *   - raw SCSI service: direction, bus, target/lun packed in arg3.
 *
 * A GDT_S_BSY status retries the command up to GDT_RETRIES times.
 * Returns non-zero iff the final firmware status is GDT_S_OK.
 */
static int
gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
                 u_int8_t service, u_int16_t opcode,
                 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
    int retries;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
                            gdt, service, opcode, arg1, arg2, arg3));

    bzero(gdt->sc_cmd, GDT_CMD_SZ);

    for (retries = GDT_RETRIES; ; ) {
        gccb->gc_service = service;
        gccb->gc_flags = GDT_GCF_INTERNAL;

        /* Claim the command area, then encode header fields. */
        gdt->sc_set_sema0(gdt);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
                  gccb->gc_cmd_index);
        gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);

        switch (service) {
        case GDT_CACHESERVICE:
            if (opcode == GDT_IOCTL) {
                gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_SUBFUNC, arg1);
                gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_CHANNEL, arg2);
                gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
                /* Parameters live in the gccb scratch area (bus addr). */
                gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                          gdt_ccb_vtop(gdt, gccb) +
                          offsetof(struct gdt_ccb, gc_scratch[0]));
            } else {
                gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
                          GDT_CACHE_DEVICENO, (u_int16_t)arg1);
                gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
                          GDT_CACHE_BLOCKNO, arg2);
            }
            break;

        case GDT_SCSIRAWSERVICE:
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
                      GDT_RAW_DIRECTION, arg1);
            gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
                (u_int8_t)arg2;
            gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
                (u_int8_t)arg3;
            /* lun is packed into the high byte of arg3 */
            gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
                (u_int8_t)(arg3 >> 8);
        }

        gdt->sc_cmd_len = GDT_CMD_SZ;
        gdt->sc_cmd_off = 0;
        gdt->sc_cmd_cnt = 0;
        /* Copy to DPMEM, ring the doorbell, then poll for completion. */
        gdt->sc_copy_cmd(gdt, gccb);
        gdt->sc_release_event(gdt);
        DELAY(20);
        if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
            return (0);
        /* Retry only while the firmware reports busy. */
        if (gdt->sc_status != GDT_S_BSY || --retries == 0)
            break;
        DELAY(1);
    }
    return (gdt->sc_status == GDT_S_OK);
}
645
646 static struct gdt_ccb *
647 gdt_get_ccb(struct gdt_softc *gdt)
648 {
649 struct gdt_ccb *gccb;
650 int lock;
651
652 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
653
654 lock = splcam();
655 gccb = SLIST_FIRST(&gdt->sc_free_gccb);
656 if (gccb != NULL) {
657 SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
658 SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
659 ++gdt_stat.cmd_index_act;
660 if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
661 gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
662 }
663 splx(lock);
664 return (gccb);
665 }
666
667 void
668 gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
669 {
670 int lock;
671
672 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
673
674 lock = splcam();
675 gccb->gc_flags = GDT_GCF_UNUSED;
676 SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
677 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
678 --gdt_stat.cmd_index_act;
679 splx(lock);
680 if (gdt->sc_state & GDT_SHUTDOWN)
681 wakeup(gccb);
682 }
683
684 static u_int32_t
685 gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
686 {
687 return (gdt->sc_gccb_busbase
688 + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
689 }
690
/*
 * Main dispatch loop: drain the queued user ioctls (sc_ucmd_queue)
 * and CAM SCSI requests (sc_ccb_queue) into the controller.
 *
 * Commands for non-virtual buses go through the raw SCSI service;
 * READ/WRITE on the virtual host-drive bus go through the cache
 * service; all other virtual-bus CDBs are emulated locally by
 * gdt_internal_cache_cmd().  If a builder returns NULL (DPMEM full or
 * no free command index) the request is requeued and the loop stops.
 * In polling mode only one command is issued per call and its
 * completion is awaited at the bottom.
 *
 * Runs at splcam; the builders temporarily drop the spl themselves.
 */
void
gdt_next(struct gdt_softc *gdt)
{
    int lock;
    union ccb *ccb;
    gdt_ucmd_t *ucmd;
    struct cam_sim *sim;
    int bus, target, lun;
    int next_cmd;

    struct ccb_scsiio *csio;
    struct ccb_hdr *ccbh;
    struct gdt_ccb *gccb = NULL;
    u_int8_t cmd;

    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));

    lock = splcam();
    if (gdt->sc_test_busy(gdt)) {
        /* When not polling, a busy controller means come back later. */
        if (!(gdt->sc_state & GDT_POLLING)) {
            splx(lock);
            return;
        }
        while (gdt->sc_test_busy(gdt))
            DELAY(1);
    }

    gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
    next_cmd = TRUE;
    for (;;) {
        /* I/Os in queue? controller ready? */
        if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
            !TAILQ_FIRST(&gdt->sc_ccb_queue))
            break;

        /* 1.: I/Os without ccb (IOCTLs) */
        ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
        if (ucmd != NULL) {
            TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
            if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
                /* Couldn't issue now: put it back at the head. */
                TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
                break;
            }
            break;
            /* if multiple commands were allowed: if (!gdt_polling) continue; */
        }

        /* 2.: I/Os with ccb */
        ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
        /* always != NULL here, since tested above */
        sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
        bus = cam_sim_bus(sim);
        target = ccb->ccb_h.target_id;
        lun = ccb->ccb_h.target_lun;

        TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
        --gdt_stat.req_queue_act;
        /* ccb->ccb_h.func_code is XPT_SCSI_IO */
        GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
                                  ccb->ccb_h.flags));
        csio = &ccb->csio;
        ccbh = &ccb->ccb_h;
        cmd = csio->cdb_io.cdb_bytes[0];
        /* Max CDB length is 12 bytes */
        if (csio->cdb_len > 12) {
            ccbh->status = CAM_REQ_INVALID;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
        } else if (bus != gdt->sc_virt_bus) {
            /* raw service command */
            if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
                /* Requeue and stop; DPMEM/index pressure. */
                TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
                                  sim_links.tqe);
                ++gdt_stat.req_queue_act;
                if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                    gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                next_cmd = FALSE;
            }
        } else if (target >= GDT_MAX_HDRIVES ||
                   !gdt->sc_hdr[target].hd_present || lun != 0) {
            ccbh->status = CAM_DEV_NOT_THERE;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
        } else {
            /* cache service command */
            if (cmd == READ_6 || cmd == WRITE_6 ||
                cmd == READ_10 || cmd == WRITE_10) {
                if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
                    TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
                                      sim_links.tqe);
                    ++gdt_stat.req_queue_act;
                    if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                        gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                    next_cmd = FALSE;
                }
            } else {
                /* Emulated locally (INQUIRY, READ CAPACITY, ...). */
                splx(lock);
                gdt_internal_cache_cmd(gdt, ccb);
                lock = splcam();
            }
        }
        if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
            break;
    }
    if (gdt->sc_cmd_cnt > 0)
        gdt->sc_release_event(gdt);

    splx(lock);

    /* In polling mode wait for the single issued command to finish. */
    if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
        gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
    }
}
804
/*
 * Encode a CAM SCSI CCB as a raw-service GDT_WRITE command in the
 * shared DPMEM command buffer and map its data for DMA.
 *
 * Returns the driver command block on success, NULL if the command
 * would not fit in DPMEM or no free command index exists (caller
 * requeues the CCB).  Called at splcam; the spl is dropped while the
 * data buffer is mapped and re-taken before returning (*lock is
 * updated accordingly).  The actual submission to the controller
 * happens in the gdtexecuteccb() DMA callback.
 */
static struct gdt_ccb *
gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
{
    struct gdt_ccb *gccb;
    struct cam_sim *sim;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));

    /* Make sure the encoded command fits in the remaining DPMEM. */
    if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
                                    gdt->sc_hanum));
        return (NULL);
    }

    bzero(gdt->sc_cmd, GDT_CMD_SZ);

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
                                    gdt->sc_hanum));
        return (gccb);
    }
    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
    gccb->gc_ccb = ccb;
    gccb->gc_service = GDT_SCSIRAWSERVICE;
    gccb->gc_flags = GDT_GCF_SCSI;

    /* First command of a batch claims the semaphore. */
    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);
    splx(*lock);
    gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);

    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
              (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
              GDT_DATA_IN : GDT_DATA_OUT);
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
              ccb->csio.dxfer_len);
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
              ccb->csio.cdb_len);
    bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
          ccb->csio.cdb_len);
    gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
        ccb->ccb_h.target_id;
    gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
        ccb->ccb_h.target_lun;
    gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
        cam_sim_bus(sim);
    /* Sense data is DMAed into the gccb scratch area. */
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
              sizeof(struct scsi_sense_data));
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
              gdt_ccb_vtop(gdt, gccb) +
              offsetof(struct gdt_ccb, gc_scratch[0]));

    /*
     * If we have any data to send with this command,
     * map it into bus space.
     */
    /* Only use S/G if there is a transfer */
    if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
            if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
                int s;
                int error;

                /* unlock splcam() before this??? (original note) */
                s = splsoftvm();
                error =
                    bus_dmamap_load(gdt->sc_buffer_dmat,
                                    gccb->gc_dmamap,
                                    ccb->csio.data_ptr,
                                    ccb->csio.dxfer_len,
                                    gdtexecuteccb,
                                    gccb, /*flags*/0);
                if (error == EINPROGRESS) {
                    /* Deferred mapping: freeze the SIM queue meanwhile. */
                    xpt_freeze_simq(sim, 1);
                    gccb->gc_state |= CAM_RELEASE_SIMQ;
                }
                splx(s);
            } else {
                struct bus_dma_segment seg;

                /* Pointer to physical buffer */
                seg.ds_addr =
                    (bus_addr_t)ccb->csio.data_ptr;
                seg.ds_len = ccb->csio.dxfer_len;
                gdtexecuteccb(gccb, &seg, 1, 0);
            }
        } else {
            struct bus_dma_segment *segs;

            if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
                panic("iir%d: iir_action - Physical "
                      "segment pointers unsupported", gdt->sc_hanum);

            if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
                panic("iir%d: iir_action - Virtual "
                      "segment addresses unsupported", gdt->sc_hanum);

            /* Just use the segments provided */
            segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
            gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
        }
    } else {
        /* No data phase: submit with an empty segment list. */
        gdtexecuteccb(gccb, NULL, 0, 0);
    }

    *lock = splcam();
    return (gccb);
}
918
/*
 * Encode a READ/WRITE CCB for a host drive as a cache-service command
 * in the shared DPMEM command buffer and map its data for DMA.
 *
 * The block number and count are decoded from the 6- or 10-byte CDB;
 * writes during shutdown are forced write-through (GDT_WRITE_THR).
 * Returns the driver command block on success, NULL if the command
 * would not fit in DPMEM or no free command index exists (caller
 * requeues the CCB).  Called at splcam; the spl is dropped while the
 * data buffer is mapped and re-taken before returning (*lock is
 * updated).  Submission happens in the gdtexecuteccb() DMA callback.
 */
static struct gdt_ccb *
gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
{
    struct gdt_ccb *gccb;
    struct cam_sim *sim;
    u_int8_t *cmdp;
    u_int16_t opcode;
    u_int32_t blockno, blockcnt;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));

    /* Make sure the encoded command fits in the remaining DPMEM. */
    if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
                                    gdt->sc_hanum));
        return (NULL);
    }

    bzero(gdt->sc_cmd, GDT_CMD_SZ);

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
                                  gdt->sc_hanum));
        return (gccb);
    }
    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
    gccb->gc_ccb = ccb;
    gccb->gc_service = GDT_CACHESERVICE;
    gccb->gc_flags = GDT_GCF_SCSI;

    /* First command of a batch claims the semaphore. */
    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);
    splx(*lock);
    gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    cmdp = ccb->csio.cdb_io.cdb_bytes;
    opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
    /* During shutdown force writes through the cache. */
    if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
        opcode = GDT_WRITE_THR;
    gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);

    gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
              ccb->ccb_h.target_id);
    if (ccb->csio.cdb_len == 6) {
        /* 6-byte CDB: 21-bit LBA, length 0 means 256 blocks. */
        struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
        blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
        blockcnt = rw->length ? rw->length : 0x100;
    } else {
        struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
        blockno = scsi_4btoul(rw->addr);
        blockcnt = scsi_2btoul(rw->length);
    }
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
              blockno);
    gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
              blockcnt);

    /*
     * If we have any data to send with this command,
     * map it into bus space.
     */
    /* Only use S/G if there is a transfer */
    if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
        if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
            int s;
            int error;

            /* unlock splcam() before this??? (original note) */
            s = splsoftvm();
            error =
                bus_dmamap_load(gdt->sc_buffer_dmat,
                                gccb->gc_dmamap,
                                ccb->csio.data_ptr,
                                ccb->csio.dxfer_len,
                                gdtexecuteccb,
                                gccb, /*flags*/0);
            if (error == EINPROGRESS) {
                /* Deferred mapping: freeze the SIM queue meanwhile. */
                xpt_freeze_simq(sim, 1);
                gccb->gc_state |= CAM_RELEASE_SIMQ;
            }
            splx(s);
        } else {
            struct bus_dma_segment seg;

            /* Pointer to physical buffer */
            seg.ds_addr =
                (bus_addr_t)ccb->csio.data_ptr;
            seg.ds_len = ccb->csio.dxfer_len;
            gdtexecuteccb(gccb, &seg, 1, 0);
        }
    } else {
        struct bus_dma_segment *segs;

        if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
            panic("iir%d: iir_action - Physical "
                  "segment pointers unsupported", gdt->sc_hanum);

        if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
            panic("iir%d: iir_action - Virtual "
                  "segment addresses unsupported", gdt->sc_hanum);

        /* Just use the segments provided */
        segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
        gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
    }

    *lock = splcam();
    return (gccb);
}
1030
/*
 * Build and issue a user (ioctl) command on the controller.
 *
 * The user data buffer is staged through the per-CCB scratch area
 * (gc_scratch), the command is encoded into the shared command buffer
 * (sc_cmd) and handed to the firmware via sc_copy_cmd().
 *
 * Called with *lock held at splcam(); the lock is dropped while the
 * command structure is filled in and re-taken before the copy-out.
 *
 * Returns the gdt_ccb used, or NULL when no free CCB is available,
 * the scratch buffer is too small, or DPMEM space is exhausted
 * (the caller re-queues and retries later).
 */
static struct gdt_ccb *
gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
{
    struct gdt_ccb *gccb;
    u_int32_t cnt;

    GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));

    bzero(gdt->sc_cmd, GDT_CMD_SZ);

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
                                  gdt->sc_hanum));
        return (gccb);
    }
    gccb->gc_ucmd = ucmd;
    gccb->gc_service = ucmd->service;
    gccb->gc_flags = GDT_GCF_IOCTL;

    /* check DPMEM space, copy data buffer from user space */
    if (ucmd->service == GDT_CACHESERVICE) {
        if (ucmd->OpCode == GDT_IOCTL) {
            /* generic firmware ioctl: payload size given by the caller */
            gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
                                      sizeof(u_int32_t));
            cnt = ucmd->u.ioctl.param_size;
            if (cnt > GDT_SCRATCH_SZ) {
                printf("iir%d: Scratch buffer too small (%d/%d)\n",
                       gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
                gdt_free_ccb(gdt, gccb);
                return (NULL);
            }
        } else {
            /* cache read/write: payload is whole sectors */
            gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                      GDT_SG_SZ, sizeof(u_int32_t));
            cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
            if (cnt > GDT_SCRATCH_SZ) {
                printf("iir%d: Scratch buffer too small (%d/%d)\n",
                       gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
                gdt_free_ccb(gdt, gccb);
                return (NULL);
            }
        }
    } else {
        /* raw service: data plus sense bytes share the scratch buffer */
        gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                  GDT_SG_SZ, sizeof(u_int32_t));
        cnt = ucmd->u.raw.sdlen;
        if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
            printf("iir%d: Scratch buffer too small (%d/%d)\n",
                   gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
            gdt_free_ccb(gdt, gccb);
            return (NULL);
        }
    }
    if (cnt != 0)
        bcopy(ucmd->data, gccb->gc_scratch, cnt);

    /* will the encoded command still fit into the DPMEM window? */
    if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
                                    gdt->sc_hanum));
        gdt_free_ccb(gdt, gccb);
        return (NULL);
    }

    /* first command of a batch sets the firmware semaphore */
    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);
    splx(*lock);

    /* fill cmd structure */
    gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
              ucmd->OpCode);

    if (ucmd->service == GDT_CACHESERVICE) {
        if (ucmd->OpCode == GDT_IOCTL) {
            /* IOCTL */
            gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
                      ucmd->u.ioctl.param_size);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
                      ucmd->u.ioctl.subfunc);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
                      ucmd->u.ioctl.channel);
            /* physical address of the scratch area inside this CCB */
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                      gdt_ccb_vtop(gdt, gccb) +
                      offsetof(struct gdt_ccb, gc_scratch[0]));
        } else {
            /* cache service command */
            gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
                      ucmd->u.cache.DeviceNo);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
                      ucmd->u.cache.BlockNo);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
                      ucmd->u.cache.BlockCnt);
            /* 0xffffffff destination selects scatter/gather mode */
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
                      0xffffffffUL);
            /* single SG element pointing at gc_scratch */
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
                      1);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
                      offsetof(struct gdt_ccb, gc_scratch[0]));
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
        }
    } else {
        /* raw service command */
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
                  ucmd->u.raw.direction);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
                  0xffffffffUL);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
                  ucmd->u.raw.sdlen);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
                  ucmd->u.raw.clen);
        /* CDB is at most 12 bytes for the raw service */
        bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
              12);
        gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
            ucmd->u.raw.target;
        gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
            ucmd->u.raw.lun;
        gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
            ucmd->u.raw.bus;
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
                  ucmd->u.raw.sense_len);
        /* sense bytes land right behind the sdlen data bytes in scratch */
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
                  gdt_ccb_vtop(gdt, gccb) +
                  offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
                  1);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                  GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
                  offsetof(struct gdt_ccb, gc_scratch[0]));
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                  GDT_SG_LEN, ucmd->u.raw.sdlen);
    }

    *lock = splcam();
    gdt_stat.sg_count_act = 1;
    gdt->sc_copy_cmd(gdt, gccb);
    return (gccb);
}
1173
/*
 * Emulate a handful of SCSI commands for the virtual (cache-service)
 * bus in software, without sending anything to the firmware: host
 * drives are not real SCSI targets, so INQUIRY / MODE SENSE /
 * READ CAPACITY answers are synthesized from the host-drive table
 * (gdt->sc_hdr[]).  The CCB is always completed with CAM_REQ_CMP.
 */
static void
gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
{
    int t;

    t = ccb->ccb_h.target_id;
    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
        gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));

    switch (ccb->csio.cdb_io.cdb_bytes[0]) {
      case TEST_UNIT_READY:
      case START_STOP:
        /* host drives are always ready; nothing to do */
        break;
      case REQUEST_SENSE:
        GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
        break;
      case INQUIRY:
        {
            struct scsi_inquiry_data *inq;

            inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
            bzero(inq, sizeof(struct scsi_inquiry_data));
            /* hd_devtype bit 2: CD-ROM, bit 0: removable */
            inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
                T_CDROM : T_DIRECT;
            inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
            inq->version = SCSI_REV_2;
            inq->response_format = 2;
            inq->additional_length = 32;
            inq->flags = SID_CmdQue | SID_Sync;
            strcpy(inq->vendor, gdt->oem_name);
            sprintf(inq->product, "Host Drive #%02d", t);
            strcpy(inq->revision, " ");
            break;
        }
      case MODE_SENSE_6:
        {
            struct mpd_data {
                struct scsi_mode_hdr_6 hd;
                struct scsi_mode_block_descr bd;
                struct scsi_control_page cp;
            } *mpd;
            u_int8_t page;

            mpd = (struct mpd_data *)ccb->csio.data_ptr;
            bzero(mpd, sizeof(struct mpd_data));
            mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
                sizeof(struct scsi_mode_block_descr);
            /* hd_devtype bit 1: write-protected drive */
            mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
            mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
            mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
            mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
            mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
            page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
            switch (page) {
              default:
                /* no page-specific data is filled in; header only */
                GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
                break;
            }
            break;
        }
      case READ_CAPACITY:
        {
            struct scsi_read_capacity_data *rcd;

            rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
            bzero(rcd, sizeof(struct scsi_read_capacity_data));
            /* READ CAPACITY reports the LAST addressable block */
            scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
            scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
            break;
        }
      default:
        GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
                                 ccb->csio.cdb_io.cdb_bytes[0]));
        break;
    }
    ccb->ccb_h.status = CAM_REQ_CMP;
    --gdt_stat.io_count_act;
    xpt_done(ccb);
}
1253
1254 static void
1255 gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1256 {
1257 bus_addr_t *busaddrp;
1258
1259 busaddrp = (bus_addr_t *)arg;
1260 *busaddrp = dm_segs->ds_addr;
1261 }
1262
/*
 * bus_dmamap_load() callback for SCSI I/O: copy the DMA segment list
 * into the scatter/gather list of the already-encoded command (cache
 * or raw service), sync the data buffer for DMA, mark the CCB queued,
 * arm the CAM timeout and hand the command to the firmware.
 */
static void
gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    struct gdt_ccb *gccb;
    union ccb *ccb;
    struct gdt_softc *gdt;
    int i, lock;

    lock = splcam();

    gccb = (struct gdt_ccb *)arg;
    ccb = gccb->gc_ccb;
    gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);

    GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
                            gdt, gccb, dm_segs, nseg, error));
    gdt_stat.sg_count_act = nseg;
    if (nseg > gdt_stat.sg_count_max)
        gdt_stat.sg_count_max = nseg;

    /* Copy the segments into our SG list */
    if (gccb->gc_service == GDT_CACHESERVICE) {
        for (i = 0; i < nseg; ++i) {
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
            dm_segs++;
        }
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
                  nseg);
        /* 0xffffffff destination selects scatter/gather mode */
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
                  0xffffffffUL);

        /* final command length depends on the SG element count */
        gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
    } else {
        for (i = 0; i < nseg; ++i) {
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
            dm_segs++;
        }
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
                  nseg);
        gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
                  0xffffffffUL);

        gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
    }

    if (nseg != 0) {
        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
                        (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
                        BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    }

    /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
     * because command semaphore is already set!
     */

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    /* timeout handling */
    ccb->ccb_h.timeout_ch =
        timeout(iir_timeout, (caddr_t)gccb,
                (ccb->ccb_h.timeout * hz) / 1000);

    gdt->sc_copy_cmd(gdt, gccb);
    splx(lock);
}
1335
1336
/*
 * CAM SIM action entry point: dispatch a CCB from the transport layer.
 * XPT_SCSI_IO requests are queued and drained by gdt_next(); most other
 * function codes are answered inline.  Per-request accounting is kept
 * in gdt_stat (io_count_act must be decremented on every inline
 * completion path).
 */
static void
iir_action( struct cam_sim *sim, union ccb *ccb )
{
    struct gdt_softc *gdt;
    int lock, bus, target, lun;

    gdt = (struct gdt_softc *)cam_sim_softc( sim );
    ccb->ccb_h.ccb_sim_ptr = sim;
    bus = cam_sim_bus(sim);
    target = ccb->ccb_h.target_id;
    lun = ccb->ccb_h.target_lun;
    GDT_DPRINTF(GDT_D_CMD,
                ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
                 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
                 bus, target, lun));
    ++gdt_stat.io_count_act;
    if (gdt_stat.io_count_act > gdt_stat.io_count_max)
        gdt_stat.io_count_max = gdt_stat.io_count_act;

    switch (ccb->ccb_h.func_code) {
      case XPT_SCSI_IO:
        /* queue the request; gdt_next() starts it when resources allow */
        lock = splcam();
        TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
        ++gdt_stat.req_queue_act;
        if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
            gdt_stat.req_queue_max = gdt_stat.req_queue_act;
        splx(lock);
        gdt_next(gdt);
        break;
      case XPT_RESET_DEV:   /* Bus Device Reset the specified SCSI device */
      case XPT_ABORT:           /* Abort the specified CCB */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_SET_TRAN_SETTINGS:
        /* transfer settings are fixed by the firmware; not negotiable */
        ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_GET_TRAN_SETTINGS:
        /* Get default/user set transfer settings for the target */
        {
            struct ccb_trans_settings *cts;
            u_int    target_mask;

            cts = &ccb->cts;
            target_mask = 0x01 << target;
            if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
                /* report fixed wide/sync/tagged capabilities */
                cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
                cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                cts->sync_period = 25; /* 10MHz */
                if (cts->sync_period != 0)
                    cts->sync_offset = 15;

                cts->valid = CCB_TRANS_SYNC_RATE_VALID
                    | CCB_TRANS_SYNC_OFFSET_VALID
                    | CCB_TRANS_BUS_WIDTH_VALID
                    | CCB_TRANS_DISC_VALID
                    | CCB_TRANS_TQ_VALID;
                ccb->ccb_h.status = CAM_REQ_CMP;
            } else {
                ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            }
            --gdt_stat.io_count_act;
            xpt_done(ccb);
            break;
        }
      case XPT_CALC_GEOMETRY:
        {
            struct ccb_calc_geometry *ccg;
            u_int32_t secs_per_cylinder;

            /* geometry comes from the firmware's host-drive table */
            ccg = &ccb->ccg;
            ccg->heads = gdt->sc_hdr[target].hd_heads;
            ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
            secs_per_cylinder = ccg->heads * ccg->secs_per_track;
            ccg->cylinders = ccg->volume_size / secs_per_cylinder;
            ccb->ccb_h.status = CAM_REQ_CMP;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
            break;
        }
      case XPT_RESET_BUS:       /* Reset the specified SCSI bus */
        {
            /* XXX Implement */
            ccb->ccb_h.status = CAM_REQ_CMP;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
            break;
        }
      case XPT_TERM_IO:     /* Terminate the I/O process */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_PATH_INQ:        /* Path routing inquiry */
        {
            struct ccb_pathinq *cpi = &ccb->cpi;

            cpi->version_num = 1;
            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
            cpi->hba_inquiry |= PI_WIDE_16;
            cpi->target_sprt = 1;
            cpi->hba_misc = 0;
            cpi->hba_eng_cnt = 0;
            /* virtual bus exposes host drives; raw buses real devices */
            if (bus == gdt->sc_virt_bus)
                cpi->max_target = GDT_MAX_HDRIVES - 1;
            else if (gdt->sc_class & GDT_FC)
                cpi->max_target = GDT_MAXID_FC - 1;
            else
                cpi->max_target = GDT_MAXID - 1;
            cpi->max_lun = 7;
            cpi->unit_number = cam_sim_unit(sim);
            cpi->bus_id = bus;
            cpi->initiator_id =
                (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
            cpi->base_transfer_speed = 3300;
            strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
            strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
            strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
            cpi->ccb_h.status = CAM_REQ_CMP;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
            break;
        }
      default:
        GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
                                    gdt, ccb->ccb_h.func_code));
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
    }
}
1474
1475 static void
1476 iir_poll( struct cam_sim *sim )
1477 {
1478 struct gdt_softc *gdt;
1479
1480 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1481 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1482 iir_intr(gdt);
1483 }
1484
/*
 * Per-command CAM timeout handler, armed in gdtexecuteccb().
 * Currently a stub: it only logs the event; no abort/recovery is
 * attempted, the command is left to complete on its own.
 */
static void
iir_timeout(void *arg)
{
    GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
}
1490
1491 static void
1492 iir_watchdog(void *arg)
1493 {
1494 struct gdt_softc *gdt;
1495
1496 gdt = (struct gdt_softc *)arg;
1497 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1498
1499 {
1500 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1501 struct gdt_ccb *p;
1502 struct ccb_hdr *h;
1503 struct gdt_ucmd *u;
1504
1505 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1506 h = TAILQ_NEXT(h, sim_links.tqe))
1507 ccbs++;
1508 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1509 u = TAILQ_NEXT(u, links))
1510 ucmds++;
1511 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1512 p = SLIST_NEXT(p, sle))
1513 frees++;
1514 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1515 p = SLIST_NEXT(p, sle))
1516 pends++;
1517
1518 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1519 ccbs, ucmds, frees, pends));
1520 }
1521
1522 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1523 }
1524
/*
 * Shutdown event handler: flush the write cache of every present host
 * drive before the system goes down.  Marks the softc GDT_SHUTDOWN so
 * no new work is started, waits (bounded) for pending commands, then
 * issues one synchronous GDT_FLUSH per host drive via the user-command
 * queue, sleeping on the ucmd until the interrupt path completes it.
 */
static void
iir_shutdown( void *arg, int howto )
{
    struct gdt_softc *gdt;
    struct gdt_ccb *gccb;
    gdt_ucmd_t *ucmd;
    int lock, i;

    gdt = (struct gdt_softc *)arg;
    GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));

    printf("iir%d: Flushing all Host Drives. Please wait ... ",
           gdt->sc_hanum);

    /* allocate ucmd buffer */
    ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
    if (ucmd == NULL) {
        printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
               gdt->sc_hanum);
        return;
    }
    bzero(ucmd, sizeof(gdt_ucmd_t));

    /* wait for pending IOs */
    lock = splcam();
    gdt->sc_state = GDT_SHUTDOWN;
    splx(lock);
    /* bounded wait (100 s) on the first pending CCB, if any */
    if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
        (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);

    /* flush */
    for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
        if (gdt->sc_hdr[i].hd_present) {
            ucmd->service = GDT_CACHESERVICE;
            ucmd->OpCode = GDT_FLUSH;
            ucmd->u.cache.DeviceNo = i;
            lock = splcam();
            TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
            ucmd->complete_flag = FALSE;
            splx(lock);
            gdt_next(gdt);
            /* iir_intr() sets complete_flag and wakes us */
            if (!ucmd->complete_flag)
                (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
        }
    }

    free(ucmd, M_DEVBUF);
    printf("Done.\n");
}
1574
/*
 * Controller interrupt handler (also called synchronously from
 * iir_poll()/gdt_wait() when polling).  Reads the status index from
 * the controller, then dispatches:
 *   GDT_ASYNCINDEX  - unsolicited async event,
 *   GDT_SPEZINDEX   - service unknown/uninitialized (logged as event),
 *   otherwise       - completion of the CCB at sc_gccbs[istatus - 2]
 *                     (indices 0 and 1 are reserved for the two cases
 *                     above), handled according to its gc_flags.
 */
void
iir_intr(void *arg)
{
    struct gdt_softc *gdt = arg;
    struct gdt_intr_ctx ctx;
    int lock = 0;
    struct gdt_ccb *gccb;
    gdt_ucmd_t *ucmd;
    u_int32_t cnt;

    GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));

    /* If polling and we were not called from gdt_wait, just return */
    if ((gdt->sc_state & GDT_POLLING) &&
        !(gdt->sc_state & GDT_POLL_WAIT))
        return;

    if (!(gdt->sc_state & GDT_POLLING))
        lock = splcam();
    gdt_wait_index = 0;

    ctx.istatus = gdt->sc_get_status(gdt);
    if (!ctx.istatus) {
        /* spurious interrupt: no status pending */
        if (!(gdt->sc_state & GDT_POLLING))
            splx(lock);
        gdt->sc_status = GDT_S_NO_STATUS;
        return;
    }

    /* acknowledge and fetch status/info registers into ctx */
    gdt->sc_intr(gdt, &ctx);

    gdt->sc_status = ctx.cmd_status;
    gdt->sc_service = ctx.service;
    gdt->sc_info = ctx.info;
    gdt->sc_info2 = ctx.info2;

    if (gdt->sc_state & GDT_POLL_WAIT) {
        /* let gdt_wait() know which command completed */
        gdt_wait_gdt = gdt;
        gdt_wait_index = ctx.istatus;
    }

    if (ctx.istatus == GDT_ASYNCINDEX) {
        gdt_async_event(gdt, ctx.service);
        if (!(gdt->sc_state & GDT_POLLING))
            splx(lock);
        return;
    }
    if (ctx.istatus == GDT_SPEZINDEX) {
        GDT_DPRINTF(GDT_D_INVALID,
                    ("iir%d: Service unknown or not initialized!\n",
                     gdt->sc_hanum));
        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
        gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
        if (!(gdt->sc_state & GDT_POLLING))
            splx(lock);
        return;
    }

    /* command indices start at 2; 0/1 are the special cases above */
    gccb = &gdt->sc_gccbs[ctx.istatus - 2];
    ctx.service = gccb->gc_service;

    switch (gccb->gc_flags) {
      case GDT_GCF_UNUSED:
        GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
                                    gdt->sc_hanum, ctx.istatus));
        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
        gdt->sc_dvr.eu.driver.index = ctx.istatus;
        gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
        gdt_free_ccb(gdt, gccb);
        /* fallthrough */

      case GDT_GCF_INTERNAL:
        if (!(gdt->sc_state & GDT_POLLING))
            splx(lock);
        break;

      case GDT_GCF_IOCTL:
        ucmd = gccb->gc_ucmd;
        if (gdt->sc_status == GDT_S_BSY) {
            /* firmware busy: re-queue at the head for retry */
            GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
                                      gdt, gccb));
            TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
            if (!(gdt->sc_state & GDT_POLLING))
                splx(lock);
        } else {
            ucmd->status = gdt->sc_status;
            ucmd->info = gdt->sc_info;
            ucmd->complete_flag = TRUE;
            if (ucmd->service == GDT_CACHESERVICE) {
                if (ucmd->OpCode == GDT_IOCTL) {
                    cnt = ucmd->u.ioctl.param_size;
                    if (cnt != 0)
                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
                } else {
                    cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
                    if (cnt != 0)
                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
                }
            } else {
                cnt = ucmd->u.raw.sdlen;
                if (cnt != 0)
                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
                if (ucmd->u.raw.sense_len != 0)
                    /* NOTE(review): this repeats the identical copy above
                     * (same source offset 0, same length 'cnt'), yet
                     * gdt_ioctl_cmd() told the firmware to deposit sense
                     * bytes at gc_scratch[sdlen].  Looks like the source
                     * offset/length here are wrong — verify against the
                     * gdt_ucmd_t layout before changing. */
                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
            }
            gdt_free_ccb(gdt, gccb);
            if (!(gdt->sc_state & GDT_POLLING))
                splx(lock);
            /* wakeup */
            wakeup(ucmd);
        }
        gdt_next(gdt);
        break;

      default:
        gdt_free_ccb(gdt, gccb);
        gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
        if (!(gdt->sc_state & GDT_POLLING))
            splx(lock);
        gdt_next(gdt);
        break;
    }
}
1700
/*
 * Handle an unsolicited (async) event from the firmware.
 *
 * Screen service: a GDT_MSG_REQUEST means the firmware has console
 * text for us — issue a GDT_READ screen command so it can deposit the
 * message into a fresh CCB's scratch buffer (printed later in
 * gdt_sync_event()).  Any other service: record the event in the
 * global event buffer and log it.
 *
 * Returns 1 if no CCB could be allocated, 0 otherwise.
 */
int
gdt_async_event(struct gdt_softc *gdt, int service)
{
    struct gdt_ccb *gccb;

    GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));

    if (service == GDT_SCREENSERVICE) {
        if (gdt->sc_status == GDT_MSG_REQUEST) {
            /* busy-wait until the firmware accepts a new command */
            while (gdt->sc_test_busy(gdt))
                DELAY(1);
            bzero(gdt->sc_cmd, GDT_CMD_SZ);
            gccb = gdt_get_ccb(gdt);
            if (gccb == NULL) {
                printf("iir%d: No free command index found\n",
                       gdt->sc_hanum);
                return (1);
            }
            gccb->gc_service = service;
            gccb->gc_flags = GDT_GCF_SCREEN;
            gdt->sc_set_sema0(gdt);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
                      gccb->gc_cmd_index);
            gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                      GDT_MSG_INV_HANDLE);
            /* firmware writes the message into this CCB's scratch area */
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                      gdt_ccb_vtop(gdt, gccb) +
                      offsetof(struct gdt_ccb, gc_scratch[0]));
            gdt->sc_cmd_off = 0;
            gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
                                      sizeof(u_int32_t));
            gdt->sc_cmd_cnt = 0;
            gdt->sc_copy_cmd(gdt, gccb);
            printf("iir%d: [PCI %d/%d] ",
                   gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
            gdt->sc_release_event(gdt);
        }

    } else {
        /* firmware >= 0x1a delivers severity/event_string itself */
        if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
            gdt->sc_dvr.size = 0;
            gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
            gdt->sc_dvr.eu.async.status = gdt->sc_status;
            /* severity and event_string already set! */
        } else {
            gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
            gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
            gdt->sc_dvr.eu.async.service = service;
            gdt->sc_dvr.eu.async.status = gdt->sc_status;
            gdt->sc_dvr.eu.async.info = gdt->sc_info;
            *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
        }
        gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
        printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
    }

    return (0);
}
1760
/*
 * Handle the completion of a command we issued.
 *
 * Screen service: print the message text the firmware deposited in the
 * CCB scratch buffer, then continue the console dialog — issue a
 * follow-up GDT_READ if more text is pending, or a GDT_WRITE carrying a
 * canned default answer if the firmware asked a question (interactive
 * getchar() is impossible here).
 *
 * Other services: stop the CAM timeout and complete the CCB, mapping
 * the firmware status to CAM status (synthesizing NOT READY sense for
 * cache-service errors, copying real sense bytes for raw-service
 * check conditions).
 *
 * Returns 0 normally, 1 if no CCB was free for a screen follow-up,
 * 2 if the firmware was busy and the request was re-queued.
 */
int
gdt_sync_event(struct gdt_softc *gdt, int service,
               u_int8_t index, struct gdt_ccb *gccb)
{
    union ccb *ccb;

    GDT_DPRINTF(GDT_D_INTR,
                ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));

    ccb = gccb->gc_ccb;

    if (service == GDT_SCREENSERVICE) {
        u_int32_t msg_len;

        msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
        if (msg_len)
            /* print only plain, complete messages */
            if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
                  gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
                gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
                printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
            }

        if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
            !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
            /* more text pending: read the continuation */
            while (gdt->sc_test_busy(gdt))
                DELAY(1);
            bzero(gdt->sc_cmd, GDT_CMD_SZ);
            gccb = gdt_get_ccb(gdt);
            if (gccb == NULL) {
                printf("iir%d: No free command index found\n",
                       gdt->sc_hanum);
                return (1);
            }
            gccb->gc_service = service;
            gccb->gc_flags = GDT_GCF_SCREEN;
            gdt->sc_set_sema0(gdt);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
                      gccb->gc_cmd_index);
            gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                      gdt_ccb_vtop(gdt, gccb) +
                      offsetof(struct gdt_ccb, gc_scratch[0]));
            gdt->sc_cmd_off = 0;
            gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
                                      sizeof(u_int32_t));
            gdt->sc_cmd_cnt = 0;
            gdt->sc_copy_cmd(gdt, gccb);
            gdt->sc_release_event(gdt);
            return (0);
        }

        if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
            gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
            /* default answers (getchar() not possible) */
            if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
            } else {
                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
                          gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
                gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
            }
            gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
            gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
            while (gdt->sc_test_busy(gdt))
                DELAY(1);
            bzero(gdt->sc_cmd, GDT_CMD_SZ);
            gccb = gdt_get_ccb(gdt);
            if (gccb == NULL) {
                printf("iir%d: No free command index found\n",
                       gdt->sc_hanum);
                return (1);
            }
            gccb->gc_service = service;
            gccb->gc_flags = GDT_GCF_SCREEN;
            gdt->sc_set_sema0(gdt);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
                      gccb->gc_cmd_index);
            gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
            gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
                      gdt_ccb_vtop(gdt, gccb) +
                      offsetof(struct gdt_ccb, gc_scratch[0]));
            gdt->sc_cmd_off = 0;
            gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
                                      sizeof(u_int32_t));
            gdt->sc_cmd_cnt = 0;
            gdt->sc_copy_cmd(gdt, gccb);
            gdt->sc_release_event(gdt);
            return (0);
        }
        printf("\n");
        return (0);
    } else {
        /* normal command completion */
        untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
        if (gdt->sc_status == GDT_S_BSY) {
            /* firmware busy: put the request back at the queue head */
            GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
                                      gdt, gccb));
            TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
            ++gdt_stat.req_queue_act;
            if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                gdt_stat.req_queue_max = gdt_stat.req_queue_act;
            return (2);
        }

        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
                        (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
                        BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

        ccb->csio.resid = 0;
        if (gdt->sc_status == GDT_S_OK) {
            ccb->ccb_h.status = CAM_REQ_CMP;
        } else {
            /* error */
            if (gccb->gc_service == GDT_CACHESERVICE) {
                /* host drive error: synthesize NOT READY sense data */
                ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
                ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
                ccb->csio.sense_data.error_code =
                    SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
                ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;

                gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
                gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
                gdt->sc_dvr.eu.sync.service = service;
                gdt->sc_dvr.eu.sync.status = gdt->sc_status;
                gdt->sc_dvr.eu.sync.info = gdt->sc_info;
                gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
                if (gdt->sc_status >= 0x8000)
                    gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
                else
                    gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
            } else {
                /* raw service */
                if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
                    ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                } else {
                    /* real check condition: sense bytes are in scratch */
                    ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
                    ccb->csio.scsi_status = gdt->sc_info;
                    bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
                          ccb->csio.sense_len);
                }
            }
        }
        --gdt_stat.io_count_act;
        xpt_done(ccb);
    }
    return (0);
}
1916
1917 /* Controller event handling functions */
1918 gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
1919 gdt_evt_data *evt)
1920 {
1921 gdt_evt_str *e;
1922 struct timeval tv;
1923
1924 GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1925 if (source == 0) /* no source -> no event */
1926 return 0;
1927
1928 if (ebuffer[elastidx].event_source == source &&
1929 ebuffer[elastidx].event_idx == idx &&
1930 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1931 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1932 (char *)&evt->eu, evt->size)) ||
1933 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1934 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1935 (char *)&evt->event_string)))) {
1936 e = &ebuffer[elastidx];
1937 getmicrotime(&tv);
1938 e->last_stamp = tv.tv_sec;
1939 ++e->same_count;
1940 } else {
1941 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
1942 ++elastidx;
1943 if (elastidx == GDT_MAX_EVENTS)
1944 elastidx = 0;
1945 if (elastidx == eoldidx) { /* reached mark ? */
1946 ++eoldidx;
1947 if (eoldidx == GDT_MAX_EVENTS)
1948 eoldidx = 0;
1949 }
1950 }
1951 e = &ebuffer[elastidx];
1952 e->event_source = source;
1953 e->event_idx = idx;
1954 getmicrotime(&tv);
1955 e->first_stamp = e->last_stamp = tv.tv_sec;
1956 e->same_count = 1;
1957 e->event_data = *evt;
1958 e->application = 0;
1959 }
1960 return e;
1961 }
1962
1963 int gdt_read_event(int handle, gdt_evt_str *estr)
1964 {
1965 gdt_evt_str *e;
1966 int eindex, lock;
1967
1968 GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1969 lock = splcam();
1970 if (handle == -1)
1971 eindex = eoldidx;
1972 else
1973 eindex = handle;
1974 estr->event_source = 0;
1975
1976 if (eindex >= GDT_MAX_EVENTS) {
1977 splx(lock);
1978 return eindex;
1979 }
1980 e = &ebuffer[eindex];
1981 if (e->event_source != 0) {
1982 if (eindex != elastidx) {
1983 if (++eindex == GDT_MAX_EVENTS)
1984 eindex = 0;
1985 } else {
1986 eindex = -1;
1987 }
1988 memcpy(estr, e, sizeof(gdt_evt_str));
1989 }
1990 splx(lock);
1991 return eindex;
1992 }
1993
1994 void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1995 {
1996 gdt_evt_str *e;
1997 int found = FALSE;
1998 int eindex, lock;
1999
2000 GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
2001 lock = splcam();
2002 eindex = eoldidx;
2003 for (;;) {
2004 e = &ebuffer[eindex];
2005 if (e->event_source == 0)
2006 break;
2007 if ((e->application & application) == 0) {
2008 e->application |= application;
2009 found = TRUE;
2010 break;
2011 }
2012 if (eindex == elastidx)
2013 break;
2014 if (++eindex == GDT_MAX_EVENTS)
2015 eindex = 0;
2016 }
2017 if (found)
2018 memcpy(estr, e, sizeof(gdt_evt_str));
2019 else
2020 estr->event_source = 0;
2021 splx(lock);
2022 }
2023
2024 void gdt_clear_events()
2025 {
2026 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2027
2028 eoldidx = elastidx = 0;
2029 ebuffer[0].event_source = 0;
2030 }
Cache object: 8c9242a011d05ef329d37fb77617ec4c
|