/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 HighPoint Technologies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/hpt27xx/hpt27xx_config.h>

#include <dev/hpt27xx/os_bsd.h>
#include <dev/hpt27xx/hptintf.h>

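/*
 * hpt_match: walk the global him_list and return the first HIM whose
 * supported PCI vendor/device IDs match this device.  When "scan" is set,
 * also call get_controller_count() for each supported ID.
 */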
static HIM *hpt_match(device_t dev, int scan)
{
    PCI_ID pci_id;
    HIM *him;
    int i;

    for (him = him_list; him; him = him->next) {
        for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
            if (scan && him->get_controller_count)
                him->get_controller_count(&pci_id,0,0);
            if ((pci_get_vendor(dev) == pci_id.vid) &&
                (pci_get_device(dev) == pci_id.did)) {
                return (him);
            }
        }
    }
    return (NULL);
}

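/*
 * Device probe method: report a match if any registered HIM supports
 * this PCI ID.
 */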
static int hpt_probe(device_t dev)
{
    HIM *him;

    him = hpt_match(dev, 0);
    if (him != NULL) {
        KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
            pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
            ));
        device_set_desc(dev, him->name);
        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}

static int hpt_attach(device_t dev)
{
    PHBA hba = (PHBA)device_get_softc(dev);
    HIM *him;
    PCI_ID pci_id;
    HPT_UINT size;
    PVBUS vbus;
    PVBUS_EXT vbus_ext;

    KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

    him = hpt_match(dev, 1);
    hba->ext_type = EXT_TYPE_HBA;
    hba->ldm_adapter.him = him;
    pci_enable_busmaster(dev);

    pci_id.vid = pci_get_vendor(dev);
    pci_id.did = pci_get_device(dev);
    pci_id.rev = pci_get_revid(dev);
    pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

    size = him->get_adapter_size(&pci_id);
    hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
    if (!hba->ldm_adapter.him_handle)
        return ENXIO;

    hba->pcidev = dev;
    hba->pciaddr.tree = 0;
    hba->pciaddr.bus = pci_get_bus(dev);
    hba->pciaddr.device = pci_get_slot(dev);
    hba->pciaddr.function = pci_get_function(dev);

    if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
        free(hba->ldm_adapter.him_handle, M_DEVBUF);
        return ENXIO;
    }

    os_printk("adapter at PCI %d:%d:%d, IRQ %d",
        hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

    if (!ldm_register_adapter(&hba->ldm_adapter)) {
        size = ldm_get_vbus_size();
        vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
        if (!vbus_ext) {
            free(hba->ldm_adapter.him_handle, M_DEVBUF);
            return ENXIO;
        }
        memset(vbus_ext, 0, sizeof(VBUS_EXT));
        vbus_ext->ext_type = EXT_TYPE_VBUS;
        ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
        ldm_register_adapter(&hba->ldm_adapter);
    }

    ldm_for_each_vbus(vbus, vbus_ext) {
        if (hba->ldm_adapter.vbus==vbus) {
            hba->vbus_ext = vbus_ext;
            hba->next = vbus_ext->hba_list;
            vbus_ext->hba_list = hba;
            break;
        }
    }
    return 0;
}

/*
 * It might be better to use bus_dmamem_alloc() to allocate DMA memory,
 * but there are currently some problems with that (alignment, etc.).
 */
static __inline void *__get_free_pages(int order)
{
    /* don't use low memory - other devices may get starved */
    return contigmalloc(PAGE_SIZE<<order,
        M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
    contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

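/*
 * hpt_alloc_mem: pre-allocate the fixed-size blocks requested by the
 * HIMs and the LDM (normal freelists, DMA freelists backed by contiguous
 * pages, and the DMA page pool used for cache pages).
 */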
static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
    PHBA hba;
    struct freelist *f;
    HPT_UINT i;
    void **p;

    for (hba = vbus_ext->hba_list; hba; hba = hba->next)
        hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

    ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

    for (f=vbus_ext->freelist_head; f; f=f->next) {
        KdPrint(("%s: %d*%d=%d bytes",
            f->tag, f->count, f->size, f->count*f->size));
        for (i=0; i<f->count; i++) {
            p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
            if (!p) return (ENXIO);
            *p = f->head;
            f->head = p;
        }
    }

    for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
        int order, size, j;

        HPT_ASSERT((f->size & (f->alignment-1))==0);

        for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
            ;

        KdPrint(("%s: %d*%d=%d bytes, order %d",
            f->tag, f->count, f->size, f->count*f->size, order));
        HPT_ASSERT(f->alignment<=PAGE_SIZE);

        for (i=0; i<f->count;) {
            p = (void **)__get_free_pages(order);
            if (!p) return -1;
            for (j = size/f->size; j && i<f->count; i++,j--) {
                *p = f->head;
                *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
                f->head = p;
                p = (void **)((unsigned long)p + f->size);
            }
        }
    }

    HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

    for (i=0; i<os_max_cache_pages; i++) {
        p = (void **)__get_free_pages(0);
        if (!p) return -1;
        HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
        dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
    }

    return 0;
}

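/*
 * hpt_free_mem: release everything hpt_alloc_mem() set up.  Blocks from
 * the DMA freelists that share a page are returned through the page pool
 * and freed once the whole page comes back.
 */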
static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
    struct freelist *f;
    void *p;
    int i;
    BUS_ADDRESS bus;

    for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
        if (f->count!=f->reserved_count) {
            KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
        }
#endif
        while ((p=freelist_get(f)))
            free(p, M_DEVBUF);
    }

    for (i=0; i<os_max_cache_pages; i++) {
        p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
        HPT_ASSERT(p);
        free_pages(p, 0);
    }

    for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
        int order, size;
#if DBG
        if (f->count!=f->reserved_count) {
            KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
        }
#endif
        for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
            ;

        while ((p=freelist_get_dma(f, &bus))) {
            if (order)
                free_pages(p, order);
            else {
                /* can't free immediately since other blocks in this page may still be in the list */
                if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
                    dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
            }
        }
    }

    while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
        free_pages(p, 0);
}

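/*
 * Initialize every adapter on the virtual bus, then let the LDM bring
 * the vbus itself up.
 */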
static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
    PHBA hba;

    for (hba = vbus_ext->hba_list; hba; hba = hba->next)
        if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
            KdPrint(("failed to initialize %p", hba));
            return -1;
        }

    ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
    return 0;
}

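/*
 * Completion callback for the flush command issued by hpt_flush_vdev().
 * If the array is being transformed, forward the flush to the transform
 * target first; otherwise flag completion and wake the waiter.
 */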
static void hpt_flush_done(PCOMMAND pCmd)
{
    PVDEV vd = pCmd->target;

    if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
        vd = vd->u.array.transform->target;
        HPT_ASSERT(vd);
        pCmd->target = vd;
        pCmd->Result = RETURN_PENDING;
        vdev_queue_cmd(pCmd);
        return;
    }

    *(int *)pCmd->priv = 1;
    wakeup(pCmd);
}

/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
    PCOMMAND pCmd;
    int result = 0, done;
    HPT_UINT count;

    KdPrint(("flushing dev %p", vd));

    hpt_lock_vbus(vbus_ext);

    if (mIsArray(vd->type) && vd->u.array.transform)
        count = max(vd->u.array.transform->source->cmds_per_request,
                vd->u.array.transform->target->cmds_per_request);
    else
        count = vd->cmds_per_request;

    pCmd = ldm_alloc_cmds(vd->vbus, count);

    if (!pCmd) {
        hpt_unlock_vbus(vbus_ext);
        return -1;
    }

    pCmd->type = CMD_TYPE_FLUSH;
    pCmd->flags.hard_flush = 1;
    pCmd->target = vd;
    pCmd->done = hpt_flush_done;
    done = 0;
    pCmd->priv = &done;

    ldm_queue_cmd(pCmd);

    if (!done) {
        while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
            ldm_reset_vbus(vd->vbus);
        }
    }

    KdPrint(("flush result %d", pCmd->Result));

    if (pCmd->Result!=RETURN_SUCCESS)
        result = -1;

    ldm_free_cmds(pCmd);

    hpt_unlock_vbus(vbus_ext);

    return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
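/*
 * Shutdown handler for a virtual bus: stop background tasks, flush and
 * shut down all devices, then tear down interrupts and free all resources.
 */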
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
    PVBUS vbus = (PVBUS)vbus_ext->vbus;
    PHBA hba;
    int i;

    KdPrint(("hpt_shutdown_vbus"));

    /* stop all ctl tasks and disable the worker taskqueue */
    hpt_stop_tasks(vbus_ext);
    vbus_ext->worker.ta_context = 0;

    /* flush devices */
    for (i=0; i<osm_max_targets; i++) {
        PVDEV vd = ldm_find_target(vbus, i);
        if (vd) {
            /* retry once */
            if (hpt_flush_vdev(vbus_ext, vd))
                hpt_flush_vdev(vbus_ext, vd);
        }
    }

    hpt_lock_vbus(vbus_ext);
    ldm_shutdown(vbus);
    hpt_unlock_vbus(vbus_ext);

    ldm_release_vbus(vbus);

    for (hba=vbus_ext->hba_list; hba; hba=hba->next)
        bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

    hpt_free_mem(vbus_ext);

    while ((hba=vbus_ext->hba_list)) {
        vbus_ext->hba_list = hba->next;
        free(hba->ldm_adapter.him_handle, M_DEVBUF);
    }
    callout_drain(&vbus_ext->timer);
    mtx_destroy(&vbus_ext->lock);
    free(vbus_ext, M_DEVBUF);
    KdPrint(("hpt_shutdown_vbus done"));
}

static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
    OSM_TASK *tasks;

    tasks = vbus_ext->tasks;
    vbus_ext->tasks = 0;

    while (tasks) {
        OSM_TASK *t = tasks;
        tasks = t->next;
        t->next = 0;
        t->func(vbus_ext->vbus, t->data);
    }
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
    if(vbus_ext){
        hpt_lock_vbus(vbus_ext);
        __hpt_do_tasks(vbus_ext);
        hpt_unlock_vbus(vbus_ext);
    }
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
    POS_CMDEXT p = vbus_ext->cmdext_list;
    if (p)
        vbus_ext->cmdext_list = p->next;
    return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
    p->next = p->vbus_ext->cmdext_list;
    p->vbus_ext->cmdext_list = p;
}

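/* I/O timeout handler: reset the whole virtual bus to recover the command. */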
static void hpt_timeout(void *arg)
{
    PCOMMAND pCmd = (PCOMMAND)arg;
    POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

    KdPrint(("pCmd %p timeout", pCmd));

    ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

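/*
 * Command completion: translate the HIM result code into a CAM status,
 * sync/unload the DMA map and complete the CCB.
 */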
static void os_cmddone(PCOMMAND pCmd)
{
    POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
    union ccb *ccb = ext->ccb;

    KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result));
    callout_stop(&ext->timeout);
    switch(pCmd->Result) {
    case RETURN_SUCCESS:
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    case RETURN_BAD_DEVICE:
        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
        break;
    case RETURN_DEVICE_BUSY:
        ccb->ccb_h.status = CAM_BUSY;
        break;
    case RETURN_INVALID_REQUEST:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    case RETURN_SELECTION_TIMEOUT:
        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
        break;
    case RETURN_RETRY:
        ccb->ccb_h.status = CAM_BUSY;
        break;
    default:
        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
        break;
    }

    if (pCmd->flags.data_in) {
        bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
    }
    else if (pCmd->flags.data_out) {
        bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
    }

    bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);

    cmdext_put(ext);
    ldm_free_cmds(pCmd);
    xpt_done(ccb);
}

static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
{
    POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
    union ccb *ccb = ext->ccb;

    if(logical) {
        os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
        pSg->size = ccb->csio.dxfer_len;
        pSg->eot = 1;
        return TRUE;
    }
    /* since we have provided physical sg, nobody will ask us to build physical sg */
    HPT_ASSERT(0);
    return FALSE;
}

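/*
 * busdma callback: convert the segment list into the command's physical
 * SG list, sync the map for the transfer direction, arm the timeout and
 * queue the command to the LDM.
 */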
static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    PCOMMAND pCmd = (PCOMMAND)arg;
    POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
    PSG psg = pCmd->psg;
    int idx;

    HPT_ASSERT(pCmd->flags.physical_sg);

    if (error)
        panic("busdma error");

    HPT_ASSERT(nsegs<=os_max_sg_descriptors);

    if (nsegs != 0) {
        for (idx = 0; idx < nsegs; idx++, psg++) {
            psg->addr.bus = segs[idx].ds_addr;
            psg->size = segs[idx].ds_len;
            psg->eot = 0;
        }
        psg[-1].eot = 1;

        if (pCmd->flags.data_in) {
            bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
                BUS_DMASYNC_PREREAD);
        }
        else if (pCmd->flags.data_out) {
            bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
                BUS_DMASYNC_PREWRITE);
        }
    }
    callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
    ldm_queue_cmd(pCmd);
}

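/*
 * Handle XPT_SCSI_IO.  INQUIRY, READ CAPACITY, REPORT LUNS and
 * SERVICE ACTION IN are emulated here; READ/WRITE/VERIFY CDBs are
 * translated into internal IDE-style commands and queued to the LDM.
 */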
static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
{
    PVBUS vbus = (PVBUS)vbus_ext->vbus;
    PVDEV vd;
    PCOMMAND pCmd;
    POS_CMDEXT ext;
    HPT_U8 *cdb;
    int error;

    if (ccb->ccb_h.flags & CAM_CDB_POINTER)
        cdb = ccb->csio.cdb_io.cdb_ptr;
    else
        cdb = ccb->csio.cdb_io.cdb_bytes;

    KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
        ccb,
        ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
        *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
        ));

    /* ccb->ccb_h.path_id is not our bus id - don't check it */
    if (ccb->ccb_h.target_lun != 0 ||
        ccb->ccb_h.target_id >= osm_max_targets ||
        (ccb->ccb_h.flags & CAM_CDB_PHYS))
    {
        ccb->ccb_h.status = CAM_TID_INVALID;
        xpt_done(ccb);
        return;
    }

    vd = ldm_find_target(vbus, ccb->ccb_h.target_id);

    if (!vd) {
        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
        xpt_done(ccb);
        return;
    }

    switch (cdb[0]) {
    case TEST_UNIT_READY:
    case START_STOP_UNIT:
    case SYNCHRONIZE_CACHE:
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;

    case INQUIRY:
    {
        PINQUIRYDATA inquiryData;
        memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
        inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;

        inquiryData->AdditionalLength = 31;
        inquiryData->CommandQueue = 1;
        memcpy(&inquiryData->VendorId, "HPT     ", 8);
        memcpy(&inquiryData->ProductId, "DISK 0_0        ", 16);

        if (vd->target_id / 10) {
            inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
            inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
        }
        else
            inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';

        memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);

        ccb->ccb_h.status = CAM_REQ_CMP;
    }
    break;

    case READ_CAPACITY:
    {
        HPT_U8 *rbuf = ccb->csio.data_ptr;
        HPT_U32 cap;
        HPT_U8 sector_size_shift = 0;
        HPT_U64 new_cap;
        HPT_U32 sector_size = 0;

        if (mIsArray(vd->type))
            sector_size_shift = vd->u.array.sector_size_shift;
        else {
            if(vd->type == VD_RAW){
                sector_size = vd->u.raw.logical_sector_size;
            }

            switch (sector_size) {
            case 0x1000:
                KdPrint(("set 4k sector size in READ_CAPACITY"));
                sector_size_shift = 3;
                break;
            default:
                break;
            }
        }
        new_cap = vd->capacity >> sector_size_shift;

        if (new_cap > 0xfffffffful)
            cap = 0xffffffff;
        else
            cap = new_cap - 1;

        rbuf[0] = (HPT_U8)(cap>>24);
        rbuf[1] = (HPT_U8)(cap>>16);
        rbuf[2] = (HPT_U8)(cap>>8);
        rbuf[3] = (HPT_U8)cap;
        rbuf[4] = 0;
        rbuf[5] = 0;
        rbuf[6] = 2 << sector_size_shift;
        rbuf[7] = 0;

        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    case REPORT_LUNS:
    {
        HPT_U8 *rbuf = ccb->csio.data_ptr;
        memset(rbuf, 0, 16);
        rbuf[3] = 8;
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    case SERVICE_ACTION_IN:
    {
        HPT_U8 *rbuf = ccb->csio.data_ptr;
        HPT_U64 cap = 0;
        HPT_U8 sector_size_shift = 0;
        HPT_U32 sector_size = 0;

        if(mIsArray(vd->type))
            sector_size_shift = vd->u.array.sector_size_shift;
        else {
            if(vd->type == VD_RAW){
                sector_size = vd->u.raw.logical_sector_size;
            }

            switch (sector_size) {
            case 0x1000:
                KdPrint(("set 4k sector size in SERVICE_ACTION_IN"));
                sector_size_shift = 3;
                break;
            default:
                break;
            }
        }
        cap = (vd->capacity >> sector_size_shift) - 1;

        rbuf[0] = (HPT_U8)(cap>>56);
        rbuf[1] = (HPT_U8)(cap>>48);
        rbuf[2] = (HPT_U8)(cap>>40);
        rbuf[3] = (HPT_U8)(cap>>32);
        rbuf[4] = (HPT_U8)(cap>>24);
        rbuf[5] = (HPT_U8)(cap>>16);
        rbuf[6] = (HPT_U8)(cap>>8);
        rbuf[7] = (HPT_U8)cap;
        rbuf[8] = 0;
        rbuf[9] = 0;
        rbuf[10] = 2 << sector_size_shift;
        rbuf[11] = 0;

        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    }

    case READ_6:
    case READ_10:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_16:
    case 0x13:
    case 0x2f:
    case 0x8f: /* VERIFY_16 */
    {
        HPT_U8 sector_size_shift = 0;
        HPT_U32 sector_size = 0;
        pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
        if(!pCmd){
            KdPrint(("Failed to allocate command!"));
            ccb->ccb_h.status = CAM_BUSY;
            break;
        }

        switch (cdb[0]) {
        case READ_6:
        case WRITE_6:
        case 0x13:
            pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
            pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
            break;
        case READ_16:
        case WRITE_16:
        case 0x8f: /* VERIFY_16 */
        {
            HPT_U64 block =
                ((HPT_U64)cdb[2]<<56) |
                ((HPT_U64)cdb[3]<<48) |
                ((HPT_U64)cdb[4]<<40) |
                ((HPT_U64)cdb[5]<<32) |
                ((HPT_U64)cdb[6]<<24) |
                ((HPT_U64)cdb[7]<<16) |
                ((HPT_U64)cdb[8]<<8) |
                ((HPT_U64)cdb[9]);
            pCmd->uCmd.Ide.Lba = block;
            pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
            break;
        }

        default:
            pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
            pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
            break;
        }

        if(mIsArray(vd->type)) {
            sector_size_shift = vd->u.array.sector_size_shift;
        }
        else {
            if(vd->type == VD_RAW){
                sector_size = vd->u.raw.logical_sector_size;
            }

            switch (sector_size) {
            case 0x1000:
                KdPrint(("<8>resize sector size from 4k to 512"));
                sector_size_shift = 3;
                break;
            default:
                break;
            }
        }
        pCmd->uCmd.Ide.Lba <<= sector_size_shift;
        pCmd->uCmd.Ide.nSectors <<= sector_size_shift;

        switch (cdb[0]) {
        case READ_6:
        case READ_10:
        case READ_16:
            pCmd->flags.data_in = 1;
            break;
        case WRITE_6:
        case WRITE_10:
        case WRITE_16:
            pCmd->flags.data_out = 1;
            break;
        }
        pCmd->priv = ext = cmdext_get(vbus_ext);
        HPT_ASSERT(ext);
        ext->ccb = ccb;
        pCmd->target = vd;
        pCmd->done = os_cmddone;
        pCmd->buildsgl = os_buildsgl;

        pCmd->psg = ext->psg;
        pCmd->flags.physical_sg = 1;
        error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
                ext->dma_map, ccb,
                hpt_io_dmamap_callback, pCmd,
                BUS_DMA_WAITOK
                );
        KdPrint(("<8>bus_dmamap_load return %d", error));
        if (error && error!=EINPROGRESS) {
            os_printk("bus_dmamap_load error %d", error);
            cmdext_put(ext);
            ldm_free_cmds(pCmd);
            ccb->ccb_h.status = CAM_REQ_CMP_ERR;
            xpt_done(ccb);
        }
        return;
    }

    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }

    xpt_done(ccb);
    return;
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb)
{
    PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

    KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));

    hpt_assert_vbus_locked(vbus_ext);
    switch (ccb->ccb_h.func_code) {

    case XPT_SCSI_IO:
        hpt_scsi_io(vbus_ext, ccb);
        return;

    case XPT_RESET_BUS:
        ldm_reset_vbus((PVBUS)vbus_ext->vbus);
        break;

    case XPT_GET_TRAN_SETTINGS:
    case XPT_SET_TRAN_SETTINGS:
        ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
        break;

    case XPT_CALC_GEOMETRY:
        ccb->ccg.heads = 255;
        ccb->ccg.secs_per_track = 63;
        ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;

    case XPT_PATH_INQ:
    {
        struct ccb_pathinq *cpi = &ccb->cpi;

        cpi->version_num = 1;
        cpi->hba_inquiry = PI_SDTR_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = PIM_NOBUSRESET;
        cpi->hba_eng_cnt = 0;
        cpi->max_target = osm_max_targets;
        cpi->max_lun = 0;
        cpi->unit_number = cam_sim_unit(sim);
        cpi->bus_id = cam_sim_bus(sim);
        cpi->initiator_id = osm_max_targets;
        cpi->base_transfer_speed = 3300;

        strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
        strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->transport = XPORT_SPI;
        cpi->transport_version = 2;
        cpi->protocol = PROTO_SCSI;
        cpi->protocol_version = SCSI_REV_2;
        cpi->ccb_h.status = CAM_REQ_CMP;
        break;
    }

    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }

    xpt_done(ccb);
    return;
}

static void hpt_pci_intr(void *arg)
{
    PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
    hpt_lock_vbus(vbus_ext);
    ldm_intr((PVBUS)vbus_ext->vbus);
    hpt_unlock_vbus(vbus_ext);
}

static void hpt_poll(struct cam_sim *sim)
{
    PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);

    hpt_assert_vbus_locked(vbus_ext);
    ldm_intr((PVBUS)vbus_ext->vbus);
}

static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
{
    KdPrint(("<8>hpt_async"));
}

static int hpt_shutdown(device_t dev)
{
    KdPrint(("hpt_shutdown(dev=%p)", dev));
    return 0;
}

static int hpt_detach(device_t dev)
{
    /* we don't allow the driver to be unloaded. */
    return EBUSY;
}

static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
{
    arg->ioctl_cmnd = 0;
    wakeup(arg);
}

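/*
 * Issue a management ioctl to one virtual bus and wait for it to
 * complete, resetting the vbus if it does not finish within the
 * OSM timeout.
 */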
static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
{
    ioctl_args->result = -1;
    ioctl_args->done = hpt_ioctl_done;
    ioctl_args->ioctl_cmnd = (void *)1;

    hpt_lock_vbus(vbus_ext);
    ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);

    while (ioctl_args->ioctl_cmnd) {
        if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
            break;
        ldm_reset_vbus((PVBUS)vbus_ext->vbus);
        __hpt_do_tasks(vbus_ext);
    }

    /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */

    hpt_unlock_vbus(vbus_ext);
}

static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
{
    PVBUS vbus;
    PVBUS_EXT vbus_ext;

    ldm_for_each_vbus(vbus, vbus_ext) {
        __hpt_do_ioctl(vbus_ext, ioctl_args);
        if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
            return;
    }
}

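/*
 * Convenience wrapper around hpt_do_ioctl(); implemented as a GCC
 * statement expression so it evaluates to the ioctl result.
 */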
#define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
    IOCTL_ARG arg;\
    arg.dwIoControlCode = code;\
    arg.lpInBuffer = inbuf;\
    arg.lpOutBuffer = outbuf;\
    arg.nInBufferSize = insize;\
    arg.nOutBufferSize = outsize;\
    arg.lpBytesReturned = 0;\
    hpt_do_ioctl(&arg);\
    arg.result;\
})

#define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))

static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
{
    int i;
    HPT_U32 count = nMaxCount-1;

    if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
            &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
        return -1;

    nMaxCount = (int)pIds[0];
    for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
    return nMaxCount;
}

static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
{
    return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
            &id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
}

/* This does not logically belong in this file, but we want to use the ioctl interface. */
static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
{
    LOGICAL_DEVICE_INFO_V3 devinfo;
    int i, result;
    DEVICEID param[2] = { id, 0 };

    if (hpt_get_device_info_v3(id, &devinfo))
        return -1;

    if (devinfo.Type!=LDT_ARRAY)
        return -1;

    if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
        param[1] = AS_REBUILD_ABORT;
    else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
        param[1] = AS_VERIFY_ABORT;
    else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
        param[1] = AS_INITIALIZE_ABORT;
    else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
        param[1] = AS_TRANSFORM_ABORT;
    else
        return -1;

    KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
    result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
                param, sizeof(param), 0, 0);

    for (i=0; i<devinfo.u.array.nDisk; i++)
        if (DEVICEID_VALID(devinfo.u.array.Members[i]))
            __hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);

    return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
{
    DEVICEID ids[32];
    int i, count;

    count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));

    for (i=0; i<count; i++)
        __hpt_stop_tasks(vbus_ext, ids[i]);
}

static d_open_t hpt_open;
static d_close_t hpt_close;
static d_ioctl_t hpt_ioctl;
static int hpt_rescan_bus(void);

static struct cdevsw hpt_cdevsw = {
    .d_open = hpt_open,
    .d_close = hpt_close,
    .d_ioctl = hpt_ioctl,
    .d_name = driver_name,
    .d_version = D_VERSION,
};

static struct intr_config_hook hpt_ich;

/*
 * hpt_final_init() is called after all hpt_attach() calls have completed.
 */
static void hpt_final_init(void *dummy)
{
    int i,unit_number=0;
    PVBUS_EXT vbus_ext;
    PVBUS vbus;
    PHBA hba;

    /* Clear the config hook */
    config_intrhook_disestablish(&hpt_ich);

    /* allocate memory */
    i = 0;
    ldm_for_each_vbus(vbus, vbus_ext) {
        if (hpt_alloc_mem(vbus_ext)) {
            os_printk("out of memory");
            return;
        }
        i++;
    }

    if (!i) {
        if (bootverbose)
            os_printk("no controller detected.");
        return;
    }

    /* initialize the hardware */
    ldm_for_each_vbus(vbus, vbus_ext) {
        /* make the timer available here */
        mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
        callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
        if (hpt_init_vbus(vbus_ext)) {
            os_printk("failed to initialize hardware");
            break; /* FIXME */
        }
    }

    /* register CAM interface */
    ldm_for_each_vbus(vbus, vbus_ext) {
        struct cam_devq *devq;
        struct ccb_setasync ccb;

        if (bus_dma_tag_create(NULL,/* parent */
                4, /* alignment */
                BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
                BUS_SPACE_MAXADDR, /* lowaddr */
                BUS_SPACE_MAXADDR, /* highaddr */
                NULL, NULL, /* filter, filterarg */
                PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */
                os_max_sg_descriptors, /* nsegments */
                0x10000, /* maxsegsize */
                BUS_DMA_WAITOK, /* flags */
                busdma_lock_mutex, /* lockfunc */
                &vbus_ext->lock, /* lockfuncarg */
                &vbus_ext->io_dmat /* tag */))
        {
            return ;
        }

        for (i=0; i<os_max_queue_comm; i++) {
            POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
            if (!ext) {
                os_printk("Can't alloc cmdext(%d)", i);
                return ;
            }
            ext->vbus_ext = vbus_ext;
            ext->next = vbus_ext->cmdext_list;
            vbus_ext->cmdext_list = ext;

            if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
                os_printk("Can't create dma map(%d)", i);
                return ;
            }
            callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
        }

        if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
            os_printk("cam_simq_alloc failed");
            return ;
        }
        vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
                vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq);
        unit_number++;
        if (!vbus_ext->sim) {
            os_printk("cam_sim_alloc failed");
            cam_simq_free(devq);
            return ;
        }

        hpt_lock_vbus(vbus_ext);
        if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
            hpt_unlock_vbus(vbus_ext);
            os_printk("xpt_bus_register failed");
            cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
            vbus_ext->sim = NULL;
            return ;
        }

        if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
                cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
                CAM_LUN_WILDCARD) != CAM_REQ_CMP)
        {
            hpt_unlock_vbus(vbus_ext);
            os_printk("xpt_create_path failed");
            xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
            cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
            vbus_ext->sim = NULL;
            return ;
        }

        memset(&ccb, 0, sizeof(ccb));
        xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
        ccb.ccb_h.func_code = XPT_SASYNC_CB;
        ccb.event_enable = AC_LOST_DEVICE;
        ccb.callback = hpt_async;
        ccb.callback_arg = vbus_ext;
        xpt_action((union ccb *)&ccb);
        hpt_unlock_vbus(vbus_ext);

        for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
            int rid = 0;
            if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
                    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
            {
                os_printk("can't allocate interrupt");
                return ;
            }
            if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
                    NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
            {
                os_printk("can't set up interrupt");
                return ;
            }
            hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);

        }

        vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
                hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
        if (!vbus_ext->shutdown_eh)
            os_printk("Shutdown event registration failed");
    }

    ldm_for_each_vbus(vbus, vbus_ext) {
        TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
        if (vbus_ext->tasks)
            TASK_ENQUEUE(&vbus_ext->worker);
    }

    make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
        S_IRUSR | S_IWUSR, "%s", driver_name);
}

#if defined(KLD_MODULE)

typedef struct driverlink *driverlink_t;
struct driverlink {
    kobj_class_t driver;
    TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */
};

typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;

struct devclass {
    TAILQ_ENTRY(devclass) link;
    devclass_t parent;       /* parent in devclass hierarchy */
    driver_list_t drivers;   /* bus devclasses store drivers for bus */
    char *name;
    device_t *devices;       /* array of devices indexed by unit */
    int maxunit;             /* size of devices array */
};

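/*
 * When loaded as a module, move this driver to the front of the pci
 * devclass driver list so it is preferred over a statically compiled
 * driver of the same name.  The structures above mirror the kernel's
 * internal devclass/driverlink layout for that purpose.
 */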
static void override_kernel_driver(void)
{
    driverlink_t dl, dlfirst;
    driver_t *tmpdriver;
    devclass_t dc = devclass_find("pci");

    if (dc){
        dlfirst = TAILQ_FIRST(&dc->drivers);
        for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
            if(strcmp(dl->driver->name, driver_name) == 0) {
                tmpdriver=dl->driver;
                dl->driver=dlfirst->driver;
                dlfirst->driver=tmpdriver;
                break;
            }
        }
    }
}

#else
#define override_kernel_driver()
#endif

static void hpt_init(void *dummy)
{
    if (bootverbose)
        os_printk("%s %s", driver_name_long, driver_ver);

    override_kernel_driver();
    init_config();

    hpt_ich.ich_func = hpt_final_init;
    hpt_ich.ich_arg = NULL;
    if (config_intrhook_establish(&hpt_ich) != 0) {
        printf("%s: cannot establish configuration hook\n",
            driver_name_long);
    }

}
SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     hpt_probe),
    DEVMETHOD(device_attach,    hpt_attach),
    DEVMETHOD(device_detach,    hpt_detach),
    DEVMETHOD(device_shutdown,  hpt_shutdown),
    { 0, 0 }
};

static driver_t hpt_pci_driver = {
    driver_name,
    driver_methods,
    sizeof(HBA)
};

#ifndef TARGETNAME
#error "no TARGETNAME found"
#endif

/* use these wrappers so that TARGETNAME is macro-expanded */
#define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
#define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
#define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
__DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
__MODULE_VERSION(TARGETNAME, 1);
__MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);

static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
{
    return 0;
}

static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
{
    return 0;
}

static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
    PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
    IOCTL_ARG ioctl_args;
    HPT_U32 bytesReturned = 0;

    switch (cmd){
    case HPT_DO_IOCONTROL:
    {
        if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
            KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
                piop->dwIoControlCode,
                piop->lpInBuffer,
                piop->nInBufferSize,
                piop->lpOutBuffer,
                piop->nOutBufferSize));

            memset(&ioctl_args, 0, sizeof(ioctl_args));

            ioctl_args.dwIoControlCode = piop->dwIoControlCode;
            ioctl_args.nInBufferSize = piop->nInBufferSize;
            ioctl_args.nOutBufferSize = piop->nOutBufferSize;
            ioctl_args.lpBytesReturned = &bytesReturned;

            if (ioctl_args.nInBufferSize) {
                ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
                if (!ioctl_args.lpInBuffer)
                    goto invalid;
                if (copyin((void*)piop->lpInBuffer,
                        ioctl_args.lpInBuffer, piop->nInBufferSize))
                    goto invalid;
            }

            if (ioctl_args.nOutBufferSize) {
                ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
                if (!ioctl_args.lpOutBuffer)
                    goto invalid;
            }

            hpt_do_ioctl(&ioctl_args);

            if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
                if (piop->nOutBufferSize) {
                    if (copyout(ioctl_args.lpOutBuffer,
                            (void*)piop->lpOutBuffer, piop->nOutBufferSize))
                        goto invalid;
                }
                if (piop->lpBytesReturned) {
                    if (copyout(&bytesReturned,
                            (void*)piop->lpBytesReturned, sizeof(HPT_U32)))
                        goto invalid;
                }
                if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
                if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
                return 0;
            }
invalid:
            if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
            if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
            return EFAULT;
        }
        return EFAULT;
    }

    case HPT_SCAN_BUS:
    {
        return hpt_rescan_bus();
    }
    default:
        KdPrint(("invalid command!"));
        return EFAULT;
    }

}

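/* Ask CAM to rescan every SIM registered by this driver. */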
static int hpt_rescan_bus(void)
{
    union ccb *ccb;
    PVBUS vbus;
    PVBUS_EXT vbus_ext;

    ldm_for_each_vbus(vbus, vbus_ext) {
        if ((ccb = xpt_alloc_ccb()) == NULL)
        {
            return(ENOMEM);
        }
        if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
        {
            xpt_free_ccb(ccb);
            return(EIO);
        }
        xpt_rescan(ccb);
    }
    return(0);
}