1 /*
2 ******************************************************************************************
3 ** O.S : FreeBSD
4 ** FILE NAME : arcmsr.c
5 ** BY : Erich Chen
6 ** Description: SCSI RAID Device Driver for
7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX) SATA/SAS RAID HOST Adapter
8 ** ARCMSR RAID Host adapter
9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2006 ARECA Co. Ltd.
14 ** Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 ** notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 ** notice, this list of conditions and the following disclaimer in the
23 ** documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 ** derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 ** REV# DATE NAME DESCRIPTION
41 ** 1.00.00.00 3/31/2004 Erich Chen First release
42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error
43 ** 1.20.00.03 4/19/2005 Erich Chen add SATA 24 Ports adapter type support
44 ** clean unused function
45 ** 1.20.00.12 9/12/2005 Erich Chen bug fix with abort command handling,
46 ** firmware version check
47 ** and firmware update notify for hardware bug fix
48 ** handling if none zero high part physical address
49 ** of srb resource
50 ** 1.20.00.13 8/18/2006 Erich Chen remove pending srb and report busy
51 ** add iop message xfer
52 ** with scsi pass-through command
53 ** add new device id of sas raid adapters
54 ** code fit for SPARC64 & PPC
55 ******************************************************************************************
56 * $FreeBSD: releng/6.2/sys/dev/arcmsr/arcmsr.c 165452 2006-12-21 21:59:19Z scottl $
57 */
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/malloc.h>
61 #include <sys/kernel.h>
62 #include <sys/bus.h>
63 #include <sys/queue.h>
64 #include <sys/stat.h>
65 #include <sys/devicestat.h>
66 #include <sys/kthread.h>
67 #include <sys/module.h>
68 #include <sys/proc.h>
69 #include <sys/lock.h>
70 #include <sys/sysctl.h>
71 #include <sys/poll.h>
72 #include <sys/ioccom.h>
73 #include <vm/vm.h>
74 #include <vm/vm_param.h>
75 #include <vm/pmap.h>
76
77 #include <isa/rtc.h>
78
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81 #include <machine/atomic.h>
82 #include <sys/conf.h>
83 #include <sys/rman.h>
84
85 #include <cam/cam.h>
86 #include <cam/cam_ccb.h>
87 #include <cam/cam_sim.h>
88 #include <cam/cam_xpt_sim.h>
89 #include <cam/cam_debug.h>
90 #include <cam/scsi/scsi_all.h>
91 #include <cam/scsi/scsi_message.h>
92 /*
93 **************************************************************************
94 **************************************************************************
95 */
#if __FreeBSD_version >= 500005
/*
** FreeBSD 5.x and later: use mtx(9) mutexes, the dev/pci headers and
** the endian.h byte-swap helpers.
*/
#include <sys/selinfo.h>
#include <sys/mutex.h>
#include <sys/endian.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/* MTX_RECURSE: completion paths may re-acquire a lock already held. */
#define ARCMSR_LOCK_INIT(l, s) mtx_init(l, s, NULL, MTX_DEF|MTX_RECURSE)
#define ARCMSR_LOCK_ACQUIRE(l) mtx_lock(l)
#define ARCMSR_LOCK_RELEASE(l) mtx_unlock(l)
#define ARCMSR_LOCK_TRY(l) mtx_trylock(l)
#define arcmsr_htole32(x) htole32(x)
typedef struct mtx arcmsr_lock_t;
#else
/*
** FreeBSD 4.x: fall back to simplelocks and the old pci/ headers;
** arcmsr_htole32 is the identity here (no swap performed).
*/
#include <sys/select.h>
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#define ARCMSR_LOCK_INIT(l, s) simple_lock_init(l)
#define ARCMSR_LOCK_ACQUIRE(l) simple_lock(l)
#define ARCMSR_LOCK_RELEASE(l) simple_unlock(l)
#define ARCMSR_LOCK_TRY(l) simple_lock_try(l)
#define arcmsr_htole32(x) (x)
typedef struct simplelock arcmsr_lock_t;
#endif
#include <dev/arcmsr/arcmsr.h>
/*
** Size of the contiguous DMA pool holding every SRB, plus 0x20 bytes of
** slack so the pool start can be rounded up to a 32-byte boundary
** (SRB frames must be 32-byte aligned — see the <<5 math in the ISR).
*/
#define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM)+0x20)
/*
**************************************************************************
**************************************************************************
*/
/*
** Register accessors. NOTE: these macros implicitly capture a variable
** named 'acb' (providing btag/bhandle) from the call site; register
** offsets are taken from the struct MessageUnit field layout.
*/
#define CHIP_REG_READ32(r) bus_space_read_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r))
#define CHIP_REG_WRITE32(r,d) bus_space_write_4(acb->btag, acb->bhandle, offsetof(struct MessageUnit,r), d)
127 /*
128 **************************************************************************
129 **************************************************************************
130 */
/*
** Forward declarations for the driver-internal helpers defined below.
*/
static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
/* newbus device methods */
static u_int32_t arcmsr_probe(device_t dev);
static u_int32_t arcmsr_attach(device_t dev);
static u_int32_t arcmsr_detach(device_t dev);
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
static void arcmsr_shutdown(device_t dev);
static void arcmsr_interrupt(void *arg);
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
static void arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
/* IOP message-register command helpers */
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
/* SRB lifecycle */
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
157 /*
158 **************************************************************************
159 **************************************************************************
160 */
161 static void UDELAY(u_int32_t us) { DELAY(us); }
162 /*
163 **************************************************************************
164 **************************************************************************
165 */
/* bus_dma(9) load callbacks: SRB pool mapping and per-I/O SG execution. */
static bus_dmamap_callback_t arcmsr_map_freesrb;
static bus_dmamap_callback_t arcmsr_executesrb;
/*
**************************************************************************
**************************************************************************
*/
/* Character-device entry points for the /dev/arcmsr* control node. */
static d_open_t arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;
175
/* newbus method table: device lifecycle plus generic bus pass-throughs. */
static device_method_t arcmsr_methods[]={
	DEVMETHOD(device_probe, arcmsr_probe),
	DEVMETHOD(device_attach, arcmsr_attach),
	DEVMETHOD(device_detach, arcmsr_detach),
	DEVMETHOD(device_shutdown, arcmsr_shutdown),
	DEVMETHOD(device_suspend, arcmsr_suspend),
	DEVMETHOD(device_resume, arcmsr_resume),

	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{ 0, 0 }
};

/* Softc is the whole per-adapter control block. */
static driver_t arcmsr_driver={
	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};

static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
197
/* Provide fallbacks for symbols that older FreeBSD headers lack. */
#ifndef BUS_DMA_COHERENT
#define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
#endif
#if __FreeBSD_version >= 501000
#ifndef D_NEEDGIANT
#define D_NEEDGIANT 0x00400000 /* driver want Giant */
#endif
#ifndef D_VERSION
#define D_VERSION 0x20011966
#endif
/* 5.1+ designated-initializer cdevsw; ioctl path runs under Giant. */
static struct cdevsw arcmsr_cdevsw={
#if __FreeBSD_version > 502010
	.d_version = D_VERSION,
#endif
	.d_flags = D_NEEDGIANT,
	.d_open = arcmsr_open, /* open */
	.d_close = arcmsr_close, /* close */
	.d_ioctl = arcmsr_ioctl, /* ioctl */
	.d_name = "arcmsr", /* name */
};
#else
#define ARCMSR_CDEV_MAJOR 180

/* Pre-5.1 positional cdevsw with a statically assigned major number. */
static struct cdevsw arcmsr_cdevsw = {
	arcmsr_open, /* open */
	arcmsr_close, /* close */
	noread, /* read */
	nowrite, /* write */
	arcmsr_ioctl, /* ioctl */
	nopoll, /* poll */
	nommap, /* mmap */
	nostrategy, /* strategy */
	"arcmsr", /* name */
	ARCMSR_CDEV_MAJOR, /* major */
	nodump, /* dump */
	nopsize, /* psize */
	0 /* flags */
};
#endif
237
/*
** open(2) on the control node: only validates that a softc exists for
** this unit; no per-open state is kept.
*/
#if __FreeBSD_version < 500005
static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_open(struct cdev *dev, int flags, int fmt, d_thread_t *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
#else
	/* 5.3+: softc is looked up by unit number instead of si_drv1. */
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif
	if(acb==NULL) {
		return ENXIO;
	}
	return 0;
}
259 /*
260 **************************************************************************
261 **************************************************************************
262 */
/*
** close(2) on the control node: mirror of arcmsr_open; nothing to tear
** down beyond checking the softc is still present.
*/
#if __FreeBSD_version < 500005
static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_close(struct cdev *dev, int flags, int fmt, d_thread_t *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
#else
	/* 5.3+: softc is looked up by unit number instead of si_drv1. */
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif
	if(acb==NULL) {
		return ENXIO;
	}
	return 0;
}
284 /*
285 **************************************************************************
286 **************************************************************************
287 */
/*
** ioctl(2) on the control node: resolve the softc for this unit and
** forward the request to arcmsr_iop_ioctlcmd(), which implements the
** ARCMSR_MESSAGE_* command set.
*/
#if __FreeBSD_version < 500005
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
#else
static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, d_thread_t *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
	struct AdapterControlBlock *acb=dev->si_drv1;
#else
	/* 5.3+: softc is looked up by unit number instead of si_drv1. */
	int unit = minor(dev);
	struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif

	if(acb==NULL) {
		return ENXIO;
	}
	return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
}
310 /*
311 *******************************************************************************
312 *******************************************************************************
313 */
/*
** device_suspend method: mask every outbound interrupt source, then
** park the IOP (stop background rebuild, flush the controller cache).
** Always reports success.
*/
static int arcmsr_suspend(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);
	u_int32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE));
	/* flush controller */
	arcmsr_iop_parking(acb);
	return(0);
}
326 /*
327 *******************************************************************************
328 *******************************************************************************
329 */
/*
** device_resume method: re-run full IOP initialization to bring the
** adapter back after suspend. Always reports success.
*/
static int arcmsr_resume(device_t dev)
{
	struct AdapterControlBlock *acb = device_get_softc(dev);

	arcmsr_iop_init(acb);
	return(0);
}
337 /*
338 *********************************************************************************
339 *********************************************************************************
340 */
/*
** CAM async-event callback registered against this SIM. Only
** AC_LOST_DEVICE is handled: it logs which target/lun disappeared.
** All other event codes are ignored.
*/
static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct AdapterControlBlock *acb;
	u_int8_t target_id, target_lun;
	struct cam_sim * sim;

	sim=(struct cam_sim *) cb_arg;
	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		target_id=xpt_path_target_id(path);
		target_lun=xpt_path_lun_id(path);
		/*
		** NOTE(review): the bounds test uses '>' — if
		** ARCMSR_MAX_TARGETID/TARGETLUN are counts rather than
		** maximum valid indices this should be '>='; confirm
		** against arcmsr.h before changing.
		*/
		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
			break;
		}
		printf("%s:scsi id%d lun%d device lost \n"
			, device_get_name(acb->pci_dev), target_id, target_lun);
		break;
	default:
		break;
	}
}
363 /*
364 ************************************************************************
365 ************************************************************************
366 */
/*
** Ask the IOP to flush its write-back cache by posting the FLUSH_CACHE
** message and waiting (polled) for the message-0 acknowledge interrupt.
** A timeout is only logged; there is no recovery path here.
*/
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
	if(arcmsr_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'flush adapter cache' timeout \n"
			, acb->pci_unit);
	}
	return;
}
376 /*
377 **********************************************************************
378 **********************************************************************
379 */
/*
** Poll for the IOP's message-0 acknowledge interrupt after a command
** was posted to inbound_msgaddr0. Returns 0x00 on acknowledge (and
** clears the interrupt bit), 0xff on timeout after ~20 seconds total
** (20 retries x 100 polls x 10 ms).
*/
static u_int8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	u_int32_t Index;
	u_int8_t Retries=0x00;

	do {
		for(Index=0; Index < 100; Index++) {
			if(CHIP_REG_READ32(outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				/*clear interrupt*/
				CHIP_REG_WRITE32(outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
				return 0x00;
			}
			/* 10 ms delay per poll (UDELAY takes microseconds) */
			UDELAY(10000);
		}/*max 1 seconds*/
	}while(Retries++ < 20);/*max 20 sec*/
	return 0xff;
}
398 /*
399 **********************************************************************
400 **********************************************************************
401 */
/*
** Finish an SRB: undo its data-buffer DMA mapping (direction-dependent
** sync, then unload), return the SRB to the free ring under the
** workingQ_done lock, and hand the CCB back to CAM via xpt_done().
** stand_flag==1 means the SRB was counted as outstanding and the
** outstanding counter must be decremented.
*/
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
{
	struct AdapterControlBlock *acb=srb->acb;
	union ccb * pccb=srb->pccb;

	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_POSTREAD;
		} else {
			op = BUS_DMASYNC_POSTWRITE;
		}
		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
	}
	/* push the SRB back onto the done ring; indices wrap at pool size */
	ARCMSR_LOCK_ACQUIRE(&acb->workingQ_done_lock);
	if(stand_flag==1) {
		atomic_subtract_int(&acb->srboutstandingcount, 1);
	}
	srb->startdone=ARCMSR_SRB_DONE;
	srb->srb_flags=0;
	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
	acb->workingsrb_doneindex++;
	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
	ARCMSR_LOCK_RELEASE(&acb->workingQ_done_lock);
	/* xpt_done is called outside the lock on purpose */
	xpt_done(pccb);
	return;
}
431 /*
432 **********************************************************************
433 **********************************************************************
434 */
435 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
436 {
437 union ccb * pccb=srb->pccb;
438
439 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
440 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
441 if(&pccb->csio.sense_data) {
442 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
443 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
444 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
445 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
446 pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
447 }
448 return;
449 }
450 /*
451 *********************************************************************
452 **
453 *********************************************************************
454 */
/*
** Tell the IOP to abort every outstanding command by posting the
** ABORT_CMD message, then poll for the acknowledge. A timeout is only
** logged; the caller (arcmsr_iop_reset) still proceeds with cleanup.
*/
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
	if(arcmsr_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->pci_unit);
	}
	return;
}
464 /*
465 ****************************************************************************
466 ****************************************************************************
467 */
/*
** Reset the IOP's command state. If commands are outstanding: ask the
** firmware to abort them all, wait 3 s, mask interrupts, drain the
** outbound completion FIFO, complete every in-flight SRB back to CAM
** with CAM_REQ_ABORTED, and re-enable the post-queue and doorbell
** interrupts. Finally zero the outstanding count and reset both free-
** ring indices. Ordering here matters (abort before drain before
** completion sweep) — do not reorder.
*/
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *srb;
	u_int32_t intmask_org, mask;
	u_int32_t i=0;

	if(acb->srboutstandingcount!=0)
	{
		/* talk to iop 331 outstanding command aborted*/
		arcmsr_abort_allcmd(acb);
		UDELAY(3000*1000);/*wait for 3 sec for all command aborted*/
		/* disable all outbound interrupt */
		intmask_org=CHIP_REG_READ32(outbound_intmask);
		CHIP_REG_WRITE32(outbound_intmask
			, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
		/*clear all outbound posted Q: read and discard stale completions*/
		for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) {
			CHIP_REG_READ32(outbound_queueport);
		}
		/* complete every SRB still marked in-flight as aborted */
		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
			srb=acb->psrb_pool[i];
			if(srb->startdone==ARCMSR_SRB_START) {
				srb->startdone=ARCMSR_SRB_ABORTED;
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
			}
		}
		/* enable all outbound interrupt */
		mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
			|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
		CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask);
		/* post abort all outstanding command message to RAID controller */
	}
	atomic_set_int(&acb->srboutstandingcount, 0);
	acb->workingsrb_doneindex=0;
	acb->workingsrb_startindex=0;
	return;
}
506 /*
507 **********************************************************************
508 **********************************************************************
509 */
/*
** Translate a CAM SCSI I/O CCB plus its bus_dma segment list into the
** Areca firmware CDB that lives inside the SRB. Builds 32-bit SG
** entries when the segment's high address bits are zero, otherwise
** 64-bit entries, splitting any segment that straddles a 4 GiB
** boundary (firmware requirement). 'arccdbsize' tracks the frame size;
** frames larger than 256 bytes are flagged SGL_BSIZE so the post path
** tells the IOP to fetch the bigger frame.
*/
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg)
{
	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;       /* cursor into the SG area */
	u_int32_t address_lo, address_hi;
	union ccb * pccb=srb->pccb;
	struct ccb_scsiio * pcsio= &pccb->csio;
	u_int32_t arccdbsize=0x30;                        /* fixed header size */

	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus=0;
	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
	arcmsr_cdb->Function=1;
	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
	arcmsr_cdb->Context=(unsigned long)arcmsr_cdb;
	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
	if(nseg != 0) {
		struct AdapterControlBlock *acb=srb->acb;
		bus_dmasync_op_t op;
		u_int32_t length, i, cdb_sgcount=0;

		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op=BUS_DMASYNC_PREREAD;
		} else {
			op=BUS_DMASYNC_PREWRITE;
			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
			srb->srb_flags|=SRB_FLAG_WRITE;
		}
		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
		for(i=0;i<nseg;i++) {
			/* Get the physical address of the current data pointer */
			length=arcmsr_htole32(dm_segs[i].ds_len);
			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
			if(address_hi==0) {
				/* segment fits below 4 GiB: compact 32-bit entry */
				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
				pdma_sg->address=address_lo;
				pdma_sg->length=length;
				psge += sizeof(struct SG32ENTRY);
				arccdbsize += sizeof(struct SG32ENTRY);
			} else {
				u_int32_t sg64s_size=0, tmplength=length;

				while(1) {
					u_int64_t span4G, length0;
					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;

					span4G=(u_int64_t)address_lo + tmplength;
					pdma_sg->addresshigh=address_hi;
					pdma_sg->address=address_lo;
					if(span4G > 0x100000000) {
						/*see if cross 4G boundary: emit an entry up
						**to the boundary, advance address_hi, loop*/
						length0=0x100000000-address_lo;
						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
						address_hi=address_hi+1;
						address_lo=0;
						tmplength=tmplength-(u_int32_t)length0;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						cdb_sgcount++;
					} else {
						/* remainder fits: final 64-bit entry */
						pdma_sg->length=tmplength|IS_SG64_ADDR;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						break;
					}
				}
				arccdbsize += sg64s_size;
			}
			/* counts the last entry of each dm_segs[i] */
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
		arcmsr_cdb->DataLength=pcsio->dxfer_len;
		if( arccdbsize > 256) {
			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
		}
	}
	return;
}
590 /*
591 **************************************************************************
592 **************************************************************************
593 */
/*
** Hand a built SRB to the IOP by writing its shifted physical address
** to the inbound queue port. The SGL_BSIZE flag bit tells the firmware
** to fetch a frame larger than the default 256 bytes.
**
** NOTE(review): the dmamap_sync here uses POSTWRITE/POSTREAD even
** though the CPU has just finished writing the frame and the device is
** about to read it — PREWRITE/PREREAD would be the conventional ops
** before a device access; confirm against bus_dma(9) before changing.
*/
static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
{
	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;

	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
		(srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
	atomic_add_int(&acb->srboutstandingcount, 1);
	srb->startdone=ARCMSR_SRB_START;
	if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
		CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
	} else {
		CHIP_REG_WRITE32(inbound_queueport, cdb_shifted_phyaddr);
	}
	return;
}
610 /*
611 **********************************************************************
612 **********************************************************************
613 */
/*
** Drain up to 124 bytes from the driver's write-queue ring buffer into
** the IOP's message write buffer and ring the inbound doorbell. Only
** runs when the IOP has acknowledged reading the previous buffer
** (ACB_F_MESSAGE_WQBUFFER_READED set); the flag is cleared so the next
** post waits for the firmware's DATA_READ_OK doorbell in the ISR.
*/
static void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
{
	u_int8_t * pQbuffer;
	struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer;
	u_int8_t * iop_data=(u_int8_t *)pwbuffer->data;
	u_int32_t allxfer_len=0;

	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) {
			/* copy a byte at a time; the IOP buffer may not tolerate
			** wider accesses */
			pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			/*if last index number set it to 0 */
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len=allxfer_len;
		/*
		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
		*/
		CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
	}
	return;
}
640 /*
641 ************************************************************************
642 ************************************************************************
643 */
644 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
645 {
646 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
647 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
648 if(arcmsr_wait_msgint_ready(acb)) {
649 printf("arcmsr%d: wait 'stop adapter rebulid' timeout \n"
650 , acb->pci_unit);
651 }
652 return;
653 }
654 /*
655 ************************************************************************
656 ************************************************************************
657 */
/*
** CAM polled-mode entry point: service the adapter by running the
** interrupt handler directly against this SIM's softc.
*/
static void arcmsr_poll(struct cam_sim * psim)
{
	struct AdapterControlBlock *acb;

	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
	arcmsr_interrupt(acb);
}
663 /*
664 **********************************************************************
665 **********************************************************************
666 */
/*
** Hardware interrupt handler (also invoked by arcmsr_poll). Two
** sources are serviced from outbound_intstatus:
**
**  DOORBELL_INT  - message-buffer traffic with the IOP:
**      DATA_WRITE_OK: IOP wrote into its read buffer; copy into the
**        driver's rqbuffer ring if there is room, else set
**        ACB_F_IOPDATA_OVERFLOW and leave the data for the ioctl path.
**      DATA_READ_OK: IOP consumed the last write buffer; post the next
**        chunk (up to 124 bytes) from wqbuffer, or mark the write
**        queue cleared when empty.
**
**  POSTQUEUE_INT - command completions: pop SRB frame addresses from
**      the outbound queue FIFO (0xFFFFFFFF == empty), translate the
**      32-byte-aligned physical frame back to a virtual SRB via
**      vir2phy_offset, and complete each SRB with a CAM status mapped
**      from the firmware DeviceStatus.
*/
static void arcmsr_interrupt(void *arg)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
	struct CommandControlBlock *srb;
	u_int32_t flag_srb, outbound_intstatus, outbound_doorbell;

	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable;
	CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		/*
		*********************************************
		** DOORBELL
		*********************************************
		*/
		outbound_doorbell=CHIP_REG_READ32(outbound_doorbell);
		CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */
		if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
			struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer;
			u_int8_t * iop_data=(u_int8_t *)prbuffer->data;
			u_int8_t * pQbuffer;
			u_int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

			/*check this iop data if overflow my rqbuffer*/
			rqbuf_lastindex=acb->rqbuf_lastindex;
			rqbuf_firstindex=acb->rqbuf_firstindex;
			iop_len=prbuffer->data_len;
			/* free space in the ring (one slot kept empty) */
			my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
			if(my_empty_len>=iop_len) {
				while(iop_len > 0) {
					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
					memcpy(pQbuffer, iop_data, 1);
					acb->rqbuf_lastindex++;
					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					/*if last index number set it to 0 */
					iop_data++;
					iop_len--;
				}
				CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
				/*signature, let IOP331 know data has been readed */
			} else {
				/* no room: ioctl read path drains and re-acks later */
				acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
			}
		}
		if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
			acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
			/*
			*********************************************
			** IOP consumed the last write buffer; post the next chunk
			*********************************************
			*/
			if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
				u_int8_t * pQbuffer;
				struct QBUFFER * pwbuffer=(struct QBUFFER *)&acb->pmu->message_wbuffer;
				u_int8_t * iop_data=(u_int8_t *)pwbuffer->data;
				u_int32_t allxfer_len=0;

				acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
				while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) {
					pQbuffer= &acb->wqbuffer[acb->wqbuf_firstindex];
					memcpy(iop_data, pQbuffer, 1);
					acb->wqbuf_firstindex++;
					acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
					/*if last index number set it to 0 */
					iop_data++;
					allxfer_len++;
				}
				pwbuffer->data_len=allxfer_len;
				/*
				** push inbound doorbell tell iop driver data write ok
				** and wait reply on next hwinterrupt for next Qbuffer post
				*/
				CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
			}
			if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
				acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
			}
		}
	}
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		int target, lun;
		/*
		*****************************************************************************
		** areca cdb command done
		*****************************************************************************
		*/
		bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		while(1) {
			if((flag_srb=CHIP_REG_READ32(outbound_queueport)) == 0xFFFFFFFF) {
				break;/*chip FIFO no srb for completion already*/
			}
			/* check if command done with no error*/
			srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));
			/*frame must be 32 bytes aligned*/
			if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
				if(srb->startdone==ARCMSR_SRB_ABORTED) {
					/* late completion of a command we already aborted */
					printf("arcmsr%d: srb='%p' isr got aborted command \n"
						, acb->pci_unit, srb);
					srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
					arcmsr_srb_complete(srb, 1);
					continue;
				}
				/* SRB not recognized: log and drop without completing */
				printf("arcmsr%d: isr get an illegal srb command done"
					"acb='%p' srb='%p' srbacb='%p' startdone=0x%x"
					"srboutstandingcount=%d \n",
					acb->pci_unit, acb, srb, srb->acb,
					srb->startdone, acb->srboutstandingcount);
				continue;
			}
			target=srb->pccb->ccb_h.target_id;
			lun=srb->pccb->ccb_h.target_lun;
			if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) {
				/* clean completion; a previously-gone device is back */
				if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
					acb->devstate[target][lun]=ARECA_RAID_GOOD;
				}
				srb->pccb->ccb_h.status |= CAM_REQ_CMP;
				arcmsr_srb_complete(srb, 1);
			} else {
				/* map firmware DeviceStatus to a CAM status code */
				switch(srb->arcmsr_cdb.DeviceStatus) {
				case ARCMSR_DEV_SELECT_TIMEOUT: {
						acb->devstate[target][lun]=ARECA_RAID_GONE;
						srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
						arcmsr_srb_complete(srb, 1);
					}
					break;
				case ARCMSR_DEV_ABORTED:
				case ARCMSR_DEV_INIT_FAIL: {
						acb->devstate[target][lun]=ARECA_RAID_GONE;
						srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
						arcmsr_srb_complete(srb, 1);
					}
					break;
				case SCSISTAT_CHECK_CONDITION: {
						acb->devstate[target][lun]=ARECA_RAID_GOOD;
						arcmsr_report_sense_info(srb);
						arcmsr_srb_complete(srb, 1);
					}
					break;
				default:
					printf("arcmsr%d: scsi id=%d lun=%d"
						"isr get command error done,"
						"but got unknow DeviceStatus=0x%x \n"
						, acb->pci_unit, target, lun
						,srb->arcmsr_cdb.DeviceStatus);
					acb->devstate[target][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
					/*unknow error or crc error just for retry*/
					arcmsr_srb_complete(srb, 1);
					break;
				}
			}
		} /*drain reply FIFO*/
	}
	return;
}
825 /*
826 *******************************************************************************
827 **
828 *******************************************************************************
829 */
830 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
831 {
832 if(acb!=NULL) {
833 /* stop adapter background rebuild */
834 if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
835 arcmsr_stop_adapter_bgrb(acb);
836 arcmsr_flush_adapter_cache(acb);
837 }
838 }
839 }
840 /*
841 ***********************************************************************
842 **
843 ************************************************************************
844 */
845 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
846 {
847 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
848 u_int32_t retvalue=EINVAL;
849
850 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
851 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
852 return retvalue;
853 }
854 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
855 switch(ioctl_cmd) {
856 case ARCMSR_MESSAGE_READ_RQBUFFER: {
857 u_int8_t * pQbuffer;
858 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
859 u_int32_t allxfer_len=0;
860
861 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) {
862 /*copy READ QBUFFER to srb*/
863 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
864 memcpy(ptmpQbuffer, pQbuffer, 1);
865 acb->rqbuf_firstindex++;
866 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
867 /*if last index number set it to 0 */
868 ptmpQbuffer++;
869 allxfer_len++;
870 }
871 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
872 struct QBUFFER * prbuffer=(struct QBUFFER *)&acb->pmu->message_rbuffer;
873 u_int8_t * iop_data=(u_int8_t *)prbuffer->data;
874 u_int32_t iop_len;
875
876 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
877 iop_len=(u_int32_t)prbuffer->data_len;
878 /*this iop data does no chance to make me overflow again here, so just do it*/
879 while(iop_len>0) {
880 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
881 memcpy(pQbuffer, iop_data, 1);
882 acb->rqbuf_lastindex++;
883 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
884 /*if last index number set it to 0 */
885 iop_data++;
886 iop_len--;
887 }
888 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
889 /*signature, let IOP331 know data has been readed */
890 }
891 pcmdmessagefld->cmdmessage.Length=allxfer_len;
892 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
893 retvalue=ARCMSR_MESSAGE_SUCCESS;
894 }
895 break;
896 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
897 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
898 u_int8_t * pQbuffer;
899 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
900
901 user_len=pcmdmessagefld->cmdmessage.Length;
902 /*check if data xfer length of this request will overflow my array qbuffer */
903 wqbuf_lastindex=acb->wqbuf_lastindex;
904 wqbuf_firstindex=acb->wqbuf_firstindex;
905 if(wqbuf_lastindex!=wqbuf_firstindex) {
906 arcmsr_post_Qbuffer(acb);
907 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
908 } else {
909 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
910 if(my_empty_len>=user_len) {
911 while(user_len>0) {
912 /*copy srb data to wqbuffer*/
913 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
914 memcpy(pQbuffer, ptmpuserbuffer, 1);
915 acb->wqbuf_lastindex++;
916 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
917 /*if last index number set it to 0 */
918 ptmpuserbuffer++;
919 user_len--;
920 }
921 /*post fist Qbuffer*/
922 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
923 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
924 arcmsr_post_Qbuffer(acb);
925 }
926 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
927 } else {
928 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
929 }
930 }
931 retvalue=ARCMSR_MESSAGE_SUCCESS;
932 }
933 break;
934 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
935 u_int8_t * pQbuffer=acb->rqbuffer;
936
937 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
938 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
939 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
940 /*signature, let IOP331 know data has been readed */
941 }
942 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
943 acb->rqbuf_firstindex=0;
944 acb->rqbuf_lastindex=0;
945 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
946 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
947 retvalue=ARCMSR_MESSAGE_SUCCESS;
948 }
949 break;
950 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
951 {
952 u_int8_t * pQbuffer=acb->wqbuffer;
953
954 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
955 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
956 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
957 /*signature, let IOP331 know data has been readed */
958 }
959 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READED);
960 acb->wqbuf_firstindex=0;
961 acb->wqbuf_lastindex=0;
962 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
963 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
964 retvalue=ARCMSR_MESSAGE_SUCCESS;
965 }
966 break;
967 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
968 u_int8_t * pQbuffer;
969
970 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
971 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
972 CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
973 /*signature, let IOP331 know data has been readed */
974 }
975 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
976 |ACB_F_MESSAGE_RQBUFFER_CLEARED
977 |ACB_F_MESSAGE_WQBUFFER_READED);
978 acb->rqbuf_firstindex=0;
979 acb->rqbuf_lastindex=0;
980 acb->wqbuf_firstindex=0;
981 acb->wqbuf_lastindex=0;
982 pQbuffer=acb->rqbuffer;
983 memset(pQbuffer, 0, sizeof(struct QBUFFER));
984 pQbuffer=acb->wqbuffer;
985 memset(pQbuffer, 0, sizeof(struct QBUFFER));
986 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
987 retvalue=ARCMSR_MESSAGE_SUCCESS;
988 }
989 break;
990 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
991 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
992 retvalue=ARCMSR_MESSAGE_SUCCESS;
993 }
994 break;
995 case ARCMSR_MESSAGE_SAY_HELLO: {
996 u_int8_t * hello_string="Hello! I am ARCMSR";
997 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
998
999 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
1000 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1001 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1002 return ENOIOCTL;
1003 }
1004 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1005 retvalue=ARCMSR_MESSAGE_SUCCESS;
1006 }
1007 break;
1008 case ARCMSR_MESSAGE_SAY_GOODBYE: {
1009 arcmsr_iop_parking(acb);
1010 retvalue=ARCMSR_MESSAGE_SUCCESS;
1011 }
1012 break;
1013 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1014 arcmsr_flush_adapter_cache(acb);
1015 retvalue=ARCMSR_MESSAGE_SUCCESS;
1016 }
1017 break;
1018 }
1019 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1020 return retvalue;
1021 }
1022 /*
1023 **************************************************************************
1024 **************************************************************************
1025 */
1026 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1027 {
1028 struct CommandControlBlock *srb=NULL;
1029 u_int32_t workingsrb_startindex, workingsrb_doneindex;
1030
1031 ARCMSR_LOCK_ACQUIRE(&acb->workingQ_start_lock);
1032 workingsrb_doneindex=acb->workingsrb_doneindex;
1033 workingsrb_startindex=acb->workingsrb_startindex;
1034 srb=acb->srbworkingQ[workingsrb_startindex];
1035 workingsrb_startindex++;
1036 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
1037 if(workingsrb_doneindex!=workingsrb_startindex) {
1038 acb->workingsrb_startindex=workingsrb_startindex;
1039 } else {
1040 srb=NULL;
1041 }
1042 ARCMSR_LOCK_RELEASE(&acb->workingQ_start_lock);
1043 return(srb);
1044 }
1045 /*
1046 **************************************************************************
1047 **************************************************************************
1048 */
1049 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
1050 {
1051 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1052 int retvalue = 0, transfer_len = 0;
1053 char *buffer;
1054 uint32_t controlcode = (uint32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
1055 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
1056 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
1057 (uint32_t ) pccb->csio.cdb_io.cdb_bytes[8];
1058 /* 4 bytes: Areca io control code */
1059 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1060 buffer = pccb->csio.data_ptr;
1061 transfer_len = pccb->csio.dxfer_len;
1062 } else {
1063 retvalue = ARCMSR_MESSAGE_FAIL;
1064 goto message_out;
1065 }
1066 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1067 retvalue = ARCMSR_MESSAGE_FAIL;
1068 goto message_out;
1069 }
1070 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1071 switch(controlcode) {
1072 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1073 u_int8_t *pQbuffer;
1074 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1075 int32_t allxfer_len = 0;
1076
1077 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1078 && (allxfer_len < 1031)) {
1079 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1080 memcpy(ptmpQbuffer, pQbuffer, 1);
1081 acb->rqbuf_firstindex++;
1082 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1083 ptmpQbuffer++;
1084 allxfer_len++;
1085 }
1086 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1087 struct QBUFFER *prbuffer = (struct QBUFFER *) &acb->pmu->message_rbuffer;
1088 u_int8_t *iop_data = (u_int8_t *)prbuffer->data;
1089 int32_t iop_len;
1090
1091 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1092 iop_len =(u_int32_t)prbuffer->data_len;
1093 while (iop_len > 0) {
1094 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1095 memcpy(pQbuffer, iop_data, 1);
1096 acb->rqbuf_lastindex++;
1097 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1098 iop_data++;
1099 iop_len--;
1100 }
1101 CHIP_REG_WRITE32(inbound_doorbell,
1102 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1103 }
1104 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1105 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1106 retvalue=ARCMSR_MESSAGE_SUCCESS;
1107 }
1108 break;
1109 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1110 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1111 u_int8_t *pQbuffer;
1112 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1113
1114 user_len = pcmdmessagefld->cmdmessage.Length;
1115 wqbuf_lastindex = acb->wqbuf_lastindex;
1116 wqbuf_firstindex = acb->wqbuf_firstindex;
1117 if (wqbuf_lastindex != wqbuf_firstindex) {
1118 arcmsr_post_Qbuffer(acb);
1119 /* has error report sensedata */
1120 if(&pccb->csio.sense_data) {
1121 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
1122 /* Valid,ErrorCode */
1123 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
1124 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
1125 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
1126 /* AdditionalSenseLength */
1127 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
1128 /* AdditionalSenseCode */
1129 }
1130 retvalue = ARCMSR_MESSAGE_FAIL;
1131 } else {
1132 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
1133 &(ARCMSR_MAX_QBUFFER - 1);
1134 if (my_empty_len >= user_len) {
1135 while (user_len > 0) {
1136 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
1137 memcpy(pQbuffer, ptmpuserbuffer, 1);
1138 acb->wqbuf_lastindex++;
1139 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1140 ptmpuserbuffer++;
1141 user_len--;
1142 }
1143 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1144 acb->acb_flags &=
1145 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1146 arcmsr_post_Qbuffer(acb);
1147 }
1148 } else {
1149 /* has error report sensedata */
1150 if(&pccb->csio.sense_data) {
1151 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
1152 /* Valid,ErrorCode */
1153 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
1154 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
1155 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
1156 /* AdditionalSenseLength */
1157 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
1158 /* AdditionalSenseCode */
1159 }
1160 retvalue = ARCMSR_MESSAGE_FAIL;
1161 }
1162 }
1163 }
1164 break;
1165 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1166 u_int8_t *pQbuffer = acb->rqbuffer;
1167
1168 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1169 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1170 CHIP_REG_WRITE32(inbound_doorbell
1171 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1172 }
1173 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1174 acb->rqbuf_firstindex = 0;
1175 acb->rqbuf_lastindex = 0;
1176 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1177 pcmdmessagefld->cmdmessage.ReturnCode =
1178 ARCMSR_MESSAGE_RETURNCODE_OK;
1179 }
1180 break;
1181 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1182 u_int8_t *pQbuffer = acb->wqbuffer;
1183
1184 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1185 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1186 CHIP_REG_WRITE32(inbound_doorbell
1187 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1188 }
1189 acb->acb_flags |=
1190 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1191 ACB_F_MESSAGE_WQBUFFER_READED);
1192 acb->wqbuf_firstindex = 0;
1193 acb->wqbuf_lastindex = 0;
1194 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1195 pcmdmessagefld->cmdmessage.ReturnCode =
1196 ARCMSR_MESSAGE_RETURNCODE_OK;
1197 }
1198 break;
1199 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1200 u_int8_t *pQbuffer;
1201
1202 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1203 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1204 CHIP_REG_WRITE32(inbound_doorbell
1205 , ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1206 }
1207 acb->acb_flags |=
1208 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1209 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1210 | ACB_F_MESSAGE_WQBUFFER_READED);
1211 acb->rqbuf_firstindex = 0;
1212 acb->rqbuf_lastindex = 0;
1213 acb->wqbuf_firstindex = 0;
1214 acb->wqbuf_lastindex = 0;
1215 pQbuffer = acb->rqbuffer;
1216 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1217 pQbuffer = acb->wqbuffer;
1218 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1219 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1220 }
1221 break;
1222 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1223 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1224 }
1225 break;
1226 case ARCMSR_MESSAGE_SAY_HELLO: {
1227 int8_t * hello_string = "Hello! I am ARCMSR";
1228
1229 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1230 , (int16_t)strlen(hello_string));
1231 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1232 }
1233 break;
1234 case ARCMSR_MESSAGE_SAY_GOODBYE:
1235 arcmsr_iop_parking(acb);
1236 break;
1237 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1238 arcmsr_flush_adapter_cache(acb);
1239 break;
1240 default:
1241 retvalue = ARCMSR_MESSAGE_FAIL;
1242 }
1243 message_out:
1244 return retvalue;
1245 }
1246 /*
1247 *********************************************************************
1248 *********************************************************************
1249 */
/*
*********************************************************************
** bus_dmamap_load() callback: turn the mapped segment list into an
** adapter command and post it.  Any pre-flight failure (mapping
** error, too many segments, bus reset in progress, gone RAID volume,
** aborted ccb, full post queue) completes the srb immediately with
** an appropriate CAM status instead of posting it.
*********************************************************************
*/
static void arcmsr_executesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
	union ccb * pccb;
	int target, lun;

	pccb=srb->pccb;
	target=pccb->ccb_h.target_id;
	lun=pccb->ccb_h.target_lun;
	if(error != 0) {
		/* DMA mapping failed; EFBIG is reported via CAM status only,
		** anything else is also logged */
		if(error != EFBIG) {
			printf("arcmsr%d: unexpected error %x returned from 'bus_dmamap_load' \n"
				, acb->pci_unit, error);
		}
		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			xpt_freeze_devq(pccb->ccb_h.path, /*count*/1);
			pccb->ccb_h.status |= (CAM_REQ_TOO_BIG|CAM_DEV_QFRZN);
		}
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
		/* more segments than the firmware S/G table can hold */
		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(acb->acb_flags & ACB_F_BUS_RESET) {
		printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
		u_int8_t block_cmd;

		/* low nibble 0x08/0x0a matches the SCSI READ/WRITE opcode
		** families; refuse media access to a gone volume */
		block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f;
		if(block_cmd==0x08 || block_cmd==0x0a) {
			printf("arcmsr%d:block 'read/write' command"
				"with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n"
				, acb->pci_unit, block_cmd, target, lun);
			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
			arcmsr_srb_complete(srb, 0);
			return;
		}
	}
	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		/* ccb was aborted/terminated while mapping; undo the map */
		if(nseg != 0) {
			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
		}
		arcmsr_srb_complete(srb, 0);
		return;
	}
	pccb->ccb_h.status |= CAM_SIM_QUEUED;
	if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
		/* adapter post queue full: return busy so CAM retries */
		pccb->ccb_h.status |= CAM_SCSI_BUSY;
		arcmsr_srb_complete(srb, 0);
		return;
	}
	arcmsr_build_srb(srb, dm_segs, nseg);
	arcmsr_post_srb(acb, srb);
	return;
}
1313 /*
1314 *****************************************************************************************
1315 *****************************************************************************************
1316 */
/*
*****************************************************************************************
** Try to abort the command carried by 'abortccb'.  Scans the srb pool
** for a started srb bound to that ccb; if found, marks it aborted,
** masks all outbound interrupts, polls the reply FIFO until the srb
** completes, then restores the interrupt mask.  Returns TRUE if the
** command was found and aborted, FALSE if it is not outstanding.
*****************************************************************************************
*/
static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
{
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
	u_int32_t intmask_org, mask;
	int i=0;

	acb->num_aborts++;
	/*
	***************************************************************************
	** It is the upper layer do abort command this lock just prior to calling us.
	** First determine if we currently own this command.
	** Start by searching the device queue. If not found
	** at all, and the system wanted us to just abort the
	** command return success.
	***************************************************************************
	*/
	if(acb->srboutstandingcount!=0) {
		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
			srb=acb->psrb_pool[i];
			if(srb->startdone==ARCMSR_SRB_START) {
				if(srb->pccb==abortccb) {
					/* flag it so the completion path reports abort */
					srb->startdone=ARCMSR_SRB_ABORTED;
					printf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
						"outstanding command \n"
						, acb->pci_unit, abortccb->ccb_h.target_id
						, abortccb->ccb_h.target_lun, srb);
					goto abort_outstanding_cmd;
				}
			}
		}
	}
	return(FALSE);
abort_outstanding_cmd:
	/* do not talk to iop 331 abort command */
	UDELAY(3000*1000);/*wait for 3 sec for all command done*/
	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
	/* reap completions by polling while interrupts are masked */
	arcmsr_polling_srbdone(acb, srb);
	/* enable all outbound interrupt */
	mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask);
	return (TRUE);
}
1362 /*
1363 ****************************************************************************
1364 ****************************************************************************
1365 */
1366 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
1367 {
1368 int retry=0;
1369
1370 acb->num_resets++;
1371 acb->acb_flags |=ACB_F_BUS_RESET;
1372 while(acb->srboutstandingcount!=0 && retry < 400) {
1373 arcmsr_interrupt((void *)acb);
1374 UDELAY(25000);
1375 retry++;
1376 }
1377 arcmsr_iop_reset(acb);
1378 acb->acb_flags &= ~ACB_F_BUS_RESET;
1379 return;
1380 }
1381 /*
1382 **************************************************************************
1383 **************************************************************************
1384 */
1385 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1386 union ccb * pccb)
1387 {
1388 pccb->ccb_h.status |= CAM_REQ_CMP;
1389 switch (pccb->csio.cdb_io.cdb_bytes[0]) {
1390 case INQUIRY: {
1391 unsigned char inqdata[36];
1392 char *buffer=pccb->csio.data_ptr;;
1393
1394 if (pccb->ccb_h.target_lun) {
1395 pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
1396 xpt_done(pccb);
1397 return;
1398 }
1399 inqdata[0] = T_PROCESSOR;
1400 /* Periph Qualifier & Periph Dev Type */
1401 inqdata[1] = 0;
1402 /* rem media bit & Dev Type Modifier */
1403 inqdata[2] = 0;
1404 /* ISO, ECMA, & ANSI versions */
1405 inqdata[4] = 31;
1406 /* length of additional data */
1407 strncpy(&inqdata[8], "Areca ", 8);
1408 /* Vendor Identification */
1409 strncpy(&inqdata[16], "RAID controller ", 16);
1410 /* Product Identification */
1411 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1412 memcpy(buffer, inqdata, sizeof(inqdata));
1413 xpt_done(pccb);
1414 }
1415 break;
1416 case WRITE_BUFFER:
1417 case READ_BUFFER: {
1418 if (arcmsr_iop_message_xfer(acb, pccb)) {
1419 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1420 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1421 }
1422 xpt_done(pccb);
1423 }
1424 break;
1425 default:
1426 xpt_done(pccb);
1427 }
1428 }
1429 /*
1430 *********************************************************************
1431 *********************************************************************
1432 */
/*
*********************************************************************
** CAM SIM action entry point: dispatch every ccb func_code the SIM
** supports.  XPT_SCSI_IO maps the data buffer (virtual single-buffer
** via bus_dmamap_load, physical S/G passed straight through) and
** hands off to arcmsr_executesrb(); target 16 is the virtual device
** for IOP message transfer.
*********************************************************************
*/
static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
{
	struct AdapterControlBlock * acb;

	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
	if(acb==NULL) {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		return;
	}
	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO: {
		struct CommandControlBlock *srb;
		int target=pccb->ccb_h.target_id;

		if(target == 16) {
			/* virtual device for iop message transfer */
			arcmsr_handle_virtual_command(acb, pccb);
			return;
		}
		if((srb=arcmsr_get_freesrb(acb)) == NULL) {
			/* srb pool exhausted */
			pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
		pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
		srb->pccb=pccb;
		if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
				/* Single buffer */
				if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
					/* Buffer is virtual */
					u_int32_t error, s;

					s=splsoftvm();
					error = bus_dmamap_load(acb->dm_segs_dmat
						, srb->dm_segs_dmamap
						, pccb->csio.data_ptr
						, pccb->csio.dxfer_len
						, arcmsr_executesrb, srb, /*flags*/0);
					if(error == EINPROGRESS) {
						/* mapping deferred; callback will fire later */
						xpt_freeze_simq(acb->psim, 1);
						pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					/* Buffer is physical */
					panic("arcmsr: CAM_DATA_PHYS not supported");
				}
			} else {
				/* Scatter/gather list */
				struct bus_dma_segment *segs;

				if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					/* NOTE(review): srb came from the srb pool via
					** arcmsr_get_freesrb(), not malloc(); freeing it
					** to M_DEVBUF here looks wrong — verify against
					** the pool allocation in arcmsr_map_freesrb() */
					free(srb, M_DEVBUF);
					return;
				}
				segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
				arcmsr_executesrb(srb, segs, pccb->csio.sglist_cnt, 0);
			}
		} else {
			/* no data phase */
			arcmsr_executesrb(srb, NULL, 0, 0);
		}
		break;
	}
	case XPT_TARGET_IO: {
		/* target mode not yet support vendor specific commands. */
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_PATH_INQ: {
		/* describe the SIM's capabilities to CAM */
		struct ccb_pathinq *cpi= &pccb->cpi;

		cpi->version_num=1;
		cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt=0;
		cpi->hba_misc=0;
		cpi->hba_eng_cnt=0;
		cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */
		cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */
		cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
		cpi->bus_id=cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number=cam_sim_unit(psim);
		cpi->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT: {
		union ccb *pabort_ccb;

		pabort_ccb=pccb->cab.abort_ccb;
		switch (pabort_ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
		case XPT_CONT_TARGET_IO:
			if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
				pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
				xpt_done(pabort_ccb);
				pccb->ccb_h.status |= CAM_REQ_CMP;
			} else {
				xpt_print_path(pabort_ccb->ccb_h.path);
				printf("Not found\n");
				pccb->ccb_h.status |= CAM_PATH_INVALID;
			}
			break;
		case XPT_SCSI_IO:
			pccb->ccb_h.status |= CAM_UA_ABORT;
			break;
		default:
			pccb->ccb_h.status |= CAM_REQ_INVALID;
			break;
		}
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_BUS:
	case XPT_RESET_DEV: {
		u_int32_t i;

		arcmsr_bus_reset(acb);
		/* settle delay after reset (500 x 1ms) */
		for (i=0; i < 500; i++) {
			DELAY(1000);
		}
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_TERM_IO: {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS: {
		struct ccb_trans_settings *cts;

		if(pccb->ccb_h.target_id == 16) {
			/* virtual device has no transport settings */
			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
			xpt_done(pccb);
			break;
		}
		cts= &pccb->cts;
		cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
		cts->sync_period=3;
		cts->sync_offset=32;
		cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
		cts->valid=CCB_TRANS_SYNC_RATE_VALID
			| CCB_TRANS_SYNC_OFFSET_VALID
			| CCB_TRANS_BUS_WIDTH_VALID
			| CCB_TRANS_DISC_VALID
			| CCB_TRANS_TQ_VALID;
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS: {
		pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY: {
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		if(pccb->ccb_h.target_id == 16) {
			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
			xpt_done(pccb);
			break;
		}
		ccg= &pccb->ccg;
		if (ccg->block_size == 0) {
			/* avoid divide-by-zero below */
			pccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}
		if(((1024L * 1024L)/ccg->block_size) < 0) {
			pccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}
		size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		/* >1GB volumes get the large-disk 255/63 geometry */
		if(size_mb > 1024 ) {
			ccg->heads=255;
			ccg->secs_per_track=63;
		} else {
			ccg->heads=64;
			ccg->secs_per_track=32;
		}
		secs_per_cylinder=ccg->heads * ccg->secs_per_track;
		ccg->cylinders=ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status |= CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
	return;
}
1642 /*
1643 **********************************************************************
1644 **********************************************************************
1645 */
1646 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
1647 {
1648 acb->acb_flags |= ACB_F_MSG_START_BGRB;
1649 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
1650 if(arcmsr_wait_msgint_ready(acb)) {
1651 printf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
1652 }
1653 return;
1654 }
1655 /*
1656 **********************************************************************
1657 **********************************************************************
1658 */
/*
**********************************************************************
** Poll the outbound reply FIFO for completed srbs (used while
** interrupts are masked, e.g. during abort).  'poll_srb' is the srb
** whose abort we are waiting for; once any completion is seen the
** loop only drains until the FIFO reads empty (0xFFFFFFFF).  Gives
** up after ~100 x 25ms retries with nothing reaped.
**********************************************************************
*/
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
{
	struct CommandControlBlock *srb;
	uint32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
	int id, lun;

polling_srb_retry:
	poll_count++;
	outbound_intstatus=CHIP_REG_READ32(outbound_intstatus) & acb->outbound_int_enable;
	CHIP_REG_WRITE32(outbound_intstatus, outbound_intstatus);/*clear interrupt*/
	while(1) {
		if((flag_srb=CHIP_REG_READ32(outbound_queueport))==0xFFFFFFFF) {
			if(poll_srb_done) {
				break;/*chip FIFO no ccb for completion already*/
			} else {
				UDELAY(25000);
				if(poll_count > 100) {
					break;
				}
				goto polling_srb_retry;
			}
		}
		/* check if command done with no error*/
		/* reply word is the srb physical address >> 5; convert back
		** to a virtual pointer via vir2phy_offset */
		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));
		/*frame must be 32 bytes aligned*/
		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
			if((srb->startdone==ARCMSR_SRB_ABORTED) && (srb==poll_srb)) {
				/* the srb we were asked to abort has completed */
				printf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
					"poll command abort successfully \n"
					, acb->pci_unit
					, srb->pccb->ccb_h.target_id
					, srb->pccb->ccb_h.target_lun, srb);
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				poll_srb_done=1;
				continue;
			}
			printf("arcmsr%d: polling get an illegal srb command done srb='%p'"
				"srboutstandingcount=%d \n"
				, acb->pci_unit
				, srb, acb->srboutstandingcount);
			continue;
		}
		id=srb->pccb->ccb_h.target_id;
		lun=srb->pccb->ccb_h.target_lun;
		if((flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR)==0) {
			/* clean completion: device is (back) in good state */
			if(acb->devstate[id][lun]==ARECA_RAID_GONE) {
				acb->devstate[id][lun]=ARECA_RAID_GOOD;
			}
			srb->pccb->ccb_h.status |= CAM_REQ_CMP;
			arcmsr_srb_complete(srb, 1);
		} else {
			/* map firmware DeviceStatus onto CAM status */
			switch(srb->arcmsr_cdb.DeviceStatus) {
			case ARCMSR_DEV_SELECT_TIMEOUT: {
					acb->devstate[id][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
					arcmsr_srb_complete(srb, 1);
				}
				break;
			case ARCMSR_DEV_ABORTED:
			case ARCMSR_DEV_INIT_FAIL: {
					acb->devstate[id][lun]=ARECA_RAID_GONE;
					srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
					arcmsr_srb_complete(srb, 1);
				}
				break;
			case SCSISTAT_CHECK_CONDITION: {
					acb->devstate[id][lun]=ARECA_RAID_GOOD;
					arcmsr_report_sense_info(srb);
					arcmsr_srb_complete(srb, 1);
				}
				break;
			default:
				printf("arcmsr%d: scsi id=%d lun=%d"
					"polling and getting command error done"
					", but got unknow DeviceStatus=0x%x \n"
					, acb->pci_unit, id, lun, srb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun]=ARECA_RAID_GONE;
				srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
				/*unknow error or crc error just for retry*/
				arcmsr_srb_complete(srb, 1);
				break;
			}
		}
	} /*drain reply FIFO*/
	return;
}
1746 /*
1747 **********************************************************************
1748 ** get firmware miscellaneous data
1749 **********************************************************************
1750 */
1751 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1752 {
1753 char *acb_firm_model=acb->firm_model;
1754 char *acb_firm_version=acb->firm_version;
1755 size_t iop_firm_model=offsetof(struct MessageUnit,message_rwbuffer[15]); /*firm_model,15,60-67*/
1756 size_t iop_firm_version=offsetof(struct MessageUnit,message_rwbuffer[17]); /*firm_version,17,68-83*/
1757 int i;
1758
1759 CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1760 if(arcmsr_wait_msgint_ready(acb)) {
1761 printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n"
1762 , acb->pci_unit);
1763 }
1764 i=0;
1765 while(i<8) {
1766 *acb_firm_model=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_model+i);
1767 /* 8 bytes firm_model, 15, 60-67*/
1768 acb_firm_model++;
1769 i++;
1770 }
1771 i=0;
1772 while(i<16) {
1773 *acb_firm_version=bus_space_read_1(acb->btag, acb->bhandle, iop_firm_version+i);
1774 /* 16 bytes firm_version, 17, 68-83*/
1775 acb_firm_version++;
1776 i++;
1777 }
1778 printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
1779 printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
1780 acb->firm_request_len=CHIP_REG_READ32(message_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
1781 acb->firm_numbers_queue=CHIP_REG_READ32(message_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
1782 acb->firm_sdram_size=CHIP_REG_READ32(message_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
1783 acb->firm_ide_channels=CHIP_REG_READ32(message_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
1784 return;
1785 }
1786 /*
1787 **********************************************************************
1788 ** start background rebulid
1789 **********************************************************************
1790 */
/*
**********************************************************************
** Bring the IOP online: busy-wait for the firmware-OK bit, mask
** message0 interrupts while querying firmware config and starting
** background rebuild, clear any pending doorbell/Qbuffer state, then
** enable post-queue and doorbell interrupts and mark the acb inited.
**********************************************************************
*/
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	u_int32_t intmask_org, mask, outbound_doorbell, firmware_state=0;

	/* spin until firmware reports ready (no timeout here) */
	do {
		firmware_state=CHIP_REG_READ32(outbound_msgaddr1);
	} while((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)==0);
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	CHIP_REG_WRITE32(outbound_intmask, intmask_org);
	intmask_org=CHIP_REG_READ32(outbound_intmask)|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	arcmsr_get_firmware_spec(acb);
	arcmsr_start_adapter_bgrb(acb);
	/* clear Qbuffer if door bell ringed */
	outbound_doorbell=CHIP_REG_READ32(outbound_doorbell);
	CHIP_REG_WRITE32(outbound_doorbell, outbound_doorbell);/*clear interrupt */
	CHIP_REG_WRITE32(inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
	/* enable outbound Post Queue, outbound message0, outbell doorbell Interrupt */
	mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	CHIP_REG_WRITE32(outbound_intmask, intmask_org & mask);
	/* remember which outbound interrupts are enabled (low byte) */
	acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	acb->acb_flags |=ACB_F_IOP_INITED;
	return;
}
1814 /*
1815 **********************************************************************
1816 **********************************************************************
1817 */
/*
 * bus_dmamap_load() callback for the srb pool.  Carves the single DMA
 * segment into ARCMSR_MAX_FREESRB_NUM CommandControlBlocks, records each
 * srb's 32-byte-shifted physical address (the token format the IOP post
 * queue uses), and, if the pool's physical address has a non-zero high
 * 32 bits, tells the IOP about it via a SET_CONFIG message.  On any
 * failure ACB_F_MAPFREESRB_FAILD is set; arcmsr_initialize() checks it.
 */
static void arcmsr_map_freesrb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct AdapterControlBlock *acb=arg;
	struct CommandControlBlock *srb_tmp;
	u_int8_t * dma_memptr;
	u_int32_t i, srb_phyaddr_hi32;
	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;

	dma_memptr=acb->uncacheptr;
	srb_phyaddr=segs->ds_addr; /* We suppose bus_addr_t high part always 0 here*/
	/*
	 * Round both the virtual and physical start up to a 32-byte boundary;
	 * the IOP addresses srbs as (physaddr >> 5).
	 * NOTE(review): each pointer is rounded by its own low bits, which is
	 * only consistent if both are misaligned by the same amount -- they
	 * come from the same allocation, so this presumably holds; confirm.
	 */
	if(((unsigned long)dma_memptr & 0x1F)!=0) {
		dma_memptr=dma_memptr+(0x20-((unsigned long)dma_memptr & 0x1F));
		srb_phyaddr=srb_phyaddr+(0x20-((unsigned long)srb_phyaddr & 0x1F));
	}
	srb_tmp=(struct CommandControlBlock *)dma_memptr;
	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
		/*srb address must 32 (0x20) boundary*/
		if(((unsigned long)srb_tmp & 0x1F)==0) {
			/* each srb gets its own map for mapping CCB data later */
			if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
				acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
				printf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
				return;
			}
			/* token posted to/read from the IOP queue is physaddr >> 5 */
			srb_tmp->cdb_shifted_phyaddr=srb_phyaddr >> 5;
			srb_tmp->acb=acb;
			acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
			srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
		} else {
			/* reached only if sizeof(struct CommandControlBlock) is not
			 * a multiple of 0x20, which would break the token scheme */
			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
			printf("arcmsr%d: dma_memptr=%p i=%d"
			"this srb cross 32 bytes boundary ignored srb_tmp=%p \n"
			, acb->pci_unit, dma_memptr, i, srb_tmp);
			return;
		}
		srb_tmp++;
	}
	/*
	 * Virtual-to-physical delta for the pool: both pointers advanced past
	 * the last srb in lockstep, so the difference equals the delta of the
	 * pool start.  Used to turn posted physical tokens back into pointers.
	 */
	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
	/*
	********************************************************************
	** here we need to tell iop 331 our freesrb.HighPart
	** if freesrb.HighPart is not zero
	********************************************************************
	*/
	srb_phyaddr_hi32=(uint32_t) ((srb_phyaddr>>16)>>16);
	if(srb_phyaddr_hi32!=0) {
		CHIP_REG_WRITE32(message_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
		CHIP_REG_WRITE32(message_rwbuffer[1], srb_phyaddr_hi32);
		CHIP_REG_WRITE32(inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
		if(arcmsr_wait_msgint_ready(acb)) {
			printf("arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
		}
	}
	return;
}
1872 /*
1873 ************************************************************************
1874 **
1875 **
1876 ************************************************************************
1877 */
1878 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
1879 {
1880 /* remove the control device */
1881 if(acb->ioctl_dev != NULL) {
1882 destroy_dev(acb->ioctl_dev);
1883 }
1884 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
1885 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
1886 bus_dma_tag_destroy(acb->srb_dmat);
1887 bus_dma_tag_destroy(acb->dm_segs_dmat);
1888 bus_dma_tag_destroy(acb->parent_dmat);
1889 return;
1890 }
1891 /*
1892 ************************************************************************
1893 ************************************************************************
1894 */
1895 static u_int32_t arcmsr_initialize(device_t dev)
1896 {
1897 struct AdapterControlBlock *acb=device_get_softc(dev);
1898 u_int32_t intmask_org, rid=PCIR_BAR(0);
1899 vm_offset_t mem_base;
1900 u_int16_t pci_command;
1901 int i, j;
1902
1903 #if __FreeBSD_version >= 502010
1904 if(bus_dma_tag_create( /*parent*/ NULL,
1905 /*alignemnt*/ 1,
1906 /*boundary*/ 0,
1907 /*lowaddr*/ BUS_SPACE_MAXADDR,
1908 /*highaddr*/ BUS_SPACE_MAXADDR,
1909 /*filter*/ NULL,
1910 /*filterarg*/ NULL,
1911 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
1912 /*nsegments*/ BUS_SPACE_UNRESTRICTED,
1913 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1914 /*flags*/ 0,
1915 /*lockfunc*/ NULL,
1916 /*lockarg*/ NULL,
1917 &acb->parent_dmat) != 0)
1918 #else
1919 if(bus_dma_tag_create( /*parent*/ NULL,
1920 /*alignemnt*/ 1,
1921 /*boundary*/ 0,
1922 /*lowaddr*/ BUS_SPACE_MAXADDR,
1923 /*highaddr*/ BUS_SPACE_MAXADDR,
1924 /*filter*/ NULL,
1925 /*filterarg*/ NULL,
1926 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
1927 /*nsegments*/ BUS_SPACE_UNRESTRICTED,
1928 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1929 /*flags*/ 0,
1930 &acb->parent_dmat) != 0)
1931 #endif
1932 {
1933 printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
1934 return ENOMEM;
1935 }
1936 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
1937 #if __FreeBSD_version >= 502010
1938 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
1939 /*alignment*/ 1,
1940 /*boundary*/ 0,
1941 /*lowaddr*/ BUS_SPACE_MAXADDR,
1942 /*highaddr*/ BUS_SPACE_MAXADDR,
1943 /*filter*/ NULL,
1944 /*filterarg*/ NULL,
1945 /*maxsize*/ MAXBSIZE,
1946 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
1947 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1948 /*flags*/ 0,
1949 /*lockfunc*/ busdma_lock_mutex,
1950 /*lockarg*/ &Giant,
1951 &acb->dm_segs_dmat) != 0)
1952 #else
1953 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
1954 /*alignment*/ 1,
1955 /*boundary*/ 0,
1956 /*lowaddr*/ BUS_SPACE_MAXADDR,
1957 /*highaddr*/ BUS_SPACE_MAXADDR,
1958 /*filter*/ NULL,
1959 /*filterarg*/ NULL,
1960 /*maxsize*/ MAXBSIZE,
1961 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
1962 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1963 /*flags*/ 0,
1964 &acb->dm_segs_dmat) != 0)
1965 #endif
1966 {
1967 bus_dma_tag_destroy(acb->parent_dmat);
1968 printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
1969 return ENOMEM;
1970 }
1971 /* DMA tag for our srb structures.... Allocate the freesrb memory */
1972 #if __FreeBSD_version >= 502010
1973 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
1974 /*alignment*/ 1,
1975 /*boundary*/ 0,
1976 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
1977 /*highaddr*/ BUS_SPACE_MAXADDR,
1978 /*filter*/ NULL,
1979 /*filterarg*/ NULL,
1980 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE,
1981 /*nsegments*/ 1,
1982 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1983 /*flags*/ 0,
1984 /*lockfunc*/ NULL,
1985 /*lockarg*/ NULL,
1986 &acb->srb_dmat) != 0)
1987 #else
1988 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
1989 /*alignment*/ 1,
1990 /*boundary*/ 0,
1991 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
1992 /*highaddr*/ BUS_SPACE_MAXADDR,
1993 /*filter*/ NULL,
1994 /*filterarg*/ NULL,
1995 /*maxsize*/ ARCMSR_SRBS_POOL_SIZE,
1996 /*nsegments*/ 1,
1997 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
1998 /*flags*/ 0,
1999 &acb->srb_dmat) != 0)
2000 #endif
2001 {
2002 bus_dma_tag_destroy(acb->dm_segs_dmat);
2003 bus_dma_tag_destroy(acb->parent_dmat);
2004 printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", acb->pci_unit);
2005 return ENXIO;
2006 }
2007 /* Allocation for our srbs */
2008 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr
2009 , BUS_DMA_WAITOK | BUS_DMA_COHERENT, &acb->srb_dmamap) != 0) {
2010 bus_dma_tag_destroy(acb->srb_dmat);
2011 bus_dma_tag_destroy(acb->dm_segs_dmat);
2012 bus_dma_tag_destroy(acb->parent_dmat);
2013 printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", acb->pci_unit);
2014 return ENXIO;
2015 }
2016 /* And permanently map them */
2017 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr
2018 , ARCMSR_SRBS_POOL_SIZE, arcmsr_map_freesrb, acb, /*flags*/0)) {
2019 bus_dma_tag_destroy(acb->srb_dmat);
2020 bus_dma_tag_destroy(acb->dm_segs_dmat);
2021 bus_dma_tag_destroy(acb->parent_dmat);
2022 printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", acb->pci_unit);
2023 return ENXIO;
2024 }
2025 pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
2026 pci_command |= PCIM_CMD_BUSMASTEREN;
2027 pci_command |= PCIM_CMD_PERRESPEN;
2028 pci_command |= PCIM_CMD_MWRICEN;
2029 /* Enable Busmaster/Mem */
2030 pci_command |= PCIM_CMD_MEMEN;
2031 pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
2032 acb->sys_res_arcmsr=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, 0x1000, RF_ACTIVE);
2033 if(acb->sys_res_arcmsr == NULL) {
2034 arcmsr_free_resource(acb);
2035 printf("arcmsr%d: bus_alloc_resource failure!\n", acb->pci_unit);
2036 return ENOMEM;
2037 }
2038 if(rman_get_start(acb->sys_res_arcmsr) <= 0) {
2039 arcmsr_free_resource(acb);
2040 printf("arcmsr%d: rman_get_start failure!\n", acb->pci_unit);
2041 return ENXIO;
2042 }
2043 mem_base=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr);
2044 if(mem_base==0) {
2045 arcmsr_free_resource(acb);
2046 printf("arcmsr%d: rman_get_virtual failure!\n", acb->pci_unit);
2047 return ENXIO;
2048 }
2049 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
2050 arcmsr_free_resource(acb);
2051 printf("arcmsr%d: map free srb failure!\n", acb->pci_unit);
2052 return ENXIO;
2053 }
2054 acb->btag=rman_get_bustag(acb->sys_res_arcmsr);
2055 acb->bhandle=rman_get_bushandle(acb->sys_res_arcmsr);
2056 acb->pmu=(struct MessageUnit *)mem_base;
2057 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
2058 |ACB_F_MESSAGE_RQBUFFER_CLEARED
2059 |ACB_F_MESSAGE_WQBUFFER_READED);
2060 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2061 /*
2062 ********************************************************************
2063 ** init raid volume state
2064 ********************************************************************
2065 */
2066 for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
2067 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
2068 acb->devstate[i][j]=ARECA_RAID_GOOD;
2069 }
2070 }
2071 /* disable iop all outbound interrupt */
2072 intmask_org=CHIP_REG_READ32(outbound_intmask);
2073 CHIP_REG_WRITE32(outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
2074 arcmsr_iop_init(acb);
2075 return(0);
2076 }
2077 /*
2078 ************************************************************************
2079 ************************************************************************
2080 */
2081 static u_int32_t arcmsr_attach(device_t dev)
2082 {
2083 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
2084 u_int32_t unit=device_get_unit(dev);
2085 struct ccb_setasync csa;
2086 struct cam_devq *devq; /* Device Queue to use for this SIM */
2087 struct resource *irqres;
2088 int rid;
2089
2090 if(acb == NULL) {
2091 printf("arcmsr%d: cannot allocate softc\n", unit);
2092 return (ENOMEM);
2093 }
2094 bzero(acb, sizeof(struct AdapterControlBlock));
2095 if(arcmsr_initialize(dev)) {
2096 printf("arcmsr%d: initialize failure!\n", unit);
2097 return ENXIO;
2098 }
2099 /* After setting up the adapter, map our interrupt */
2100 rid=0;
2101 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
2102 if(irqres == NULL ||
2103 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE
2104 , arcmsr_interrupt, acb, &acb->ih)) {
2105 arcmsr_free_resource(acb);
2106 printf("arcmsr%d: unable to register interrupt handler!\n", unit);
2107 return ENXIO;
2108 }
2109 acb->irqres=irqres;
2110 acb->pci_dev=dev;
2111 acb->pci_unit=unit;
2112 /*
2113 * Now let the CAM generic SCSI layer find the SCSI devices on
2114 * the bus * start queue to reset to the idle loop. *
2115 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
2116 * max_sim_transactions
2117 */
2118 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
2119 if(devq == NULL) {
2120 arcmsr_free_resource(acb);
2121 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
2122 printf("arcmsr%d: cam_simq_alloc failure!\n", unit);
2123 return ENXIO;
2124 }
2125 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll
2126 , "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
2127 if(acb->psim == NULL) {
2128 arcmsr_free_resource(acb);
2129 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
2130 cam_simq_free(devq);
2131 printf("arcmsr%d: cam_sim_alloc failure!\n", unit);
2132 return ENXIO;
2133 }
2134 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
2135 arcmsr_free_resource(acb);
2136 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
2137 cam_sim_free(acb->psim, /*free_devq*/TRUE);
2138 printf("arcmsr%d: xpt_bus_register failure!\n", unit);
2139 return ENXIO;
2140 }
2141 if(xpt_create_path(&acb->ppath, /* periph */ NULL
2142 , cam_sim_path(acb->psim)
2143 , CAM_TARGET_WILDCARD
2144 , CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2145 arcmsr_free_resource(acb);
2146 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
2147 xpt_bus_deregister(cam_sim_path(acb->psim));
2148 cam_sim_free(acb->psim, /* free_simq */ TRUE);
2149 printf("arcmsr%d: xpt_create_path failure!\n", unit);
2150 return ENXIO;
2151 }
2152 ARCMSR_LOCK_INIT(&acb->workingQ_done_lock, "arcmsr done working Q lock");
2153 ARCMSR_LOCK_INIT(&acb->workingQ_start_lock, "arcmsr start working Q lock");
2154 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
2155 /*
2156 ****************************************************
2157 */
2158 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
2159 csa.ccb_h.func_code=XPT_SASYNC_CB;
2160 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
2161 csa.callback=arcmsr_async;
2162 csa.callback_arg=acb->psim;
2163 xpt_action((union ccb *)&csa);
2164 /* Create the control device. */
2165 acb->ioctl_dev=make_dev(&arcmsr_cdevsw
2166 , unit
2167 , UID_ROOT
2168 , GID_WHEEL /* GID_OPERATOR */
2169 , S_IRUSR | S_IWUSR
2170 , "arcmsr%d", unit);
2171 #if __FreeBSD_version < 503000
2172 acb->ioctl_dev->si_drv1=acb;
2173 #endif
2174 #if __FreeBSD_version > 500005
2175 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
2176 #endif
2177 return 0;
2178 }
2179 /*
2180 ************************************************************************
2181 ************************************************************************
2182 */
2183 static u_int32_t arcmsr_probe(device_t dev)
2184 {
2185 u_int32_t id;
2186 static char buf[256];
2187 char *type;
2188 int raid6 = 1;
2189
2190 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
2191 return (ENXIO);
2192 }
2193 switch(id=pci_get_devid(dev)) {
2194 case PCIDevVenIDARC1110:
2195 case PCIDevVenIDARC1210:
2196 raid6 = 0;
2197 /*FALLTHRU*/
2198 case PCIDevVenIDARC1120:
2199 case PCIDevVenIDARC1130:
2200 case PCIDevVenIDARC1160:
2201 case PCIDevVenIDARC1170:
2202 case PCIDevVenIDARC1220:
2203 case PCIDevVenIDARC1230:
2204 case PCIDevVenIDARC1260:
2205 case PCIDevVenIDARC1270:
2206 case PCIDevVenIDARC1280:
2207 type = "SATA";
2208 break;
2209 case PCIDevVenIDARC1380:
2210 case PCIDevVenIDARC1381:
2211 case PCIDevVenIDARC1680:
2212 case PCIDevVenIDARC1681:
2213 type = "SAS";
2214 break;
2215 default:
2216 type = "X-TYPE";
2217 break;
2218 }
2219 sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : "");
2220 device_set_desc_copy(dev, buf);
2221 return 0;
2222 }
2223 /*
2224 ************************************************************************
2225 ************************************************************************
2226 */
/*
 * Quiesce the adapter for shutdown/detach: stop background rebuild,
 * flush the adapter cache, mask all outbound interrupts, then drain
 * outstanding srbs -- first by polling the ISR (up to 256 * 25 ms),
 * then by aborting whatever remains and completing it back to CAM
 * with CAM_REQ_ABORTED.
 */
static void arcmsr_shutdown(device_t dev)
{
	u_int32_t i, poll_count=0;
	u_int32_t intmask_org;
	struct CommandControlBlock *srb;
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	/* stop adapter background rebuild */
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	/* disable all outbound interrupt */
	intmask_org=CHIP_REG_READ32(outbound_intmask);
	CHIP_REG_WRITE32(outbound_intmask, (intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE));
	/* abort all outstanding command */
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	if(acb->srboutstandingcount!=0) {
		/* interrupts are masked, so poll the handler ourselves */
		while((acb->srboutstandingcount!=0) && (poll_count < 256)) {
			arcmsr_interrupt((void *)acb);
			UDELAY(25000);
			poll_count++;
		}
		if(acb->srboutstandingcount!=0) {
			arcmsr_abort_allcmd(acb);
			/*clear all outbound posted Q*/
			for(i=0;i<ARCMSR_MAX_OUTSTANDING_CMD;i++) {
				CHIP_REG_READ32(outbound_queueport);
			}
			/* complete any srb still marked in-flight back to CAM */
			for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
				srb=acb->psrb_pool[i];
				if(srb->startdone==ARCMSR_SRB_START) {
					srb->startdone=ARCMSR_SRB_ABORTED;
					srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
					arcmsr_srb_complete(srb, 1);
				}
			}
		}
	}
	/* reset the working queue bookkeeping to its empty state */
	atomic_set_int(&acb->srboutstandingcount, 0);
	acb->workingsrb_doneindex=0;
	acb->workingsrb_startindex=0;
	return;
}
2270 /*
2271 ************************************************************************
2272 ************************************************************************
2273 */
/*
 * Device detach: quiesce the hardware via arcmsr_shutdown(), then
 * release driver resources and unwind the CAM registration.
 * NOTE(review): resources (including the register window) are freed
 * before bus_teardown_intr() and the CAM teardown run; this relies on
 * arcmsr_shutdown() having already masked all adapter interrupts --
 * consider reordering (teardown intr, CAM, then resources) if this is
 * ever revisited.
 */
static u_int32_t arcmsr_detach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);

	arcmsr_shutdown(dev);
	arcmsr_free_resource(acb);
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), acb->sys_res_arcmsr);
	bus_teardown_intr(dev, acb->irqres, acb->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
	/* tell CAM the bus is gone, then drop the path, bus and SIM */
	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
	xpt_free_path(acb->ppath);
	xpt_bus_deregister(cam_sim_path(acb->psim));
	cam_sim_free(acb->psim, TRUE);
	return (0);
}
2289
2290
Cache object: 9771c9199e17db946dd96cf696fc12aa
|