FreeBSD/Linux Kernel Cross Reference
sys/dev/tws/tws_cam.c
1 /*
2 * Copyright (c) 2010 LSI Corp.
3 * All rights reserved.
4 * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: releng/8.4/sys/dev/tws/tws_cam.c 241763 2012-10-20 07:39:11Z delphij $
28 */
29
30 #include <dev/tws/tws.h>
31 #include <dev/tws/tws_services.h>
32 #include <dev/tws/tws_hdm.h>
33 #include <dev/tws/tws_user.h>
34 #include <cam/cam.h>
35 #include <cam/cam_ccb.h>
36 #include <cam/cam_sim.h>
37 #include <cam/cam_xpt_sim.h>
38 #include <cam/cam_debug.h>
39 #include <cam/cam_periph.h>
40
41 #include <cam/scsi/scsi_all.h>
42 #include <cam/scsi/scsi_message.h>
43
44 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
45 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
46
47 static void tws_action(struct cam_sim *sim, union ccb *ccb);
48 static void tws_poll(struct cam_sim *sim);
49 static void tws_scsi_complete(struct tws_request *req);
50
51
52
53 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
54 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
55 int tws_bus_scan(struct tws_softc *sc);
56 int tws_cam_attach(struct tws_softc *sc);
57 void tws_cam_detach(struct tws_softc *sc);
58 void tws_reset(void *arg);
59
60 static void tws_reset_cb(void *arg);
61 static void tws_reinit(void *arg);
62 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
63 static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
64 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
65 int nseg, int error);
66 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
67 void *sgl_dest, u_int16_t num_sgl_entries);
68 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
69 static void tws_scsi_err_complete(struct tws_request *req,
70 struct tws_command_header *hdr);
71 static void tws_passthru_err_complete(struct tws_request *req,
72 struct tws_command_header *hdr);
73
74
75 void tws_timeout(void *arg);
76 static void tws_intr_attn_aen(struct tws_softc *sc);
77 static void tws_intr_attn_error(struct tws_softc *sc);
78 static void tws_intr_resp(struct tws_softc *sc);
79 void tws_intr(void *arg);
80 void tws_cmd_complete(struct tws_request *req);
81 void tws_aen_complete(struct tws_request *req);
82 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
83 void tws_getset_param_complete(struct tws_request *req);
84 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
85 u_int32_t param_size, void *data);
86 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
87 u_int32_t param_size, void *data);
88
89
90 extern struct tws_request *tws_get_request(struct tws_softc *sc,
91 u_int16_t type);
92 extern void *tws_release_request(struct tws_request *req);
93 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
94 extern boolean tws_get_response(struct tws_softc *sc,
95 u_int16_t *req_id, u_int64_t *mfa);
96 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
97 u_int8_t q_type );
98 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
99 struct tws_request *req, u_int8_t q_type );
100 extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
101
102 extern struct tws_sense *
103 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
104
105 extern void tws_fetch_aen(void *arg);
106 extern void tws_disable_db_intr(struct tws_softc *sc);
107 extern void tws_enable_db_intr(struct tws_softc *sc);
108 extern void tws_passthru_complete(struct tws_request *req);
109 extern void tws_aen_synctime_with_host(struct tws_softc *sc);
110 extern void tws_circular_aenq_insert(struct tws_softc *sc,
111 struct tws_circular_q *cq, struct tws_event_packet *aen);
112 extern int tws_use_32bit_sgls;
113 extern boolean tws_ctlr_reset(struct tws_softc *sc);
114 extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
115 u_int8_t q_type );
116 extern void tws_turn_off_interrupts(struct tws_softc *sc);
117 extern void tws_turn_on_interrupts(struct tws_softc *sc);
118 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
119 extern void tws_init_obfl_q(struct tws_softc *sc);
120 extern uint8_t tws_get_state(struct tws_softc *sc);
121 extern void tws_assert_soft_reset(struct tws_softc *sc);
122 extern boolean tws_ctlr_ready(struct tws_softc *sc);
123 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
124 extern int tws_setup_intr(struct tws_softc *sc, int irqs);
125 extern int tws_teardown_intr(struct tws_softc *sc);
126
127
128
129 int
130 tws_cam_attach(struct tws_softc *sc)
131 {
132 struct cam_devq *devq;
133
134 TWS_TRACE_DEBUG(sc, "entry", 0, sc);
135 /* Create a device queue for sim */
136
137 /*
138 * if the user sets cam depth to less than 1
139 * cam may get confused
140 */
141 if ( tws_cam_depth < 1 )
142 tws_cam_depth = 1;
143 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
144 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
145
146 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
147
148 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
149 tws_log(sc, CAM_SIMQ_ALLOC);
150 return(ENOMEM);
151 }
152
153 /*
154 * Create a SIM entry. Though we can support tws_cam_depth
155 * simultaneous requests, we claim to be able to handle only
156 * (tws_cam_depth), so that we always have reserved requests
157 * packet available to service ioctls and internal commands.
158 */
159 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
160 device_get_unit(sc->tws_dev),
161 #if (__FreeBSD_version >= 700000)
162 &sc->sim_lock,
163 #endif
164 tws_cam_depth, 1, devq);
165 /* 1, 1, devq); */
166 if (sc->sim == NULL) {
167 cam_simq_free(devq);
168 tws_log(sc, CAM_SIM_ALLOC);
169 }
170 /* Register the bus. */
171 mtx_lock(&sc->sim_lock);
172 if (xpt_bus_register(sc->sim,
173 #if (__FreeBSD_version >= 700000)
174 sc->tws_dev,
175 #endif
176 0) != CAM_SUCCESS) {
177 cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
178 sc->sim = NULL; /* so cam_detach will not try to free it */
179 mtx_unlock(&sc->sim_lock);
180 tws_log(sc, TWS_XPT_BUS_REGISTER);
181 return(ENXIO);
182 }
183 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
184 CAM_TARGET_WILDCARD,
185 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
186 xpt_bus_deregister(cam_sim_path(sc->sim));
187 /* Passing TRUE to cam_sim_free will free the devq as well. */
188 cam_sim_free(sc->sim, TRUE);
189 tws_log(sc, TWS_XPT_CREATE_PATH);
190 mtx_unlock(&sc->sim_lock);
191 return(ENXIO);
192 }
193 mtx_unlock(&sc->sim_lock);
194
195 return(0);
196 }
197
/*
 * Detach from CAM: free the wildcard path and, if a SIM was registered,
 * deregister the bus and free the SIM (TRUE also frees the devq).
 * All teardown is done under the sim lock.
 */
void
tws_cam_detach(struct tws_softc *sc)
{
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    mtx_lock(&sc->sim_lock);
    if (sc->path)
        xpt_free_path(sc->path);
    /* sc->sim is NULL if attach failed after cam_sim_free (see attach). */
    if (sc->sim) {
        xpt_bus_deregister(cam_sim_path(sc->sim));
        cam_sim_free(sc->sim, TRUE);
    }
    mtx_unlock(&sc->sim_lock);
}
211
/*
 * Kick off an asynchronous rescan of the whole bus so CAM discovers the
 * controller's units.  Caller must hold sc->sim_lock.
 * Returns 0 on success, ENXIO if no SIM, ENOMEM/EIO on CCB/path failure.
 */
int
tws_bus_scan(struct tws_softc *sc)
{
    union ccb *ccb;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    if (!(sc->sim))
        return(ENXIO);
    mtx_assert(&sc->sim_lock, MA_OWNED);
    if ((ccb = xpt_alloc_ccb()) == NULL)
        return(ENOMEM);

    /* Wildcard target/lun path: rescan everything behind this SIM. */
    if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->sim),
                        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_free_ccb(ccb);
        return(EIO);
    }
    /* xpt_rescan() takes ownership of the ccb and completes it async. */
    xpt_rescan(ccb);

    return(0);
}
233
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code; SCSI I/O is handed to tws_execute_scsi(), most other requests
 * are answered inline and completed with xpt_done().
 */
static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);


    switch( ccb->ccb_h.func_code ) {
        case XPT_SCSI_IO:
        {
            /* tws_execute_scsi() completes the ccb itself on all paths. */
            if ( tws_execute_scsi(sc, ccb) )
                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
            break;
        }
        case XPT_ABORT:
        {
            /* Abort is not supported; report the ccb aborted. */
            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            /*
             * NOTE(review): this case only traces and breaks -- the ccb is
             * not completed with xpt_done() here; confirm the caller's
             * expectation for XPT_RESET_BUS on this driver.
             */
            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            /* Transfer settings are fixed; refuse to change them. */
            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);

            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

#if (__FreeBSD_version >= 700000 )
            /* Report a fixed SPI/SCSI-2 transport with disconnect+tags. */
            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;

            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
            /* Pre-7.x CAM used flat valid/flags fields for the same info. */
            ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
            ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
#endif
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
                            ccb->ccg.block_size);
            /* Let CAM compute C/H/S from the volume size. */
            cam_calc_geometry(&ccb->ccg, 1/* extended */);
            xpt_done(ccb);

            break;
        }
        case XPT_PATH_INQ:
        {
            /* Describe the HBA's capabilities and limits to CAM. */
            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
            ccb->cpi.base_transfer_speed = 6000000;
            strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
            strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
#if (__FreeBSD_version >= 700000 )
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            ccb->cpi.maxio = TWS_MAX_IO_SIZE;
#endif
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        default:
            /* Anything we do not implement is invalid. */
            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
    }
}
335
/*
 * Completion callback for a successful SCSI I/O request: pull the request
 * off the busy queue, cancel its timeout, tear down DMA, complete the ccb
 * back to CAM, and return the request to the free queue.
 * Note the lock discipline: q_lock and sim_lock are never held together.
 */
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);

    /* Timeout was armed in tws_execute_scsi(); disarm before completing. */
    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);


    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    mtx_lock(&sc->sim_lock);
    xpt_done(req->ccb_ptr);
    mtx_unlock(&sc->sim_lock);

    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
358
/*
 * Completion callback for asynchronous get/set-parameter requests:
 * cancel the timeout, unmap DMA, free the parameter buffer, and mark
 * the reserved request slot free again.
 */
void
tws_getset_param_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);

    untimeout(tws_timeout, req, req->thandle);
    tws_unmap_request(sc, req);

    /* Buffer was allocated in tws_set_param()/tws_get_param(). */
    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;
}
373
/*
 * Completion callback for an AEN (asynchronous event notification) fetch.
 * Decodes the sense/status block returned by the firmware, turns it into
 * a tws_event_packet queued on the circular AEN queue, and re-issues the
 * fetch until the firmware reports its AEN queue empty.
 */
void
tws_aen_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;
    struct tws_command_header *sense;
    struct tws_event_packet event;
    u_int16_t aen_code=0;

    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);

    untimeout(tws_timeout, req, req->thandle);
    tws_unmap_request(sc, req);

    /* The fetch data buffer holds a firmware command header with sense. */
    sense = (struct tws_command_header *)req->data;

    TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
                    sense->sense_data[2]);
    TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
                    sense->status_block.res__severity);
    TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
                    sense->status_block.error);
    TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
                    sense->header_desc.size_sense);

    /* The AEN code is carried in the status block's error field. */
    aen_code = sense->status_block.error;

    switch ( aen_code ) {
        case TWS_AEN_SYNC_TIME_WITH_HOST :
            tws_aen_synctime_with_host(sc);
            break;
        case TWS_AEN_QUEUE_EMPTY :
            /* Nothing pending; do not re-arm the fetch below. */
            break;
        default :
            bzero(&event, sizeof(struct tws_event_packet));
            event.sequence_id = sc->seq_id;
            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
            event.aen_code = sense->status_block.error;
            /* Severity is the low 3 bits; indexes tws_sev_str[]. */
            event.severity = sense->status_block.res__severity & 0x7;
            event.event_src = TWS_SRC_CTRL_EVENT;
            strcpy(event.severity_str, tws_sev_str[event.severity]);
            event.retrieved = TWS_AEN_NOT_RETRIEVED;

            bcopy(sense->err_specific_desc, event.parameter_data,
                  TWS_ERROR_SPECIFIC_DESC_LEN);
            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;

            /*
             * The descriptor appears to pack two consecutive NUL-terminated
             * strings; if there is room, include the second one in the
             * parameter length (it is printed after the first, below).
             */
            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
                                                  event.parameter_len) + 1);
            }

            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
                          event.severity_str,
                          event.event_src,
                          event.aen_code,
                          event.parameter_data +
                          (strlen(event.parameter_data) + 1),
                          event.parameter_data);

            mtx_lock(&sc->gen_lock);
            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
            sc->seq_id++;
            mtx_unlock(&sc->gen_lock);
            break;

    }

    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;

    /* Keep draining the firmware AEN queue until it reports empty. */
    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
        /* timeout(tws_fetch_aen, sc, 1);*/
        sc->stats.num_aens++;
        tws_fetch_aen((void *)sc);
    }
}
452
/*
 * Generic command completion: cancel the ccb-based timeout and tear down
 * the request's DMA mapping.  Unlike tws_scsi_complete(), this does not
 * complete a ccb or touch the request queues.
 */
void
tws_cmd_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(sc, req);
}
461
/*
 * Handle an error completion reported by the firmware via a message frame
 * address (mfa).  Looks up the sense buffer for the mfa, dispatches to the
 * per-request-type error handler, then posts the header back to the
 * controller through the host outbound queue registers.
 */
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{
    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        /* The firmware echoes the request id in the header descriptor. */
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    /* NOTE(review): no default arm -- other request types fall through. */
    switch (req->type) {
        case TWS_REQ_TYPE_PASSTHRU :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_REQ_TYPE_GETSET_PARAM :
            tws_getset_param_complete(req);
            break;
        case TWS_REQ_TYPE_SCSI_IO :
            tws_scsi_err_complete(req, hdr);
            break;

    }

    /* Return the message frame to the controller (high word, then low). */
    mtx_lock(&sc->io_lock);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    /* Bit 13 in the status register flags an outbound free-list overrun. */
    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        device_printf(sc->tws_dev, "OBFL Overrun\n");
        sc->obfl_q_overrun = true;
    }
    mtx_unlock(&sc->io_lock);
}
517
/*
 * Error completion for a SCSI I/O request: translate the firmware error
 * code into a CAM status, copy the sense data into the ccb, complete the
 * ccb, and recycle the request back to the free queue.
 */
static void
tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
{
    u_int8_t *sense_data;
    struct tws_softc *sc = req->sc;
    union ccb *ccb = req->ccb_ptr;

    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
                    req->cmd_pkt->cmd.pkt_a.status);
    /* Missing unit/offline: report nonexistent lun or selection timeout. */
    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {

        if ( ccb->ccb_h.target_lun ) {
            TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
            ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        } else {
            TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
            ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
        }

    } else {
        TWS_TRACE_DEBUG(sc, "scsi status error",0,0);
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        /* 0x1A is MODE SENSE(6); unsupported pages still carry sense. */
        if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
             (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
            TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
        }
    }

    /* if there were no error simply mark complete error */
    if (ccb->ccb_h.status == 0)
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;

    /*
     * NOTE(review): sense_data is the address of an embedded ccb member,
     * so this check is always true as written.
     */
    sense_data = (u_int8_t *)&ccb->csio.sense_data;
    if (sense_data) {
        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;

    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    mtx_lock(&sc->sim_lock);
    xpt_done(ccb);
    mtx_unlock(&sc->sim_lock);

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);
    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
572
/*
 * Error completion for a passthru (ioctl) request: record the firmware
 * error, copy the returned command header into the user-visible packet,
 * and finish through the normal passthru completion path.
 */
static void
tws_passthru_err_complete(struct tws_request *req,
                          struct tws_command_header *hdr)
{
    TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
    req->error_code = hdr->status_block.error;
    memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
    tws_passthru_complete(req);
}
582
/*
 * Abort every request on the busy queue (used during controller reset):
 * each ccb is completed back to CAM with a requeue + bus-reset status so
 * the upper layer retries it, and the request returns to the free queue.
 */
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
    struct tws_request *req;
    union ccb *ccb;
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->q_lock);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);
    while ( req ) {
        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
        untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);

        req->error_code = TWS_REQ_RET_RESET;
        ccb = (union ccb *)(req->ccb_ptr);

        /* Ask CAM to requeue the command after the reset. */
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
        ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;

        tws_unmap_request(req->sc, req);

        mtx_lock(&sc->sim_lock);
        xpt_done(req->ccb_ptr);
        mtx_unlock(&sc->sim_lock);

        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }
}
616
617
/*
 * Reset the three reserved request slots (AEN fetch, passthru, get/set
 * param) during controller reset.  Reserved requests are indexed by their
 * type value in sc->reqs[].  AEN and param slots are torn down and freed;
 * a busy passthru is only flagged so its owner sees the reset.
 */
static void
tws_drain_reserved_reqs(struct tws_softc *sc)
{
    struct tws_request *r;

    r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
        untimeout(tws_timeout, r, r->thandle);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }

    /* Passthru is owned by the ioctl path; just mark it reset. */
    r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
    if ( r->state == TWS_REQ_STATE_BUSY ) {
        TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
        r->error_code = TWS_REQ_RET_RESET;
    }

    r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
        untimeout(tws_timeout, r, r->thandle);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }
}
649
650 static void
651 tws_drain_response_queue(struct tws_softc *sc)
652 {
653 u_int16_t req_id;
654 u_int64_t mfa;
655 while ( tws_get_response(sc, &req_id, &mfa) );
656 }
657
658
/*
 * Build and submit a firmware EXECUTE_SCSI command for an XPT_SCSI_IO ccb.
 * Validates target/lun and flags, reserves a request, fills in the command
 * packet (CDB, lun nibbles, request id), arms a per-command timeout, and
 * hands the data buffer to tws_map_request() which builds the SGL and
 * submits.  On every early-error path the ccb is completed here.
 * Returns 0 (errors are reported through the ccb) or the map error.
 */
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    mtx_assert(&sc->sim_lock, MA_OWNED);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /* Physical CDB addresses are not supported. */
    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request. Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
    if ( !req ) {
        /* Out of request slots: ask CAM to retry later. */
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    /* Record the transfer direction for the DMA sync calls later. */
    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags |= TWS_DIR_IN;
        if(ccb_h->flags & CAM_DIR_OUT)
            req->flags |= TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_REQ_TYPE_SCSI_IO;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /*
     * The 8-bit lun is split across two packet fields: low nibble packed
     * above the request id, high nibble above the SGL entry count.
     */
    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
         /* Virtual data addresses.  Need to convert them... */
         if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
             if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                 tws_release_request(req);
                 /* Plain assignment also clears CAM_SIM_QUEUED set above. */
                 ccb_h->status = CAM_REQ_TOO_BIG;
                 xpt_done(ccb);
                 return(0);
             }

             req->length = csio->dxfer_len;
             if (req->length) {
                 req->data = csio->data_ptr;
                 /* there is 1 sgl_entrie */
                 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
             }
         } else {
             /* Caller-supplied S/G lists are not supported. */
             TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
             tws_release_request(req);
             ccb_h->status = CAM_REQ_INVALID;
             xpt_done(ccb);
             return(0);
         }
    } else {
         /* Data addresses are physical. */
         TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
         tws_release_request(req);
         ccb_h->status = CAM_REQ_INVALID;
         ccb_h->status &= ~CAM_SIM_QUEUED;
         xpt_done(ccb);
         return(0);
    }
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    /* Arm the per-command timeout (ccb_h->timeout is in milliseconds). */
    ccb_h->timeout_ch = timeout(tws_timeout, req, (ccb_h->timeout * hz)/1000);
    error = tws_map_request(sc, req);
    return(error);
}
786
787
788 int
789 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
790 {
791 struct tws_request *req;
792 struct tws_command_packet *cmd_pkt;
793 int error;
794
795 TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
796 req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);
797
798 if ( req == NULL )
799 return(ENOMEM);
800
801 req->cb = tws_aen_complete;
802
803 cmd_pkt = req->cmd_pkt;
804 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
805 cmd_pkt->cmd.pkt_a.status = 0;
806 cmd_pkt->cmd.pkt_a.unit = 0;
807 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
808 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
809
810 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
811 cmd_pkt->cmd.pkt_a.cdb[4] = 128;
812
813 req->length = TWS_SECTOR_SIZE;
814 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
815 if ( req->data == NULL )
816 return(ENOMEM);
817 bzero(req->data, TWS_SECTOR_SIZE);
818 req->flags = TWS_DIR_IN;
819
820 req->thandle = timeout(tws_timeout, req, (TWS_IO_TIMEOUT * hz));
821 error = tws_map_request(sc, req);
822 return(error);
823
824 }
825
826 int
827 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
828 u_int32_t param_size, void *data)
829 {
830 struct tws_request *req;
831 struct tws_command_packet *cmd_pkt;
832 union tws_command_giga *cmd;
833 struct tws_getset_param *param;
834 int error;
835
836 req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
837 if ( req == NULL ) {
838 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
839 return(ENOMEM);
840 }
841
842 req->length = TWS_SECTOR_SIZE;
843 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
844 if ( req->data == NULL )
845 return(ENOMEM);
846 bzero(req->data, TWS_SECTOR_SIZE);
847 param = (struct tws_getset_param *)req->data;
848
849 req->cb = tws_getset_param_complete;
850 req->flags = TWS_DIR_OUT;
851 cmd_pkt = req->cmd_pkt;
852
853 cmd = &cmd_pkt->cmd.pkt_g;
854 cmd->param.sgl_off__opcode =
855 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
856 cmd->param.request_id = (u_int8_t)req->request_id;
857 cmd->param.host_id__unit = 0;
858 cmd->param.param_count = 1;
859 cmd->param.size = 2; /* map routine will add sgls */
860
861 /* Specify which parameter we want to set. */
862 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
863 param->parameter_id = (u_int8_t)(param_id);
864 param->parameter_size_bytes = (u_int16_t)param_size;
865 memcpy(param->data, data, param_size);
866
867 req->thandle = timeout(tws_timeout, req, (TWS_IOCTL_TIMEOUT * hz));
868 error = tws_map_request(sc, req);
869 return(error);
870
871 }
872
873 int
874 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
875 u_int32_t param_size, void *data)
876 {
877 struct tws_request *req;
878 struct tws_command_packet *cmd_pkt;
879 union tws_command_giga *cmd;
880 struct tws_getset_param *param;
881 u_int16_t reqid;
882 u_int64_t mfa;
883 int error = SUCCESS;
884
885
886 req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
887 if ( req == NULL ) {
888 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
889 return(FAILURE);
890 }
891
892 req->length = TWS_SECTOR_SIZE;
893 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
894 if ( req->data == NULL )
895 return(FAILURE);
896 bzero(req->data, TWS_SECTOR_SIZE);
897 param = (struct tws_getset_param *)req->data;
898
899 req->cb = NULL;
900 req->flags = TWS_DIR_IN;
901 cmd_pkt = req->cmd_pkt;
902
903 cmd = &cmd_pkt->cmd.pkt_g;
904 cmd->param.sgl_off__opcode =
905 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
906 cmd->param.request_id = (u_int8_t)req->request_id;
907 cmd->param.host_id__unit = 0;
908 cmd->param.param_count = 1;
909 cmd->param.size = 2; /* map routine will add sgls */
910
911 /* Specify which parameter we want to set. */
912 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
913 param->parameter_id = (u_int8_t)(param_id);
914 param->parameter_size_bytes = (u_int16_t)param_size;
915
916 error = tws_map_request(sc, req);
917 if (!error) {
918 reqid = tws_poll4_response(sc, &mfa);
919 tws_unmap_request(sc, req);
920
921 if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
922 memcpy(data, param->data, param_size);
923 } else {
924 error = FAILURE;
925 }
926 }
927
928 free(req->data, M_TWS);
929 req->state = TWS_REQ_STATE_FREE;
930 return(error);
931
932 }
933
934 void
935 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
936 {
937 if (req->data != NULL) {
938 if ( req->flags & TWS_DIR_IN )
939 bus_dmamap_sync(sc->data_tag, req->dma_map,
940 BUS_DMASYNC_POSTREAD);
941 if ( req->flags & TWS_DIR_OUT )
942 bus_dmamap_sync(sc->data_tag, req->dma_map,
943 BUS_DMASYNC_POSTWRITE);
944 mtx_lock(&sc->io_lock);
945 bus_dmamap_unload(sc->data_tag, req->dma_map);
946 mtx_unlock(&sc->io_lock);
947 }
948 }
949
/*
 * Map a request's data buffer for DMA and submit it to the firmware.
 * With data: bus_dmamap_load() invokes tws_dmamap_data_load_cbfn(), which
 * fills the SGL and submits; EINPROGRESS (deferred callback) freezes the
 * simq and is treated as success.  Without data: submit directly.
 * Returns 0 or an error from load/submit.
 */
int32_t
tws_map_request(struct tws_softc *sc, struct tws_request *req)
{
    int32_t error = 0;


    /* If the command involves data, map that too. */
    if (req->data != NULL) {
        /* Only normal SCSI I/O may sleep waiting for map resources. */
        int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);

        /*
         * Map the data buffer into bus space and build the SG list.
         */
        mtx_lock(&sc->io_lock);
        error = bus_dmamap_load(sc->data_tag, req->dma_map,
                                req->data, req->length,
                                tws_dmamap_data_load_cbfn, req,
                                my_flags);
        mtx_unlock(&sc->io_lock);

        if (error == EINPROGRESS) {
            /* Callback will run later; hold off new I/O until then. */
            TWS_TRACE(sc, "in progress", 0, error);
            tws_freeze_simq(sc, req);
            error = 0;  // EINPROGRESS is not a fatal error.
        }
    } else { /* no data involved */
        error = tws_submit_command(sc, req);
    }
    return(error);
}
980
981
/*
 * busdma load callback: sync the buffer pre-transfer, copy the generated
 * segments into the command packet's SGL (choosing the generic vs.
 * SCSI-packet SGL location by request type), then submit the command.
 */
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                          int nseg, int error)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;


    if ( error ) {
        TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
    }

    if ( error == EFBIG ) {
        /*
         * Too many segments for the map.  NOTE(review): the ccb status is
         * set but the command is neither submitted nor xpt_done()'d here;
         * confirm who completes the ccb on this path.
         */
        TWS_TRACE(sc, "not enough data segs", 0, nseg);
        req->error_code = error;
        req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
        return;
    }

    /* Pre-transfer sync in the recorded direction(s). */
    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        /*
         * Non-SCSI passthru and get/set-param commands use the generic
         * packet layout: the SGL starts 'size' 32-bit words into the
         * command, and 'size' grows by 4 words per 64-bit SGL entry
         * (2 words per 32-bit entry).
         */
        if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
             GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
                            TWS_FW_CMD_EXECUTE_SCSI) ||
              req->type == TWS_REQ_TYPE_GETSET_PARAM) {
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            gcmd->size += sgls *
                          ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);

        } else {
            /* EXECUTE_SCSI packets carry the SGL and entry count inline. */
            tws_fill_sg_list(req->sc, (void *)segs,
                      (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
        }
    }


    req->error_code = tws_submit_command(req->sc, req);

}
1032
1033
1034 static void
1035 tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
1036 u_int16_t num_sgl_entries)
1037 {
1038 int i;
1039
1040 if ( sc->is64bit ) {
1041 struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
1042
1043 if ( !tws_use_32bit_sgls ) {
1044 struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
1045 if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
1046 TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
1047 for (i = 0; i < num_sgl_entries; i++) {
1048 sgl_d[i].address = sgl_s->address;
1049 sgl_d[i].length = sgl_s->length;
1050 sgl_d[i].flag = 0;
1051 sgl_d[i].reserved = 0;
1052 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1053 sizeof(bus_dma_segment_t));
1054 }
1055 } else {
1056 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1057 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1058 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1059 for (i = 0; i < num_sgl_entries; i++) {
1060 sgl_d[i].address = sgl_s->address;
1061 sgl_d[i].length = sgl_s->length;
1062 sgl_d[i].flag = 0;
1063 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1064 sizeof(bus_dma_segment_t));
1065 }
1066 }
1067 } else {
1068 struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
1069 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1070
1071 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1072 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1073
1074
1075 for (i = 0; i < num_sgl_entries; i++) {
1076 sgl_d[i].address = sgl_s[i].address;
1077 sgl_d[i].length = sgl_s[i].length;
1078 sgl_d[i].flag = 0;
1079 }
1080 }
1081 }
1082
1083
1084 void
1085 tws_intr(void *arg)
1086 {
1087 struct tws_softc *sc = (struct tws_softc *)arg;
1088 u_int32_t histat=0, db=0;
1089
1090 if (!(sc)) {
1091 device_printf(sc->tws_dev, "null softc!!!\n");
1092 return;
1093 }
1094
1095 if ( tws_get_state(sc) == TWS_RESET ) {
1096 return;
1097 }
1098
1099 if ( tws_get_state(sc) != TWS_ONLINE ) {
1100 return;
1101 }
1102
1103 sc->stats.num_intrs++;
1104 histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1105 if ( histat & TWS_BIT2 ) {
1106 TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1107 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1108 if ( db & TWS_BIT21 ) {
1109 tws_intr_attn_error(sc);
1110 return;
1111 }
1112 if ( db & TWS_BIT18 ) {
1113 tws_intr_attn_aen(sc);
1114 }
1115 }
1116
1117 if ( histat & TWS_BIT3 ) {
1118 tws_intr_resp(sc);
1119 }
1120 }
1121
/*
 * Doorbell attention: the controller has AEN(s) pending.  Fetch them,
 * then clear the AEN doorbell bit (TWS_BIT18).
 */
static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    u_int32_t db=0;

    /* Mask off doorbell interrupts until all the AENs are fetched. */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen((void *)sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    /* NOTE(review): db readback looks like a post-write flush of the
     * doorbell clear; value is intentionally unused - confirm. */
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);

}
1134
/*
 * Doorbell attention: microcontroller error.  Clear every doorbell bit,
 * log the fault, and reset the controller.
 */
static void
tws_intr_attn_error(struct tws_softc *sc)
{
    u_int32_t db=0;

    TWS_TRACE(sc, "attn error", 0, 0);
    /* Clear all outstanding doorbell bits before resetting. */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    /* NOTE(review): db readback looks like a post-write flush; value is
     * intentionally unused - confirm. */
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}
1146
1147 static void
1148 tws_intr_resp(struct tws_softc *sc)
1149 {
1150 u_int16_t req_id;
1151 u_int64_t mfa;
1152
1153 while ( tws_get_response(sc, &req_id, &mfa) ) {
1154 sc->stats.reqs_out++;
1155 if ( req_id == TWS_INVALID_REQID ) {
1156 TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1157 sc->stats.reqs_errored++;
1158 tws_err_complete(sc, mfa);
1159 continue;
1160 }
1161 sc->reqs[req_id].cb(&sc->reqs[req_id]);
1162 }
1163
1164 }
1165
1166
/*
 * CAM poll entry point: service the controller synchronously by running
 * the interrupt handler directly.
 */
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *softc;

    softc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(softc, "entry", 0, 0);
    tws_intr((void *)softc);
}
1174
/*
 * Request timeout handler (callout).  Recovery for a timed-out request
 * is a full controller reset: stop interrupts and freeze the SIM queue,
 * soft-reset the hardware, drain and re-init via tws_reset_cb() /
 * tws_reinit(), then resume normal operation.
 *
 * arg - the tws_request that timed out
 */
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;


    /* Unlocked fast-path check: a reset already retired this request. */
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        return;
    }

    mtx_lock(&sc->gen_lock);
    /* Re-check under gen_lock in case a reset finished it meanwhile. */
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    /* A reset is already in progress; it will handle this request. */
    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    /* Quiesce: no interrupts and no new CAM I/O during the reset. */
    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
        device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
    } else {
        device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
    }

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

    // device_printf(sc->tws_dev, "Controller Reset complete!\n");
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    /* Resume: let CAM submit again and re-attach the interrupt. */
    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}
1222
/*
 * Explicit controller reset (e.g. from the microcontroller-error path).
 * Same choreography as tws_timeout(): freeze interrupts and the SIM
 * queue, soft-reset, drain/re-init, resume.  No-op if a reset is
 * already in progress.
 *
 * arg - the softc of the controller to reset
 */
void
tws_reset(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;

    mtx_lock(&sc->gen_lock);
    /* Another reset is already running; nothing to do. */
    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    /* Quiesce: no interrupts and no new CAM I/O during the reset. */
    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    device_printf(sc->tws_dev, "Resetting controller\n");

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

    // device_printf(sc->tws_dev, "Controller Reset complete!\n");
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    /* Resume: let CAM submit again and re-attach the interrupt. */
    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}
1253
/*
 * Post-soft-reset cleanup: drain all busy/reserved/response queue state
 * and poll the scratchpad register for the controller's ready flag.
 * Called with gen_lock held while the controller state is TWS_RESET
 * (returns immediately otherwise).
 */
static void
tws_reset_cb(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    time_t endt;
    int found = 0;
    u_int32_t reg;

    if ( tws_get_state(sc) != TWS_RESET ) {
        return;
    }

    // device_printf(sc->tws_dev, "Draining Busy Queue\n");
    tws_drain_busy_queue(sc);
    // device_printf(sc->tws_dev, "Draining Reserved Reqs\n");
    tws_drain_reserved_reqs(sc);
    // device_printf(sc->tws_dev, "Draining Response Queue\n");
    tws_drain_response_queue(sc);

    /*
     * Poll for the ready flag (TWS_BIT13 of SCRPD3) until
     * TWS_POLL_TIMEOUT expires.
     * NOTE(review): this is a tight busy-wait with no delay between
     * register reads - it will spin the CPU for the whole timeout if
     * the flag never appears.
     */
    endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
    while ((TWS_LOCAL_TIME <= endt) && (!found)) {
        reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
        if ( reg & TWS_BIT13 ) {
            found = 1;
            // device_printf(sc->tws_dev, " ... Got it!\n");
        }
    }
    if ( !found )
        device_printf(sc->tws_dev, " ... Controller ready flag NOT found!\n");
}
1285
/*
 * Bring the controller back up after a reset: wait for controller-ready
 * (retrying the soft reset up to twice, sleeping on gen_lock between
 * polls), then re-establish the firmware connection, re-arm the
 * outbound free list, and re-enable interrupts.  Must be called with
 * gen_lock held (mtx_sleep() releases/reacquires it).
 */
static void
tws_reinit(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    int timeout_val=0;
    int try=2;
    int done=0;


    // device_printf(sc->tws_dev, "Waiting for Controller Ready\n");
    while ( !done && try ) {
        if ( tws_ctlr_ready(sc) ) {
            done = 1;
            break;
        } else {
            /* Each pass accounts for the 5-second sleep below; after
             * TWS_RESET_TIMEOUT worth of waiting, re-assert the soft
             * reset and burn one retry. */
            timeout_val += 5;
            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
                timeout_val = 0;
                if ( try )
                    tws_assert_soft_reset(sc);
                try--;
            }
            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
        }
    }

    if (!done) {
        device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n");
        return;
    }

    sc->obfl_q_overrun = false;
    // device_printf(sc->tws_dev, "Sending initConnect\n");
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    tws_turn_on_interrupts(sc);

    /* Wake anyone sleeping on the reset (see sc->chan waiters). */
    wakeup_one(sc->chan);
}
1328
1329
1330 static void
1331 tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
1332 {
1333 /* Only for IO commands */
1334 if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1335 union ccb *ccb = (union ccb *)(req->ccb_ptr);
1336
1337 xpt_freeze_simq(sc->sim, 1);
1338 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1339 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1340 }
1341 }
1342
1343
/* Allow the CAM queue depth to be overridden from loader.conf. */
TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);
Cache object: 8524844dbce5f785fd261db2343ad0cd
|