1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/8.4/sys/dev/isci/isci_io_request.c 244129 2012-12-12 00:39:32Z jimharris $");
33
34 #include <dev/isci/isci.h>
35
36 #include <cam/scsi/scsi_all.h>
37 #include <cam/scsi/scsi_message.h>
38
39 #include <dev/isci/scil/intel_sas.h>
40
41 #include <dev/isci/scil/sci_util.h>
42
43 #include <dev/isci/scil/scif_io_request.h>
44 #include <dev/isci/scil/scif_controller.h>
45 #include <dev/isci/scil/scif_remote_device.h>
46 #include <dev/isci/scil/scif_user_callback.h>
47
48 #include <dev/isci/scil/scic_io_request.h>
49 #include <dev/isci/scil/scic_user_callback.h>
50
51 /**
52 * @brief This user callback will inform the user that an IO request has
53 * completed.
54 *
55 * @param[in] controller This parameter specifies the controller on
56 * which the IO request is completing.
57 * @param[in] remote_device This parameter specifies the remote device on
58 * which this request is completing.
59 * @param[in] io_request This parameter specifies the IO request that has
60 * completed.
61 * @param[in] completion_status This parameter specifies the results of
62 * the IO request operation. SCI_IO_SUCCESS indicates
63 * successful completion.
64 *
65 * @return none
66 */
67 void
68 scif_cb_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
69 SCI_REMOTE_DEVICE_HANDLE_T remote_device,
70 SCI_IO_REQUEST_HANDLE_T io_request, SCI_IO_STATUS completion_status)
71 {
72 struct ISCI_IO_REQUEST *isci_request =
73 (struct ISCI_IO_REQUEST *)sci_object_get_association(io_request);
74
75 scif_controller_complete_io(scif_controller, remote_device, io_request);
76 isci_io_request_complete(scif_controller, remote_device, isci_request,
77 completion_status);
78 }
79
/**
 * @brief Common completion handling for isci IO requests (SCSI and SMP).
 *
 * Translates the SCI completion status into a CAM status on the request's
 * CCB, tears down the DMA mapping, returns the request object to the
 * controller's pool, and then either completes the CCB back to CAM or
 * leaves it on the device's internal queue for a later retry.
 *
 * @param[in] scif_controller    Framework controller handle.
 * @param[in] remote_device      Framework remote device handle.
 * @param[in] isci_request       Driver request object being completed.
 * @param[in] completion_status  SCI completion status for the IO.
 *
 * @return none
 */
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
	    (struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	/* Clear the old CAM status bits; the switch below ORs in new ones. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		/* For SMP IO, copy the response frame back into the CCB. */
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
			        isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		/* Successful, but fewer bytes than requested were moved;
		 * report the residual count to CAM. */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
		        isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		/* The device returned a SCSI response IU - extract the
		 * status and sense data into the CCB and mark autosense
		 * as valid. */
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
		        isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		/* Never copy more sense data than the CCB can hold. */
		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		/* Initiate a device reset, then handle the IO exactly like
		 * a terminated request (intentional fall-through). */
		isci_remote_device_reset(isci_remote_device, NULL);

		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		/* Transient condition - do not complete the CCB back to
		 * CAM; it will be placed (or left) on the device's internal
		 * queue and retried later (see the else-branch below). */
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
	{
		/* Out of NCQ tags - ask CAM to adjust the number of
		 * openings down to the device's max queue depth, then
		 * retry this CCB via the internal queue. */
		struct ccb_relsim ccb_relsim;
		struct cam_path *path;

		xpt_create_path(&path, NULL,
		    cam_sim_path(isci_controller->sim),
		    isci_remote_device->index, 0);

		xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
		ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
		ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
		ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
		ccb_relsim.openings =
		    scif_remote_device_get_max_queue_depth(remote_device);
		xpt_action((union ccb *)&ccb_relsim);
		xpt_free_path(path);
		complete_ccb = FALSE;
	}
	break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	/* The request is finished with the hardware either way: stop the
	 * timeout timer, sync/unload the DMA map, and put the request
	 * object back into the controller's pool. */
	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 * status. So temporarily freeze the queue until the
			 * upper layers can act on the status. The
			 * CAM_DEV_QFRZN flag will then release the queue
			 * after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 * set the in_progress pointer to NULL denoting that
			 * we can retry another CCB from the queue. We only
			 * allow one CCB at a time from the queue to be
			 * in progress so that we can effectively maintain
			 * ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		/* A request just freed up - if we froze the simq because the
		 * pool was empty, release it now. */
		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		/* Retry path: hold back further IO to this LUN until the
		 * CCB can be re-issued. */
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 * Do nothing, CCB is already on the device's queue.
			 * We leave it on the queue, to be retried again
			 * next time a CCB on this device completes, or we
			 * get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    ccb->csio.cdb_io.cdb_bytes[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}
297
298 /**
299 * @brief This callback method asks the user to provide the physical
300 * address for the supplied virtual address when building an
301 * io request object.
302 *
303 * @param[in] controller This parameter is the core controller object
304 * handle.
305 * @param[in] io_request This parameter is the io request object handle
306 * for which the physical address is being requested.
307 * @param[in] virtual_address This paramter is the virtual address which
308 * is to be returned as a physical address.
309 * @param[out] physical_address The physical address for the supplied virtual
310 * address.
311 *
312 * @return None.
313 */
314 void
315 scic_cb_io_request_get_physical_address(SCI_CONTROLLER_HANDLE_T controller,
316 SCI_IO_REQUEST_HANDLE_T io_request, void *virtual_address,
317 SCI_PHYSICAL_ADDRESS *physical_address)
318 {
319 SCI_IO_REQUEST_HANDLE_T scif_request =
320 sci_object_get_association(io_request);
321 struct ISCI_REQUEST *isci_request =
322 sci_object_get_association(scif_request);
323
324 if(isci_request != NULL) {
325 /* isci_request is not NULL, meaning this is a request initiated
326 * by CAM or the isci layer (i.e. device reset for I/O
327 * timeout). Therefore we can calculate the physical address
328 * based on the address we stored in the struct ISCI_REQUEST
329 * object.
330 */
331 *physical_address = isci_request->physical_address +
332 (uintptr_t)virtual_address -
333 (uintptr_t)isci_request;
334 } else {
335 /* isci_request is NULL, meaning this is a request generated
336 * internally by SCIL (i.e. for SMP requests or NCQ error
337 * recovery). Therefore we calculate the physical address
338 * based on the controller's uncached controller memory buffer,
339 * since we know that this is what SCIL uses for internal
340 * framework requests.
341 */
342 SCI_CONTROLLER_HANDLE_T scif_controller =
343 (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(controller);
344 struct ISCI_CONTROLLER *isci_controller =
345 (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
346 U64 virt_addr_offset = (uintptr_t)virtual_address -
347 (U64)isci_controller->uncached_controller_memory.virtual_address;
348
349 *physical_address =
350 isci_controller->uncached_controller_memory.physical_address
351 + virt_addr_offset;
352 }
353 }
354
355 /**
356 * @brief This callback method asks the user to provide the address for
357 * the command descriptor block (CDB) associated with this IO request.
358 *
359 * @param[in] scif_user_io_request This parameter points to the user's
360 * IO request object. It is a cookie that allows the user to
361 * provide the necessary information for this callback.
362 *
363 * @return This method returns the virtual address of the CDB.
364 */
365 void *
366 scif_cb_io_request_get_cdb_address(void * scif_user_io_request)
367 {
368 struct ISCI_IO_REQUEST *isci_request =
369 (struct ISCI_IO_REQUEST *)scif_user_io_request;
370
371 return (isci_request->ccb->csio.cdb_io.cdb_bytes);
372 }
373
374 /**
375 * @brief This callback method asks the user to provide the length of
376 * the command descriptor block (CDB) associated with this IO request.
377 *
378 * @param[in] scif_user_io_request This parameter points to the user's
379 * IO request object. It is a cookie that allows the user to
380 * provide the necessary information for this callback.
381 *
382 * @return This method returns the length of the CDB.
383 */
384 uint32_t
385 scif_cb_io_request_get_cdb_length(void * scif_user_io_request)
386 {
387 struct ISCI_IO_REQUEST *isci_request =
388 (struct ISCI_IO_REQUEST *)scif_user_io_request;
389
390 return (isci_request->ccb->csio.cdb_len);
391 }
392
393 /**
394 * @brief This callback method asks the user to provide the Logical Unit (LUN)
395 * associated with this IO request.
396 *
397 * @note The contents of the value returned from this callback are defined
398 * by the protocol standard (e.g. T10 SAS specification). Please
399 * refer to the transport command information unit description
400 * in the associated standard.
401 *
402 * @param[in] scif_user_io_request This parameter points to the user's
403 * IO request object. It is a cookie that allows the user to
404 * provide the necessary information for this callback.
405 *
406 * @return This method returns the LUN associated with this request.
407 */
408 uint32_t
409 scif_cb_io_request_get_lun(void * scif_user_io_request)
410 {
411 struct ISCI_IO_REQUEST *isci_request =
412 (struct ISCI_IO_REQUEST *)scif_user_io_request;
413
414 return (isci_request->ccb->ccb_h.target_lun);
415 }
416
417 /**
418 * @brief This callback method asks the user to provide the task attribute
419 * associated with this IO request.
420 *
421 * @note The contents of the value returned from this callback are defined
422 * by the protocol standard (e.g. T10 SAS specification). Please
423 * refer to the transport command information unit description
424 * in the associated standard.
425 *
426 * @param[in] scif_user_io_request This parameter points to the user's
427 * IO request object. It is a cookie that allows the user to
428 * provide the necessary information for this callback.
429 *
430 * @return This method returns the task attribute associated with this
431 * IO request.
432 */
433 uint32_t
434 scif_cb_io_request_get_task_attribute(void * scif_user_io_request)
435 {
436 struct ISCI_IO_REQUEST *isci_request =
437 (struct ISCI_IO_REQUEST *)scif_user_io_request;
438 uint32_t task_attribute;
439
440 if((isci_request->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0)
441 switch(isci_request->ccb->csio.tag_action) {
442 case MSG_HEAD_OF_Q_TAG:
443 task_attribute = SCI_SAS_HEAD_OF_QUEUE_ATTRIBUTE;
444 break;
445
446 case MSG_ORDERED_Q_TAG:
447 task_attribute = SCI_SAS_ORDERED_ATTRIBUTE;
448 break;
449
450 case MSG_ACA_TASK:
451 task_attribute = SCI_SAS_ACA_ATTRIBUTE;
452 break;
453
454 default:
455 task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
456 break;
457 }
458 else
459 task_attribute = SCI_SAS_SIMPLE_ATTRIBUTE;
460
461 return (task_attribute);
462 }
463
464 /**
465 * @brief This callback method asks the user to provide the command priority
466 * associated with this IO request.
467 *
468 * @note The contents of the value returned from this callback are defined
469 * by the protocol standard (e.g. T10 SAS specification). Please
470 * refer to the transport command information unit description
471 * in the associated standard.
472 *
473 * @param[in] scif_user_io_request This parameter points to the user's
474 * IO request object. It is a cookie that allows the user to
475 * provide the necessary information for this callback.
476 *
477 * @return This method returns the command priority associated with this
478 * IO request.
479 */
480 uint32_t
481 scif_cb_io_request_get_command_priority(void * scif_user_io_request)
482 {
483 return (0);
484 }
485
486 /**
487 * @brief This method simply returns the virtual address associated
488 * with the scsi_io and byte_offset supplied parameters.
489 *
490 * @note This callback is not utilized in the fast path. The expectation
491 * is that this method is utilized for items such as SCSI to ATA
492 * translation for commands like INQUIRY, READ CAPACITY, etc.
493 *
494 * @param[in] scif_user_io_request This parameter points to the user's
495 * IO request object. It is a cookie that allows the user to
496 * provide the necessary information for this callback.
497 * @param[in] byte_offset This parameter specifies the offset into the data
498 * buffers pointed to by the SGL. The byte offset starts at 0
499 * and continues until the last byte pointed to be the last SGL
500 * element.
501 *
502 * @return A virtual address pointer to the location specified by the
503 * parameters.
504 */
505 uint8_t *
506 scif_cb_io_request_get_virtual_address_from_sgl(void * scif_user_io_request,
507 uint32_t byte_offset)
508 {
509 struct ISCI_IO_REQUEST *isci_request =
510 (struct ISCI_IO_REQUEST *)scif_user_io_request;
511
512 return (isci_request->ccb->csio.data_ptr + byte_offset);
513 }
514
515 /**
516 * @brief This callback method asks the user to provide the number of
517 * bytes to be transfered as part of this request.
518 *
519 * @param[in] scif_user_io_request This parameter points to the user's
520 * IO request object. It is a cookie that allows the user to
521 * provide the necessary information for this callback.
522 *
523 * @return This method returns the number of payload data bytes to be
524 * transfered for this IO request.
525 */
526 uint32_t
527 scif_cb_io_request_get_transfer_length(void * scif_user_io_request)
528 {
529 struct ISCI_IO_REQUEST *isci_request =
530 (struct ISCI_IO_REQUEST *)scif_user_io_request;
531
532 return (isci_request->ccb->csio.dxfer_len);
533
534 }
535
536 /**
537 * @brief This callback method asks the user to provide the data direction
538 * for this request.
539 *
540 * @param[in] scif_user_io_request This parameter points to the user's
541 * IO request object. It is a cookie that allows the user to
542 * provide the necessary information for this callback.
543 *
544 * @return This method returns the value of SCI_IO_REQUEST_DATA_OUT,
545 * SCI_IO_REQUEST_DATA_IN, or SCI_IO_REQUEST_NO_DATA.
546 */
547 SCI_IO_REQUEST_DATA_DIRECTION
548 scif_cb_io_request_get_data_direction(void * scif_user_io_request)
549 {
550 struct ISCI_IO_REQUEST *isci_request =
551 (struct ISCI_IO_REQUEST *)scif_user_io_request;
552
553 switch (isci_request->ccb->ccb_h.flags & CAM_DIR_MASK) {
554 case CAM_DIR_IN:
555 return (SCI_IO_REQUEST_DATA_IN);
556 case CAM_DIR_OUT:
557 return (SCI_IO_REQUEST_DATA_OUT);
558 default:
559 return (SCI_IO_REQUEST_NO_DATA);
560 }
561 }
562
563 /**
564 * @brief This callback method asks the user to provide the address
565 * to where the next Scatter-Gather Element is located.
566 *
567 * Details regarding usage:
568 * - Regarding the first SGE: the user should initialize an index,
569 * or a pointer, prior to construction of the request that will
570 * reference the very first scatter-gather element. This is
571 * important since this method is called for every scatter-gather
572 * element, including the first element.
573 * - Regarding the last SGE: the user should return NULL from this
574 * method when this method is called and the SGL has exhausted
575 * all elements.
576 *
577 * @param[in] scif_user_io_request This parameter points to the user's
578 * IO request object. It is a cookie that allows the user to
579 * provide the necessary information for this callback.
580 * @param[in] current_sge_address This parameter specifies the address for
581 * the current SGE (i.e. the one that has just processed).
582 * @param[out] next_sge An address specifying the location for the next scatter
583 * gather element to be processed.
584 *
585 * @return None.
586 */
587 void
588 scif_cb_io_request_get_next_sge(void * scif_user_io_request,
589 void * current_sge_address, void ** next_sge)
590 {
591 struct ISCI_IO_REQUEST *isci_request =
592 (struct ISCI_IO_REQUEST *)scif_user_io_request;
593
594 if (isci_request->current_sge_index == isci_request->num_segments)
595 *next_sge = NULL;
596 else {
597 bus_dma_segment_t *sge =
598 &isci_request->sge[isci_request->current_sge_index];
599
600 isci_request->current_sge_index++;
601 *next_sge = sge;
602 }
603 }
604
605 /**
606 * @brief This callback method asks the user to provide the contents of the
607 * "address" field in the Scatter-Gather Element.
608 *
609 * @param[in] scif_user_io_request This parameter points to the user's
610 * IO request object. It is a cookie that allows the user to
611 * provide the necessary information for this callback.
612 * @param[in] sge_address This parameter specifies the address for the
613 * SGE from which to retrieve the address field.
614 *
615 * @return A physical address specifying the contents of the SGE's address
616 * field.
617 */
618 SCI_PHYSICAL_ADDRESS
619 scif_cb_sge_get_address_field(void *scif_user_io_request, void *sge_address)
620 {
621 bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;
622
623 return ((SCI_PHYSICAL_ADDRESS)sge->ds_addr);
624 }
625
626 /**
627 * @brief This callback method asks the user to provide the contents of the
628 * "length" field in the Scatter-Gather Element.
629 *
630 * @param[in] scif_user_io_request This parameter points to the user's
631 * IO request object. It is a cookie that allows the user to
632 * provide the necessary information for this callback.
633 * @param[in] sge_address This parameter specifies the address for the
634 * SGE from which to retrieve the address field.
635 *
636 * @return This method returns the length field specified inside the SGE
637 * referenced by the sge_address parameter.
638 */
639 uint32_t
640 scif_cb_sge_get_length_field(void *scif_user_io_request, void *sge_address)
641 {
642 bus_dma_segment_t *sge = (bus_dma_segment_t *)sge_address;
643
644 return ((uint32_t)sge->ds_len);
645 }
646
647 void
648 isci_request_construct(struct ISCI_REQUEST *request,
649 SCI_CONTROLLER_HANDLE_T scif_controller_handle,
650 bus_dma_tag_t io_buffer_dma_tag, bus_addr_t physical_address)
651 {
652
653 request->controller_handle = scif_controller_handle;
654 request->dma_tag = io_buffer_dma_tag;
655 request->physical_address = physical_address;
656 bus_dmamap_create(request->dma_tag, 0, &request->dma_map);
657 callout_init(&request->timer, CALLOUT_MPSAFE);
658 }
659
/**
 * @brief bus_dmamap_load() callback: build and start an IO request once its
 *        data buffer has been mapped.
 *
 * Records the segment list on the request, constructs the framework IO
 * object, syncs the DMA map, starts the IO on the controller, and arms the
 * timeout callout.  Construction or start failures are routed through
 * isci_io_request_complete() with the SCI status.
 *
 * @param[in] arg   The struct ISCI_IO_REQUEST being loaded.
 * @param[in] seg   Array of DMA segments for the data buffer.
 * @param[in] nseg  Number of entries in @c seg.
 * @param[in] error Non-zero if the DMA load itself failed.
 */
static void
isci_io_request_construct(void *arg, bus_dma_segment_t *seg, int nseg,
    int error)
{
	union ccb *ccb;
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;
	SCI_REMOTE_DEVICE_HANDLE_T *device = io_request->parent.remote_device_handle;
	SCI_STATUS status;

	io_request->num_segments = nseg;
	io_request->sge = seg;
	ccb = io_request->ccb;

	if (error != 0) {
		/* DMA mapping failed - fail the CCB back to CAM.
		 * NOTE(review): this path does not return the request object
		 * to the controller's pool - verify it cannot leak requests.
		 */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/* The framework request object is laid out immediately after the
	 * struct ISCI_IO_REQUEST in memory. */
	status = scif_io_request_construct(
	    io_request->parent.controller_handle,
	    io_request->parent.remote_device_handle,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)io_request,
	    (void *)((char*)io_request + sizeof(struct ISCI_IO_REQUEST)),
	    &io_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	/* Allow the framework object to find this request again later. */
	sci_object_set_association(io_request->sci_object, io_request);

	bus_dmamap_sync(io_request->parent.dma_tag, io_request->parent.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	status = (SCI_STATUS)scif_controller_start_io(
	    io_request->parent.controller_handle, device,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(io_request->parent.controller_handle,
		    device, io_request, (SCI_IO_STATUS)status);
		return;
	}

	/* Arm the per-request timeout unless CAM asked for no timeout. */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
		    isci_io_request_timeout, io_request);
}
711
/**
 * @brief Execute a SCSI IO CCB on the given controller.
 *
 * Validates the target, allocates a request object from the controller's
 * pool, and kicks off the DMA load whose callback
 * (isci_io_request_construct) builds and starts the actual IO.
 *
 * @param[in] ccb         The XPT_SCSI_IO CCB from CAM.
 * @param[in] controller  The isci controller to issue the IO on.
 */
void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	struct ccb_scsiio *csio = &ccb->csio;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		/* No device at this target id - fail the CCB immediately. */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		/* Out of request objects - ask CAM to requeue the CCB and
		 * freeze the simq; the freeze is released from the
		 * completion path when a request frees up. */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	/* This driver only supports a single virtual data buffer per CCB. */
	if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) != 0)
		panic("Unexpected CAM_SCATTER_VALID flag! flags = 0x%x\n",
		    ccb->ccb_h.flags);

	if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
		panic("Unexpected CAM_DATA_PHYS flag! flags = 0x%x\n",
		    ccb->ccb_h.flags);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		/* Map the data buffer; isci_io_request_construct() runs
		 * (possibly later) as the load callback and starts the IO. */
		error = bus_dmamap_load(io_request->parent.dma_tag,
		    io_request->parent.dma_map, csio->data_ptr, csio->dxfer_len,
		    isci_io_request_construct, io_request, 0x0);

		/* A resource shortage from BUSDMA will be automatically
		 * continued at a later point, pushing the CCB processing
		 * forward, which will in turn unfreeze the simq.
		 */
		if (error == EINPROGRESS) {
			xpt_freeze_simq(controller->sim, 1);
			ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
		}
	} else
		/* No data phase - construct and start the IO directly. */
		isci_io_request_construct(io_request, NULL, 0, 0);
}
775
776 void
777 isci_io_request_timeout(void *arg)
778 {
779 struct ISCI_IO_REQUEST *request = (struct ISCI_IO_REQUEST *)arg;
780 struct ISCI_REMOTE_DEVICE *remote_device = (struct ISCI_REMOTE_DEVICE *)
781 sci_object_get_association(request->parent.remote_device_handle);
782 struct ISCI_CONTROLLER *controller = remote_device->domain->controller;
783
784 mtx_lock(&controller->lock);
785 isci_remote_device_reset(remote_device, NULL);
786 mtx_unlock(&controller->lock);
787 }
788
789 #if __FreeBSD_version >= 900026
790 /**
791 * @brief This callback method gets the size of and pointer to the buffer
792 * (if any) containing the request buffer for an SMP request.
793 *
794 * @param[in] core_request This parameter specifies the SCI core's request
795 * object associated with the SMP request.
796 * @param[out] smp_request_buffer This parameter returns a pointer to the
797 * payload portion of the SMP request - i.e. everything after
798 * the SMP request header.
799 *
800 * @return Size of the request buffer in bytes. This does *not* include
801 * the size of the SMP request header.
802 */
803 static uint32_t
804 smp_io_request_cb_get_request_buffer(SCI_IO_REQUEST_HANDLE_T core_request,
805 uint8_t ** smp_request_buffer)
806 {
807 struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
808 sci_object_get_association(sci_object_get_association(core_request));
809
810 *smp_request_buffer = isci_request->ccb->smpio.smp_request +
811 sizeof(SMP_REQUEST_HEADER_T);
812
813 return (isci_request->ccb->smpio.smp_request_len -
814 sizeof(SMP_REQUEST_HEADER_T));
815 }
816
817 /**
818 * @brief This callback method gets the SMP function for an SMP request.
819 *
820 * @param[in] core_request This parameter specifies the SCI core's request
821 * object associated with the SMP request.
822 *
823 * @return SMP function for the SMP request.
824 */
825 static uint8_t
826 smp_io_request_cb_get_function(SCI_IO_REQUEST_HANDLE_T core_request)
827 {
828 struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
829 sci_object_get_association(sci_object_get_association(core_request));
830 SMP_REQUEST_HEADER_T *header =
831 (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
832
833 return (header->function);
834 }
835
836 /**
837 * @brief This callback method gets the SMP frame type for an SMP request.
838 *
839 * @param[in] core_request This parameter specifies the SCI core's request
840 * object associated with the SMP request.
841 *
842 * @return SMP frame type for the SMP request.
843 */
844 static uint8_t
845 smp_io_request_cb_get_frame_type(SCI_IO_REQUEST_HANDLE_T core_request)
846 {
847 struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
848 sci_object_get_association(sci_object_get_association(core_request));
849 SMP_REQUEST_HEADER_T *header =
850 (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
851
852 return (header->smp_frame_type);
853 }
854
855 /**
856 * @brief This callback method gets the allocated response length for an SMP request.
857 *
858 * @param[in] core_request This parameter specifies the SCI core's request
859 * object associated with the SMP request.
860 *
861 * @return Allocated response length for the SMP request.
862 */
863 static uint8_t
864 smp_io_request_cb_get_allocated_response_length(
865 SCI_IO_REQUEST_HANDLE_T core_request)
866 {
867 struct ISCI_IO_REQUEST *isci_request = (struct ISCI_IO_REQUEST *)
868 sci_object_get_association(sci_object_get_association(core_request));
869 SMP_REQUEST_HEADER_T *header =
870 (SMP_REQUEST_HEADER_T *)isci_request->ccb->smpio.smp_request;
871
872 return (header->allocated_response_length);
873 }
874
875 static SCI_STATUS
876 isci_smp_request_construct(struct ISCI_IO_REQUEST *request)
877 {
878 SCI_STATUS status;
879 SCIC_SMP_PASSTHRU_REQUEST_CALLBACKS_T callbacks;
880
881 status = scif_request_construct(request->parent.controller_handle,
882 request->parent.remote_device_handle, SCI_CONTROLLER_INVALID_IO_TAG,
883 (void *)request,
884 (void *)((char*)request + sizeof(struct ISCI_IO_REQUEST)),
885 &request->sci_object);
886
887 if (status == SCI_SUCCESS) {
888 callbacks.scic_cb_smp_passthru_get_request =
889 &smp_io_request_cb_get_request_buffer;
890 callbacks.scic_cb_smp_passthru_get_function =
891 &smp_io_request_cb_get_function;
892 callbacks.scic_cb_smp_passthru_get_frame_type =
893 &smp_io_request_cb_get_frame_type;
894 callbacks.scic_cb_smp_passthru_get_allocated_response_length =
895 &smp_io_request_cb_get_allocated_response_length;
896
897 /* create the smp passthrough part of the io request */
898 status = scic_io_request_construct_smp_pass_through(
899 scif_io_request_get_scic_handle(request->sci_object),
900 &callbacks);
901 }
902
903 return (status);
904 }
905
906 void
907 isci_io_request_execute_smp_io(union ccb *ccb,
908 struct ISCI_CONTROLLER *controller)
909 {
910 SCI_STATUS status;
911 target_id_t target_id = ccb->ccb_h.target_id;
912 struct ISCI_REQUEST *request;
913 struct ISCI_IO_REQUEST *io_request;
914 SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
915 struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];
916
917 /* SMP commands are sent to an end device, because SMP devices are not
918 * exposed to the kernel. It is our responsibility to use this method
919 * to get the SMP device that contains the specified end device. If
920 * the device is direct-attached, the handle will come back NULL, and
921 * we'll just fail the SMP_IO with DEV_NOT_THERE.
922 */
923 scif_remote_device_get_containing_device(end_device->sci_object,
924 &smp_device_handle);
925
926 if (smp_device_handle == NULL) {
927 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
928 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
929 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
930 xpt_done(ccb);
931 return;
932 }
933
934 if (sci_pool_empty(controller->request_pool)) {
935 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
936 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
937 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
938 xpt_freeze_simq(controller->sim, 1);
939 controller->is_frozen = TRUE;
940 xpt_done(ccb);
941 return;
942 }
943
944 ASSERT(device->is_resetting == FALSE);
945
946 sci_pool_get(controller->request_pool, request);
947 io_request = (struct ISCI_IO_REQUEST *)request;
948
949 io_request->ccb = ccb;
950 io_request->parent.remote_device_handle = smp_device_handle;
951
952 status = isci_smp_request_construct(io_request);
953
954 if (status != SCI_SUCCESS) {
955 isci_io_request_complete(controller->scif_controller_handle,
956 smp_device_handle, io_request, (SCI_IO_STATUS)status);
957 return;
958 }
959
960 sci_object_set_association(io_request->sci_object, io_request);
961
962 status = (SCI_STATUS) scif_controller_start_io(
963 controller->scif_controller_handle, smp_device_handle,
964 io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);
965
966 if (status != SCI_SUCCESS) {
967 isci_io_request_complete(controller->scif_controller_handle,
968 smp_device_handle, io_request, (SCI_IO_STATUS)status);
969 return;
970 }
971
972 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
973 callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
974 isci_io_request_timeout, request);
975 }
976 #endif
Cache object: d27cacd225d0303fb3aca074bcecd4f0
|