1 /*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "dev/mrsas/mrsas.h"
37
38 #include <cam/cam.h>
39 #include <cam/cam_ccb.h>
40 #include <cam/cam_sim.h>
41 #include <cam/cam_xpt_sim.h>
42 #include <cam/cam_debug.h>
43 #include <cam/cam_periph.h>
44 #include <cam/cam_xpt_periph.h>
45
46 #include <cam/scsi/scsi_all.h>
47 #include <cam/scsi/scsi_message.h>
48 #include <sys/taskqueue.h>
49 #include <sys/kernel.h>
50
51 #include <sys/time.h> /* XXX for pcpu.h */
52 #include <sys/pcpu.h> /* XXX for PCPU_GET */
53
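/*
 * Note: this maps the Linux-style smp_processor_id() helper onto FreeBSD's
 * PCPU_GET(cpuid); it is used below to spread request descriptors across the
 * available MSI-x reply queues based on the submitting CPU.
 */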
54 #define smp_processor_id() PCPU_GET(cpuid)
55
56 /*
57 * Function prototypes
58 */
59 int mrsas_cam_attach(struct mrsas_softc *sc);
60 int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
61 int mrsas_bus_scan(struct mrsas_softc *sc);
62 int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
63 int
64 mrsas_map_request(struct mrsas_softc *sc,
65 struct mrsas_mpt_cmd *cmd, union ccb *ccb);
66 int
67 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
68 union ccb *ccb);
69 int
70 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
71 union ccb *ccb);
72 int
73 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
74 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
75 int
76 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
77 union ccb *ccb, u_int32_t device_id,
78 MRSAS_RAID_SCSI_IO_REQUEST * io_request);
79 void mrsas_xpt_freeze(struct mrsas_softc *sc);
80 void mrsas_xpt_release(struct mrsas_softc *sc);
81 void mrsas_cam_detach(struct mrsas_softc *sc);
82 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
83 void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
84 void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
85 void
86 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
87 u_int32_t req_desc_hi);
88 void
89 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
90 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
91 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
92 u_int32_t ld_block_size);
93 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
94 static void mrsas_cam_poll(struct cam_sim *sim);
95 static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
96 static void mrsas_scsiio_timeout(void *data);
97 static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
98 static void mrsas_tm_response_code(struct mrsas_softc *sc,
99 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
100 static int mrsas_issue_tm(struct mrsas_softc *sc,
101 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
102 static void
103 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
104 int nseg, int error);
105 static int32_t
106 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
107 union ccb *ccb);
108
109 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
110 bus_dma_segment_t *segs, int nsegs);
111 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
112 bus_dma_segment_t *segs, int nseg);
113 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
114 bus_dma_segment_t *segs, int nseg);
115
116 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
117 MRSAS_REQUEST_DESCRIPTOR_UNION *
118 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
119
120 extern int mrsas_reset_targets(struct mrsas_softc *sc);
121 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
122 extern u_int32_t
123 MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
124 extern void mrsas_isr(void *arg);
125 extern void mrsas_aen_handler(struct mrsas_softc *sc);
126 extern u_int8_t
127 MR_BuildRaidContext(struct mrsas_softc *sc,
128 struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
129 MR_DRV_RAID_MAP_ALL * map);
130 extern u_int16_t
131 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
132 MR_DRV_RAID_MAP_ALL * map);
133 extern u_int16_t
134 mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
135 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
136 extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
137 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
138 extern void mrsas_disable_intr(struct mrsas_softc *sc);
139 extern void mrsas_enable_intr(struct mrsas_softc *sc);
140 void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
141 struct mrsas_mpt_cmd *cmd);
142
143 /*
144 * mrsas_cam_attach: Main entry to CAM subsystem
145 * input: Adapter instance soft state
146 *
147 * This function is called from mrsas_attach() during initialization to perform
148 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
149 * earlier, it would also initiate a bus scan.
150 */
151 int
152 mrsas_cam_attach(struct mrsas_softc *sc)
153 {
154 struct cam_devq *devq;
155 int mrsas_cam_depth;
156
157 mrsas_cam_depth = sc->max_scsi_cmds;
158
159 if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
160 device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
161 return (ENOMEM);
162 }
163 /*
164 * Create SIM for bus 0 and register, also create path
165 */
166 sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
167 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
168 mrsas_cam_depth, devq);
169 if (sc->sim_0 == NULL) {
170 cam_simq_free(devq);
171 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
172 return (ENXIO);
173 }
174 /* Initialize taskqueue for Event Handling */
175 TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
176 sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
177 taskqueue_thread_enqueue, &sc->ev_tq);
178
179 /* Run the task queue with lowest priority */
180 taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
181 device_get_nameunit(sc->mrsas_dev));
182 mtx_lock(&sc->sim_lock);
183 if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
184 cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */
185 mtx_unlock(&sc->sim_lock);
186 return (ENXIO);
187 }
188 if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
189 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
190 xpt_bus_deregister(cam_sim_path(sc->sim_0));
191 cam_sim_free(sc->sim_0, TRUE); /* passing true will free the
192 * devq */
193 mtx_unlock(&sc->sim_lock);
194 return (ENXIO);
195 }
196 mtx_unlock(&sc->sim_lock);
197
198 /*
199 * Create SIM for bus 1 and register, also create path
200 */
201 sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
202 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
203 mrsas_cam_depth, devq);
204 if (sc->sim_1 == NULL) {
205 cam_simq_free(devq);
206 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
207 return (ENXIO);
208 }
209 mtx_lock(&sc->sim_lock);
210 if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
211 cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */
212 mtx_unlock(&sc->sim_lock);
213 return (ENXIO);
214 }
215 if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
216 CAM_TARGET_WILDCARD,
217 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
218 xpt_bus_deregister(cam_sim_path(sc->sim_1));
219 cam_sim_free(sc->sim_1, TRUE);
220 mtx_unlock(&sc->sim_lock);
221 return (ENXIO);
222 }
223 mtx_unlock(&sc->sim_lock);
224
225 #if (__FreeBSD_version <= 704000)
226 if (mrsas_bus_scan(sc)) {
227 device_printf(sc->mrsas_dev, "Error in bus scan.\n");
228 return (1);
229 }
230 #endif
231 return (0);
232 }
233
234 /*
 * mrsas_cam_detach: De-allocates and tears down CAM
236 * input: Adapter instance soft state
237 *
238 * De-registers and frees the paths and SIMs.
239 */
240 void
241 mrsas_cam_detach(struct mrsas_softc *sc)
242 {
243 if (sc->ev_tq != NULL)
244 taskqueue_free(sc->ev_tq);
245 mtx_lock(&sc->sim_lock);
246 if (sc->path_0)
247 xpt_free_path(sc->path_0);
248 if (sc->sim_0) {
249 xpt_bus_deregister(cam_sim_path(sc->sim_0));
250 cam_sim_free(sc->sim_0, FALSE);
251 }
252 if (sc->path_1)
253 xpt_free_path(sc->path_1);
254 if (sc->sim_1) {
255 xpt_bus_deregister(cam_sim_path(sc->sim_1));
256 cam_sim_free(sc->sim_1, TRUE);
257 }
258 mtx_unlock(&sc->sim_lock);
259 }
260
261 /*
262 * mrsas_action: SIM callback entry point
 * input: pointer to SIM
 *        pointer to CAM Control Block
264 *
265 * This function processes CAM subsystem requests. The type of request is stored
266 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
267 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
268 */
269 static void
270 mrsas_action(struct cam_sim *sim, union ccb *ccb)
271 {
272 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
273 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
274 u_int32_t device_id;
275
276 /*
 * Check if the system is going down
278 * or the adapter is in unrecoverable critical error
279 */
280 if (sc->remove_in_progress ||
281 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
282 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
283 xpt_done(ccb);
284 return;
285 }
286
287 switch (ccb->ccb_h.func_code) {
288 case XPT_SCSI_IO:
289 {
290 device_id = ccb_h->target_id;
291
292 /*
293 * bus 0 is LD, bus 1 is for system-PD
294 */
295 if (cam_sim_bus(sim) == 1 &&
296 sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
297 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
298 xpt_done(ccb);
299 } else {
300 if (mrsas_startio(sc, sim, ccb)) {
301 ccb->ccb_h.status |= CAM_REQ_INVALID;
302 xpt_done(ccb);
303 }
304 }
305 break;
306 }
307 case XPT_ABORT:
308 {
309 ccb->ccb_h.status = CAM_UA_ABORT;
310 xpt_done(ccb);
311 break;
312 }
313 case XPT_RESET_BUS:
314 {
315 xpt_done(ccb);
316 break;
317 }
318 case XPT_GET_TRAN_SETTINGS:
319 {
320 ccb->cts.protocol = PROTO_SCSI;
321 ccb->cts.protocol_version = SCSI_REV_2;
322 ccb->cts.transport = XPORT_SPI;
323 ccb->cts.transport_version = 2;
324 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
325 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
326 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
327 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
328 ccb->ccb_h.status = CAM_REQ_CMP;
329 xpt_done(ccb);
330 break;
331 }
332 case XPT_SET_TRAN_SETTINGS:
333 {
334 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
335 xpt_done(ccb);
336 break;
337 }
338 case XPT_CALC_GEOMETRY:
339 {
340 cam_calc_geometry(&ccb->ccg, 1);
341 xpt_done(ccb);
342 break;
343 }
344 case XPT_PATH_INQ:
345 {
346 ccb->cpi.version_num = 1;
347 ccb->cpi.hba_inquiry = 0;
348 ccb->cpi.target_sprt = 0;
349 #if (__FreeBSD_version >= 902001)
350 ccb->cpi.hba_misc = PIM_UNMAPPED;
351 #else
352 ccb->cpi.hba_misc = 0;
353 #endif
354 ccb->cpi.hba_eng_cnt = 0;
355 ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
356 ccb->cpi.unit_number = cam_sim_unit(sim);
357 ccb->cpi.bus_id = cam_sim_bus(sim);
358 ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
359 ccb->cpi.base_transfer_speed = 150000;
360 strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
361 strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
362 strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
363 ccb->cpi.transport = XPORT_SPI;
364 ccb->cpi.transport_version = 2;
365 ccb->cpi.protocol = PROTO_SCSI;
366 ccb->cpi.protocol_version = SCSI_REV_2;
367 if (ccb->cpi.bus_id == 0)
368 ccb->cpi.max_target = MRSAS_MAX_PD - 1;
369 else
370 ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
371 #if (__FreeBSD_version > 704000)
372 ccb->cpi.maxio = sc->max_sectors_per_req * 512;
373 #endif
374 ccb->ccb_h.status = CAM_REQ_CMP;
375 xpt_done(ccb);
376 break;
377 }
378 default:
379 {
380 ccb->ccb_h.status = CAM_REQ_INVALID;
381 xpt_done(ccb);
382 break;
383 }
384 }
385 }
386
387 /*
388 * mrsas_scsiio_timeout: Callback function for IO timed out
389 * input: mpt command context
390 *
 * This function executes if the timer expires after the timeout value
 * provided by the CCB header from the CAM layer. The driver runs this timer
 * for all DCMD and LDIO requests coming from the CAM layer. It is the
 * callback for IO timeout and runs in a no-sleep context. It sets
 * do_timedout_reset in the adapter context so that OCR/kill adapter is
 * executed from the ocr_thread context.
396 */
397 static void
398 mrsas_scsiio_timeout(void *data)
399 {
400 struct mrsas_mpt_cmd *cmd;
401 struct mrsas_softc *sc;
402 u_int32_t target_id;
403
404 if (!data)
405 return;
406
407 cmd = (struct mrsas_mpt_cmd *)data;
408 sc = cmd->sc;
409
410 if (cmd->ccb_ptr == NULL) {
411 printf("command timeout with NULL ccb\n");
412 return;
413 }
414
/*
 * The callout below is a dummy entry so that it can be cancelled from
 * mrsas_cmd_done(). The controller will then go through OCR or kill the
 * adapter, based on the controller's OCR enable/disable property, from the
 * ocr_thread context.
 */
421 #if (__FreeBSD_version >= 1000510)
422 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
423 mrsas_scsiio_timeout, cmd, 0);
424 #else
425 callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
426 mrsas_scsiio_timeout, cmd);
427 #endif
428
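/*
 * Fold both buses into a single target_reset_pool[] index space: targets on
 * the second bus are offset past the first bus's target range.
 */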
429 if (cmd->ccb_ptr->cpi.bus_id == 0)
430 target_id = cmd->ccb_ptr->ccb_h.target_id;
431 else
432 target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));
433
434 /* Save the cmd to be processed for TM, if it is not there in the array */
435 if (sc->target_reset_pool[target_id] == NULL) {
436 sc->target_reset_pool[target_id] = cmd;
437 mrsas_atomic_inc(&sc->target_reset_outstanding);
438 }
439
440 return;
441 }
442
443 /*
444 * mrsas_startio: SCSI IO entry point
445 * input: Adapter instance soft state
446 * pointer to CAM Control Block
447 *
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and, depending on whether the IO is a read/write command or
 * not, calls mrsas_build_ldio_rw(), mrsas_build_ldio_nonrw() or
 * mrsas_build_syspdio(). It returns 0 if the command is sent to firmware
 * successfully, otherwise it returns 1.
452 */
453 static int32_t
454 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
455 union ccb *ccb)
456 {
457 struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
458 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
459 struct ccb_scsiio *csio = &(ccb->csio);
460 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
461 u_int8_t cmd_type;
462
463 if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
464 (!sc->fw_sync_cache_support)) {
465 ccb->ccb_h.status = CAM_REQ_CMP;
466 xpt_done(ccb);
467 return (0);
468 }
469 ccb_h->status |= CAM_SIM_QUEUED;
470
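/*
 * Driver-level throttle: if this IO would push the number of outstanding
 * firmware commands beyond max_scsi_cmds, ask CAM to requeue the request
 * instead of submitting it.
 */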
471 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
472 ccb_h->status |= CAM_REQUEUE_REQ;
473 xpt_done(ccb);
474 mrsas_atomic_dec(&sc->fw_outstanding);
475 return (0);
476 }
477
478 cmd = mrsas_get_mpt_cmd(sc);
479
480 if (!cmd) {
481 ccb_h->status |= CAM_REQUEUE_REQ;
482 xpt_done(ccb);
483 mrsas_atomic_dec(&sc->fw_outstanding);
484 return (0);
485 }
486
487 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
488 if (ccb_h->flags & CAM_DIR_IN)
489 cmd->flags |= MRSAS_DIR_IN;
490 if (ccb_h->flags & CAM_DIR_OUT)
491 cmd->flags |= MRSAS_DIR_OUT;
492 } else
493 cmd->flags = MRSAS_DIR_NONE; /* no data */
494
495 /* For FreeBSD 9.2 and higher */
496 #if (__FreeBSD_version >= 902001)
497 /*
498 * XXX We don't yet support physical addresses here.
499 */
500 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
501 case CAM_DATA_PADDR:
502 case CAM_DATA_SG_PADDR:
503 device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
504 __func__);
505 mrsas_release_mpt_cmd(cmd);
506 ccb_h->status = CAM_REQ_INVALID;
507 ccb_h->status &= ~CAM_SIM_QUEUED;
508 goto done;
509 case CAM_DATA_SG:
510 device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
511 __func__);
512 mrsas_release_mpt_cmd(cmd);
513 ccb_h->status = CAM_REQ_INVALID;
514 goto done;
515 case CAM_DATA_VADDR:
516 cmd->length = csio->dxfer_len;
517 if (cmd->length)
518 cmd->data = csio->data_ptr;
519 break;
520 case CAM_DATA_BIO:
521 cmd->length = csio->dxfer_len;
522 if (cmd->length)
523 cmd->data = csio->data_ptr;
524 break;
525 default:
526 ccb->ccb_h.status = CAM_REQ_INVALID;
527 goto done;
528 }
529 #else
530 if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
531 if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
532 cmd->length = csio->dxfer_len;
533 if (cmd->length)
534 cmd->data = csio->data_ptr;
535 } else {
536 mrsas_release_mpt_cmd(cmd);
537 ccb_h->status = CAM_REQ_INVALID;
538 goto done;
539 }
540 } else { /* Data addresses are physical. */
541 mrsas_release_mpt_cmd(cmd);
542 ccb_h->status = CAM_REQ_INVALID;
543 ccb_h->status &= ~CAM_SIM_QUEUED;
544 goto done;
545 }
546 #endif
547 /* save ccb ptr */
548 cmd->ccb_ptr = ccb;
549
550 req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
551 if (!req_desc) {
552 device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
553 return (FAIL);
554 }
555 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
556 cmd->request_desc = req_desc;
557
558 if (ccb_h->flags & CAM_CDB_POINTER)
559 bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
560 else
561 bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
562 mtx_lock(&sc->raidmap_lock);
563
564 /* Check for IO type READ-WRITE targeted for Logical Volume */
565 cmd_type = mrsas_find_io_type(sim, ccb);
566 switch (cmd_type) {
567 case READ_WRITE_LDIO:
568 /* Build READ-WRITE IO for Logical Volume */
569 if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
570 device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
571 mtx_unlock(&sc->raidmap_lock);
572 mrsas_release_mpt_cmd(cmd);
573 return (1);
574 }
575 break;
576 case NON_READ_WRITE_LDIO:
577 /* Build NON READ-WRITE IO for Logical Volume */
578 if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
579 device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
580 mtx_unlock(&sc->raidmap_lock);
581 mrsas_release_mpt_cmd(cmd);
582 return (1);
583 }
584 break;
585 case READ_WRITE_SYSPDIO:
586 case NON_READ_WRITE_SYSPDIO:
587 if (sc->secure_jbod_support &&
588 (cmd_type == NON_READ_WRITE_SYSPDIO)) {
589 /* Build NON-RW IO for JBOD */
590 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
591 device_printf(sc->mrsas_dev,
592 "Build SYSPDIO failed.\n");
593 mtx_unlock(&sc->raidmap_lock);
594 mrsas_release_mpt_cmd(cmd);
595 return (1);
596 }
597 } else {
598 /* Build RW IO for JBOD */
599 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
600 device_printf(sc->mrsas_dev,
601 "Build SYSPDIO failed.\n");
602 mtx_unlock(&sc->raidmap_lock);
603 mrsas_release_mpt_cmd(cmd);
604 return (1);
605 }
606 }
607 }
608 mtx_unlock(&sc->raidmap_lock);
609
610 if (cmd->flags == MRSAS_DIR_IN) /* from device */
611 cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_READ);
612 else if (cmd->flags == MRSAS_DIR_OUT) /* to device */
613 cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_WRITE);
614
615 cmd->io_request->SGLFlags = htole16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
616 cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
617 cmd->io_request->SenseBufferLowAddress = htole32(cmd->sense_phys_addr & 0xFFFFFFFF);
618 cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
619
620 req_desc = cmd->request_desc;
621 req_desc->SCSIIO.SMID = htole16(cmd->index);
622
623 /*
 * Start timer for IO timeout. The timeout value used here is 180 seconds.
625 */
626 cmd->callout_owner = true;
627 #if (__FreeBSD_version >= 1000510)
628 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
629 mrsas_scsiio_timeout, cmd, 0);
630 #else
631 callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
632 mrsas_scsiio_timeout, cmd);
633 #endif
634
635 if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
636 sc->io_cmds_highwater++;
637
/*
 * If this IO is RAID 1/10 fast-path write capable, try to get a second
 * command from the pool and construct it. FW has confirmed that the LBA
 * values of the two PDs corresponding to a single R1/10 LD are always the
 * same.
 */
/*
 * The driver-side count should always be less than max_fw_cmds in order to
 * get a new command.
 */
649 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
650 mrsas_prepare_secondRaid1_IO(sc, cmd);
651 mrsas_fire_cmd(sc, req_desc->addr.u.low,
652 req_desc->addr.u.high);
653 r1_cmd = cmd->peer_cmd;
654 mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
655 r1_cmd->request_desc->addr.u.high);
656 } else {
657 mrsas_fire_cmd(sc, req_desc->addr.u.low,
658 req_desc->addr.u.high);
659 }
660
661 return (0);
662
663 done:
664 xpt_done(ccb);
665 mrsas_atomic_dec(&sc->fw_outstanding);
666 return (0);
667 }
668
669 /*
 * mrsas_find_io_type: Determines if IO is read/write or not
 * input: pointer to CAM Control Block
 *
 * This function determines whether the IO is a read/write command. It returns
 * READ_WRITE_LDIO or READ_WRITE_SYSPDIO for read/write commands and
 * NON_READ_WRITE_LDIO or NON_READ_WRITE_SYSPDIO otherwise, depending on the bus.
675 */
676 int
677 mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
678 {
679 struct ccb_scsiio *csio = &(ccb->csio);
680
681 switch (csio->cdb_io.cdb_bytes[0]) {
682 case READ_10:
683 case WRITE_10:
684 case READ_12:
685 case WRITE_12:
686 case READ_6:
687 case WRITE_6:
688 case READ_16:
689 case WRITE_16:
690 return (cam_sim_bus(sim) ?
691 READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
692 default:
693 return (cam_sim_bus(sim) ?
694 NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
695 }
696 }
697
698 /*
699 * mrsas_get_mpt_cmd: Get a cmd from free command pool
700 * input: Adapter instance soft state
701 *
702 * This function removes an MPT command from the command free list and
703 * initializes it.
704 */
705 struct mrsas_mpt_cmd *
706 mrsas_get_mpt_cmd(struct mrsas_softc *sc)
707 {
708 struct mrsas_mpt_cmd *cmd = NULL;
709
710 mtx_lock(&sc->mpt_cmd_pool_lock);
711 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
712 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
713 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
714 } else {
715 goto out;
716 }
717
718 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
719 cmd->data = NULL;
720 cmd->length = 0;
721 cmd->flags = 0;
722 cmd->error_code = 0;
723 cmd->load_balance = 0;
724 cmd->ccb_ptr = NULL;
725 out:
726 mtx_unlock(&sc->mpt_cmd_pool_lock);
727 return cmd;
728 }
729
730 /*
731 * mrsas_release_mpt_cmd: Return a cmd to free command pool
732 * input: Command packet for return to free command pool
733 *
734 * This function returns an MPT command to the free command list.
735 */
736 void
737 mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
738 {
739 struct mrsas_softc *sc = cmd->sc;
740
741 mtx_lock(&sc->mpt_cmd_pool_lock);
742 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
743 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
744 cmd->peer_cmd = NULL;
745 cmd->cmd_completed = 0;
746 memset((uint8_t *)cmd->io_request, 0,
747 sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
748 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
749 mtx_unlock(&sc->mpt_cmd_pool_lock);
750
751 return;
752 }
753
754 /*
755 * mrsas_get_request_desc: Get request descriptor from array
756 * input: Adapter instance soft state
757 * SMID index
758 *
759 * This function returns a pointer to the request descriptor.
760 */
761 MRSAS_REQUEST_DESCRIPTOR_UNION *
762 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
763 {
764 u_int8_t *p;
765
766 KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
767 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
768
769 return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
770 }
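
/*
 * Usage sketch (as done in mrsas_startio()): descriptors are indexed by
 * SMID - 1, while the SMID field itself carries the 1-based command index.
 *
 *	req_desc = mrsas_get_request_desc(sc, cmd->index - 1);
 *	req_desc->SCSIIO.SMID = htole16(cmd->index);
 */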
771
/* mrsas_prepare_secondRaid1_IO
 * Prepares the second IO of a RAID 1 fast-path write.
 */
775 void
776 mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
777 struct mrsas_mpt_cmd *cmd)
778 {
779 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
780 struct mrsas_mpt_cmd *r1_cmd;
781
782 r1_cmd = cmd->peer_cmd;
783 req_desc = cmd->request_desc;
784
785 /*
786 * copy the io request frame as well as 8 SGEs data for r1
787 * command
788 */
789 memcpy(r1_cmd->io_request, cmd->io_request,
790 (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
791 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
792 (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
793
794 /* sense buffer is different for r1 command */
795 r1_cmd->io_request->SenseBufferLowAddress = htole32(r1_cmd->sense_phys_addr & 0xFFFFFFFF);
796 r1_cmd->ccb_ptr = cmd->ccb_ptr;
797
798 req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
799 req_desc2->addr.Words = 0;
800 r1_cmd->request_desc = req_desc2;
801 req_desc2->SCSIIO.SMID = r1_cmd->index;
802 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
803 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
804 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
805 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
806 cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
807 r1_cmd->index;
808 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
809 cmd->index;
/*
 * The MSIxIndex of both commands' request descriptors should be the same.
 */
814 r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
815 /* span arm is different for r1 cmd */
816 r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
817 cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
818
819 }
820
821 /*
822 * mrsas_build_ldio_rw: Builds an LDIO command
823 * input: Adapter instance soft state
824 * Pointer to command packet
825 * Pointer to CCB
826 *
827 * This function builds the LDIO command packet. It returns 0 if the command is
828 * built successfully, otherwise it returns a 1.
829 */
830 int
831 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
832 union ccb *ccb)
833 {
834 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
835 struct ccb_scsiio *csio = &(ccb->csio);
836 u_int32_t device_id;
837 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
838
839 device_id = ccb_h->target_id;
840
841 io_request = cmd->io_request;
842 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
843 io_request->RaidContext.raid_context.status = 0;
844 io_request->RaidContext.raid_context.exStatus = 0;
845
846 /* just the cdb len, other flags zero, and ORed-in later for FP */
847 io_request->IoFlags = htole16(csio->cdb_len);
848
849 if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
850 device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
851
852 io_request->DataLength = htole32(cmd->length);
853
854 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
855 if (sc->is_ventura || sc->is_aero)
856 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
857 else {
/*
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * higher 8 bits of sge_count.
 */
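/*
 * Illustrative example: sge_count = 0x0123 yields numSGE = 0x23 and
 * numSGEExt = 0x01.
 */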
862 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
863 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
864 }
865
866 } else {
867 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
868 return (FAIL);
869 }
870 return (0);
871 }
872
/* stream detection on read and write IOs */
874 static void
875 mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
876 struct IO_REQUEST_INFO *io_info)
877 {
878 u_int32_t device_id = io_info->ldTgtId;
879 LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
u_int32_t *track_stream = &current_ld_SD->mruBitMap;
881 u_int32_t streamNum, shiftedValues, unshiftedValues;
882 u_int32_t indexValueMask, shiftedValuesMask;
883 int i;
884 boolean_t isReadAhead = false;
885 STREAM_DETECT *current_SD;
886
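/*
 * *track_stream packs MAX_STREAMS_TRACKED stream indices, each
 * BITS_PER_INDEX_STREAM bits wide, ordered from most recently used (lowest
 * bits) to least recently used; a hit below is promoted to the front.
 */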
887 /* find possible stream */
888 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
889 streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
890 STREAM_MASK;
current_SD = &current_ld_SD->streamTrack[streamNum];
892 /*
893 * if we found a stream, update the raid context and
894 * also update the mruBitMap
895 */
896 if (current_SD->nextSeqLBA &&
897 io_info->ldStartBlock >= current_SD->nextSeqLBA &&
898 (io_info->ldStartBlock <= (current_SD->nextSeqLBA+32)) &&
899 (current_SD->isRead == io_info->isRead)) {
900 if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
901 (!io_info->isRead || !isReadAhead)) {
/*
 * Once the API is available we need to change this.
 * At this point we are not allowing any gap.
 */
906 continue;
907 }
908 cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
909 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
910 /*
911 * update the mruBitMap LRU
912 */
913 shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1 ;
914 shiftedValues = ((*track_stream & shiftedValuesMask) <<
915 BITS_PER_INDEX_STREAM);
916 indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
917 unshiftedValues = (*track_stream) &
918 (~(shiftedValuesMask | indexValueMask));
919 *track_stream =
920 (unshiftedValues | shiftedValues | streamNum);
921 return;
922 }
923 }
924 /*
925 * if we did not find any stream, create a new one from the least recently used
926 */
927 streamNum = (*track_stream >>
928 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
current_SD = &current_ld_SD->streamTrack[streamNum];
930 current_SD->isRead = io_info->isRead;
931 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
932 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
933 return;
934 }
935
936 /*
937 * mrsas_setup_io: Set up data including Fast Path I/O
938 * input: Adapter instance soft state
939 * Pointer to command packet
940 * Pointer to CCB
941 *
 * This function sets up the read/write LD IO, including the fast-path
 * decision. It returns 0 if the command is built successfully, otherwise it
 * returns a 1.
944 */
945 int
946 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
947 union ccb *ccb, u_int32_t device_id,
948 MRSAS_RAID_SCSI_IO_REQUEST * io_request)
949 {
950 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
951 struct ccb_scsiio *csio = &(ccb->csio);
952 struct IO_REQUEST_INFO io_info;
953 MR_DRV_RAID_MAP_ALL *map_ptr;
954 struct mrsas_mpt_cmd *r1_cmd = NULL;
955
956 MR_LD_RAID *raid;
957 u_int8_t fp_possible;
958 u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
959 u_int32_t datalength = 0;
960
961 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
962
963 start_lba_lo = 0;
964 start_lba_hi = 0;
965 fp_possible = 0;
966
967 /*
968 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
969 */
970 if (csio->cdb_len == 6) {
971 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
972 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
973 ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
974 (u_int32_t)csio->cdb_io.cdb_bytes[3];
975 start_lba_lo &= 0x1FFFFF;
976 }
977 /*
 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
979 */
980 else if (csio->cdb_len == 10) {
981 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
982 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
983 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
984 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
985 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
986 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
987 }
988 /*
989 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
990 */
991 else if (csio->cdb_len == 12) {
992 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
993 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
994 ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
995 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
996 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
997 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
998 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
999 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
1000 }
1001 /*
 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
1003 */
1004 else if (csio->cdb_len == 16) {
1005 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
1006 ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
1007 ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
1008 ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
1009 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
1010 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
1011 (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
1012 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
1013 start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
1014 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
1015 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
1016 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
1017 }
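/*
 * Illustrative example: a READ_10 CDB of 28 00 00 00 10 00 00 00 08 00
 * decodes to start_lba_lo = 0x1000 and datalength = 8 blocks.
 */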
1018 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1019 io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
1020 io_info.numBlocks = datalength;
1021 io_info.ldTgtId = device_id;
1022 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
1023
1024 io_request->DataLength = htole32(cmd->length);
1025
1026 switch (ccb_h->flags & CAM_DIR_MASK) {
1027 case CAM_DIR_IN:
1028 io_info.isRead = 1;
1029 break;
1030 case CAM_DIR_OUT:
1031 io_info.isRead = 0;
1032 break;
1033 case CAM_DIR_NONE:
1034 default:
1035 mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
1036 break;
1037 }
1038
1039 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1040 ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr);
1041
1042 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1043 if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
1044 io_request->RaidContext.raid_context.regLockFlags = 0;
1045 fp_possible = 0;
1046 } else {
1047 if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
1048 fp_possible = io_info.fpOkForIo;
1049 }
1050
1051 raid = MR_LdRaidGet(ld, map_ptr);
1052 /* Store the TM capability value in cmd */
1053 cmd->tmCapable = raid->capability.tmCapable;
1054
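/*
 * Spread completions across the available MSI-x reply queues by taking the
 * submitting CPU id modulo the vector count.
 */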
1055 cmd->request_desc->SCSIIO.MSIxIndex =
1056 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1057
1058 if (sc->is_ventura || sc->is_aero) {
1059 if (sc->streamDetectByLD) {
1060 mtx_lock(&sc->stream_lock);
1061 mrsas_stream_detect(sc, cmd, &io_info);
1062 mtx_unlock(&sc->stream_lock);
/*
 * On Ventura, if a stream is detected for a read and it is read-ahead
 * capable, send this IO as an LDIO.
 */
1065 if (io_request->RaidContext.raid_context_g35.streamDetected &&
1066 io_info.isRead && io_info.raCapable)
1067 fp_possible = FALSE;
1068 }
1069
/*
 * Set the RAID 1/10 fast-path write capable bit in io_info.
 * Note: reset peer_cmd and r1_alt_dev_handle if fp_possible is disabled
 * after this point. Try not to add more checks that toggle fp_possible
 * after this.
 */
1075 if (fp_possible &&
1076 (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
1077 (raid->level == 1) && !io_info.isRead) {
1078 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
1079 fp_possible = FALSE;
1080 mrsas_atomic_dec(&sc->fw_outstanding);
1081 } else {
1082 r1_cmd = mrsas_get_mpt_cmd(sc);
1083 if (!r1_cmd) {
1084 fp_possible = FALSE;
1085 mrsas_atomic_dec(&sc->fw_outstanding);
1086 }
1087 else {
1088 cmd->peer_cmd = r1_cmd;
1089 r1_cmd->peer_cmd = cmd;
1090 }
1091 }
1092 }
1093 }
1094
1095 if (fp_possible) {
1096 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
1097 start_lba_lo, ld_block_size);
1098 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1099 cmd->request_desc->SCSIIO.RequestFlags =
1100 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1101 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1102 if (sc->mrsas_gen3_ctrl) {
1103 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1104 cmd->request_desc->SCSIIO.RequestFlags =
1105 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1106 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1107 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1108 io_request->RaidContext.raid_context.nseg = 0x1;
1109 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1110 io_request->RaidContext.raid_context.regLockFlags |=
1111 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1112 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1113 } else if (sc->is_ventura || sc->is_aero) {
1114 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1115 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1116 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1117 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1118 if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
1119 io_request->RaidContext.raid_context_g35.RAIDFlags =
1120 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
1121 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1122 }
1123 }
1124 if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
1125 (io_info.isRead)) {
1126 io_info.devHandle =
1127 mrsas_get_updated_dev_handle(sc,
1128 &sc->load_balance_info[device_id], &io_info);
1129 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
1130 cmd->pd_r1_lb = io_info.pd_after_lb;
1131 if (sc->is_ventura || sc->is_aero)
1132 io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
1133 else
1134 io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
1135 } else
1136 cmd->load_balance = 0;
1137
1138 if (sc->is_ventura || sc->is_aero)
1139 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
1140 else
1141 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
1142
1143 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1144 io_request->DevHandle = io_info.devHandle;
1145 cmd->pdInterface = io_info.pdInterface;
1146 } else {
1147 /* Not FP IO */
1148 io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec);
1149 cmd->request_desc->SCSIIO.RequestFlags =
1150 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
1151 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1152 if (sc->mrsas_gen3_ctrl) {
1153 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1154 cmd->request_desc->SCSIIO.RequestFlags =
1155 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1156 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1157 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1158 io_request->RaidContext.raid_context.regLockFlags |=
1159 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1160 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1161 io_request->RaidContext.raid_context.nseg = 0x1;
1162 } else if (sc->is_ventura || sc->is_aero) {
1163 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1164 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1165 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1166 }
1167 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1168 io_request->DevHandle = htole16(device_id);
1169 }
1170 return (0);
1171 }
1172
1173 /*
1174 * mrsas_build_ldio_nonrw: Builds an LDIO command
1175 * input: Adapter instance soft state
1176 * Pointer to command packet
1177 * Pointer to CCB
1178 *
1179 * This function builds the LDIO command packet. It returns 0 if the command is
1180 * built successfully, otherwise it returns a 1.
1181 */
1182 int
1183 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1184 union ccb *ccb)
1185 {
1186 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1187 u_int32_t device_id, ld;
1188 MR_DRV_RAID_MAP_ALL *map_ptr;
1189 MR_LD_RAID *raid;
1190 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1191
1192 io_request = cmd->io_request;
1193 device_id = ccb_h->target_id;
1194
1195 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1196 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1197 raid = MR_LdRaidGet(ld, map_ptr);
1198 /* Store the TM capability value in cmd */
1199 cmd->tmCapable = raid->capability.tmCapable;
1200
1201 /* FW path for LD Non-RW (SCSI management commands) */
1202 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1203 io_request->DevHandle = device_id;
1204 cmd->request_desc->SCSIIO.RequestFlags =
1205 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1206 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1207
1208 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1209 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1210 io_request->DataLength = cmd->length;
1211
1212 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1213 if (sc->is_ventura || sc->is_aero)
1214 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
1215 else {
/*
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * higher 8 bits of sge_count.
 */
1220 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1221 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1222 }
1223 } else {
1224 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1225 return (1);
1226 }
1227 return (0);
1228 }
1229
1230 /*
 * mrsas_build_syspdio: Builds a DCDB command
1232 * input: Adapter instance soft state
1233 * Pointer to command packet
1234 * Pointer to CCB
1235 *
 * This function builds the DCDB command for a system PD. It returns 0 if the command
1237 * is built successfully, otherwise it returns a 1.
1238 */
1239 int
1240 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1241 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
1242 {
1243 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1244 u_int32_t device_id;
1245 MR_DRV_RAID_MAP_ALL *local_map_ptr;
1246 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1247 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1248
1249 io_request = cmd->io_request;
1250 device_id = ccb_h->target_id;
1251 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1252 io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1253 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1254 io_request->RaidContext.raid_context.regLockFlags = 0;
1255 io_request->RaidContext.raid_context.regLockRowLBA = 0;
1256 io_request->RaidContext.raid_context.regLockLength = 0;
1257
1258 cmd->pdInterface = sc->target_list[device_id].interface_type;
1259
1260 /* If FW supports PD sequence number */
1261 if (sc->use_seqnum_jbod_fp &&
1262 sc->pd_list[device_id].driveType == 0x00) {
1263 //printf("Using Drv seq num\n");
1264 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
1265 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
1266 /* More than 256 PD/JBOD support for Ventura */
1267 if (sc->support_morethan256jbod)
1268 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1269 pd_sync->seq[device_id].pdTargetId;
1270 else
1271 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1272 htole16(device_id + 255);
1273 io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
1274 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1275 if (sc->is_ventura || sc->is_aero)
1276 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1277 else
1278 io_request->RaidContext.raid_context.regLockFlags |=
1279 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
/*
 * raid_context.Type = MPI2_TYPE_CUDA is valid only if the FW supports
 * JBOD sequence numbers.
 */
1283 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1284 io_request->RaidContext.raid_context.nseg = 0x1;
1285 } else if (sc->fast_path_io) {
1286 //printf("Using LD RAID map\n");
1287 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1288 io_request->RaidContext.raid_context.configSeqNum = 0;
1289 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1290 io_request->DevHandle =
1291 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1292 } else {
1293 //printf("Using FW PATH\n");
1294 /* Want to send all IO via FW path */
1295 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1296 io_request->RaidContext.raid_context.configSeqNum = 0;
1297 io_request->DevHandle = MR_DEVHANDLE_INVALID;
1298 }
1299
1300 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1301 cmd->request_desc->SCSIIO.MSIxIndex =
1302 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1303
1304 if (!fp_possible) {
1305 /* system pd firmware path */
1306 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1307 cmd->request_desc->SCSIIO.RequestFlags =
1308 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1309 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1310 io_request->RaidContext.raid_context.timeoutValue =
1311 htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1312 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1313 } else {
1314 /* system pd fast path */
1315 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1316 io_request->RaidContext.raid_context.timeoutValue = htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1317
/*
 * NOTE: For system PD IOs, only RW commands set IoFlags to FAST_PATH,
 * because non-RW commands now go via the FW queue and not the exception
 * queue.
 */
1323 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1324 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1325
1326 cmd->request_desc->SCSIIO.RequestFlags =
1327 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1328 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1329 }
1330
1331 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1332 io_request->DataLength = htole32(cmd->length);
1333
1334 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1335 if (sc->is_ventura || sc->is_aero)
1336 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
1337 else {
/*
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * higher 8 bits of sge_count.
 */
1342 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1343 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1344 }
1345 } else {
1346 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1347 return (1);
1348 }
1349 return (0);
1350 }
1351
1352 /*
1353 * mrsas_is_prp_possible: This function will tell whether PRPs should be built or not
 * cmd: MPT command frame pointer
 * segs: OS SGEs
 * nsegs: Number of OS SGEs
 *
 * This function will check whether the IO is qualified to build PRPs.
 * return: true if PRPs should be built
 *         false if IEEE SGLs should be built
1361 */
1362 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
1363 bus_dma_segment_t *segs, int nsegs)
1364 {
1365 struct mrsas_softc *sc = cmd->sc;
1366 int i;
1367 u_int32_t data_length = 0;
1368 bool build_prp = false;
1369 u_int32_t mr_nvme_pg_size;
1370
1371 mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
1372 data_length = cmd->length;
1373
1374 if (data_length > (mr_nvme_pg_size * 5))
1375 build_prp = true;
1376 else if ((data_length > (mr_nvme_pg_size * 4)) &&
1377 (data_length <= (mr_nvme_pg_size * 5))) {
1378 /* check if 1st SG entry size is < residual beyond 4 pages */
1379 if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
1380 build_prp = true;
1381 }
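/*
 * Illustrative example, assuming a 4 KiB NVMe page size: a 24 KiB transfer
 * (more than 5 pages) is always a PRP candidate, while an 18 KiB transfer
 * qualifies only if the first SGE is shorter than the 2 KiB residual beyond
 * the first 4 pages.
 */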
1382
1383 /*check for SGE holes here*/
1384 for (i = 0; i < nsegs; i++) {
1385 /* check for mid SGEs */
1386 if ((i != 0) && (i != (nsegs - 1))) {
1387 if ((segs[i].ds_addr % mr_nvme_pg_size) ||
1388 (segs[i].ds_len % mr_nvme_pg_size)) {
1389 build_prp = false;
1390 mrsas_atomic_inc(&sc->sge_holes);
1391 break;
1392 }
1393 }
1394
1395 /* check for first SGE*/
1396 if ((nsegs > 1) && (i == 0)) {
1397 if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
1398 build_prp = false;
1399 mrsas_atomic_inc(&sc->sge_holes);
1400 break;
1401 }
1402 }
1403
1404 /* check for Last SGE*/
1405 if ((nsegs > 1) && (i == (nsegs - 1))) {
1406 if (segs[i].ds_addr % mr_nvme_pg_size) {
1407 build_prp = false;
1408 mrsas_atomic_inc(&sc->sge_holes);
1409 break;
1410 }
1411 }
1412 }
1413
1414 return build_prp;
1415 }
1416
1417 /*
1418 * mrsas_map_request: Map and load data
1419 * input: Adapter instance soft state
1420 * Pointer to command packet
1421 *
1422 * For data from OS, map and load the data buffer into bus space. The SG list
1423 * is built in the callback. If the bus dmamap load is not successful,
1424 * cmd->error_code will contain the error code and a 1 is returned.
1425 */
1426 int
1427 mrsas_map_request(struct mrsas_softc *sc,
1428 struct mrsas_mpt_cmd *cmd, union ccb *ccb)
1429 {
1430 u_int32_t retcode = 0;
1431 struct cam_sim *sim;
1432
1433 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
1434
1435 if (cmd->data != NULL) {
1436 /* Map data buffer into bus space */
1437 mtx_lock(&sc->io_lock);
1438 #if (__FreeBSD_version >= 902001)
1439 retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
1440 mrsas_data_load_cb, cmd, 0);
1441 #else
1442 retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
1443 cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
1444 #endif
1445 mtx_unlock(&sc->io_lock);
1446 if (retcode)
1447 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
1448 if (retcode == EINPROGRESS) {
1449 device_printf(sc->mrsas_dev, "request load in progress\n");
1450 mrsas_freeze_simq(cmd, sim);
1451 }
1452 }
1453 if (cmd->error_code)
1454 return (1);
1455 return (retcode);
1456 }
1457
1458 /*
1459 * mrsas_unmap_request: Unmap and unload data
1460 * input: Adapter instance soft state
1461 * Pointer to command packet
1462 *
1463 * This function unmaps and unloads data from OS.
1464 */
1465 void
1466 mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1467 {
1468 if (cmd->data != NULL) {
1469 if (cmd->flags & MRSAS_DIR_IN)
1470 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
1471 if (cmd->flags & MRSAS_DIR_OUT)
1472 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
1473 mtx_lock(&sc->io_lock);
1474 bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
1475 mtx_unlock(&sc->io_lock);
1476 }
1477 }
1478
1479 /**
1480 * mrsas_build_ieee_sgl - Prepare IEEE SGLs
1481 * @sc: Adapter soft state
1482 * @segs: OS SGEs pointers
1483 * @nseg: Number of OS SGEs
1484 * @cmd: Fusion command frame
1485 * return: void
1486 */
1487 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1488 {
1489 struct mrsas_softc *sc = cmd->sc;
1490 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1491 pMpi25IeeeSgeChain64_t sgl_ptr;
1492 int i = 0, sg_processed = 0;
1493
1494 io_request = cmd->io_request;
1495 sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
1496
1497 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1498 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1499
1500 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1501 sgl_ptr_end->Flags = 0;
1502 }
1503 if (nseg != 0) {
1504 for (i = 0; i < nseg; i++) {
1505 sgl_ptr->Address = htole64(segs[i].ds_addr);
1506 sgl_ptr->Length = htole32(segs[i].ds_len);
1507 sgl_ptr->Flags = 0;
1508 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1509 if (i == nseg - 1)
1510 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1511 }
1512 sgl_ptr++;
1513 sg_processed = i + 1;
1514 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1515 (nseg > sc->max_sge_in_main_msg)) {
1516 pMpi25IeeeSgeChain64_t sg_chain;
1517
1518 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1519 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1520 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1521 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1522 else
1523 cmd->io_request->ChainOffset = 0;
1524 } else
1525 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1526 sg_chain = sgl_ptr;
1527 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1528 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1529 else
1530 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1531 sg_chain->Length = htole32((sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)));
1532 sg_chain->Address = htole64(cmd->chain_frame_phys_addr);
1533 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
1534 }
1535 }
1536 }
1537 }
1538
1539 /**
1540 * mrsas_build_prp_nvme - Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1541 * @sc: Adapter soft state
1542 * @segs: OS SGEs pointers
1543 * @nseg: Number of OS SGEs
1544 * @cmd: Fusion command frame
1545 * return: void
1546 */
1547 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1548 {
1549 struct mrsas_softc *sc = cmd->sc;
1550 int sge_len, offset, num_prp_in_chain = 0;
1551 pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
1552 u_int64_t *ptr_sgl;
1553 bus_addr_t ptr_sgl_phys;
1554 u_int64_t sge_addr;
1555 u_int32_t page_mask, page_mask_result, i = 0;
1556 u_int32_t first_prp_len;
1557 int data_len = cmd->length;
1558 u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
1559 MR_DEFAULT_NVME_PAGE_SIZE);
1560
1561 sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
1562 /*
1563 * NVMe has a very convoluted PRP format. One PRP is required
1564 * for each page or partial page. We need to split up OS SG
1565 * entries if they are longer than one page or cross a page
1566 * boundary. We also have to insert a PRP list pointer entry as
1567 * the last entry in each physical page of the PRP list.
1568 *
1569 * NOTE: The first PRP "entry" is actually placed in the first
1570 * SGL entry in the main message in IEEE 64 format. The 2nd
1571 * entry in the main message is the chain element, and the rest
1572 * of the PRP entries are built in the contiguous PCIe buffer.
1573 */
1574 page_mask = mr_nvme_pg_size - 1;
1575 ptr_sgl = (u_int64_t *) cmd->chain_frame;
1576 ptr_sgl_phys = cmd->chain_frame_phys_addr;
1577 memset(ptr_sgl, 0, sc->max_chain_frame_sz);
1578
1579 /* Build chain frame element which holds all PRPs except first*/
1580 main_chain_element = (pMpi25IeeeSgeChain64_t)
1581 ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));
1582
1583 main_chain_element->Address = cmd->chain_frame_phys_addr;
1584 main_chain_element->NextChainOffset = 0;
1585 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1586 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1587 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1588
/* Build first PRP; the SGE need not be PAGE aligned */
1590 ptr_first_sgl = sgl_ptr;
1591 sge_addr = segs[i].ds_addr;
1592 sge_len = segs[i].ds_len;
1593 i++;
1594
1595 offset = (u_int32_t) (sge_addr & page_mask);
1596 first_prp_len = mr_nvme_pg_size - offset;
1597
1598 ptr_first_sgl->Address = sge_addr;
1599 ptr_first_sgl->Length = first_prp_len;
1600
1601 data_len -= first_prp_len;
1602
1603 if (sge_len > first_prp_len) {
1604 sge_addr += first_prp_len;
1605 sge_len -= first_prp_len;
1606 } else if (sge_len == first_prp_len) {
1607 sge_addr = segs[i].ds_addr;
1608 sge_len = segs[i].ds_len;
1609 i++;
1610 }
1611
1612 for (;;) {
1613 offset = (u_int32_t) (sge_addr & page_mask);
1614
1615 /* Put PRP pointer due to page boundary*/
1616 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1617 if (!page_mask_result) {
1618 device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
1619 " ptr_sgl: 0x%p\n", ptr_sgl);
1620 ptr_sgl_phys++;
1621 *ptr_sgl = (uintptr_t)ptr_sgl_phys;
1622 ptr_sgl++;
1623 num_prp_in_chain++;
1624 }
1625
1626 *ptr_sgl = sge_addr;
1627 ptr_sgl++;
1628 ptr_sgl_phys++;
1629 num_prp_in_chain++;
1630
1631 sge_addr += mr_nvme_pg_size;
1632 sge_len -= mr_nvme_pg_size;
1633 data_len -= mr_nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
	mrsas_atomic_inc(&sc->prp_count);
}

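/*
 * Illustrative sketch only, not part of the driver proper: given the first
 * segment's starting address and the total transfer length, compute how many
 * data PRP entries the page-splitting rules described in
 * mrsas_build_prp_nvme() would produce.  This counts data PRPs only, not the
 * PRP list pointer entries inserted at page boundaries of the chain buffer,
 * and it assumes the NVMe page size is a power of two.  The helper name is
 * hypothetical.
 */
static inline u_int32_t
mrsas_prp_entries_needed(u_int64_t first_addr, u_int32_t data_len,
    u_int32_t pg_size)
{
	u_int32_t first_prp_len, remaining;

	/*
	 * The first PRP covers from the (possibly unaligned) start address
	 * to the end of its page.
	 */
	first_prp_len = pg_size - (u_int32_t)(first_addr & (pg_size - 1));
	if (data_len <= first_prp_len)
		return (1);

	/* Each following PRP covers one full page; the last may be partial. */
	remaining = data_len - first_prp_len;
	return (1 + (remaining + pg_size - 1) / pg_size);
}
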
/*
 * mrsas_data_load_cb: Callback entry point to build SGLs
 * input: Pointer to the command packet as the callback argument
 *        Pointer to the segment array
 *        Number of segments
 *        Error code from the load
 *
 * This is the callback of the bus dma map load.  It builds the SG list.
 * (A usage sketch showing how such a callback is typically driven follows
 * this function.)
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	boolean_t build_prp = false;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);

	/* Check whether PRPs or IEEE SGLs should be built */
	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
	    (cmd->pdInterface == NVME_PD))
		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);

	if (build_prp == true)
		mrsas_build_prp_nvme(cmd, segs, nseg);
	else
		mrsas_build_ieee_sgl(cmd, segs, nseg);

	cmd->sge_count = nseg;
}

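/*
 * Illustrative usage sketch only, not part of the driver proper: a callback
 * of the shape above is normally driven through bus_dmamap_load_ccb(),
 * roughly as below.  The helper name is hypothetical; the real mapping path
 * lives elsewhere in this file.
 */
static inline int
mrsas_sketch_load_data(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	int error;

	error = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
	    mrsas_data_load_cb, cmd, 0);
	if (error == EINPROGRESS) {
		/*
		 * The load was deferred; mrsas_data_load_cb() will run once
		 * DMA resources become available.
		 */
		error = 0;
	}
	return (error);
}
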
/*
 * mrsas_freeze_simq: Freeze SIM queue
 * input: Pointer to command packet
 *        Pointer to SIM
 *
 * This function freezes the SIM queue and marks the command's CCB so that it
 * is requeued and the SIM queue is released on completion.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done: Perform remaining command completion
 * input: Adapter instance soft state
 *        Pointer to command packet
 *
 * This function unmaps the request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}

/*
 * mrsas_cam_poll: Polling entry point
 * input: Pointer to SIM
 *
 * This function polls every completion queue (one per MSI-X vector, or the
 * single default queue) for completed commands.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	int i;
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);

	if (sc->msix_vectors != 0) {
		for (i = 0; i < sc->msix_vectors; i++) {
			mrsas_complete_cmd(sc, i);
		}
	} else {
		mrsas_complete_cmd(sc, 0);
	}
}

/*
 * mrsas_bus_scan: Perform bus scan
 * input: Adapter instance soft state
 *
 * This function is needed on FreeBSD 7.x; it should not be called on FreeBSD
 * 8.x and later, where the bus scan is automatic.
 */
int
mrsas_bus_scan(struct mrsas_softc *sc)
{
	union ccb *ccb_0;
	union ccb *ccb_1;

	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
		xpt_free_ccb(ccb_0);
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb_0);
	xpt_rescan(ccb_1);

	return (0);
}

/*
 * mrsas_bus_scan_sim: Perform bus scan per SIM
 * input: Adapter instance soft state
 *        Pointer to SIM
 *
 * This function is called from the event handler on LD creation/deletion and
 * on JBOD on/off.  (A usage sketch follows this function.)
 */
int
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);

	return (0);
}

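/*
 * Illustrative usage sketch only, not part of the driver proper: an event
 * handler reacting to LD creation or deletion would rescan the LD SIM roughly
 * as below.  The helper name is hypothetical; sim_0 carries system PDs and
 * sim_1 carries VDs, matching the bus numbering used elsewhere in this file.
 */
static inline void
mrsas_sketch_rescan_ld_bus(struct mrsas_softc *sc)
{
	if (mrsas_bus_scan_sim(sc, sc->sim_1) != 0)
		device_printf(sc->mrsas_dev, "LD bus rescan failed\n");
}
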
/*
 * mrsas_track_scsiio: Track IOs for a given target in the mpt_cmd_list
 * input: Adapter instance soft state
 *        Target ID of the target
 *        Bus ID of the target
 *
 * This function checks the whole mpt_cmd_list pool for any pending IO with
 * the bus_id and target_id passed as arguments.  If such an IO is found, the
 * target reset did not complete successfully.
 *
 * Returns FAIL if IOs are pending to the target device, else returns SUCCESS.
 */
static int
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
{
	int i;
	struct mrsas_mpt_cmd *mpt_cmd = NULL;

	for (i = 0; i < sc->max_fw_cmds; i++) {
		mpt_cmd = sc->mpt_cmd_list[i];

		/*
		 * Check if the target_id and bus_id are the same as those of
		 * the timed-out IO
		 */
		if (mpt_cmd->ccb_ptr) {
			/* bus_id = 1 denotes a VD */
			if (bus_id == 1)
				tgt_id =
				    (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));

			if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
			    mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
				device_printf(sc->mrsas_dev,
				    "IO commands pending to target id %d\n", tgt_id);
				return FAIL;
			}
		}
	}

	return SUCCESS;
}

#if TM_DEBUG
/*
 * mrsas_tm_response_code: Prints TM response code received from FW
 * input: Adapter instance soft state
 *        MPI reply returned from firmware
 *
 * Returns nothing.
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);
	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm: Fires the TM command to FW and waits for completion
 * input: Adapter instance soft state
 *        Request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command times out waiting for the FW, else SUCCESS.
 * (A sketch of the matching wakeup on the completion side follows this
 * function.)
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50 * hz);

	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}

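/*
 * Illustrative sketch only, not part of the driver proper: the completion
 * path is expected to wake the msleep() in mrsas_issue_tm() once firmware
 * answers the TM request, roughly as below.  The helper name is hypothetical.
 */
static inline void
mrsas_sketch_tm_wakeup(struct mrsas_softc *sc)
{
	/* Wake whoever sleeps on the OCR channel, e.g. mrsas_issue_tm(). */
	wakeup(&sc->ocr_chan);
}
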
/*
 * mrsas_reset_targets: Gathers info to fire a target reset command
 * input: Adapter instance soft state
 *
 * This function compiles the data for a target reset command to be fired to
 * the FW and then traverses the target_reset_pool for targets with timed-out
 * IOs.
 *
 * Returns SUCCESS or FAIL.
 */
int
mrsas_reset_targets(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
	struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
	MR_TASK_MANAGE_REQUEST *mr_request;
	MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	int retCode = FAIL, count, i, outstanding;
	u_int32_t MSIxIndex, bus_id;
	target_id_t tgt_id;
#if TM_DEBUG
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
#endif

	outstanding = mrsas_atomic_read(&sc->fw_outstanding);

	if (!outstanding) {
		device_printf(sc->mrsas_dev, "NO IOs pending...\n");
		mrsas_atomic_set(&sc->target_reset_outstanding, 0);
		retCode = SUCCESS;
		goto return_status;
	} else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
		device_printf(sc->mrsas_dev, "Controller is not operational\n");
		goto return_status;
	} else {
		/* Some more error checks will be added in future */
	}

	/* Get an mpt frame and an index to fire the TM cmd */
	tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!tm_mpt_cmd) {
		retCode = FAIL;
		goto return_status;
	}

	req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
		retCode = FAIL;
		goto release_mpt;
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));

	req_desc->HighPriority.SMID = tm_mpt_cmd->index;
	req_desc->HighPriority.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->HighPriority.MSIxIndex = 0;
	req_desc->HighPriority.LMID = 0;
	req_desc->HighPriority.Reserved1 = 0;
	tm_mpt_cmd->request_desc = req_desc;

	mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
	memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));

	tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
	tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_mpi_request->TaskMID = 0;	/* smid task */
	tm_mpi_request->LUN[1] = 0;

	/* Traverse the tm_mpt pool to get valid entries */
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) {
		if (!sc->target_reset_pool[i]) {
			continue;
		} else {
			tgt_mpt_cmd = sc->target_reset_pool[i];
		}

		tgt_id = i;

		/* See if the target is tm capable or NOT */
		if (!tgt_mpt_cmd->tmCapable) {
			device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
			    "CAM target:%d\n", tgt_id);

			retCode = FAIL;
			goto release_mpt;
		}

		tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;

		if (i < (MRSAS_MAX_PD - 1)) {
			mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
			bus_id = 0;
		} else {
			mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
			bus_id = 1;
		}

		device_printf(sc->mrsas_dev, "TM will be fired for "
		    "CAM target:%d and bus_id %d\n", tgt_id, bus_id);

		sc->ocr_chan = (void *)&tm_mpt_cmd;
		retCode = mrsas_issue_tm(sc, req_desc);
		if (retCode == FAIL)
			goto release_mpt;

#if TM_DEBUG
		mpi_reply =
		    (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
		mrsas_tm_response_code(sc, mpi_reply);
#endif
		mrsas_atomic_dec(&sc->target_reset_outstanding);
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/*
		 * Wait for 1 second to complete parallel ISR calling same
		 * mrsas_complete_cmd()
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
	    (retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}