FreeBSD/Linux Kernel Cross Reference
sys/dev/mpr/mpr_user.c
1 /*-
2 * Copyright (c) 2008 Yahoo!, Inc.
3 * All rights reserved.
4 * Written by: John Baldwin <jhb@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the author nor the names of any co-contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD userland interface
31 */
32 /*-
33 * Copyright (c) 2011-2015 LSI Corp.
34 * Copyright (c) 2013-2016 Avago Technologies
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
59 *
60 * $FreeBSD$
61 */
62
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
65
66 #include "opt_compat.h"
67
68 /* TODO Move headers to mprvar */
69 #include <sys/types.h>
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/selinfo.h>
74 #include <sys/module.h>
75 #include <sys/bus.h>
76 #include <sys/conf.h>
77 #include <sys/bio.h>
78 #include <sys/abi_compat.h>
79 #include <sys/malloc.h>
80 #include <sys/uio.h>
81 #include <sys/sysctl.h>
82 #include <sys/ioccom.h>
83 #include <sys/endian.h>
84 #include <sys/queue.h>
85 #include <sys/kthread.h>
86 #include <sys/taskqueue.h>
87 #include <sys/proc.h>
88 #include <sys/sysent.h>
89
90 #include <machine/bus.h>
91 #include <machine/resource.h>
92 #include <sys/rman.h>
93
94 #include <cam/cam.h>
95 #include <cam/cam_ccb.h>
96
97 #include <dev/mpr/mpi/mpi2_type.h>
98 #include <dev/mpr/mpi/mpi2.h>
99 #include <dev/mpr/mpi/mpi2_ioc.h>
100 #include <dev/mpr/mpi/mpi2_cnfg.h>
101 #include <dev/mpr/mpi/mpi2_init.h>
102 #include <dev/mpr/mpi/mpi2_tool.h>
103 #include <dev/mpr/mpi/mpi2_pci.h>
104 #include <dev/mpr/mpr_ioctl.h>
105 #include <dev/mpr/mprvar.h>
106 #include <dev/mpr/mpr_table.h>
107 #include <dev/mpr/mpr_sas.h>
108 #include <dev/pci/pcivar.h>
109 #include <dev/pci/pcireg.h>
110
111 static d_open_t mpr_open;
112 static d_close_t mpr_close;
113 static d_ioctl_t mpr_ioctl_devsw;
114
/*
 * Character-device switch for the per-adapter /dev/mpr%d management node.
 * Only open/close/ioctl are implemented; everything else uses defaults.
 */
static struct cdevsw mpr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpr_open,
	.d_close =	mpr_close,
	.d_ioctl =	mpr_ioctl_devsw,
	.d_name =	"mpr",
};
123
124 typedef int (mpr_user_f)(struct mpr_command *, struct mpr_usr_command *);
125 static mpr_user_f mpi_pre_ioc_facts;
126 static mpr_user_f mpi_pre_port_facts;
127 static mpr_user_f mpi_pre_fw_download;
128 static mpr_user_f mpi_pre_fw_upload;
129 static mpr_user_f mpi_pre_sata_passthrough;
130 static mpr_user_f mpi_pre_smp_passthrough;
131 static mpr_user_f mpi_pre_config;
132 static mpr_user_f mpi_pre_sas_io_unit_control;
133
134 static int mpr_user_read_cfg_header(struct mpr_softc *,
135 struct mpr_cfg_page_req *);
136 static int mpr_user_read_cfg_page(struct mpr_softc *,
137 struct mpr_cfg_page_req *, void *);
138 static int mpr_user_read_extcfg_header(struct mpr_softc *,
139 struct mpr_ext_cfg_page_req *);
140 static int mpr_user_read_extcfg_page(struct mpr_softc *,
141 struct mpr_ext_cfg_page_req *, void *);
142 static int mpr_user_write_cfg_page(struct mpr_softc *,
143 struct mpr_cfg_page_req *, void *);
144 static int mpr_user_setup_request(struct mpr_command *,
145 struct mpr_usr_command *);
146 static int mpr_user_command(struct mpr_softc *, struct mpr_usr_command *);
147
148 static int mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data);
149 static void mpr_user_get_adapter_data(struct mpr_softc *sc,
150 mpr_adapter_data_t *data);
151 static void mpr_user_read_pci_info(struct mpr_softc *sc, mpr_pci_info_t *data);
152 static uint8_t mpr_get_fw_diag_buffer_number(struct mpr_softc *sc,
153 uint32_t unique_id);
154 static int mpr_post_fw_diag_buffer(struct mpr_softc *sc,
155 mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
156 static int mpr_release_fw_diag_buffer(struct mpr_softc *sc,
157 mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
158 uint32_t diag_type);
159 static int mpr_diag_register(struct mpr_softc *sc,
160 mpr_fw_diag_register_t *diag_register, uint32_t *return_code);
161 static int mpr_diag_unregister(struct mpr_softc *sc,
162 mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
163 static int mpr_diag_query(struct mpr_softc *sc, mpr_fw_diag_query_t *diag_query,
164 uint32_t *return_code);
165 static int mpr_diag_read_buffer(struct mpr_softc *sc,
166 mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
167 uint32_t *return_code);
168 static int mpr_diag_release(struct mpr_softc *sc,
169 mpr_fw_diag_release_t *diag_release, uint32_t *return_code);
170 static int mpr_do_diag_action(struct mpr_softc *sc, uint32_t action,
171 uint8_t *diag_action, uint32_t length, uint32_t *return_code);
172 static int mpr_user_diag_action(struct mpr_softc *sc, mpr_diag_action_t *data);
173 static void mpr_user_event_query(struct mpr_softc *sc, mpr_event_query_t *data);
174 static void mpr_user_event_enable(struct mpr_softc *sc,
175 mpr_event_enable_t *data);
176 static int mpr_user_event_report(struct mpr_softc *sc,
177 mpr_event_report_t *data);
178 static int mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data);
179 static int mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data);
180
181 static MALLOC_DEFINE(M_MPRUSER, "mpr_user", "Buffers for mpr(4) ioctls");
182
/*
 * MPI functions that support IEEE SGLs for SAS3.
 * Passthru requests whose function appears here are sent with IEEE
 * SGLs instead of simple MPI2 SGEs (see the scan in mpr_user_pass_thru).
 */
static uint8_t ieee_sgl_func_list[] = {
	MPI2_FUNCTION_SCSI_IO_REQUEST,
	MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
	MPI2_FUNCTION_SMP_PASSTHROUGH,
	MPI2_FUNCTION_SATA_PASSTHROUGH,
	MPI2_FUNCTION_FW_UPLOAD,
	MPI2_FUNCTION_FW_DOWNLOAD,
	MPI2_FUNCTION_TARGET_ASSIST,
	MPI2_FUNCTION_TARGET_STATUS_SEND,
	MPI2_FUNCTION_TOOLBOX
};
197
198 int
199 mpr_attach_user(struct mpr_softc *sc)
200 {
201 int unit;
202
203 unit = device_get_unit(sc->mpr_dev);
204 sc->mpr_cdev = make_dev(&mpr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
205 "mpr%d", unit);
206
207 if (sc->mpr_cdev == NULL)
208 return (ENOMEM);
209
210 sc->mpr_cdev->si_drv1 = sc;
211 return (0);
212 }
213
214 void
215 mpr_detach_user(struct mpr_softc *sc)
216 {
217
218 /* XXX: do a purge of pending requests? */
219 if (sc->mpr_cdev != NULL)
220 destroy_dev(sc->mpr_cdev);
221 }
222
/* Open handler: no per-open state is required, so always succeed. */
static int
mpr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
229
/* Close handler: nothing to release, so always succeed. */
static int
mpr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}
236
237 static int
238 mpr_user_read_cfg_header(struct mpr_softc *sc,
239 struct mpr_cfg_page_req *page_req)
240 {
241 MPI2_CONFIG_PAGE_HEADER *hdr;
242 struct mpr_config_params params;
243 int error;
244
245 hdr = ¶ms.hdr.Struct;
246 params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
247 params.page_address = le32toh(page_req->page_address);
248 hdr->PageVersion = 0;
249 hdr->PageLength = 0;
250 hdr->PageNumber = page_req->header.PageNumber;
251 hdr->PageType = page_req->header.PageType;
252 params.buffer = NULL;
253 params.length = 0;
254 params.callback = NULL;
255
256 if ((error = mpr_read_config_page(sc, ¶ms)) != 0) {
257 /*
258 * Leave the request. Without resetting the chip, it's
259 * still owned by it and we'll just get into trouble
260 * freeing it now. Mark it as abandoned so that if it
261 * shows up later it can be freed.
262 */
263 mpr_printf(sc, "read_cfg_header timed out\n");
264 return (ETIMEDOUT);
265 }
266
267 page_req->ioc_status = htole16(params.status);
268 if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
269 MPI2_IOCSTATUS_SUCCESS) {
270 bcopy(hdr, &page_req->header, sizeof(page_req->header));
271 }
272
273 return (0);
274 }
275
276 static int
277 mpr_user_read_cfg_page(struct mpr_softc *sc, struct mpr_cfg_page_req *page_req,
278 void *buf)
279 {
280 MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
281 struct mpr_config_params params;
282 int error;
283
284 reqhdr = buf;
285 hdr = ¶ms.hdr.Struct;
286 hdr->PageVersion = reqhdr->PageVersion;
287 hdr->PageLength = reqhdr->PageLength;
288 hdr->PageNumber = reqhdr->PageNumber;
289 hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK;
290 params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
291 params.page_address = le32toh(page_req->page_address);
292 params.buffer = buf;
293 params.length = le32toh(page_req->len);
294 params.callback = NULL;
295
296 if ((error = mpr_read_config_page(sc, ¶ms)) != 0) {
297 mpr_printf(sc, "mpr_user_read_cfg_page timed out\n");
298 return (ETIMEDOUT);
299 }
300
301 page_req->ioc_status = htole16(params.status);
302 return (0);
303 }
304
305 static int
306 mpr_user_read_extcfg_header(struct mpr_softc *sc,
307 struct mpr_ext_cfg_page_req *ext_page_req)
308 {
309 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
310 struct mpr_config_params params;
311 int error;
312
313 hdr = ¶ms.hdr.Ext;
314 params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
315 hdr->PageVersion = ext_page_req->header.PageVersion;
316 hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
317 hdr->ExtPageLength = 0;
318 hdr->PageNumber = ext_page_req->header.PageNumber;
319 hdr->ExtPageType = ext_page_req->header.ExtPageType;
320 params.page_address = le32toh(ext_page_req->page_address);
321 params.buffer = NULL;
322 params.length = 0;
323 params.callback = NULL;
324
325 if ((error = mpr_read_config_page(sc, ¶ms)) != 0) {
326 /*
327 * Leave the request. Without resetting the chip, it's
328 * still owned by it and we'll just get into trouble
329 * freeing it now. Mark it as abandoned so that if it
330 * shows up later it can be freed.
331 */
332 mpr_printf(sc, "mpr_user_read_extcfg_header timed out\n");
333 return (ETIMEDOUT);
334 }
335
336 ext_page_req->ioc_status = htole16(params.status);
337 if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
338 MPI2_IOCSTATUS_SUCCESS) {
339 ext_page_req->header.PageVersion = hdr->PageVersion;
340 ext_page_req->header.PageNumber = hdr->PageNumber;
341 ext_page_req->header.PageType = hdr->PageType;
342 ext_page_req->header.ExtPageLength = hdr->ExtPageLength;
343 ext_page_req->header.ExtPageType = hdr->ExtPageType;
344 }
345
346 return (0);
347 }
348
349 static int
350 mpr_user_read_extcfg_page(struct mpr_softc *sc,
351 struct mpr_ext_cfg_page_req *ext_page_req, void *buf)
352 {
353 MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr;
354 struct mpr_config_params params;
355 int error;
356
357 reqhdr = buf;
358 hdr = ¶ms.hdr.Ext;
359 params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
360 params.page_address = le32toh(ext_page_req->page_address);
361 hdr->PageVersion = reqhdr->PageVersion;
362 hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
363 hdr->PageNumber = reqhdr->PageNumber;
364 hdr->ExtPageType = reqhdr->ExtPageType;
365 hdr->ExtPageLength = reqhdr->ExtPageLength;
366 params.buffer = buf;
367 params.length = le32toh(ext_page_req->len);
368 params.callback = NULL;
369
370 if ((error = mpr_read_config_page(sc, ¶ms)) != 0) {
371 mpr_printf(sc, "mpr_user_read_extcfg_page timed out\n");
372 return (ETIMEDOUT);
373 }
374
375 ext_page_req->ioc_status = htole16(params.status);
376 return (0);
377 }
378
379 static int
380 mpr_user_write_cfg_page(struct mpr_softc *sc,
381 struct mpr_cfg_page_req *page_req, void *buf)
382 {
383 MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
384 struct mpr_config_params params;
385 u_int hdr_attr;
386 int error;
387
388 reqhdr = buf;
389 hdr = ¶ms.hdr.Struct;
390 hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK;
391 if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE &&
392 hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) {
393 mpr_printf(sc, "page type 0x%x not changeable\n",
394 reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK);
395 return (EINVAL);
396 }
397
398 /*
399 * There isn't any point in restoring stripped out attributes
400 * if you then mask them going down to issue the request.
401 */
402
403 hdr->PageVersion = reqhdr->PageVersion;
404 hdr->PageLength = reqhdr->PageLength;
405 hdr->PageNumber = reqhdr->PageNumber;
406 hdr->PageType = reqhdr->PageType;
407 params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
408 params.page_address = le32toh(page_req->page_address);
409 params.buffer = buf;
410 params.length = le32toh(page_req->len);
411 params.callback = NULL;
412
413 if ((error = mpr_write_config_page(sc, ¶ms)) != 0) {
414 mpr_printf(sc, "mpr_write_cfg_page timed out\n");
415 return (ETIMEDOUT);
416 }
417
418 page_req->ioc_status = htole16(params.status);
419 return (0);
420 }
421
422 void
423 mpr_init_sge(struct mpr_command *cm, void *req, void *sge)
424 {
425 int off, space;
426
427 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
428 off = (uintptr_t)sge - (uintptr_t)req;
429
430 KASSERT(off < space, ("bad pointers %p %p, off %d, space %d",
431 req, sge, off, space));
432
433 cm->cm_sge = sge;
434 cm->cm_sglsize = space - off;
435 }
436
437 /*
438 * Prepare the mpr_command for an IOC_FACTS request.
439 */
440 static int
441 mpi_pre_ioc_facts(struct mpr_command *cm, struct mpr_usr_command *cmd)
442 {
443 MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req;
444 MPI2_IOC_FACTS_REPLY *rpl;
445
446 if (cmd->req_len != sizeof *req)
447 return (EINVAL);
448 if (cmd->rpl_len != sizeof *rpl)
449 return (EINVAL);
450
451 cm->cm_sge = NULL;
452 cm->cm_sglsize = 0;
453 return (0);
454 }
455
456 /*
457 * Prepare the mpr_command for a PORT_FACTS request.
458 */
459 static int
460 mpi_pre_port_facts(struct mpr_command *cm, struct mpr_usr_command *cmd)
461 {
462 MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req;
463 MPI2_PORT_FACTS_REPLY *rpl;
464
465 if (cmd->req_len != sizeof *req)
466 return (EINVAL);
467 if (cmd->rpl_len != sizeof *rpl)
468 return (EINVAL);
469
470 cm->cm_sge = NULL;
471 cm->cm_sglsize = 0;
472 return (0);
473 }
474
475 /*
476 * Prepare the mpr_command for a FW_DOWNLOAD request.
477 */
478 static int
479 mpi_pre_fw_download(struct mpr_command *cm, struct mpr_usr_command *cmd)
480 {
481 MPI25_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req;
482 MPI2_FW_DOWNLOAD_REPLY *rpl;
483 int error;
484
485 if (cmd->req_len != sizeof *req)
486 return (EINVAL);
487 if (cmd->rpl_len != sizeof *rpl)
488 return (EINVAL);
489
490 if (cmd->len == 0)
491 return (EINVAL);
492
493 error = copyin(cmd->buf, cm->cm_data, cmd->len);
494 if (error != 0)
495 return (error);
496
497 mpr_init_sge(cm, req, &req->SGL);
498
499 /*
500 * For now, the F/W image must be provided in a single request.
501 */
502 if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0)
503 return (EINVAL);
504 if (req->TotalImageSize != cmd->len)
505 return (EINVAL);
506
507 req->ImageOffset = 0;
508 req->ImageSize = cmd->len;
509
510 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
511
512 return (mpr_push_ieee_sge(cm, &req->SGL, 0));
513 }
514
515 /*
516 * Prepare the mpr_command for a FW_UPLOAD request.
517 */
518 static int
519 mpi_pre_fw_upload(struct mpr_command *cm, struct mpr_usr_command *cmd)
520 {
521 MPI25_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req;
522 MPI2_FW_UPLOAD_REPLY *rpl;
523
524 if (cmd->req_len != sizeof *req)
525 return (EINVAL);
526 if (cmd->rpl_len != sizeof *rpl)
527 return (EINVAL);
528
529 mpr_init_sge(cm, req, &req->SGL);
530 if (cmd->len == 0) {
531 /* Perhaps just asking what the size of the fw is? */
532 return (0);
533 }
534
535 req->ImageOffset = 0;
536 req->ImageSize = cmd->len;
537
538 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
539
540 return (mpr_push_ieee_sge(cm, &req->SGL, 0));
541 }
542
543 /*
544 * Prepare the mpr_command for a SATA_PASSTHROUGH request.
545 */
546 static int
547 mpi_pre_sata_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd)
548 {
549 MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
550 MPI2_SATA_PASSTHROUGH_REPLY *rpl;
551
552 if (cmd->req_len != sizeof *req)
553 return (EINVAL);
554 if (cmd->rpl_len != sizeof *rpl)
555 return (EINVAL);
556
557 mpr_init_sge(cm, req, &req->SGL);
558 return (0);
559 }
560
561 /*
562 * Prepare the mpr_command for a SMP_PASSTHROUGH request.
563 */
564 static int
565 mpi_pre_smp_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd)
566 {
567 MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
568 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
569
570 if (cmd->req_len != sizeof *req)
571 return (EINVAL);
572 if (cmd->rpl_len != sizeof *rpl)
573 return (EINVAL);
574
575 mpr_init_sge(cm, req, &req->SGL);
576 return (0);
577 }
578
579 /*
580 * Prepare the mpr_command for a CONFIG request.
581 */
582 static int
583 mpi_pre_config(struct mpr_command *cm, struct mpr_usr_command *cmd)
584 {
585 MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req;
586 MPI2_CONFIG_REPLY *rpl;
587
588 if (cmd->req_len != sizeof *req)
589 return (EINVAL);
590 if (cmd->rpl_len != sizeof *rpl)
591 return (EINVAL);
592
593 mpr_init_sge(cm, req, &req->PageBufferSGE);
594 return (0);
595 }
596
597 /*
598 * Prepare the mpr_command for a SAS_IO_UNIT_CONTROL request.
599 */
600 static int
601 mpi_pre_sas_io_unit_control(struct mpr_command *cm,
602 struct mpr_usr_command *cmd)
603 {
604
605 cm->cm_sge = NULL;
606 cm->cm_sglsize = 0;
607 return (0);
608 }
609
/*
 * A set of functions to prepare an mpr_command for the various
 * supported requests.  Looked up by MPI function code in
 * mpr_user_setup_request(); the { 0xFF, NULL } entry terminates the
 * scan, so it must remain last.
 */
struct mpr_user_func {
	U8 Function;		/* MPI2_FUNCTION_* code to match */
	mpr_user_f *f_pre;	/* setup hook run before submission */
} mpr_user_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_CONFIG, mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL } /* list end */
};
628
629 static int
630 mpr_user_setup_request(struct mpr_command *cm, struct mpr_usr_command *cmd)
631 {
632 MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
633 struct mpr_user_func *f;
634
635 for (f = mpr_user_func_list; f->f_pre != NULL; f++) {
636 if (hdr->Function == f->Function)
637 return (f->f_pre(cm, cmd));
638 }
639 return (EINVAL);
640 }
641
/*
 * Execute a single user-supplied MPI request against the adapter and
 * copy the reply (and any data buffer) back out to user space.
 *
 * Locking: the softc mutex is held only around command allocation,
 * submission/wait, and release; copyin/copyout run unlocked.  The
 * RetFree label expects the lock held, RetFreeUnlocked re-acquires it.
 */
static int
mpr_user_command(struct mpr_softc *sc, struct mpr_usr_command *cmd)
{
	MPI2_REQUEST_HEADER *hdr;
	MPI2_DEFAULT_REPLY *rpl = NULL;
	void *buf = NULL;
	struct mpr_command *cm = NULL;
	int err = 0;
	int sz;

	mpr_lock(sc);
	cm = mpr_alloc_command(sc);

	if (cm == NULL) {
		mpr_printf(sc, "%s: no mpr requests\n", __func__);
		err = ENOMEM;
		goto RetFree;
	}
	mpr_unlock(sc);

	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;

	mpr_dprint(sc, MPR_USER, "%s: req %p %d rpl %p %d\n", __func__,
	    cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len);

	/* The request must fit within a single IOC request frame. */
	if (cmd->req_len > (int)sc->facts->IOCRequestFrameSize * 4) {
		err = EINVAL;
		goto RetFreeUnlocked;
	}
	err = copyin(cmd->req, hdr, cmd->req_len);
	if (err != 0)
		goto RetFreeUnlocked;

	mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
	    hdr->Function, hdr->MsgFlags);

	/* Optional data buffer used for DMA and the final copyout. */
	if (cmd->len > 0) {
		buf = malloc(cmd->len, M_MPRUSER, M_WAITOK|M_ZERO);
		cm->cm_data = buf;
		cm->cm_length = cmd->len;
	} else {
		cm->cm_data = NULL;
		cm->cm_length = 0;
	}

	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	/* Per-function fixups: SGL location, direction flags, copyin. */
	err = mpr_user_setup_request(cm, cmd);
	if (err == EINVAL) {
		mpr_printf(sc, "%s: unsupported parameter or unsupported "
		    "function in request (function = 0x%X)\n", __func__,
		    hdr->Function);
	}
	if (err != 0)
		goto RetFreeUnlocked;

	mpr_lock(sc);
	err = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);

	if (err || (cm == NULL)) {
		mpr_printf(sc, "%s: invalid request: error %d\n",
		    __func__, err);
		goto RetFree;
	}

	if (cm != NULL)
		rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
	/* MsgLength is in 4-byte words; 0 if the IOC sent no reply frame. */
	if (rpl != NULL)
		sz = rpl->MsgLength * 4;
	else
		sz = 0;

	/* Truncate the reply if the user's buffer is too small. */
	if (sz > cmd->rpl_len) {
		mpr_printf(sc, "%s: user reply buffer (%d) smaller than "
		    "returned buffer (%d)\n", __func__, cmd->rpl_len, sz);
		sz = cmd->rpl_len;
	}

	mpr_unlock(sc);
	copyout(rpl, cmd->rpl, sz);
	if (buf != NULL)
		copyout(buf, cmd->buf, cmd->len);
	mpr_dprint(sc, MPR_USER, "%s: reply size %d\n", __func__, sz);

RetFreeUnlocked:
	mpr_lock(sc);
RetFree:
	if (cm != NULL)
		mpr_free_command(sc, cm);
	mpr_unlock(sc);
	if (buf != NULL)
		free(buf, M_MPRUSER);
	return (err);
}
737
738 static int
739 mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
740 {
741 MPI2_REQUEST_HEADER *hdr, tmphdr;
742 MPI2_DEFAULT_REPLY *rpl;
743 Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply = NULL;
744 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
745 struct mpr_command *cm = NULL;
746 int i, err = 0, dir = 0, sz;
747 uint8_t tool, function = 0;
748 u_int sense_len;
749 struct mprsas_target *targ = NULL;
750
751 /*
752 * Only allow one passthru command at a time. Use the MPR_FLAGS_BUSY
753 * bit to denote that a passthru is being processed.
754 */
755 mpr_lock(sc);
756 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
757 mpr_dprint(sc, MPR_USER, "%s: Only one passthru command "
758 "allowed at a single time.", __func__);
759 mpr_unlock(sc);
760 return (EBUSY);
761 }
762 sc->mpr_flags |= MPR_FLAGS_BUSY;
763 mpr_unlock(sc);
764
765 /*
766 * Do some validation on data direction. Valid cases are:
767 * 1) DataSize is 0 and direction is NONE
768 * 2) DataSize is non-zero and one of:
769 * a) direction is READ or
770 * b) direction is WRITE or
771 * c) direction is BOTH and DataOutSize is non-zero
772 * If valid and the direction is BOTH, change the direction to READ.
773 * if valid and the direction is not BOTH, make sure DataOutSize is 0.
774 */
775 if (((data->DataSize == 0) &&
776 (data->DataDirection == MPR_PASS_THRU_DIRECTION_NONE)) ||
777 ((data->DataSize != 0) &&
778 ((data->DataDirection == MPR_PASS_THRU_DIRECTION_READ) ||
779 (data->DataDirection == MPR_PASS_THRU_DIRECTION_WRITE) ||
780 ((data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH) &&
781 (data->DataOutSize != 0))))) {
782 if (data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH)
783 data->DataDirection = MPR_PASS_THRU_DIRECTION_READ;
784 else
785 data->DataOutSize = 0;
786 } else
787 return (EINVAL);
788
789 mpr_dprint(sc, MPR_USER, "%s: req 0x%jx %d rpl 0x%jx %d "
790 "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__,
791 data->PtrRequest, data->RequestSize, data->PtrReply,
792 data->ReplySize, data->PtrData, data->DataSize,
793 data->PtrDataOut, data->DataOutSize, data->DataDirection);
794
795 /*
796 * copy in the header so we know what we're dealing with before we
797 * commit to allocating a command for it.
798 */
799 err = copyin(PTRIN(data->PtrRequest), &tmphdr, data->RequestSize);
800 if (err != 0)
801 goto RetFreeUnlocked;
802
803 if (data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4) {
804 err = EINVAL;
805 goto RetFreeUnlocked;
806 }
807
808 function = tmphdr.Function;
809 mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
810 function, tmphdr.MsgFlags);
811
812 /*
813 * Handle a passthru TM request.
814 */
815 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
816 MPI2_SCSI_TASK_MANAGE_REQUEST *task;
817
818 mpr_lock(sc);
819 cm = mprsas_alloc_tm(sc);
820 if (cm == NULL) {
821 err = EINVAL;
822 goto Ret;
823 }
824
825 /* Copy the header in. Only a small fixup is needed. */
826 task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
827 bcopy(&tmphdr, task, data->RequestSize);
828 task->TaskMID = cm->cm_desc.Default.SMID;
829
830 cm->cm_data = NULL;
831 cm->cm_desc.HighPriority.RequestFlags =
832 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
833 cm->cm_complete = NULL;
834 cm->cm_complete_data = NULL;
835
836 targ = mprsas_find_target_by_handle(sc->sassc, 0,
837 task->DevHandle);
838 if (targ == NULL) {
839 mpr_dprint(sc, MPR_INFO,
840 "%s %d : invalid handle for requested TM 0x%x \n",
841 __func__, __LINE__, task->DevHandle);
842 err = 1;
843 } else {
844 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
845 err = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);
846 }
847
848 if (err != 0) {
849 err = EIO;
850 mpr_dprint(sc, MPR_FAULT, "%s: task management failed",
851 __func__);
852 }
853 /*
854 * Copy the reply data and sense data to user space.
855 */
856 if ((cm != NULL) && (cm->cm_reply != NULL)) {
857 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
858 sz = rpl->MsgLength * 4;
859
860 if (sz > data->ReplySize) {
861 mpr_printf(sc, "%s: user reply buffer (%d) "
862 "smaller than returned buffer (%d)\n",
863 __func__, data->ReplySize, sz);
864 }
865 mpr_unlock(sc);
866 copyout(cm->cm_reply, PTRIN(data->PtrReply),
867 data->ReplySize);
868 mpr_lock(sc);
869 }
870 mprsas_free_tm(sc, cm);
871 goto Ret;
872 }
873
874 mpr_lock(sc);
875 cm = mpr_alloc_command(sc);
876
877 if (cm == NULL) {
878 mpr_printf(sc, "%s: no mpr requests\n", __func__);
879 err = ENOMEM;
880 goto Ret;
881 }
882 mpr_unlock(sc);
883
884 hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
885 bcopy(&tmphdr, hdr, data->RequestSize);
886
887 /*
888 * Do some checking to make sure the IOCTL request contains a valid
889 * request. Then set the SGL info.
890 */
891 mpr_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize));
892
893 /*
894 * Set up for read, write or both. From check above, DataOutSize will
895 * be 0 if direction is READ or WRITE, but it will have some non-zero
896 * value if the direction is BOTH. So, just use the biggest size to get
897 * the cm_data buffer size. If direction is BOTH, 2 SGLs need to be set
898 * up; the first is for the request and the second will contain the
899 * response data. cm_out_len needs to be set here and this will be used
900 * when the SGLs are set up.
901 */
902 cm->cm_data = NULL;
903 cm->cm_length = MAX(data->DataSize, data->DataOutSize);
904 cm->cm_out_len = data->DataOutSize;
905 cm->cm_flags = 0;
906 if (cm->cm_length != 0) {
907 cm->cm_data = malloc(cm->cm_length, M_MPRUSER, M_WAITOK |
908 M_ZERO);
909 cm->cm_flags = MPR_CM_FLAGS_DATAIN;
910 if (data->DataOutSize) {
911 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
912 err = copyin(PTRIN(data->PtrDataOut),
913 cm->cm_data, data->DataOutSize);
914 } else if (data->DataDirection ==
915 MPR_PASS_THRU_DIRECTION_WRITE) {
916 cm->cm_flags = MPR_CM_FLAGS_DATAOUT;
917 err = copyin(PTRIN(data->PtrData),
918 cm->cm_data, data->DataSize);
919 }
920 if (err != 0)
921 mpr_dprint(sc, MPR_FAULT, "%s: failed to copy IOCTL "
922 "data from user space\n", __func__);
923 }
924 /*
925 * Set this flag only if processing a command that does not need an
926 * IEEE SGL. The CLI Tool within the Toolbox uses IEEE SGLs, so clear
927 * the flag only for that tool if processing a Toolbox function.
928 */
929 cm->cm_flags |= MPR_CM_FLAGS_SGE_SIMPLE;
930 for (i = 0; i < sizeof (ieee_sgl_func_list); i++) {
931 if (function == ieee_sgl_func_list[i]) {
932 if (function == MPI2_FUNCTION_TOOLBOX)
933 {
934 tool = (uint8_t)hdr->FunctionDependent1;
935 if (tool != MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
936 break;
937 }
938 cm->cm_flags &= ~MPR_CM_FLAGS_SGE_SIMPLE;
939 break;
940 }
941 }
942 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
943
944 if (function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
945 nvme_encap_request =
946 (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
947 cm->cm_desc.Default.RequestFlags =
948 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
949
950 /*
951 * Get the Physical Address of the sense buffer.
952 * Save the user's Error Response buffer address and use that
953 * field to hold the sense buffer address.
954 * Clear the internal sense buffer, which will potentially hold
955 * the Completion Queue Entry on return, or 0 if no Entry.
956 * Build the PRPs and set direction bits.
957 * Send the request.
958 */
959 cm->nvme_error_response =
960 (uint64_t *)(uintptr_t)(((uint64_t)nvme_encap_request->
961 ErrorResponseBaseAddress.High << 32) |
962 (uint64_t)nvme_encap_request->
963 ErrorResponseBaseAddress.Low);
964 nvme_encap_request->ErrorResponseBaseAddress.High =
965 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
966 nvme_encap_request->ErrorResponseBaseAddress.Low =
967 htole32(cm->cm_sense_busaddr);
968 memset(cm->cm_sense, 0, NVME_ERROR_RESPONSE_SIZE);
969 mpr_build_nvme_prp(sc, cm, nvme_encap_request, cm->cm_data,
970 data->DataSize, data->DataOutSize);
971 }
972
973 /*
974 * Set up Sense buffer and SGL offset for IO passthru. SCSI IO request
975 * uses SCSI IO or Fast Path SCSI IO descriptor.
976 */
977 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
978 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
979 MPI2_SCSI_IO_REQUEST *scsi_io_req;
980
981 scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr;
982 /*
983 * Put SGE for data and data_out buffer at the end of
984 * scsi_io_request message header (64 bytes in total).
985 * Following above SGEs, the residual space will be used by
986 * sense data.
987 */
988 scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize -
989 64);
990 scsi_io_req->SenseBufferLowAddress =
991 htole32(cm->cm_sense_busaddr);
992
993 /*
994 * Set SGLOffset0 value. This is the number of dwords that SGL
995 * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct.
996 */
997 scsi_io_req->SGLOffset0 = 24;
998
999 /*
1000 * Setup descriptor info. RAID passthrough must use the
1001 * default request descriptor which is already set, so if this
1002 * is a SCSI IO request, change the descriptor to SCSI IO or
1003 * Fast Path SCSI IO. Also, if this is a SCSI IO request,
1004 * handle the reply in the mprsas_scsio_complete function.
1005 */
1006 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
1007 targ = mprsas_find_target_by_handle(sc->sassc, 0,
1008 scsi_io_req->DevHandle);
1009
1010 if (!targ) {
1011 printf("No Target found for handle %d\n",
1012 scsi_io_req->DevHandle);
1013 err = EINVAL;
1014 goto RetFreeUnlocked;
1015 }
1016
1017 if (targ->scsi_req_desc_type ==
1018 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1019 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1020 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1021 if (!sc->atomic_desc_capable) {
1022 cm->cm_desc.FastPathSCSIIO.DevHandle =
1023 scsi_io_req->DevHandle;
1024 }
1025 scsi_io_req->IoFlags |=
1026 MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1027 } else {
1028 cm->cm_desc.SCSIIO.RequestFlags =
1029 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1030 if (!sc->atomic_desc_capable) {
1031 cm->cm_desc.SCSIIO.DevHandle =
1032 scsi_io_req->DevHandle;
1033 }
1034 }
1035
1036 /*
1037 * Make sure the DevHandle is not 0 because this is a
1038 * likely error.
1039 */
1040 if (scsi_io_req->DevHandle == 0) {
1041 err = EINVAL;
1042 goto RetFreeUnlocked;
1043 }
1044 }
1045 }
1046
1047 mpr_lock(sc);
1048
1049 err = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);
1050
1051 if (err || (cm == NULL)) {
1052 mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
1053 err);
1054 goto RetFree;
1055 }
1056
1057 /*
1058 * Sync the DMA data, if any. Then copy the data to user space.
1059 */
1060 if (cm->cm_data != NULL) {
1061 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
1062 dir = BUS_DMASYNC_POSTREAD;
1063 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
1064 dir = BUS_DMASYNC_POSTWRITE;
1065 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1066 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1067
1068 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
1069 mpr_unlock(sc);
1070 err = copyout(cm->cm_data,
1071 PTRIN(data->PtrData), data->DataSize);
1072 mpr_lock(sc);
1073 if (err != 0)
1074 mpr_dprint(sc, MPR_FAULT, "%s: failed to copy "
1075 "IOCTL data to user space\n", __func__);
1076 }
1077 }
1078
1079 /*
1080 * Copy the reply data and sense data to user space.
1081 */
1082 if (cm->cm_reply != NULL) {
1083 rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
1084 sz = rpl->MsgLength * 4;
1085
1086 if (sz > data->ReplySize) {
1087 mpr_printf(sc, "%s: user reply buffer (%d) smaller "
1088 "than returned buffer (%d)\n", __func__,
1089 data->ReplySize, sz);
1090 }
1091 mpr_unlock(sc);
1092 copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize);
1093 mpr_lock(sc);
1094
1095 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
1096 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
1097 if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState &
1098 MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1099 sense_len =
1100 MIN((le32toh(((MPI2_SCSI_IO_REPLY *)rpl)->
1101 SenseCount)), sizeof(struct
1102 scsi_sense_data));
1103 mpr_unlock(sc);
1104 copyout(cm->cm_sense, cm->cm_req + 64,
1105 sense_len);
1106 mpr_lock(sc);
1107 }
1108 }
1109
1110 /*
1111 * Copy out the NVMe Error Reponse to user. The Error Response
1112 * buffer is given by the user, but a sense buffer is used to
1113 * get that data from the IOC. The user's
1114 * ErrorResponseBaseAddress is saved in the
1115 * 'nvme_error_response' field before the command because that
1116 * field is set to a sense buffer. When the command is
1117 * complete, the Error Response data from the IOC is copied to
1118 * that user address after it is checked for validity.
1119 * Also note that 'sense' buffers are not defined for
1120 * NVMe commands. Sense terminalogy is only used here so that
1121 * the same IOCTL structure and sense buffers can be used for
1122 * NVMe.
1123 */
1124 if (function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
1125 if (cm->nvme_error_response == NULL) {
1126 mpr_dprint(sc, MPR_INFO, "NVMe Error Response "
1127 "buffer is NULL. Response data will not be "
1128 "returned.\n");
1129 mpr_unlock(sc);
1130 goto RetFreeUnlocked;
1131 }
1132
1133 nvme_error_reply =
1134 (Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
1135 sz = MIN(le32toh(nvme_error_reply->ErrorResponseCount),
1136 NVME_ERROR_RESPONSE_SIZE);
1137 mpr_unlock(sc);
1138 copyout(cm->cm_sense, cm->nvme_error_response, sz);
1139 mpr_lock(sc);
1140 }
1141 }
1142 mpr_unlock(sc);
1143
1144 RetFreeUnlocked:
1145 mpr_lock(sc);
1146
1147 RetFree:
1148 if (cm != NULL) {
1149 if (cm->cm_data)
1150 free(cm->cm_data, M_MPRUSER);
1151 mpr_free_command(sc, cm);
1152 }
1153 Ret:
1154 sc->mpr_flags &= ~MPR_FLAGS_BUSY;
1155 mpr_unlock(sc);
1156
1157 return (err);
1158 }
1159
1160 static void
1161 mpr_user_get_adapter_data(struct mpr_softc *sc, mpr_adapter_data_t *data)
1162 {
1163 Mpi2ConfigReply_t mpi_reply;
1164 Mpi2BiosPage3_t config_page;
1165
1166 /*
1167 * Use the PCI interface functions to get the Bus, Device, and Function
1168 * information.
1169 */
1170 data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mpr_dev);
1171 data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mpr_dev);
1172 data->PciInformation.u.bits.FunctionNumber =
1173 pci_get_function(sc->mpr_dev);
1174
1175 /*
1176 * Get the FW version that should already be saved in IOC Facts.
1177 */
1178 data->MpiFirmwareVersion = sc->facts->FWVersion.Word;
1179
1180 /*
1181 * General device info.
1182 */
1183 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC)
1184 data->AdapterType = MPRIOCTL_ADAPTER_TYPE_SAS35;
1185 else
1186 data->AdapterType = MPRIOCTL_ADAPTER_TYPE_SAS3;
1187 data->PCIDeviceHwId = pci_get_device(sc->mpr_dev);
1188 data->PCIDeviceHwRev = pci_read_config(sc->mpr_dev, PCIR_REVID, 1);
1189 data->SubSystemId = pci_get_subdevice(sc->mpr_dev);
1190 data->SubsystemVendorId = pci_get_subvendor(sc->mpr_dev);
1191
1192 /*
1193 * Get the driver version.
1194 */
1195 strcpy((char *)&data->DriverVersion[0], MPR_DRIVER_VERSION);
1196
1197 /*
1198 * Need to get BIOS Config Page 3 for the BIOS Version.
1199 */
1200 data->BiosVersion = 0;
1201 mpr_lock(sc);
1202 if (mpr_config_get_bios_pg3(sc, &mpi_reply, &config_page))
1203 printf("%s: Error while retrieving BIOS Version\n", __func__);
1204 else
1205 data->BiosVersion = config_page.BiosVersion;
1206 mpr_unlock(sc);
1207 }
1208
1209 static void
1210 mpr_user_read_pci_info(struct mpr_softc *sc, mpr_pci_info_t *data)
1211 {
1212 int i;
1213
1214 /*
1215 * Use the PCI interface functions to get the Bus, Device, and Function
1216 * information.
1217 */
1218 data->BusNumber = pci_get_bus(sc->mpr_dev);
1219 data->DeviceNumber = pci_get_slot(sc->mpr_dev);
1220 data->FunctionNumber = pci_get_function(sc->mpr_dev);
1221
1222 /*
1223 * Now get the interrupt vector and the pci header. The vector can
1224 * only be 0 right now. The header is the first 256 bytes of config
1225 * space.
1226 */
1227 data->InterruptVector = 0;
1228 for (i = 0; i < sizeof (data->PciHeader); i++) {
1229 data->PciHeader[i] = pci_read_config(sc->mpr_dev, i, 1);
1230 }
1231 }
1232
1233 static uint8_t
1234 mpr_get_fw_diag_buffer_number(struct mpr_softc *sc, uint32_t unique_id)
1235 {
1236 uint8_t index;
1237
1238 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
1239 if (sc->fw_diag_buffer_list[index].unique_id == unique_id) {
1240 return (index);
1241 }
1242 }
1243
1244 return (MPR_FW_DIAGNOSTIC_UID_NOT_FOUND);
1245 }
1246
1247 static int
1248 mpr_post_fw_diag_buffer(struct mpr_softc *sc,
1249 mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
1250 {
1251 MPI2_DIAG_BUFFER_POST_REQUEST *req;
1252 MPI2_DIAG_BUFFER_POST_REPLY *reply;
1253 struct mpr_command *cm = NULL;
1254 int i, status;
1255
1256 /*
1257 * If buffer is not enabled, just leave.
1258 */
1259 *return_code = MPR_FW_DIAG_ERROR_POST_FAILED;
1260 if (!pBuffer->enabled) {
1261 return (MPR_DIAG_FAILURE);
1262 }
1263
1264 /*
1265 * Clear some flags initially.
1266 */
1267 pBuffer->force_release = FALSE;
1268 pBuffer->valid_data = FALSE;
1269 pBuffer->owned_by_firmware = FALSE;
1270
1271 /*
1272 * Get a command.
1273 */
1274 cm = mpr_alloc_command(sc);
1275 if (cm == NULL) {
1276 mpr_printf(sc, "%s: no mpr requests\n", __func__);
1277 return (MPR_DIAG_FAILURE);
1278 }
1279
1280 /*
1281 * Build the request for releasing the FW Diag Buffer and send it.
1282 */
1283 req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req;
1284 req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1285 req->BufferType = pBuffer->buffer_type;
1286 req->ExtendedType = pBuffer->extended_type;
1287 req->BufferLength = pBuffer->size;
1288 for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++)
1289 req->ProductSpecific[i] = pBuffer->product_specific[i];
1290 mpr_from_u64(sc->fw_diag_busaddr, &req->BufferAddress);
1291 cm->cm_data = NULL;
1292 cm->cm_length = 0;
1293 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1294 cm->cm_complete_data = NULL;
1295
1296 /*
1297 * Send command synchronously.
1298 */
1299 status = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);
1300 if (status || (cm == NULL)) {
1301 mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
1302 status);
1303 status = MPR_DIAG_FAILURE;
1304 goto done;
1305 }
1306
1307 /*
1308 * Process POST reply.
1309 */
1310 reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply;
1311 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
1312 MPI2_IOCSTATUS_SUCCESS) {
1313 status = MPR_DIAG_FAILURE;
1314 mpr_dprint(sc, MPR_FAULT, "%s: post of FW Diag Buffer failed "
1315 "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and "
1316 "TransferLength = 0x%x\n", __func__,
1317 le16toh(reply->IOCStatus), le32toh(reply->IOCLogInfo),
1318 le32toh(reply->TransferLength));
1319 goto done;
1320 }
1321
1322 /*
1323 * Post was successful.
1324 */
1325 pBuffer->valid_data = TRUE;
1326 pBuffer->owned_by_firmware = TRUE;
1327 *return_code = MPR_FW_DIAG_ERROR_SUCCESS;
1328 status = MPR_DIAG_SUCCESS;
1329
1330 done:
1331 if (cm != NULL)
1332 mpr_free_command(sc, cm);
1333 return (status);
1334 }
1335
1336 static int
1337 mpr_release_fw_diag_buffer(struct mpr_softc *sc,
1338 mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
1339 uint32_t diag_type)
1340 {
1341 MPI2_DIAG_RELEASE_REQUEST *req;
1342 MPI2_DIAG_RELEASE_REPLY *reply;
1343 struct mpr_command *cm = NULL;
1344 int status;
1345
1346 /*
1347 * If buffer is not enabled, just leave.
1348 */
1349 *return_code = MPR_FW_DIAG_ERROR_RELEASE_FAILED;
1350 if (!pBuffer->enabled) {
1351 mpr_dprint(sc, MPR_USER, "%s: This buffer type is not "
1352 "supported by the IOC", __func__);
1353 return (MPR_DIAG_FAILURE);
1354 }
1355
1356 /*
1357 * Clear some flags initially.
1358 */
1359 pBuffer->force_release = FALSE;
1360 pBuffer->valid_data = FALSE;
1361 pBuffer->owned_by_firmware = FALSE;
1362
1363 /*
1364 * Get a command.
1365 */
1366 cm = mpr_alloc_command(sc);
1367 if (cm == NULL) {
1368 mpr_printf(sc, "%s: no mpr requests\n", __func__);
1369 return (MPR_DIAG_FAILURE);
1370 }
1371
1372 /*
1373 * Build the request for releasing the FW Diag Buffer and send it.
1374 */
1375 req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req;
1376 req->Function = MPI2_FUNCTION_DIAG_RELEASE;
1377 req->BufferType = pBuffer->buffer_type;
1378 cm->cm_data = NULL;
1379 cm->cm_length = 0;
1380 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1381 cm->cm_complete_data = NULL;
1382
1383 /*
1384 * Send command synchronously.
1385 */
1386 status = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);
1387 if (status || (cm == NULL)) {
1388 mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
1389 status);
1390 status = MPR_DIAG_FAILURE;
1391 goto done;
1392 }
1393
1394 /*
1395 * Process RELEASE reply.
1396 */
1397 reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply;
1398 if (((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
1399 MPI2_IOCSTATUS_SUCCESS) || pBuffer->owned_by_firmware) {
1400 status = MPR_DIAG_FAILURE;
1401 mpr_dprint(sc, MPR_FAULT, "%s: release of FW Diag Buffer "
1402 "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n",
1403 __func__, le16toh(reply->IOCStatus),
1404 le32toh(reply->IOCLogInfo));
1405 goto done;
1406 }
1407
1408 /*
1409 * Release was successful.
1410 */
1411 *return_code = MPR_FW_DIAG_ERROR_SUCCESS;
1412 status = MPR_DIAG_SUCCESS;
1413
1414 /*
1415 * If this was for an UNREGISTER diag type command, clear the unique ID.
1416 */
1417 if (diag_type == MPR_FW_DIAG_TYPE_UNREGISTER) {
1418 pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID;
1419 }
1420
1421 done:
1422 if (cm != NULL)
1423 mpr_free_command(sc, cm);
1424
1425 return (status);
1426 }
1427
1428 static int
1429 mpr_diag_register(struct mpr_softc *sc, mpr_fw_diag_register_t *diag_register,
1430 uint32_t *return_code)
1431 {
1432 mpr_fw_diagnostic_buffer_t *pBuffer;
1433 uint8_t extended_type, buffer_type, i;
1434 uint32_t buffer_size;
1435 uint32_t unique_id;
1436 int status;
1437
1438 extended_type = diag_register->ExtendedType;
1439 buffer_type = diag_register->BufferType;
1440 buffer_size = diag_register->RequestedBufferSize;
1441 unique_id = diag_register->UniqueId;
1442
1443 /*
1444 * Check for valid buffer type
1445 */
1446 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
1447 *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1448 return (MPR_DIAG_FAILURE);
1449 }
1450
1451 /*
1452 * Get the current buffer and look up the unique ID. The unique ID
1453 * should not be found. If it is, the ID is already in use.
1454 */
1455 i = mpr_get_fw_diag_buffer_number(sc, unique_id);
1456 pBuffer = &sc->fw_diag_buffer_list[buffer_type];
1457 if (i != MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1458 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1459 return (MPR_DIAG_FAILURE);
1460 }
1461
1462 /*
1463 * The buffer's unique ID should not be registered yet, and the given
1464 * unique ID cannot be 0.
1465 */
1466 if ((pBuffer->unique_id != MPR_FW_DIAG_INVALID_UID) ||
1467 (unique_id == MPR_FW_DIAG_INVALID_UID)) {
1468 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1469 return (MPR_DIAG_FAILURE);
1470 }
1471
1472 /*
1473 * If this buffer is already posted as immediate, just change owner.
1474 */
1475 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
1476 (pBuffer->unique_id == MPR_FW_DIAG_INVALID_UID)) {
1477 pBuffer->immediate = FALSE;
1478 pBuffer->unique_id = unique_id;
1479 return (MPR_DIAG_SUCCESS);
1480 }
1481
1482 /*
1483 * Post a new buffer after checking if it's enabled. The DMA buffer
1484 * that is allocated will be contiguous (nsegments = 1).
1485 */
1486 if (!pBuffer->enabled) {
1487 *return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;
1488 return (MPR_DIAG_FAILURE);
1489 }
1490 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1491 1, 0, /* algnmnt, boundary */
1492 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1493 BUS_SPACE_MAXADDR, /* highaddr */
1494 NULL, NULL, /* filter, filterarg */
1495 buffer_size, /* maxsize */
1496 1, /* nsegments */
1497 buffer_size, /* maxsegsize */
1498 0, /* flags */
1499 NULL, NULL, /* lockfunc, lockarg */
1500 &sc->fw_diag_dmat)) {
1501 device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer DMA "
1502 "tag\n");
1503 return (ENOMEM);
1504 }
1505 if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer,
1506 BUS_DMA_NOWAIT, &sc->fw_diag_map)) {
1507 device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer "
1508 "memory\n");
1509 return (ENOMEM);
1510 }
1511 bzero(sc->fw_diag_buffer, buffer_size);
1512 bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer,
1513 buffer_size, mpr_memaddr_cb, &sc->fw_diag_busaddr, 0);
1514 pBuffer->size = buffer_size;
1515
1516 /*
1517 * Copy the given info to the diag buffer and post the buffer.
1518 */
1519 pBuffer->buffer_type = buffer_type;
1520 pBuffer->immediate = FALSE;
1521 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
1522 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
1523 i++) {
1524 pBuffer->product_specific[i] =
1525 diag_register->ProductSpecific[i];
1526 }
1527 }
1528 pBuffer->extended_type = extended_type;
1529 pBuffer->unique_id = unique_id;
1530 status = mpr_post_fw_diag_buffer(sc, pBuffer, return_code);
1531
1532 /*
1533 * In case there was a failure, free the DMA buffer.
1534 */
1535 if (status == MPR_DIAG_FAILURE) {
1536 if (sc->fw_diag_busaddr != 0)
1537 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
1538 if (sc->fw_diag_buffer != NULL)
1539 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
1540 sc->fw_diag_map);
1541 if (sc->fw_diag_dmat != NULL)
1542 bus_dma_tag_destroy(sc->fw_diag_dmat);
1543 }
1544
1545 return (status);
1546 }
1547
1548 static int
1549 mpr_diag_unregister(struct mpr_softc *sc,
1550 mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
1551 {
1552 mpr_fw_diagnostic_buffer_t *pBuffer;
1553 uint8_t i;
1554 uint32_t unique_id;
1555 int status;
1556
1557 unique_id = diag_unregister->UniqueId;
1558
1559 /*
1560 * Get the current buffer and look up the unique ID. The unique ID
1561 * should be there.
1562 */
1563 i = mpr_get_fw_diag_buffer_number(sc, unique_id);
1564 if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1565 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1566 return (MPR_DIAG_FAILURE);
1567 }
1568
1569 pBuffer = &sc->fw_diag_buffer_list[i];
1570
1571 /*
1572 * Try to release the buffer from FW before freeing it. If release
1573 * fails, don't free the DMA buffer in case FW tries to access it
1574 * later. If buffer is not owned by firmware, can't release it.
1575 */
1576 if (!pBuffer->owned_by_firmware) {
1577 status = MPR_DIAG_SUCCESS;
1578 } else {
1579 status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code,
1580 MPR_FW_DIAG_TYPE_UNREGISTER);
1581 }
1582
1583 /*
1584 * At this point, return the current status no matter what happens with
1585 * the DMA buffer.
1586 */
1587 pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID;
1588 if (status == MPR_DIAG_SUCCESS) {
1589 if (sc->fw_diag_busaddr != 0)
1590 bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
1591 if (sc->fw_diag_buffer != NULL)
1592 bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
1593 sc->fw_diag_map);
1594 if (sc->fw_diag_dmat != NULL)
1595 bus_dma_tag_destroy(sc->fw_diag_dmat);
1596 }
1597
1598 return (status);
1599 }
1600
1601 static int
1602 mpr_diag_query(struct mpr_softc *sc, mpr_fw_diag_query_t *diag_query,
1603 uint32_t *return_code)
1604 {
1605 mpr_fw_diagnostic_buffer_t *pBuffer;
1606 uint8_t i;
1607 uint32_t unique_id;
1608
1609 unique_id = diag_query->UniqueId;
1610
1611 /*
1612 * If ID is valid, query on ID.
1613 * If ID is invalid, query on buffer type.
1614 */
1615 if (unique_id == MPR_FW_DIAG_INVALID_UID) {
1616 i = diag_query->BufferType;
1617 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
1618 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1619 return (MPR_DIAG_FAILURE);
1620 }
1621 } else {
1622 i = mpr_get_fw_diag_buffer_number(sc, unique_id);
1623 if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1624 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1625 return (MPR_DIAG_FAILURE);
1626 }
1627 }
1628
1629 /*
1630 * Fill query structure with the diag buffer info.
1631 */
1632 pBuffer = &sc->fw_diag_buffer_list[i];
1633 diag_query->BufferType = pBuffer->buffer_type;
1634 diag_query->ExtendedType = pBuffer->extended_type;
1635 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
1636 for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4);
1637 i++) {
1638 diag_query->ProductSpecific[i] =
1639 pBuffer->product_specific[i];
1640 }
1641 }
1642 diag_query->TotalBufferSize = pBuffer->size;
1643 diag_query->DriverAddedBufferSize = 0;
1644 diag_query->UniqueId = pBuffer->unique_id;
1645 diag_query->ApplicationFlags = 0;
1646 diag_query->DiagnosticFlags = 0;
1647
1648 /*
1649 * Set/Clear application flags
1650 */
1651 if (pBuffer->immediate) {
1652 diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_APP_OWNED;
1653 } else {
1654 diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_APP_OWNED;
1655 }
1656 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
1657 diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_BUFFER_VALID;
1658 } else {
1659 diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_BUFFER_VALID;
1660 }
1661 if (pBuffer->owned_by_firmware) {
1662 diag_query->ApplicationFlags |=
1663 MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
1664 } else {
1665 diag_query->ApplicationFlags &=
1666 ~MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
1667 }
1668
1669 return (MPR_DIAG_SUCCESS);
1670 }
1671
1672 static int
1673 mpr_diag_read_buffer(struct mpr_softc *sc,
1674 mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
1675 uint32_t *return_code)
1676 {
1677 mpr_fw_diagnostic_buffer_t *pBuffer;
1678 uint8_t i, *pData;
1679 uint32_t unique_id;
1680 int status;
1681
1682 unique_id = diag_read_buffer->UniqueId;
1683
1684 /*
1685 * Get the current buffer and look up the unique ID. The unique ID
1686 * should be there.
1687 */
1688 i = mpr_get_fw_diag_buffer_number(sc, unique_id);
1689 if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1690 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1691 return (MPR_DIAG_FAILURE);
1692 }
1693
1694 pBuffer = &sc->fw_diag_buffer_list[i];
1695
1696 /*
1697 * Make sure requested read is within limits
1698 */
1699 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
1700 pBuffer->size) {
1701 *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1702 return (MPR_DIAG_FAILURE);
1703 }
1704
1705 /*
1706 * Copy the requested data from DMA to the diag_read_buffer. The DMA
1707 * buffer that was allocated is one contiguous buffer.
1708 */
1709 pData = (uint8_t *)(sc->fw_diag_buffer +
1710 diag_read_buffer->StartingOffset);
1711 if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0)
1712 return (MPR_DIAG_FAILURE);
1713 diag_read_buffer->Status = 0;
1714
1715 /*
1716 * Set or clear the Force Release flag.
1717 */
1718 if (pBuffer->force_release) {
1719 diag_read_buffer->Flags |= MPR_FW_DIAG_FLAG_FORCE_RELEASE;
1720 } else {
1721 diag_read_buffer->Flags &= ~MPR_FW_DIAG_FLAG_FORCE_RELEASE;
1722 }
1723
1724 /*
1725 * If buffer is to be reregistered, make sure it's not already owned by
1726 * firmware first.
1727 */
1728 status = MPR_DIAG_SUCCESS;
1729 if (!pBuffer->owned_by_firmware) {
1730 if (diag_read_buffer->Flags & MPR_FW_DIAG_FLAG_REREGISTER) {
1731 status = mpr_post_fw_diag_buffer(sc, pBuffer,
1732 return_code);
1733 }
1734 }
1735
1736 return (status);
1737 }
1738
1739 static int
1740 mpr_diag_release(struct mpr_softc *sc, mpr_fw_diag_release_t *diag_release,
1741 uint32_t *return_code)
1742 {
1743 mpr_fw_diagnostic_buffer_t *pBuffer;
1744 uint8_t i;
1745 uint32_t unique_id;
1746 int status;
1747
1748 unique_id = diag_release->UniqueId;
1749
1750 /*
1751 * Get the current buffer and look up the unique ID. The unique ID
1752 * should be there.
1753 */
1754 i = mpr_get_fw_diag_buffer_number(sc, unique_id);
1755 if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
1756 *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
1757 return (MPR_DIAG_FAILURE);
1758 }
1759
1760 pBuffer = &sc->fw_diag_buffer_list[i];
1761
1762 /*
1763 * If buffer is not owned by firmware, it's already been released.
1764 */
1765 if (!pBuffer->owned_by_firmware) {
1766 *return_code = MPR_FW_DIAG_ERROR_ALREADY_RELEASED;
1767 return (MPR_DIAG_FAILURE);
1768 }
1769
1770 /*
1771 * Release the buffer.
1772 */
1773 status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code,
1774 MPR_FW_DIAG_TYPE_RELEASE);
1775 return (status);
1776 }
1777
1778 static int
1779 mpr_do_diag_action(struct mpr_softc *sc, uint32_t action, uint8_t *diag_action,
1780 uint32_t length, uint32_t *return_code)
1781 {
1782 mpr_fw_diag_register_t diag_register;
1783 mpr_fw_diag_unregister_t diag_unregister;
1784 mpr_fw_diag_query_t diag_query;
1785 mpr_diag_read_buffer_t diag_read_buffer;
1786 mpr_fw_diag_release_t diag_release;
1787 int status = MPR_DIAG_SUCCESS;
1788 uint32_t original_return_code;
1789
1790 original_return_code = *return_code;
1791 *return_code = MPR_FW_DIAG_ERROR_SUCCESS;
1792
1793 switch (action) {
1794 case MPR_FW_DIAG_TYPE_REGISTER:
1795 if (!length) {
1796 *return_code =
1797 MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1798 status = MPR_DIAG_FAILURE;
1799 break;
1800 }
1801 if (copyin(diag_action, &diag_register,
1802 sizeof(diag_register)) != 0)
1803 return (MPR_DIAG_FAILURE);
1804 status = mpr_diag_register(sc, &diag_register,
1805 return_code);
1806 break;
1807
1808 case MPR_FW_DIAG_TYPE_UNREGISTER:
1809 if (length < sizeof(diag_unregister)) {
1810 *return_code =
1811 MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1812 status = MPR_DIAG_FAILURE;
1813 break;
1814 }
1815 if (copyin(diag_action, &diag_unregister,
1816 sizeof(diag_unregister)) != 0)
1817 return (MPR_DIAG_FAILURE);
1818 status = mpr_diag_unregister(sc, &diag_unregister,
1819 return_code);
1820 break;
1821
1822 case MPR_FW_DIAG_TYPE_QUERY:
1823 if (length < sizeof (diag_query)) {
1824 *return_code =
1825 MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1826 status = MPR_DIAG_FAILURE;
1827 break;
1828 }
1829 if (copyin(diag_action, &diag_query, sizeof(diag_query))
1830 != 0)
1831 return (MPR_DIAG_FAILURE);
1832 status = mpr_diag_query(sc, &diag_query, return_code);
1833 if (status == MPR_DIAG_SUCCESS)
1834 if (copyout(&diag_query, diag_action,
1835 sizeof (diag_query)) != 0)
1836 return (MPR_DIAG_FAILURE);
1837 break;
1838
1839 case MPR_FW_DIAG_TYPE_READ_BUFFER:
1840 if (copyin(diag_action, &diag_read_buffer,
1841 sizeof(diag_read_buffer)) != 0)
1842 return (MPR_DIAG_FAILURE);
1843 if (length < diag_read_buffer.BytesToRead) {
1844 *return_code =
1845 MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1846 status = MPR_DIAG_FAILURE;
1847 break;
1848 }
1849 status = mpr_diag_read_buffer(sc, &diag_read_buffer,
1850 PTRIN(diag_read_buffer.PtrDataBuffer),
1851 return_code);
1852 if (status == MPR_DIAG_SUCCESS) {
1853 if (copyout(&diag_read_buffer, diag_action,
1854 sizeof(diag_read_buffer) -
1855 sizeof(diag_read_buffer.PtrDataBuffer)) !=
1856 0)
1857 return (MPR_DIAG_FAILURE);
1858 }
1859 break;
1860
1861 case MPR_FW_DIAG_TYPE_RELEASE:
1862 if (length < sizeof(diag_release)) {
1863 *return_code =
1864 MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1865 status = MPR_DIAG_FAILURE;
1866 break;
1867 }
1868 if (copyin(diag_action, &diag_release,
1869 sizeof(diag_release)) != 0)
1870 return (MPR_DIAG_FAILURE);
1871 status = mpr_diag_release(sc, &diag_release,
1872 return_code);
1873 break;
1874
1875 default:
1876 *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
1877 status = MPR_DIAG_FAILURE;
1878 break;
1879 }
1880
1881 if ((status == MPR_DIAG_FAILURE) &&
1882 (original_return_code == MPR_FW_DIAG_NEW) &&
1883 (*return_code != MPR_FW_DIAG_ERROR_SUCCESS))
1884 status = MPR_DIAG_SUCCESS;
1885
1886 return (status);
1887 }
1888
1889 static int
1890 mpr_user_diag_action(struct mpr_softc *sc, mpr_diag_action_t *data)
1891 {
1892 int status;
1893
1894 /*
1895 * Only allow one diag action at one time.
1896 */
1897 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
1898 mpr_dprint(sc, MPR_USER, "%s: Only one FW diag command "
1899 "allowed at a single time.", __func__);
1900 return (EBUSY);
1901 }
1902 sc->mpr_flags |= MPR_FLAGS_BUSY;
1903
1904 /*
1905 * Send diag action request
1906 */
1907 if (data->Action == MPR_FW_DIAG_TYPE_REGISTER ||
1908 data->Action == MPR_FW_DIAG_TYPE_UNREGISTER ||
1909 data->Action == MPR_FW_DIAG_TYPE_QUERY ||
1910 data->Action == MPR_FW_DIAG_TYPE_READ_BUFFER ||
1911 data->Action == MPR_FW_DIAG_TYPE_RELEASE) {
1912 status = mpr_do_diag_action(sc, data->Action,
1913 PTRIN(data->PtrDiagAction), data->Length,
1914 &data->ReturnCode);
1915 } else
1916 status = EINVAL;
1917
1918 sc->mpr_flags &= ~MPR_FLAGS_BUSY;
1919 return (status);
1920 }
1921
1922 /*
1923 * Copy the event recording mask and the event queue size out. For
1924 * clarification, the event recording mask (events_to_record) is not the same
1925 * thing as the event mask (event_mask). events_to_record has a bit set for
1926 * every event type that is to be recorded by the driver, and event_mask has a
1927 * bit cleared for every event that is allowed into the driver from the IOC.
1928 * They really have nothing to do with each other.
1929 */
1930 static void
1931 mpr_user_event_query(struct mpr_softc *sc, mpr_event_query_t *data)
1932 {
1933 uint8_t i;
1934
1935 mpr_lock(sc);
1936 data->Entries = MPR_EVENT_QUEUE_SIZE;
1937
1938 for (i = 0; i < 4; i++) {
1939 data->Types[i] = sc->events_to_record[i];
1940 }
1941 mpr_unlock(sc);
1942 }
1943
1944 /*
1945 * Set the driver's event mask according to what's been given. See
1946 * mpr_user_event_query for explanation of the event recording mask and the IOC
1947 * event mask. It's the app's responsibility to enable event logging by setting
1948 * the bits in events_to_record. Initially, no events will be logged.
1949 */
1950 static void
1951 mpr_user_event_enable(struct mpr_softc *sc, mpr_event_enable_t *data)
1952 {
1953 uint8_t i;
1954
1955 mpr_lock(sc);
1956 for (i = 0; i < 4; i++) {
1957 sc->events_to_record[i] = data->Types[i];
1958 }
1959 mpr_unlock(sc);
1960 }
1961
1962 /*
1963 * Copy out the events that have been recorded, up to the max events allowed.
1964 */
1965 static int
1966 mpr_user_event_report(struct mpr_softc *sc, mpr_event_report_t *data)
1967 {
1968 int status = 0;
1969 uint32_t size;
1970
1971 mpr_lock(sc);
1972 size = data->Size;
1973 if ((size >= sizeof(sc->recorded_events)) && (status == 0)) {
1974 mpr_unlock(sc);
1975 if (copyout((void *)sc->recorded_events,
1976 PTRIN(data->PtrEvents), size) != 0)
1977 status = EFAULT;
1978 mpr_lock(sc);
1979 } else {
1980 /*
1981 * data->Size value is not large enough to copy event data.
1982 */
1983 status = EFAULT;
1984 }
1985
1986 /*
1987 * Change size value to match the number of bytes that were copied.
1988 */
1989 if (status == 0)
1990 data->Size = sizeof(sc->recorded_events);
1991 mpr_unlock(sc);
1992
1993 return (status);
1994 }
1995
1996 /*
1997 * Record events into the driver from the IOC if they are not masked.
1998 */
void
mprsas_record_event(struct mpr_softc *sc,
    MPI2_EVENT_NOTIFICATION_REPLY *event_reply)
{
	uint32_t event;
	int i, j;
	uint16_t event_data_len;
	boolean_t sendAEN = FALSE;

	event = event_reply->Event;

	/*
	 * Generate a system event to let anyone who cares know that a
	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
	 * event mask is set to.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
		sendAEN = TRUE;
	}

	/*
	 * Record the event only if its corresponding bit is set in
	 * events_to_record.  events_to_record is indexed as 4 words of 32
	 * event bits each: word = event / 32, bit = event % 32.  event_index
	 * is the index into recorded_events and event_number is the overall
	 * number of an event being recorded since start-of-day.  event_index
	 * will roll over; event_number will never roll over.
	 */
	i = (uint8_t)(event / 32);
	j = (uint8_t)(event % 32);
	if ((i < 4) && ((1 << j) & sc->events_to_record[i])) {
		/* Reuse i as the queue slot being written. */
		i = sc->event_index;
		sc->recorded_events[i].Type = event;
		sc->recorded_events[i].Number = ++sc->event_number;
		/* Data is sized in 32-bit words, hence the * 4 byte count. */
		bzero(sc->recorded_events[i].Data, MPR_MAX_EVENT_DATA_LENGTH *
		    4);
		event_data_len = event_reply->EventDataLength;

		/*
		 * NOTE(review): when EventDataLength is 0, the slot's Type
		 * and Number are written but event_index is NOT advanced and
		 * sendAEN is NOT set, so the next recorded event overwrites
		 * this slot -- confirm this is intentional.
		 */
		if (event_data_len > 0) {
			/*
			 * Limit data to size in m_event entry
			 */
			if (event_data_len > MPR_MAX_EVENT_DATA_LENGTH) {
				event_data_len = MPR_MAX_EVENT_DATA_LENGTH;
			}
			/* j is reused here as the data-word copy index. */
			for (j = 0; j < event_data_len; j++) {
				sc->recorded_events[i].Data[j] =
				    event_reply->EventData[j];
			}

			/*
			 * check for index wrap-around
			 */
			if (++i == MPR_EVENT_QUEUE_SIZE) {
				i = 0;
			}
			sc->event_index = (uint8_t)i;

			/*
			 * Set flag to send the event.
			 */
			sendAEN = TRUE;
		}
	}

	/*
	 * Generate a system event if flag is set to let anyone who cares know
	 * that an event has occurred.  (Not implemented yet; see the SLM
	 * notes below for the Solaris-era mechanism this mirrors.)
	 */
	if (sendAEN) {
		//SLM-how to send a system event (see kqueue, kevent)
		//	(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
		//	    "SAS", NULL, NULL, DDI_NOSLEEP);
	}
}
2073
2074 static int
2075 mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data)
2076 {
2077 int status = 0;
2078
2079 switch (data->Command) {
2080 /*
2081 * IO access is not supported.
2082 */
2083 case REG_IO_READ:
2084 case REG_IO_WRITE:
2085 mpr_dprint(sc, MPR_USER, "IO access is not supported. "
2086 "Use memory access.");
2087 status = EINVAL;
2088 break;
2089
2090 case REG_MEM_READ:
2091 data->RegData = mpr_regread(sc, data->RegOffset);
2092 break;
2093
2094 case REG_MEM_WRITE:
2095 mpr_regwrite(sc, data->RegOffset, data->RegData);
2096 break;
2097
2098 default:
2099 status = EINVAL;
2100 break;
2101 }
2102
2103 return (status);
2104 }
2105
2106 static int
2107 mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data)
2108 {
2109 uint8_t bt2dh = FALSE;
2110 uint8_t dh2bt = FALSE;
2111 uint16_t dev_handle, bus, target;
2112
2113 bus = data->Bus;
2114 target = data->TargetID;
2115 dev_handle = data->DevHandle;
2116
2117 /*
2118 * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/
2119 * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is
2120 * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is
2121 * invalid.
2122 */
2123 if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF))
2124 dh2bt = TRUE;
2125 if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF))
2126 bt2dh = TRUE;
2127 if (!dh2bt && !bt2dh)
2128 return (EINVAL);
2129
2130 /*
2131 * Only handle bus of 0. Make sure target is within range.
2132 */
2133 if (bt2dh) {
2134 if (bus != 0)
2135 return (EINVAL);
2136
2137 if (target > sc->max_devices) {
2138 mpr_dprint(sc, MPR_XINFO, "Target ID is out of range "
2139 "for Bus/Target to DevHandle mapping.");
2140 return (EINVAL);
2141 }
2142 dev_handle = sc->mapping_table[target].dev_handle;
2143 if (dev_handle)
2144 data->DevHandle = dev_handle;
2145 } else {
2146 bus = 0;
2147 target = mpr_mapping_get_tid_from_handle(sc, dev_handle);
2148 data->Bus = bus;
2149 data->TargetID = target;
2150 }
2151
2152 return (0);
2153 }
2154
/*
 * Native ioctl dispatcher for the mpr control device.
 *
 * 'arg' has already been copied into kernel space by the devsw layer, so
 * each case may cast it directly to the command-specific request struct.
 * Commands that transfer a user data buffer (MPRIO_READ_CFG_PAGE,
 * MPRIO_READ_EXT_CFG_PAGE, MPRIO_WRITE_CFG_PAGE) allocate a kernel
 * scratch page ('mpr_page'), copyin/copyout through it, and free it at
 * the common exit below.  Firmware interaction is done under the
 * controller lock; copyin/copyout are done unlocked.
 *
 * Returns 0 on success, an errno on failure, or ENOIOCTL for unknown
 * commands.
 */
static int
mpr_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
    struct thread *td)
{
	struct mpr_softc *sc;
	struct mpr_cfg_page_req *page_req;
	struct mpr_ext_cfg_page_req *ext_page_req;
	void *mpr_page;
	int error, msleep_ret;

	mpr_page = NULL;
	sc = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;

	switch (cmd) {
	case MPRIO_READ_CFG_HEADER:
		mpr_lock(sc);
		error = mpr_user_read_cfg_header(sc, page_req);
		mpr_unlock(sc);
		break;
	case MPRIO_READ_CFG_PAGE:
		/* The caller's buffer must hold at least the page header. */
		if (page_req->len < (int)sizeof(MPI2_CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK | M_ZERO);
		/* Only the header is copied in; the page body is read back. */
		error = copyin(page_req->buf, mpr_page,
		    sizeof(MPI2_CONFIG_PAGE_HEADER));
		if (error)
			break;
		mpr_lock(sc);
		error = mpr_user_read_cfg_page(sc, page_req, mpr_page);
		mpr_unlock(sc);
		if (error)
			break;
		error = copyout(mpr_page, page_req->buf, page_req->len);
		break;
	case MPRIO_READ_EXT_CFG_HEADER:
		mpr_lock(sc);
		error = mpr_user_read_extcfg_header(sc, ext_page_req);
		mpr_unlock(sc);
		break;
	case MPRIO_READ_EXT_CFG_PAGE:
		/* Same pattern as MPRIO_READ_CFG_PAGE, extended-header size. */
		if (ext_page_req->len <
		    (int)sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		mpr_page = malloc(ext_page_req->len, M_MPRUSER,
		    M_WAITOK | M_ZERO);
		error = copyin(ext_page_req->buf, mpr_page,
		    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		mpr_lock(sc);
		error = mpr_user_read_extcfg_page(sc, ext_page_req, mpr_page);
		mpr_unlock(sc);
		if (error)
			break;
		error = copyout(mpr_page, ext_page_req->buf, ext_page_req->len);
		break;
	case MPRIO_WRITE_CFG_PAGE:
		if (page_req->len < (int)sizeof(MPI2_CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK|M_ZERO);
		/* Whole page comes from user space for a write. */
		error = copyin(page_req->buf, mpr_page, page_req->len);
		if (error)
			break;
		mpr_lock(sc);
		error = mpr_user_write_cfg_page(sc, page_req, mpr_page);
		mpr_unlock(sc);
		break;
	case MPRIO_MPR_COMMAND:
		error = mpr_user_command(sc, (struct mpr_usr_command *)arg);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to be
		 * executed by the MPT firmware.  Call our routine which does
		 * this.  Only allow one passthru IOCTL at one time.
		 */
		error = mpr_user_pass_thru(sc, (mpr_pass_thru_t *)arg);
		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call our
		 * routine which does this.
		 */
		error = 0;
		mpr_user_get_adapter_data(sc, (mpr_adapter_data_t *)arg);
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		mpr_lock(sc);
		error = 0;
		mpr_user_read_pci_info(sc, (mpr_pci_info_t *)arg);
		mpr_unlock(sc);
		break;
	case MPTIOCTL_RESET_ADAPTER:
		/*
		 * Diag-reset the controller, then wait (up to 300 seconds)
		 * for the subsequent port enable to complete; the completion
		 * path wakes us via sc->port_enable_complete.
		 */
		mpr_lock(sc);
		sc->port_enable_complete = 0;
		uint32_t reinit_start = time_uptime;
		error = mpr_reinit(sc);
		/* Sleep for up to 300 seconds. */
		msleep_ret = msleep(&sc->port_enable_complete, &sc->mpr_mtx,
		    PRIBIO, "mpr_porten", 300 * hz);
		mpr_unlock(sc);
		if (msleep_ret)
			printf("Port Enable did not complete after Diag "
			    "Reset msleep error %d.\n", msleep_ret);
		else
			mpr_dprint(sc, MPR_USER, "Hard Reset with Port Enable "
			    "completed in %d seconds.\n",
			    (uint32_t)(time_uptime - reinit_start));
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our routine
		 * which does this.  Only allow one diag action at one time.
		 */
		mpr_lock(sc);
		error = mpr_user_diag_action(sc, (mpr_diag_action_t *)arg);
		mpr_unlock(sc);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query.  Call our routine which
		 * does this.
		 */
		error = 0;
		mpr_user_event_query(sc, (mpr_event_query_t *)arg);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable.  Call our routine which
		 * does this.
		 */
		error = 0;
		mpr_user_event_enable(sc, (mpr_event_enable_t *)arg);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report.  Call our routine which
		 * does this.
		 */
		error = mpr_user_event_report(sc, (mpr_event_report_t *)arg);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our routine
		 * which does this.
		 */
		mpr_lock(sc);
		error = mpr_user_reg_access(sc, (mpr_reg_access_t *)arg);
		mpr_unlock(sc);
		break;
	case MPTIOCTL_BTDH_MAPPING:
		/*
		 * The user has requested to translate a bus/target to a
		 * DevHandle or a DevHandle to a bus/target.  Call our routine
		 * which does this.
		 */
		error = mpr_user_btdh(sc, (mpr_btdh_mapping_t *)arg);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	/* Common exit: release the scratch page if one was allocated. */
	if (mpr_page != NULL)
		free(mpr_page, M_MPRUSER);

	return (error);
}
2335
2336 #ifdef COMPAT_FREEBSD32
2337
/*
 * 32-bit compatibility layout of struct mpr_cfg_page_req.  'buf' holds a
 * 32-bit user-space pointer value; mpr_ioctl32() widens it with PTRIN_CP
 * before calling the native handler.
 */
struct mpr_cfg_page_req32 {
	MPI2_CONFIG_PAGE_HEADER header;
	uint32_t page_address;
	uint32_t buf;		/* 32-bit user pointer, not a kernel pointer */
	int	len;
	uint16_t ioc_status;
};
2345
/*
 * 32-bit compatibility layout of struct mpr_ext_cfg_page_req; see
 * mpr_ioctl32() for the pointer-widening thunk.
 */
struct mpr_ext_cfg_page_req32 {
	MPI2_CONFIG_EXTENDED_PAGE_HEADER header;
	uint32_t page_address;
	uint32_t buf;		/* 32-bit user pointer */
	int	len;
	uint16_t ioc_status;
};
2353
/*
 * 32-bit compatibility layout of struct mpr_raid_action; see
 * mpr_ioctl32() for the pointer-widening thunk.
 */
struct mpr_raid_action32 {
	uint8_t action;
	uint8_t volume_bus;
	uint8_t volume_id;
	uint8_t phys_disk_num;
	uint32_t action_data_word;
	uint32_t buf;		/* 32-bit user pointer */
	int	len;
	uint32_t volume_status;
	uint32_t action_data[4];
	uint16_t action_status;
	uint16_t ioc_status;
	uint8_t write;
};
2368
/*
 * 32-bit compatibility layout of struct mpr_usr_command.  'req', 'rpl'
 * and 'buf' are all 32-bit user pointers widened by mpr_ioctl32().
 */
struct mpr_usr_command32 {
	uint32_t req;		/* 32-bit user pointer */
	uint32_t req_len;
	uint32_t rpl;		/* 32-bit user pointer */
	uint32_t rpl_len;
	uint32_t buf;		/* 32-bit user pointer */
	int	len;
	uint32_t flags;
};
2378
/*
 * 32-bit counterparts of the native MPRIO_* command codes.  They share
 * the native group/number pairs but encode the sizes of the *32 structs
 * above, so an ILP32 process's codes are distinct from the native ones.
 */
#define	MPRIO_READ_CFG_HEADER32	_IOWR('M', 200, struct mpr_cfg_page_req32)
#define	MPRIO_READ_CFG_PAGE32	_IOWR('M', 201, struct mpr_cfg_page_req32)
#define	MPRIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mpr_ext_cfg_page_req32)
#define	MPRIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mpr_ext_cfg_page_req32)
#define	MPRIO_WRITE_CFG_PAGE32	_IOWR('M', 204, struct mpr_cfg_page_req32)
#define	MPRIO_RAID_ACTION32	_IOWR('M', 205, struct mpr_raid_action32)
#define	MPRIO_MPR_COMMAND32	_IOWR('M', 210, struct mpr_usr_command32)
2386
/*
 * Thunk for ioctls issued by 32-bit (ILP32) processes.  Converts the
 * 32-bit request struct into its native counterpart (widening embedded
 * user pointers via PTRIN_CP), invokes the native mpr_ioctl(), then — for
 * commands with the IOC_OUT direction bit — copies the result fields back
 * into the caller's 32-bit layout (narrowing pointers via PTROUT_CP).
 * Unknown commands return ENOIOCTL without calling the native handler.
 */
static int
mpr_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag,
    struct thread *td)
{
	/* All four views alias the same kernel copy of the user request. */
	struct mpr_cfg_page_req32 *page32 = _arg;
	struct mpr_ext_cfg_page_req32 *ext32 = _arg;
	struct mpr_raid_action32 *raid32 = _arg;
	struct mpr_usr_command32 *user32 = _arg;
	union {
		struct mpr_cfg_page_req page;
		struct mpr_ext_cfg_page_req ext;
		struct mpr_raid_action raid;
		struct mpr_usr_command user;
	} arg;
	u_long cmd;
	int error;

	switch (cmd32) {
	case MPRIO_READ_CFG_HEADER32:
	case MPRIO_READ_CFG_PAGE32:
	case MPRIO_WRITE_CFG_PAGE32:
		if (cmd32 == MPRIO_READ_CFG_HEADER32)
			cmd = MPRIO_READ_CFG_HEADER;
		else if (cmd32 == MPRIO_READ_CFG_PAGE32)
			cmd = MPRIO_READ_CFG_PAGE;
		else
			cmd = MPRIO_WRITE_CFG_PAGE;
		CP(*page32, arg.page, header);
		CP(*page32, arg.page, page_address);
		PTRIN_CP(*page32, arg.page, buf);
		CP(*page32, arg.page, len);
		CP(*page32, arg.page, ioc_status);
		break;

	case MPRIO_READ_EXT_CFG_HEADER32:
	case MPRIO_READ_EXT_CFG_PAGE32:
		if (cmd32 == MPRIO_READ_EXT_CFG_HEADER32)
			cmd = MPRIO_READ_EXT_CFG_HEADER;
		else
			cmd = MPRIO_READ_EXT_CFG_PAGE;
		CP(*ext32, arg.ext, header);
		CP(*ext32, arg.ext, page_address);
		PTRIN_CP(*ext32, arg.ext, buf);
		CP(*ext32, arg.ext, len);
		CP(*ext32, arg.ext, ioc_status);
		break;

	case MPRIO_RAID_ACTION32:
		cmd = MPRIO_RAID_ACTION;
		CP(*raid32, arg.raid, action);
		CP(*raid32, arg.raid, volume_bus);
		CP(*raid32, arg.raid, volume_id);
		CP(*raid32, arg.raid, phys_disk_num);
		CP(*raid32, arg.raid, action_data_word);
		PTRIN_CP(*raid32, arg.raid, buf);
		CP(*raid32, arg.raid, len);
		CP(*raid32, arg.raid, volume_status);
		/* action_data is a fixed array; copy it wholesale. */
		bcopy(raid32->action_data, arg.raid.action_data,
		    sizeof arg.raid.action_data);
		CP(*raid32, arg.raid, ioc_status);
		CP(*raid32, arg.raid, write);
		break;

	case MPRIO_MPR_COMMAND32:
		cmd = MPRIO_MPR_COMMAND;
		PTRIN_CP(*user32, arg.user, req);
		CP(*user32, arg.user, req_len);
		PTRIN_CP(*user32, arg.user, rpl);
		CP(*user32, arg.user, rpl_len);
		PTRIN_CP(*user32, arg.user, buf);
		CP(*user32, arg.user, len);
		CP(*user32, arg.user, flags);
		break;
	default:
		return (ENOIOCTL);
	}

	error = mpr_ioctl(dev, cmd, &arg, flag, td);
	/* Copy results back only for commands that output data. */
	if (error == 0 && (cmd32 & IOC_OUT) != 0) {
		switch (cmd32) {
		case MPRIO_READ_CFG_HEADER32:
		case MPRIO_READ_CFG_PAGE32:
		case MPRIO_WRITE_CFG_PAGE32:
			CP(arg.page, *page32, header);
			CP(arg.page, *page32, page_address);
			PTROUT_CP(arg.page, *page32, buf);
			CP(arg.page, *page32, len);
			CP(arg.page, *page32, ioc_status);
			break;

		case MPRIO_READ_EXT_CFG_HEADER32:
		case MPRIO_READ_EXT_CFG_PAGE32:
			CP(arg.ext, *ext32, header);
			CP(arg.ext, *ext32, page_address);
			PTROUT_CP(arg.ext, *ext32, buf);
			CP(arg.ext, *ext32, len);
			CP(arg.ext, *ext32, ioc_status);
			break;

		case MPRIO_RAID_ACTION32:
			CP(arg.raid, *raid32, action);
			CP(arg.raid, *raid32, volume_bus);
			CP(arg.raid, *raid32, volume_id);
			CP(arg.raid, *raid32, phys_disk_num);
			CP(arg.raid, *raid32, action_data_word);
			PTROUT_CP(arg.raid, *raid32, buf);
			CP(arg.raid, *raid32, len);
			CP(arg.raid, *raid32, volume_status);
			bcopy(arg.raid.action_data, raid32->action_data,
			    sizeof arg.raid.action_data);
			CP(arg.raid, *raid32, ioc_status);
			CP(arg.raid, *raid32, write);
			break;

		case MPRIO_MPR_COMMAND32:
			PTROUT_CP(arg.user, *user32, req);
			CP(arg.user, *user32, req_len);
			PTROUT_CP(arg.user, *user32, rpl);
			CP(arg.user, *user32, rpl_len);
			PTROUT_CP(arg.user, *user32, buf);
			CP(arg.user, *user32, len);
			CP(arg.user, *user32, flags);
			break;
		}
	}

	return (error);
}
2515 #endif /* COMPAT_FREEBSD32 */
2516
2517 static int
2518 mpr_ioctl_devsw(struct cdev *dev, u_long com, caddr_t arg, int flag,
2519 struct thread *td)
2520 {
2521 #ifdef COMPAT_FREEBSD32
2522 if (SV_CURPROC_FLAG(SV_ILP32))
2523 return (mpr_ioctl32(dev, com, arg, flag, td));
2524 #endif
2525 return (mpr_ioctl(dev, com, arg, flag, td));
2526 }
Cache object: 9898e0c24e42d4e3c3c589100cee821f
|