/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved."

/**
 * @file
 * Implementation of required FreeBSD PCI interface functions
 */

#include "ocs.h"
#include "version.h"
#include <sys/sysctl.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

/**
 * Tunable parameters for transport
 */
int logmask = 0;
int ctrlmask = 2;
int logdest = 1;
int loglevel = LOG_INFO;
int ramlog_size = 1*1024*1024;
int ddump_saved_size = 0;
static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1";
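
/*
 * Note: the integer tunables above are module-wide defaults; ocs_setup_params()
 * re-reads most of them per device unit through resource_int_value(), so they
 * can be overridden with loader/device hints, e.g. (illustrative):
 *
 *	hint.ocs_fc.0.logmask="0x10"
 *
 * queue_topology is the default queue layout string handed to the hardware
 * layer via hw_global.queue_topology_string.
 */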

static void ocs_release_bus(struct ocs_softc *);
static int32_t ocs_intr_alloc(struct ocs_softc *);
static int32_t ocs_intr_setup(struct ocs_softc *);
static int32_t ocs_intr_teardown(struct ocs_softc *);
static int ocs_pci_intx_filter(void *);
static void ocs_pci_intr(void *);
static int32_t ocs_init_dma_tag(struct ocs_softc *ocs);

static int32_t ocs_setup_fcports(ocs_t *ocs);

ocs_t *ocs_devices[MAX_OCS_DEVICES];

/**
 * @brief Check support for the given device
 *
 * Determine support for a given device by examining the PCI vendor and
 * device IDs
 *
 * @param dev device abstraction
 *
 * @return BUS_PROBE_DEFAULT if the device is supported, ENXIO otherwise
 */
static int
ocs_pci_probe(device_t dev)
{
	char *desc = NULL;

	if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) {
		return ENXIO;
	}

	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_EMULEX_OCE16001:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LPE31004:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_OCE50102:
		desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter";
		break;
	default:
		return ENXIO;
	}

	device_set_desc(dev, desc);

	return BUS_PROBE_DEFAULT;
}

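/**
 * @brief Map the device's memory BAR
 *
 * Allocates and activates the memory resource behind 64-bit BAR0 and saves
 * the bus tag and handle for later register access.
 *
 * @param dev device abstraction
 * @param ocs pointer to the driver's software context
 *
 * @return 0 on success, ENXIO otherwise
 */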
static int
ocs_map_bars(device_t dev, struct ocs_softc *ocs)
{

	/*
	 * Map PCI BAR0 register into the CPU's space.
	 */

	ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0);
	ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ocs->reg[0].rid, RF_ACTIVE);

	if (ocs->reg[0].res == NULL) {
		device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
		    ocs->reg[0].rid);
		return ENXIO;
	}

	ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res);
	ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res);
	return 0;
}

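/**
 * @brief Initialize per-device tunable parameters
 *
 * Applies the module-wide defaults and then overrides them with any values
 * supplied through device hints (target/initiator mode, logging, RAM log and
 * saved-ddump sizes, link speed, topology, vport count, and related knobs).
 * Also initializes the device lock and, when enabled, the RAM log and saved
 * ddump buffers.
 *
 * @param ocs pointer to the driver's software context
 *
 * @return 0 on success, 1 if both initiator and target mode are disabled
 */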
static int
ocs_setup_params(struct ocs_softc *ocs)
{
	int32_t i = 0;
	const char *hw_war_version;

	/* Setup tunable parameters */
	ocs->ctrlmask = ctrlmask;
	ocs->speed = 0;
	ocs->topology = 0;
	ocs->ethernet_license = 0;
	ocs->num_scsi_ios = 8192;
	ocs->enable_hlm = 0;
	ocs->hlm_group_size = 8;
	ocs->logmask = logmask;

	ocs->config_tgt = FALSE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "target", &i)) {
		if (1 == i) {
			ocs->config_tgt = TRUE;
			device_printf(ocs->dev, "Enabling target\n");
		}
	}

	ocs->config_ini = TRUE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "initiator", &i)) {
		if (0 == i) {
			ocs->config_ini = FALSE;
			device_printf(ocs->dev, "Disabling initiator\n");
		}
	}
	ocs->enable_ini = ocs->config_ini;

	if (!ocs->config_ini && !ocs->config_tgt) {
		device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n");
		return 1;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "logmask", &logmask)) {
		device_printf(ocs->dev, "logmask = %#x\n", logmask);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "logdest", &logdest)) {
		device_printf(ocs->dev, "logdest = %#x\n", logdest);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "loglevel", &loglevel)) {
		device_printf(ocs->dev, "loglevel = %#x\n", loglevel);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ramlog_size", &ramlog_size)) {
		device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ddump_saved_size", &ddump_saved_size)) {
		device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size);
	}

	/* If enabled, initialize a RAM logging buffer */
	if (logdest & 2) {
		ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS,
		    OCS_RAMLOG_DEFAULT_BUFFERS);
		/*
		 * If NULL was returned, then we'll simply skip using the ramlog but
		 * set logdest to 1 to ensure that we at least get default logging.
		 */
		if (ocs->ramlog == NULL) {
			logdest = 1;
		}
	}

	/* initialize a saved ddump */
	if (ddump_saved_size) {
		if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) {
			ocs_log_err(ocs, "failed to allocate memory for saved ddump\n");
		}
	}

	if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "hw_war_version", &hw_war_version)) {
		device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version);
		ocs->hw_war_version = strdup(hw_war_version, M_OCS);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "explicit_buffer_list", &i)) {
		ocs->explicit_buffer_list = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "ethernet_license", &i)) {
		ocs->ethernet_license = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "speed", &i)) {
		device_printf(ocs->dev, "speed = %d Mbps\n", i);
		ocs->speed = i;
	}
	ocs->desc = device_get_desc(ocs->dev);

	ocs_device_lock_init(ocs);
	ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH;
	ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device);

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "enable_hlm", &i)) {
		device_printf(ocs->dev, "enable_hlm = %d\n", i);
		ocs->enable_hlm = i;
		if (ocs->enable_hlm) {
			ocs->hlm_group_size = 8;

			if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
			    "hlm_group_size", &i)) {
				ocs->hlm_group_size = i;
			}
			device_printf(ocs->dev, "hlm_group_size = %d\n", ocs->hlm_group_size);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "num_scsi_ios", &i)) {
		ocs->num_scsi_ios = i;
		device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios);
	} else {
		ocs->num_scsi_ios = 8192;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "topology", &i)) {
		ocs->topology = i;
		device_printf(ocs->dev, "Setting topology=%#x\n", i);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "num_vports", &i)) {
		if (i >= 0 && i <= 254) {
			device_printf(ocs->dev, "num_vports = %d\n", i);
			ocs->num_vports = i;
		} else {
			device_printf(ocs->dev, "num_vports: %d not supported\n", i);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "external_loopback", &i)) {
		device_printf(ocs->dev, "external_loopback = %d\n", i);
		ocs->external_loopback = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "tgt_rscn_delay", &i)) {
		device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i);
		ocs->tgt_rscn_delay_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "tgt_rscn_period", &i)) {
		device_printf(ocs->dev, "tgt_rscn_period = %d\n", i);
		ocs->tgt_rscn_period_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
	    "target_io_timer", &i)) {
		device_printf(ocs->dev, "target_io_timer = %d\n", i);
		ocs->target_io_timer_sec = i;
	}

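	/*
	 * The remaining transport defaults are currently hard-coded rather
	 * than exposed as device hints.
	 */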
	hw_global.queue_topology_string = queue_topology;
	ocs->rq_selection_policy = 0;
	ocs->rr_quanta = 1;
	ocs->filter_def = "0,0,0,0";

	return 0;
}

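/**
 * @brief Allocate and initialize the fcport array
 *
 * Allocates one ocs_fcport entry for the physical port plus one per
 * configured vport, assigns each the initiator/target role knobs, and
 * records the default WWPN/WWNN for every vport.
 *
 * @param ocs pointer to the driver's software context
 *
 * @return 0 on success, 1 on allocation failure
 */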
static int32_t
ocs_setup_fcports(ocs_t *ocs)
{
	uint32_t i = 0, role = 0;
	uint64_t sli_wwpn, sli_wwnn;
	size_t size;
	ocs_xport_t *xport = ocs->xport;
	ocs_vport_spec_t *vport;
	ocs_fcport *fcp = NULL;

	size = sizeof(ocs_fcport) * (ocs->num_vports + 1);

	ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT);
	if (ocs->fcports == NULL) {
		device_printf(ocs->dev, "Can't allocate fcport\n");
		return 1;
	}

	role = ((ocs->enable_ini) ? KNOB_ROLE_INITIATOR : 0) |
	    ((ocs->enable_tgt) ? KNOB_ROLE_TARGET : 0);

	fcp = FCPORT(ocs, i);
	fcp->role = role;
	i++;

	ocs_list_foreach(&xport->vport_list, vport) {
		fcp = FCPORT(ocs, i);
		vport->tgt_data = fcp;
		fcp->vport = vport;
		fcp->role = role;

		if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) {
			ocs_log_err(ocs, "Get default wwn failed\n");
			i++;
			continue;
		}

		vport->wwpn = ocs_be64toh(sli_wwpn);
		vport->wwnn = ocs_be64toh(sli_wwnn);
		i++;
		ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx\n", vport->wwpn, vport->wwnn);
	}

	return 0;
}

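/**
 * @brief Bring the device fully online
 *
 * Allocates, attaches, and initializes the transport object, creates the
 * per-IO DMA maps, sets up the fcports and CAM SIM, installs the interrupt
 * handler, and, if initiator or target mode is enabled, brings the port
 * online.
 *
 * @param ocs pointer to the driver's software context
 *
 * @return 0 on success, a non-zero error value otherwise
 */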
int32_t
ocs_device_attach(ocs_t *ocs)
{
	int32_t i;
	ocs_io_t *io = NULL;

	if (ocs->attached) {
		ocs_log_warn(ocs, "%s: Device is already attached\n", __func__);
		return -1;
	}

	/* Allocate transport object and bring online */
	ocs->xport = ocs_xport_alloc(ocs);
	if (ocs->xport == NULL) {
		device_printf(ocs->dev, "failed to allocate transport object\n");
		return ENOMEM;
	} else if (ocs_xport_attach(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__);
		goto fail_xport_attach;
	} else if (ocs_xport_initialize(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__);
		goto fail_xport_init;
	}

	if (ocs_init_dma_tag(ocs)) {
		goto fail_intr_setup;
	}

	for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
		if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) {
			device_printf(ocs->dev, "%s: bad dma map create\n", __func__);
		}

		io->tgt_io.state = OCS_CAM_IO_FREE;
	}

	if (ocs_setup_fcports(ocs)) {
		device_printf(ocs->dev, "FCports creation failed\n");
		goto fail_intr_setup;
	}

	if (ocs_cam_attach(ocs)) {
		device_printf(ocs->dev, "cam attach failed\n");
		goto fail_intr_setup;
	}

	if (ocs_intr_setup(ocs)) {
		device_printf(ocs->dev, "Interrupt setup failed\n");
		goto fail_intr_setup;
	}

	if (ocs->enable_ini || ocs->enable_tgt) {
		if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) {
			device_printf(ocs->dev, "Can't init port\n");
			goto fail_xport_online;
		}
	}

	ocs->attached = true;

	return 0;

fail_xport_online:
	if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) {
		device_printf(ocs->dev, "Transport Shutdown timed out\n");
	}
	ocs_intr_teardown(ocs);
fail_intr_setup:
fail_xport_init:
	ocs_xport_detach(ocs->xport);
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	ocs_xport_free(ocs->xport);
	ocs->xport = NULL;
fail_xport_attach:
	if (ocs->xport)
		ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport)));
	ocs->xport = NULL;
	return ENXIO;
}

/**
 * @brief Connect the driver to the given device
 *
 * If the probe routine is successful, the OS will give the driver
 * the opportunity to connect itself to the device. This routine
 * maps PCI resources (memory BARs and interrupts) and initializes a
 * hardware object.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver attaches to the device, ENXIO otherwise
 */

static int
ocs_pci_attach(device_t dev)
{
	struct ocs_softc *ocs;
	int instance;

	instance = device_get_unit(dev);

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (NULL == ocs) {
		device_printf(dev, "cannot allocate softc\n");
		return ENOMEM;
	}
	memset(ocs, 0, sizeof(struct ocs_softc));

	if (instance < ARRAY_SIZE(ocs_devices)) {
		ocs_devices[instance] = ocs;
	} else {
		device_printf(dev, "got unexpected ocs instance number %d\n", instance);
	}

	ocs->instance_index = instance;

	ocs->dev = dev;

	pci_enable_io(dev, SYS_RES_MEMORY);
	pci_enable_busmaster(dev);

	ocs->pci_vendor = pci_get_vendor(dev);
	ocs->pci_device = pci_get_device(dev);
	snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));

	/* Map all memory BARs */
	if (ocs_map_bars(dev, ocs)) {
		device_printf(dev, "Failed to map pci bars\n");
		goto release_bus;
	}

	/* create a root DMA tag for the device */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),
	    1,				/* byte alignment */
	    0,				/* no boundary restrictions */
	    BUS_SPACE_MAXADDR,		/* no minimum low address */
	    BUS_SPACE_MAXADDR,		/* no maximum high address */
	    NULL,			/* no filter function */
	    NULL,			/* or arguments */
	    BUS_SPACE_MAXSIZE,		/* max size covered by tag */
	    BUS_SPACE_UNRESTRICTED,	/* no segment count restrictions */
	    BUS_SPACE_MAXSIZE,		/* no segment length restrictions */
	    0,				/* flags */
	    NULL,			/* no lock manipulation function */
	    NULL,			/* or arguments */
	    &ocs->dmat)) {
		device_printf(dev, "parent DMA tag allocation failed\n");
		goto release_bus;
	}

	if (ocs_intr_alloc(ocs)) {
		device_printf(dev, "Interrupt allocation failed\n");
		goto release_bus;
	}

	if (PCIC_SERIALBUS == pci_get_class(dev) &&
	    PCIS_SERIALBUS_FC == pci_get_subclass(dev))
		ocs->ocs_xport = OCS_XPORT_FC;
	else {
		device_printf(dev, "unsupported class (%#x : %#x)\n",
		    pci_get_class(dev),
		    pci_get_subclass(dev));
		goto release_bus;
	}

	/* Setup tunable parameters */
	if (ocs_setup_params(ocs)) {
		device_printf(ocs->dev, "failed to setup params\n");
		goto release_bus;
	}

	if (ocs_device_attach(ocs)) {
		device_printf(ocs->dev, "failed to attach device\n");
		goto release_params;
	}

	ocs->fc_type = FC_TYPE_FCP;

	ocs_debug_attach(ocs);

	return 0;

release_params:
	ocs_ramlog_free(ocs, ocs->ramlog);
	ocs_device_lock_free(ocs);
	free(ocs->hw_war_version, M_OCS);
release_bus:
	ocs_release_bus(ocs);
	return ENXIO;
}

/**
 * @brief Free resources when the PCI device detaches
 *
 * @param ocs pointer to ocs structure
 *
 * @return 0 for success, a negative error code value for failure.
 */

int32_t
ocs_device_detach(ocs_t *ocs)
{
	int32_t rc = 0, i;
	ocs_io_t *io = NULL;

	if (ocs != NULL) {
		if (!ocs->attached) {
			ocs_log_warn(ocs, "%s: Device is not attached\n", __func__);
			return -1;
		}

		ocs->attached = FALSE;

		rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN);
		if (rc) {
			ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__);
		}

		ocs_intr_teardown(ocs);

		if (ocs_xport_detach(ocs->xport) != 0) {
			ocs_log_err(ocs, "%s: Transport detach failed\n", __func__);
		}

		ocs_cam_detach(ocs);
		ocs_free(ocs, ocs->fcports,
		    sizeof(ocs_fcport) * (ocs->num_vports + 1));

		for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
			if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {
				device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__);
			}
		}
		bus_dma_tag_destroy(ocs->dmat);
		ocs_xport_free(ocs->xport);
		ocs->xport = NULL;
	}

	return 0;
}

/**
 * @brief Detach the driver from the given device
 *
 * If the driver is a loadable module, this routine gets called at unload
 * time. This routine will stop the device and free any allocated resources.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver detaches from the device, a non-zero error otherwise
 */
static int
ocs_pci_detach(device_t dev)
{
	struct ocs_softc *ocs;

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (!ocs) {
		device_printf(dev, "no driver context?!?\n");
		return -1;
	}

	if (ocs->config_tgt && ocs->enable_tgt) {
		device_printf(dev, "can't detach with target mode enabled\n");
		return EBUSY;
	}

	ocs_device_detach(ocs);

	/*
	 * Workaround for OCS SCSI Transport quirk.
	 *
	 * CTL requires that target mode is disabled prior to unloading the
	 * driver (i.e., ocs->enable_tgt = FALSE), but once the target is disabled,
	 * the transport will not call ocs_scsi_tgt_del_device() which deallocates
	 * CAM resources. The workaround is to explicitly make the call here.
	 */
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	/* free the buffer created by strdup() */
	free(ocs->hw_war_version, M_OCS);

	ocs_device_lock_free(ocs);

	ocs_debug_detach(ocs);

	ocs_ramlog_free(ocs, ocs->ramlog);

	ocs_release_bus(ocs);

	return 0;
}

/**
 * @brief Notify driver of system shutdown
 *
 * @param dev device abstraction
 *
 * @return 0 (always succeeds)
 */
static int
ocs_pci_shutdown(device_t dev)
{
	device_printf(dev, "%s\n", __func__);
	return 0;
}

/**
 * @brief Release bus resources allocated within the soft context
 *
 * @param ocs Pointer to the driver's context
 *
 * @return none
 */
static void
ocs_release_bus(struct ocs_softc *ocs)
{

	if (NULL != ocs) {
		uint32_t i;

		ocs_intr_teardown(ocs);

		if (ocs->irq) {
			bus_release_resource(ocs->dev, SYS_RES_IRQ,
			    rman_get_rid(ocs->irq), ocs->irq);

			if (ocs->n_vec) {
				pci_release_msi(ocs->dev);
				ocs->n_vec = 0;
			}

			ocs->irq = NULL;
		}

		bus_dma_tag_destroy(ocs->dmat);

		for (i = 0; i < PCI_MAX_BAR; i++) {
			if (ocs->reg[i].res) {
				bus_release_resource(ocs->dev, SYS_RES_MEMORY,
				    ocs->reg[i].rid,
				    ocs->reg[i].res);
			}
		}
	}
}

/**
 * @brief Allocate and initialize interrupts
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_alloc(struct ocs_softc *ocs)
{

	ocs->n_vec = 1;
	if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
		device_printf(ocs->dev, "MSI-X allocation failed\n");
		if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
			device_printf(ocs->dev, "MSI allocation failed\n");
			ocs->irqid = 0;
			ocs->n_vec = 0;
		} else
			ocs->irqid = 1;
	} else {
		ocs->irqid = 1;
	}

	ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (NULL == ocs->irq) {
		device_printf(ocs->dev, "could not allocate interrupt\n");
		return -1;
	}

	ocs->intr_ctx.vec = 0;
	ocs->intr_ctx.softc = ocs;
	snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
	    "%s_intr_%d",
	    device_get_nameunit(ocs->dev),
	    ocs->intr_ctx.vec);

	return 0;
}

/**
 * @brief Create and attach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_setup(struct ocs_softc *ocs)
{
	driver_filter_t *filter = NULL;

	if (0 == ocs->n_vec) {
		filter = ocs_pci_intx_filter;
	}

	if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
	    filter, ocs_pci_intr, &ocs->intr_ctx,
	    &ocs->tag)) {
		device_printf(ocs->dev, "could not initialize interrupt\n");
		return -1;
	}

	return 0;
}

/**
 * @brief Detach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_teardown(struct ocs_softc *ocs)
{

	if (!ocs) {
		printf("%s: bad driver context?!?\n", __func__);
		return -1;
	}

	if (ocs->tag) {
		bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
		ocs->tag = NULL;
	}

	return 0;
}

/**
 * @brief PCI interrupt handler
 *
 * @param arg pointer to the driver's software context
 *
 * @return FILTER_SCHEDULE_THREAD if the interrupt was raised by this device,
 *         FILTER_STRAY otherwise
 */
static int
ocs_pci_intx_filter(void *arg)
{
	ocs_intr_ctx_t *intr = arg;
	struct ocs_softc *ocs = NULL;
	uint16_t val = 0;

	if (NULL == intr) {
		return FILTER_STRAY;
	}

	ocs = intr->softc;
#ifndef PCIM_STATUS_INTR
#define PCIM_STATUS_INTR	0x0008
#endif
	val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
	if (0xffff == val) {
		device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__);
		return FILTER_STRAY;
	}
	if (0 == (val & PCIM_STATUS_INTR)) {
		return FILTER_STRAY;
	}

	/* Disable the INTx line; the interrupt will be serviced by ocs_pci_intr() */
	val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
	val |= PCIM_CMD_INTxDIS;
	pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);

	return FILTER_SCHEDULE_THREAD;
}

/**
 * @brief interrupt handler
 *
 * @param context pointer to the interrupt context
 */
static void
ocs_pci_intr(void *context)
{
	ocs_intr_ctx_t *intr = context;
	struct ocs_softc *ocs = intr->softc;

	mtx_lock(&ocs->sim_lock);
	ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
	mtx_unlock(&ocs->sim_lock);
}

/**
 * @brief Initialize DMA tag
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_init_dma_tag(struct ocs_softc *ocs)
{
	uint32_t max_sgl = 0;
	uint32_t max_sge = 0;

	/*
	 * IOs can't use the parent DMA tag and must create their
	 * own, based primarily on a restricted number of DMA segments.
	 * This is more of a BSD requirement than a SLI Port requirement.
	 */
	ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge);

	if (bus_dma_tag_create(ocs->dmat,
	    1,			/* byte alignment */
	    0,			/* no boundary restrictions */
	    BUS_SPACE_MAXADDR,	/* no minimum low address */
	    BUS_SPACE_MAXADDR,	/* no maximum high address */
	    NULL,		/* no filter function */
	    NULL,		/* or arguments */
	    BUS_SPACE_MAXSIZE,	/* max size covered by tag */
	    max_sgl,		/* segment count restrictions */
	    max_sge,		/* segment length restrictions */
	    0,			/* flags */
	    NULL,		/* no lock manipulation function */
	    NULL,		/* or arguments */
	    &ocs->buf_dmat)) {
		device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__);
		return -1;
	}
	return 0;
}
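/**
 * @brief Look up a driver property by name
 *
 * Not implemented on FreeBSD; always reports that the property was not found.
 *
 * @param prop_name name of the property
 * @param buffer buffer for the property value
 * @param buffer_len length of the buffer in bytes
 *
 * @return -1 (property not found)
 */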
int32_t
ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len)
{
	return -1;
}

/**
 * @brief return pointer to ocs structure given instance index
 *
 * A pointer to an ocs structure is returned given an instance index.
 *
 * @param index index to ocs_devices array
 *
 * @return ocs pointer, or NULL if the index is out of range
 */
ocs_t *
ocs_get_instance(uint32_t index)
{
	if (index < ARRAY_SIZE(ocs_devices)) {
		return ocs_devices[index];
	}
	return NULL;
}

/**
 * @brief Return instance index of an opaque ocs structure
 *
 * Returns the ocs instance index
 *
 * @param os pointer to ocs instance
 *
 * @return ocs instance index
 */
uint32_t
ocs_instance(void *os)
{
	ocs_t *ocs = os;

	return ocs->instance_index;
}

static device_method_t ocs_methods[] = {
	DEVMETHOD(device_probe, ocs_pci_probe),
	DEVMETHOD(device_attach, ocs_pci_attach),
	DEVMETHOD(device_detach, ocs_pci_detach),
	DEVMETHOD(device_shutdown, ocs_pci_shutdown),
	{0, 0}
};

static driver_t ocs_driver = {
	"ocs_fc",
	ocs_methods,
	sizeof(struct ocs_softc)
};

static devclass_t ocs_devclass;

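/*
 * Register the driver with the PCI bus (newbus) and advertise the module
 * version so that other kernel modules can declare a dependency on ocs_fc.
 */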
DRIVER_MODULE(ocs_fc, pci, ocs_driver, ocs_devclass, 0, 0);
MODULE_VERSION(ocs_fc, 1);