FreeBSD/Linux Kernel Cross Reference
sys/dev/mpt/mpt.h
1 /* $FreeBSD$ */
2 /*-
3 * Generic defines for LSI '909 FC adapters.
4 * FreeBSD Version.
5 *
6 * Copyright (c) 2000, 2001 by Greg Ansley
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 /*-
30 * Copyright (c) 2002, 2006 by Matthew Jacob
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions are
35 * met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39 * substantially similar to the "NO WARRANTY" disclaimer below
40 * ("Disclaimer") and any redistribution must be conditioned upon including
41 * a substantially similar Disclaimer requirement for further binary
42 * redistribution.
43 * 3. Neither the names of the above listed copyright holders nor the names
44 * of any contributors may be used to endorse or promote products derived
45 * from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 *
59 * Support from Chris Ellsworth in order to make SAS adapters work
60 * is gratefully acknowledged.
61 *
62 *
63 * Support from LSI-Logic has also gone a great deal toward making this a
64 * workable subsystem and is gratefully acknowledged.
65 */
66 /*
67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * Copyright (c) 2005, WHEEL Sp. z o.o.
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions are
74 * met:
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78 * substantially similar to the "NO WARRANTY" disclaimer below
79 * ("Disclaimer") and any redistribution must be conditioned upon including
80 * a substantially similar Disclaimer requirement for further binary
81 * redistribution.
82 * 3. Neither the names of the above listed copyright holders nor the names
83 * of any contributors may be used to endorse or promote products derived
84 * from this software without specific prior written permission.
85 *
86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 */
98
99 #ifndef _MPT_H_
100 #define _MPT_H_
101
102 /********************************* OS Includes ********************************/
103 #include <sys/types.h>
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/endian.h>
107 #include <sys/eventhandler.h>
108 #if __FreeBSD_version < 500000
109 #include <sys/kernel.h>
110 #include <sys/queue.h>
111 #include <sys/malloc.h>
112 #else
113 #include <sys/lock.h>
114 #include <sys/kernel.h>
115 #include <sys/queue.h>
116 #include <sys/malloc.h>
117 #include <sys/mutex.h>
118 #include <sys/condvar.h>
119 #endif
120 #include <sys/proc.h>
121 #include <sys/bus.h>
122 #include <sys/module.h>
123
124 #include <machine/cpu.h>
125 #include <machine/resource.h>
126
127 #if __FreeBSD_version < 500000
128 #include <machine/bus.h>
129 #include <machine/clock.h>
130 #endif
131
132 #include <sys/rman.h>
133
134 #if __FreeBSD_version < 500000
135 #include <pci/pcireg.h>
136 #include <pci/pcivar.h>
137 #else
138 #include <dev/pci/pcireg.h>
139 #include <dev/pci/pcivar.h>
140 #endif
141
142 #include <machine/bus.h>
143 #include "opt_ddb.h"
144
145 /**************************** Register Definitions ****************************/
146 #include <dev/mpt/mpt_reg.h>
147
148 /******************************* MPI Definitions ******************************/
149 #include <dev/mpt/mpilib/mpi_type.h>
150 #include <dev/mpt/mpilib/mpi.h>
151 #include <dev/mpt/mpilib/mpi_cnfg.h>
152 #include <dev/mpt/mpilib/mpi_ioc.h>
153 #include <dev/mpt/mpilib/mpi_raid.h>
154
155 /* XXX For mpt_debug.c */
156 #include <dev/mpt/mpilib/mpi_init.h>
157
/*
 * Collapse an MPI 64-bit {Low,High} pair into a single scalar value.
 * FIX: the argument is now fully parenthesized so expressions such as
 * a pointer dereference (*p) may be passed safely; the original
 * expanded "y.High" unparenthesized.
 */
#define	MPT_S64_2_SCALAR(y)	((((int64_t)(y).High) << 32) | ((y).Low))
#define	MPT_U64_2_SCALAR(y)	((((uint64_t)(y).High) << 32) | ((y).Low))

/****************************** Misc Definitions ******************************/
/* #define MPT_TEST_MULTIPATH	1 */
#define	MPT_OK		(0)
#define	MPT_FAIL	(0x10000)

/*
 * Element count of an actual array object.
 * FIX: index form "(array)[0]" parenthesizes the argument; the original
 * "sizeof(*array)" mis-parsed for expression arguments.
 */
#define	NUM_ELEMENTS(array)	(sizeof(array) / sizeof((array)[0]))

/* Roles the controller may be configured for (bit 0 = initiator, 1 = target). */
#define	MPT_ROLE_NONE		0
#define	MPT_ROLE_INITIATOR	1
#define	MPT_ROLE_TARGET		2
#define	MPT_ROLE_BOTH		3
#define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR
173
/**************************** Forward Declarations ****************************/
struct mpt_softc;
struct mpt_personality;
typedef struct req_entry request_t;

/************************* Personality Module Support *************************/
/*
 * Function types for the handler vectors held in struct mpt_personality
 * (below).  A personality module supplies implementations for the
 * lifecycle stages it cares about; see mpt_personality for the slots.
 */
typedef int mpt_load_handler_t(struct mpt_personality *);
typedef int mpt_probe_handler_t(struct mpt_softc *);
typedef int mpt_attach_handler_t(struct mpt_softc *);
typedef int mpt_enable_handler_t(struct mpt_softc *);
typedef void mpt_ready_handler_t(struct mpt_softc *);
typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
				MSG_EVENT_NOTIFY_REPLY *);
typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
/* XXX Add return value and use for veto? */
typedef void mpt_shutdown_handler_t(struct mpt_softc *);
typedef void mpt_detach_handler_t(struct mpt_softc *);
typedef int mpt_unload_handler_t(struct mpt_personality *);
192
/*
 * A "personality" is a pluggable layer of driver behavior (core, CAM,
 * RAID, ...).  The handler members from 'load' through 'unload' form a
 * contiguous range bracketed by the FIRST/LAST_HANDLER macros below —
 * presumably walked as an array, so their order and adjacency must not
 * change (TODO confirm against the iteration site in mpt.c).
 */
struct mpt_personality
{
	const char		*name;		/* personality name */
	uint32_t		 id;		/* Assigned identifier. */
	u_int			 use_count;	/* Instances using personality*/
	mpt_load_handler_t	*load;		/* configure personality */
#define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
	mpt_probe_handler_t	*probe;		/* probe device instance */
	mpt_attach_handler_t	*attach;	/* initialize device instance */
	mpt_enable_handler_t	*enable;	/* enable device */
	mpt_ready_handler_t	*ready;		/* final open for business */
	mpt_event_handler_t	*event;		/* Handle MPI event. */
	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
	mpt_detach_handler_t	*detach;	/* release device instance */
	mpt_unload_handler_t	*unload;	/* Shutdown personality */
#define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
};
211
/* Module event hook shared by all personality modules. */
int mpt_modevent(module_t, int, void *);

/* Maximum supported number of personalities. */
#define MPT_MAX_PERSONALITIES	(15)

/* Declare a kld dependency between a personality and another module. */
#define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
	MODULE_DEPEND(name, dep, vmin, vpref, vmax)

/*
 * Boilerplate for registering a personality as a kernel module: expects
 * a 'name##_personality' object to exist and ties it to mpt_modevent.
 * Every personality depends on the mpt_core module.
 */
#define DECLARE_MPT_PERSONALITY(name, order)				  \
	static moduledata_t name##_mod = {				  \
		#name, mpt_modevent, &name##_personality		  \
	};								  \
	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
	MODULE_VERSION(name, 1);					  \
	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
227
/******************************* Bus DMA Support ******************************/
/* XXX Need to update bus_dmamap_sync to take a range argument. */
/*
 * Range-sync stub: the offset/len arguments are currently discarded and
 * the entire map is synced.
 */
#define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
	bus_dmamap_sync(dma_tag, dmamap, op)

/* bus_get_dma_tag() does not exist before FreeBSD 6.x; fall back to NULL. */
#if __FreeBSD_version < 600000
#define	bus_get_dma_tag(x)	NULL
#endif

/*
 * bus_dma_tag_create() grew lockfunc/lockfuncarg parameters at version
 * 501102; on such kernels pass busdma_lock_mutex with Giant, otherwise
 * issue the plain call.  The 'mpt' argument is unused in both variants.
 */
#if __FreeBSD_version >= 501102
#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   busdma_lock_mutex, &Giant,			\
			   dma_tagp)
#else
#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)					\
	bus_dma_tag_create(parent_tag, alignment, boundary,		\
			   lowaddr, highaddr, filter, filterarg,	\
			   maxsize, nsegments, maxsegsz, flags,		\
			   dma_tagp)
#endif

/* Callback argument for mpt_map_rquest(): records the load result. */
struct mpt_map_info {
	struct mpt_softc *mpt;		/* owning controller */
	int		  error;	/* busdma callback error, if any */
	uint32_t	  phys;		/* bus address of the mapped memory */
};

/* busdma load callback ("rquest" spelling is historical — keep for ABI). */
void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
/**************************** NewBUS Interrupt Crock **************************/
/*
 * Before version 700031, bus_setup_intr() took one fewer argument; the
 * fourth argument of mpt_setup_intr() is discarded on those kernels.
 * FIX: the parameter formerly spelled "if" (a C keyword) is renamed
 * "ifn" — macro arguments are positional, so callers are unaffected.
 */
#if __FreeBSD_version < 700031
#define	mpt_setup_intr(d, i, f, U, ifn, ifa, hp)	\
	bus_setup_intr(d, i, f, ifn, ifa, hp)
#else
#define	mpt_setup_intr	bus_setup_intr
#endif

/**************************** Kernel Thread Support ***************************/
/*
 * Newer kernels accept flags and stack-pages arguments to
 * kthread_create(); older ones do not, so those arguments are dropped.
 */
#if __FreeBSD_version > 500005
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
#else
#define	mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
	kthread_create(func, farg, proc_ptr, fmtstr, arg)
#endif

/****************************** Timer Facilities ******************************/
/*
 * FIX: the original definitions carried a trailing ';', which injected a
 * spurious empty statement at every expansion and broke use of
 * "mpt_callout_init(c);" as the sole body of an if/else.
 */
#if __FreeBSD_version > 500000
#define	mpt_callout_init(c)	callout_init(c, /*mpsafe*/0)
#else
#define	mpt_callout_init(c)	callout_init(c)
#endif
287
/********************************* Endianness *********************************/
/*
 * In-place conversion of a single structure field between little-endian
 * MPI wire format and host byte order.
 * FIX: "ptr" is now parenthesized, so address-of and cast expressions
 * (e.g. MPT_2_HOST32(&reply, Word)) expand correctly; the original
 * expanded "ptr->tag" unparenthesized.
 */
#define	MPT_2_HOST64(ptr, tag)	(ptr)->tag = le64toh((ptr)->tag)
#define	MPT_2_HOST32(ptr, tag)	(ptr)->tag = le32toh((ptr)->tag)
#define	MPT_2_HOST16(ptr, tag)	(ptr)->tag = le16toh((ptr)->tag)

#define	HOST_2_MPT64(ptr, tag)	(ptr)->tag = htole64((ptr)->tag)
#define	HOST_2_MPT32(ptr, tag)	(ptr)->tag = htole32((ptr)->tag)
#define	HOST_2_MPT16(ptr, tag)	(ptr)->tag = htole16((ptr)->tag)
296
/*
 * On big-endian hosts, MPI structures read from the (little-endian) IOC
 * must be byte-swapped field by field; these helpers live in mpt.c.
 * On little-endian hosts they compile away to nothing.
 */
#if _BYTE_ORDER == _BIG_ENDIAN
void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
#else
#define	mpt2host_sge_simple_union(x)		do { ; } while (0)
#define	mpt2host_iocfacts_reply(x)		do { ; } while (0)
#define	mpt2host_portfacts_reply(x)		do { ; } while (0)
#define	mpt2host_config_page_ioc2(x)		do { ; } while (0)
#define	mpt2host_config_page_raid_vol_0(x)	do { ; } while (0)
#define	mpt2host_mpi_raid_vol_indicator(x)	do { ; } while (0)
#endif
312
/**************************** MPI Transaction State ***************************/
/*
 * Life-cycle state of a request_t.  Values are bit flags, so a request
 * may be in more than one state at once (e.g. QUEUED|TIMEDOUT while a
 * timed-out command is still outstanding on the IOC).
 */
typedef enum {
	REQ_STATE_NIL		= 0x00,
	REQ_STATE_FREE		= 0x01,
	REQ_STATE_ALLOCATED	= 0x02,
	REQ_STATE_QUEUED	= 0x04,
	REQ_STATE_DONE		= 0x08,
	REQ_STATE_TIMEDOUT	= 0x10,
	REQ_STATE_NEED_WAKEUP	= 0x20,
	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
	REQ_STATE_MASK		= 0xFF
} mpt_req_state_t;
325
/*
 * Driver-side bookkeeping for one IOC request frame.  Each entry pairs
 * a KVA/bus-address view of the request memory with its DMA map and the
 * CAM ccb (if any) it is serving.
 */
struct req_entry {
	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
	mpt_req_state_t	state;		/* Request State Information */
	uint16_t	index;		/* Index of this entry */
	uint16_t	IOCStatus;	/* Completion status */
	uint16_t	ResponseCode;	/* TMF Response Code */
	uint16_t	serno;		/* serial number */
	union ccb      *ccb;		/* CAM request */
	void	       *req_vbuf;	/* Virtual Address of Entry */
	void	       *sense_vbuf;	/* Virtual Address of sense data */
	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
	bus_dmamap_t	dmap;		/* DMA map for data buffers */
	struct req_entry *chain;	/* for SGE overallocations */
};
341
/**************************** MPI Target State Info ***************************/

/*
 * Per-command target-mode state.  Stored at the tail of a target
 * command's request buffer (see MPT_TGT_STATE below).
 */
typedef struct {
	uint32_t reply_desc;	/* current reply descriptor */
	uint32_t resid;		/* current data residual */
	uint32_t bytes_xfered;	/* current relative offset */
	union ccb *ccb;		/* pointer to currently active ccb */
	request_t *req;		/* pointer to currently active assist request */
	uint32_t
		is_local : 1,	/* command was generated internally */
		nxfers	 : 31;	/* number of data transfers done so far */
	uint32_t tag_id;	/* tag constructed by MPT_MAKE_TAGID */
	enum {
		TGT_STATE_NIL,
		TGT_STATE_LOADING,
		TGT_STATE_LOADED,
		TGT_STATE_IN_CAM,
		TGT_STATE_SETTING_UP_FOR_DATA,
		TGT_STATE_MOVING_DATA,
		TGT_STATE_MOVING_DATA_AND_STATUS,
		TGT_STATE_SENDING_STATUS
	} state;
} mpt_tgt_state_t;
365
/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 *
 * Tag layout: bits 31..18 = IoIndex, 17..12 = rolling sequence number,
 * 11..0 = request index.
 * FIX: all macro arguments are now parenthesized so expression arguments
 * expand correctly (CERT PRE01-C); the original expanded them bare.
 */
#define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
 (((ioindex) << 18) | ((((mpt)->sequence++) & 0x3f) << 12) | ((req)->index & 0xfff))

#ifdef	INVARIANTS
#define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
#else
/* Recover the owning target command from the IoIndex field of a tag. */
#define	MPT_TAG_2_REQ(mpt, tag)		(mpt)->tgt_cmd_ptrs[(tag) >> 18]
#endif

/* Target-mode state lives in the last bytes of the request buffer. */
#define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
    (&((uint8_t *)(req)->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
387
STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
#define	MPT_MAX_LUNS	256
/*
 * Per-LUN target-mode resources: queues of ATIO and immediate-notify
 * ccbs posted by the peripheral, plus an "enabled" flag.
 */
typedef struct {
	struct mpt_hdr_stailq	atios;		/* waiting ATIO ccbs */
	struct mpt_hdr_stailq	inots;		/* waiting INOT ccbs */
	int enabled;				/* LUN is enabled for tgt mode */
} tgt_resource_t;
#define	MPT_MAX_ELS	64
396
397 /**************************** Handler Registration ****************************/
398 /*
399 * Global table of registered reply handlers. The
400 * handler is indicated by byte 3 of the request
401 * index submitted to the IOC. This allows the
402 * driver core to perform generic processing without
403 * any knowledge of per-personality behavior.
404 *
405 * MPT_NUM_REPLY_HANDLERS must be a power of 2
406 * to allow the easy generation of a mask.
407 *
408 * The handler offsets used by the core are hard coded
409 * allowing faster code generation when assigning a handler
 410  * to a request. All "personalities" must use
 411  * the handler registration mechanism.
412 *
413 * The IOC handlers that are rarely executed are placed
414 * at the tail of the table to make it more likely that
415 * all commonly executed handlers fit in a single cache
416 * line.
417 */
#define MPT_NUM_REPLY_HANDLERS		(32)
/* Hard-coded handler ids used by the driver core (see comment above). */
#define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
#define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
#define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
/*
 * Reply handler signature.  Returns nonzero when the reply frame should
 * be freed by the caller — TODO confirm exact return convention in mpt.c.
 */
typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
/* Union so other handler flavors can share the registration API. */
typedef union {
	mpt_reply_handler_t	*reply_handler;
} mpt_handler_t;

typedef enum {
	MPT_HANDLER_REPLY,
	MPT_HANDLER_EVENT,
	MPT_HANDLER_RESET,
	MPT_HANDLER_SHUTDOWN
} mpt_handler_type;

struct mpt_handler_record
{
	LIST_ENTRY(mpt_handler_record)	links;
	mpt_handler_t			handler;
};

LIST_HEAD(mpt_handler_list, mpt_handler_record);

/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler. Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
#define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
			 mpt_handler_t, uint32_t *);
int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
			   mpt_handler_t, uint32_t);
457
/******************* Per-Controller Instance Data Structures ******************/
TAILQ_HEAD(req_queue, req_entry);

/*
 * Structure for saving proper values for modifiable PCI config registers
 * (presumably saved before and restored after a chip reset — confirm
 * against the save/restore code in mpt_pci.c).
 */
struct mpt_pci_cfg {
	uint16_t Command;
	uint16_t LatencyTimer_LineSize;
	uint32_t IO_BAR;
	uint32_t Mem0_BAR[2];
	uint32_t Mem1_BAR[2];
	uint32_t ROM_BAR;
	uint8_t  IntLine;
	uint32_t PMCSR;
};
472
/* Discovery/announcement state flags for a RAID volume. */
typedef enum {
	MPT_RVF_NONE		= 0x0,
	MPT_RVF_ACTIVE		= 0x1,
	MPT_RVF_ANNOUNCED	= 0x2,
	MPT_RVF_UP2DATE		= 0x4,
	MPT_RVF_REFERENCED	= 0x8,
	MPT_RVF_WCE_CHANGED	= 0x10
} mpt_raid_volume_flags;

/* Driver-side view of one IOC RAID volume. */
struct mpt_raid_volume {
	CONFIG_PAGE_RAID_VOL_0	*config_page;	/* cached config page */
	MPI_RAID_VOL_INDICATOR	 sync_progress;	/* resync progress snapshot */
	mpt_raid_volume_flags	 flags;
	u_int			 quiesced_disks;
};

/* Discovery/quiesce state flags for a RAID member disk. */
typedef enum {
	MPT_RDF_NONE		= 0x00,
	MPT_RDF_ACTIVE		= 0x01,
	MPT_RDF_ANNOUNCED	= 0x02,
	MPT_RDF_UP2DATE		= 0x04,
	MPT_RDF_REFERENCED	= 0x08,
	MPT_RDF_QUIESCING	= 0x10,
	MPT_RDF_QUIESCED	= 0x20
} mpt_raid_disk_flags;

/* Driver-side view of one RAID physical disk. */
struct mpt_raid_disk {
	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
	struct mpt_raid_volume	       *volume;		/* owning volume */
	u_int				member_number;
	u_int				pass_thru_active;
	mpt_raid_disk_flags		flags;
};

/* Deferred copy of an event-notify reply awaiting an ack (see ack_frames). */
struct mpt_evtf_record {
	MSG_EVENT_NOTIFY_REPLY		reply;
	uint32_t			context;
	LIST_ENTRY(mpt_evtf_record)	links;
};

LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
514
/*
 * Per-controller instance state.  One of these exists for each MPT
 * device attached; personalities hang their state off of it via the
 * fields below.
 */
struct mpt_softc {
	device_t		dev;		/* NewBus device handle */
#if __FreeBSD_version < 500000
	uint32_t		mpt_islocked;	/* spl "lock" nesting count */
	int			mpt_splsaved;	/* spl saved by mpt_lockspl */
#else
	struct mtx		mpt_lock;
	int			mpt_locksetup;	/* lock-held debug flag */
#endif
	uint32_t		mpt_pers_mask;	/* attached personality ids */
	uint32_t
				: 8,
		unit		: 8,
		ready		: 1,	/* open for business */
		fw_uploaded	: 1,	/* firmware image was uploaded */
		msi_enable	: 1,
		twildcard	: 1,	/* wildcard lun enabled (tgt mode) */
		tenabled	: 1,	/* target mode enabled */
		do_cfg_role	: 1,	/* apply cfg_role at attach */
		raid_enabled	: 1,
		raid_mwce_set	: 1,
		getreqwaiter	: 1,	/* someone waits for a free request */
		shutdwn_raid	: 1,	/* RAID thread should exit */
		shutdwn_recovery: 1,	/* recovery thread should exit */
		outofbeer	: 1,	/* CAM queue frozen: no requests */
		disabled	: 1,
		is_spi		: 1,	/* controller type: parallel SCSI */
		is_sas		: 1,	/* controller type: SAS */
		is_fc		: 1;	/* controller type: Fibre Channel */

	u_int			cfg_role;	/* role requested via config */
	u_int			role;	/* role: none, ini, target, both */

	u_int			verbose;	/* MPT_PRT_* debug level */
#ifdef	MPT_TEST_MULTIPATH
	int			failure_id;
#endif

	/*
	 * IOC Facts
	 */
	MSG_IOC_FACTS_REPLY	ioc_facts;

	/*
	 * Port Facts
	 */
	MSG_PORT_FACTS_REPLY *	port_facts;
#define	mpt_ini_id	port_facts[0].PortSCSIID
#define	mpt_max_tgtcmds	port_facts[0].MaxPostedCmdBuffers

	/*
	 * Device Configuration Information: SPI and FC controllers keep
	 * different cached config pages, overlaid in a union.
	 */
	union {
		struct mpt_spi_cfg {
			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
			uint16_t			_tag_enable;
			uint16_t			_disc_enable;
		} spi;
#define	mpt_port_page0		cfg.spi._port_page0
#define	mpt_port_page1		cfg.spi._port_page1
#define	mpt_port_page2		cfg.spi._port_page2
#define	mpt_dev_page0		cfg.spi._dev_page0
#define	mpt_dev_page1		cfg.spi._dev_page1
#define	mpt_tag_enable		cfg.spi._tag_enable
#define	mpt_disc_enable		cfg.spi._disc_enable
		struct mpi_fc_cfg {
			CONFIG_PAGE_FC_PORT_0 _port_page0;
			uint32_t _port_speed;
#define	mpt_fcport_page0	cfg.fc._port_page0
#define	mpt_fcport_speed	cfg.fc._port_speed
		} fc;
	} cfg;
#if __FreeBSD_version >= 500000
	/*
	 * Device config information stored up for sysctl to access
	 */
	union {
		struct {
			unsigned int initiator_id;
		} spi;
		struct {
			char wwnn[19];	/* world-wide node name (string) */
			char wwpn[19];	/* world-wide port name (string) */
		} fc;
	} scinfo;
#endif

	/* Controller Info for RAID information */
	CONFIG_PAGE_IOC_2 *	ioc_page2;
	CONFIG_PAGE_IOC_3 *	ioc_page3;

	/* Raid Data */
	struct mpt_raid_volume* raid_volumes;
	struct mpt_raid_disk*	raid_disks;
	u_int			raid_max_volumes;
	u_int			raid_max_disks;
	u_int			raid_page0_len;
	u_int			raid_wakeup;
	u_int			raid_rescan;
	u_int			raid_resync_rate;
	u_int			raid_mwce_setting;
	u_int			raid_queue_depth;
	u_int			raid_nonopt_volumes;
	struct proc	       *raid_thread;
	struct callout		raid_timer;

	/*
	 * PCI Hardware info
	 */
	int			pci_msi_count;
	struct resource *	pci_irq;	/* Interrupt map for chip */
	void *			ih;		/* Interrupt handle */
	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */

	/*
	 * DMA Mapping Stuff
	 */
	struct resource *	pci_reg;	/* Register map for chip */
	int			pci_mem_rid;	/* Resource ID */
	bus_space_tag_t		pci_st;		/* Bus tag for registers */
	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
	/* PIO versions of above. */
	int			pci_pio_rid;
	struct resource *	pci_pio_reg;
	bus_space_tag_t		pci_pio_st;
	bus_space_handle_t	pci_pio_sh;

	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
	uint8_t		       *reply;		/* KVA of reply memory */
	bus_addr_t		reply_phys;	/* BusAddr of reply memory */

	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
	uint8_t		       *request;	/* KVA of Request memory */
	bus_addr_t		request_phys;	/* BusAddr of request memory */

	uint32_t		max_seg_cnt;	/* calculated after IOC facts */

	/*
	 * Hardware management
	 */
	u_int			reset_cnt;

	/*
	 * CAM && Software Management
	 */
	request_t	       *request_pool;
	struct req_queue	request_free_list;
	struct req_queue	request_pending_list;
	struct req_queue	request_timeout_list;


	struct cam_sim	       *sim;
	struct cam_path	       *path;

	struct cam_sim	       *phydisk_sim;	/* RAID pass-through bus */
	struct cam_path	       *phydisk_path;

	struct proc	       *recovery_thread;
	request_t	       *tmf_req;	/* dedicated TMF request */

	/*
	 * Deferred frame acks due to resource shortage.
	 */
	struct mpt_evtf_list	ack_frames;

	/*
	 * Target Mode Support
	 */
	uint32_t		scsi_tgt_handler_id;
	request_t **		tgt_cmd_ptrs;	/* indexed by IoIndex */
	request_t **		els_cmd_ptrs;	/* FC only */

	/*
	 * *snork*- this is chosen to be here *just in case* somebody
	 * forgets to point to it exactly and we index off of trt with
	 * CAM_LUN_WILDCARD.
	 */
	tgt_resource_t		trt_wildcard;	/* wildcard luns */
	tgt_resource_t		trt[MPT_MAX_LUNS];
	uint16_t		tgt_cmds_allocated;
	uint16_t		els_cmds_allocated;	/* FC only */

	uint16_t		timeouts;	/* timeout count */
	uint16_t		success;	/* successes after timeout */
	uint16_t		sequence;	/* Sequence Number */
	uint16_t		pad3;


	/* Paired port in some dual adapters configurations */
	struct mpt_softc *	mpt2;

	/* FW Image management */
	uint32_t		fw_image_size;
	uint8_t		       *fw_image;
	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
	bus_addr_t		fw_phys;	/* BusAddr of firmware image */

	/* Shutdown Event Handler. */
	eventhandler_tag	eh;

	TAILQ_ENTRY(mpt_softc)	links;		/* global softc list linkage */
};
727
728 static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
729
730 static __inline void
731 mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
732 {
733 if ((req->serno = mpt->sequence++) == 0) {
734 req->serno = mpt->sequence++;
735 }
736 }
737
/***************************** Locking Primitives *****************************/
#if __FreeBSD_version < 500000
/*
 * Pre-5.x kernels have no mutexes: emulate a driver lock with a counted
 * splcam()/splx() pair whose saved spl lives in the softc.
 */
#define	MPT_IFLAGS		INTR_TYPE_CAM
#define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
#define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
#define	MPT_OWNED(mpt)		mpt->mpt_islocked
#define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
#define	CAMLOCK_2_MPTLOCK	MPT_LOCK
#define	MPT_LOCK_SETUP(mpt)
#define	MPT_LOCK_DESTROY(mpt)

static __inline void mpt_lockspl(struct mpt_softc *mpt);
static __inline void mpt_unlockspl(struct mpt_softc *mpt);

/* Raise to splcam() and mark the softc locked; recursion is fatal. */
static __inline void
mpt_lockspl(struct mpt_softc *mpt)
{
	int s;

	s = splcam();
	if (mpt->mpt_islocked++ == 0) {
		mpt->mpt_splsaved = s;
	} else {
		splx(s);
		panic("Recursed lock with mask: 0x%x\n", s);
	}
}

/* Release the "lock", restoring the spl saved by mpt_lockspl(). */
static __inline void
mpt_unlockspl(struct mpt_softc *mpt)
{
	if (mpt->mpt_islocked) {
		if (--mpt->mpt_islocked == 0) {
			splx(mpt->mpt_splsaved);
		}
	} else
		panic("Negative lock count\n");
}

/*
 * Sleep with the spl "lock" dropped (tsleep lowers spl anyway), then
 * restore the saved lock count and spl on wakeup.
 */
static __inline int
mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
	   const char *wmesg, int timo)
{
	int saved_cnt;
	int saved_spl;
	int error;

	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
	saved_cnt = mpt->mpt_islocked;
	saved_spl = mpt->mpt_splsaved;
	mpt->mpt_islocked = 0;
	error = tsleep(ident, priority, wmesg, timo);
	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
	mpt->mpt_islocked = saved_cnt;
	mpt->mpt_splsaved = saved_spl;
	return (error);
}

#else
#ifdef	LOCKING_WORKED_AS_IT_SHOULD
#error "Shouldn't Be Here!"
/*
 * The "real" mutex-based implementation, deliberately disabled (note
 * the #error above) — apparently because the driver still runs under
 * Giant.  Kept as a reference for when MPSAFE operation is enabled.
 */
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
#define	MPT_LOCK_SETUP(mpt)						\
		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
		mpt->mpt_locksetup = 1
#define	MPT_LOCK_DESTROY(mpt)						\
	if (mpt->mpt_locksetup) {					\
		mtx_destroy(&mpt->mpt_lock);				\
		mpt->mpt_locksetup = 0;					\
	}

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPTLOCK_2_CAMLOCK(mpt)	\
	mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
#define	CAMLOCK_2_MPTLOCK(mpt)	\
	mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
#define	mpt_sleep(mpt, ident, priority, wmesg, timo)	\
	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)

#else

/*
 * Giant-locked fallback actually in use: mpt_locksetup serves as a
 * plain "owned" flag so that recursion and unbalanced unlocks trip
 * KASSERT in INVARIANTS kernels.
 */
#define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
#define	MPT_LOCK_SETUP(mpt)	do { } while (0)
#define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
#if	0
/* Chatty variants that log every lock transition to the console. */
#define	MPT_LOCK(mpt)		\
	device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__);	\
	KASSERT(mpt->mpt_locksetup == 0,				\
	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 1
#define	MPT_UNLOCK(mpt)		\
	device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__);	\
	KASSERT(mpt->mpt_locksetup == 1,				\
	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 0
#else
#define	MPT_LOCK(mpt)		\
	KASSERT(mpt->mpt_locksetup == 0,				\
	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 1
#define	MPT_UNLOCK(mpt)		\
	KASSERT(mpt->mpt_locksetup == 1,				\
	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
	mpt->mpt_locksetup = 0
#endif
#define	MPT_OWNED(mpt)		mpt->mpt_locksetup
#define	MPTLOCK_2_CAMLOCK(mpt)	MPT_UNLOCK(mpt)
#define	CAMLOCK_2_MPTLOCK(mpt)	MPT_LOCK(mpt)

static __inline int
mpt_sleep(struct mpt_softc *, void *, int, const char *, int);

/* Drop the (pseudo) driver lock around tsleep() and retake it after. */
static __inline int
mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
{
	int r;
	MPT_UNLOCK(mpt);
	r = tsleep(i, p, w, t);
	MPT_LOCK(mpt);
	return (r);
}
#endif
#endif
863
/******************************* Register Access ******************************/
static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_read(struct mpt_softc *, int);
static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);

/* Write a 32-bit chip register through the pci_st/pci_sh mapping. */
static __inline void
mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
}

/* Read a 32-bit chip register through the pci_st/pci_sh mapping. */
static __inline uint32_t
mpt_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
}

/*
 * Some operations (e.g. diagnostic register writes while the ARM processor
 * is disabled), must be performed using "PCI pio" operations.  On non-PCI
 * busses, these operations likely map to normal register accesses.
 */
static __inline void
mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
{
	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
}

/* PIO-space counterpart of mpt_read(). */
static __inline uint32_t
mpt_pio_read(struct mpt_softc *mpt, int offset)
{
	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
}
898 /*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define MPT_REPLY_SIZE		256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define MPT_MAX_REQUESTS(mpt)	512
#define MPT_REQUEST_AREA	512
#define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
#define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)

/*
 * A request context handle packs a reply-handler callback index (CBI)
 * into the bits above MPT_CONTEXT_CB_SHIFT and a request index into the
 * low 16 bits.  'handle' is now parenthesized in MPT_CBI so that
 * expression arguments bind correctly (macro hygiene fix).
 */
#define MPT_CONTEXT_CB_SHIFT	(16)
#define MPT_CBI(handle)		((handle) >> MPT_CONTEXT_CB_SHIFT)
#define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
#define MPT_CONTEXT_TO_CBI(x)	\
    (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define MPT_CONTEXT_REQI_MASK	0xFFFF
#define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
917
/*
 * Convert a 32bit physical address returned from IOC to an
 * offset into our reply frame memory or the kvm address needed
 * to access the data. The returned address is only the low
 * 32 bits, so mask our base physical address accordingly.
 * The macro arguments are parenthesized so expression arguments
 * bind correctly (macro hygiene fix: '(x << 1)' mis-parsed for
 * arguments containing lower-precedence operators such as '&').
 */
#define MPT_REPLY_BADDR(x)		\
	((x) << 1)
#define MPT_REPLY_OTOV(m, i)		\
	((void *)(&(m)->reply[i]))

/* Dump a reply frame, but only at high verbosity settings. */
#define MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
do {							\
	if (mpt->verbose > MPT_PRT_DEBUG)		\
		mpt_dump_reply_frame(mpt, reply_frame);	\
} while(0)
934
935 static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
936 static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
937
/*
 * Give the reply buffer back to the IOC after we have
 * finished processing it.  Writing the frame's bus address to the
 * reply-queue register returns ownership of the frame to the IOC.
 */
static __inline void
mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
{
	mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
}
947
/*
 * Get a reply from the IOC by reading the reply-queue register.
 * NOTE(review): per MPI convention a read of 0xFFFFFFFF presumably
 * means "queue empty" — confirm against the interrupt handler's use.
 */
static __inline uint32_t
mpt_pop_reply_queue(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
}
954
955 void
956 mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
957
/************************** Scatter Gather Management *************************/
/*
 * MPT_RQSL- size of request frame, in bytes.  RequestFrameSize is
 * reported by the IOC in 32-bit words, hence the << 2.  'mpt' is
 * parenthesized for macro hygiene.
 */
#define MPT_RQSL(mpt)		((mpt)->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define MPT_NSGL_FIRST(mpt)	\
    (((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + \
    sizeof (SGE_IO_UNION)) / sizeof (SGE_IO_UNION)))
978
979 /***************************** IOC Initialization *****************************/
980 int mpt_reset(struct mpt_softc *, int /*reinit*/);
981
982 /****************************** Debugging ************************************/
/*
 * Table entry consumed by mpt_decode_value() when pretty-printing
 * register contents.  NOTE(review): exact matching semantics live in
 * mpt_decode_value() — verify against mpt_debug.c.
 */
typedef struct mpt_decode_entry {
	char *name;	/* label printed for a matching field */
	u_int value;	/* value of the masked field to match */
	u_int mask;	/* which bits of the word this entry covers */
} mpt_decode_entry_t;
988
989 int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
990 const char *name, u_int value, u_int *cur_column,
991 u_int wrap_point);
992
993 void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
994 void mpt_dump_request(struct mpt_softc *, request_t *);
995
/*
 * Driver message verbosity levels, ordered from least to most verbose.
 * A message is emitted when its level is <= mpt->verbose (see the
 * mpt_lprt() macro and MPT_DUMP_REPLY_FRAME above).
 */
enum {
	MPT_PRT_ALWAYS,		/* lowest level: always printed */
	MPT_PRT_FATAL,
	MPT_PRT_ERROR,
	MPT_PRT_WARN,
	MPT_PRT_INFO,
	MPT_PRT_NEGOTIATION,	/* transfer negotiation chatter */
	MPT_PRT_DEBUG,
	MPT_PRT_DEBUG1,
	MPT_PRT_DEBUG2,
	MPT_PRT_DEBUG3,
	MPT_PRT_TRACE,		/* most verbose ordinary level */
	MPT_PRT_NONE=100	/* out-of-band: presumably suppresses output — verify */
};
1010
1011 #if __FreeBSD_version > 500000
1012 #define mpt_lprt(mpt, level, ...) \
1013 do { \
1014 if (level <= (mpt)->verbose) \
1015 mpt_prt(mpt, __VA_ARGS__); \
1016 } while (0)
1017
1018 #define mpt_lprtc(mpt, level, ...) \
1019 do { \
1020 if (level <= (mpt)->debug_level) \
1021 mpt_prtc(mpt, __VA_ARGS__); \
1022 } while (0)
1023 #else
1024 void mpt_lprt(struct mpt_softc *, int, const char *, ...)
1025 __printflike(3, 4);
1026 void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
1027 __printflike(3, 4);
1028 #endif
1029 void mpt_prt(struct mpt_softc *, const char *, ...)
1030 __printflike(2, 3);
1031 void mpt_prtc(struct mpt_softc *, const char *, ...)
1032 __printflike(2, 3);
1033
1034 /**************************** Target Mode Related ***************************/
static __inline int mpt_cdblen(uint8_t, int);
/*
 * Return the CDB length, in bytes, implied by the command group code
 * carried in the top three bits of the first CDB byte: group 0 -> 6,
 * group 1 -> 10, groups 4 and 5 -> 12, everything else -> 16.
 * 'maxlen' is accepted for interface compatibility but not consulted.
 */
static __inline int
mpt_cdblen(uint8_t cdb0, int maxlen)
{
	static const int cdb_sizes[8] = {
		6,	/* group 0 */
		10,	/* group 1 */
		16,	/* group 2 */
		16,	/* group 3 */
		12,	/* group 4 */
		12,	/* group 5 */
		16,	/* group 6 */
		16	/* group 7 */
	};

	return (cdb_sizes[cdb0 >> 5]);
}
1052 #ifdef INVARIANTS
static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
/*
 * Translate an IOC tag into the target-command request that owns it.
 * Bits 18 and up of the tag index the tgt_cmd_ptrs[] backpointer
 * array; INVARIANTS builds sanity-check both the index and the array
 * before dereferencing.
 */
static __inline request_t *
mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
{
	uint16_t rtg = (tag >> 18);	/* request index lives in tag[31:18] */
	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
	return (mpt->tgt_cmd_ptrs[rtg]);
}
1063
1064
1065 static __inline int
1066 mpt_req_on_free_list(struct mpt_softc *, request_t *);
1067 static __inline int
1068 mpt_req_on_pending_list(struct mpt_softc *, request_t *);
1069
1070 static __inline void
1071 mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1072 static __inline void
1073 mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1074
1075
1076 /*
1077 * Is request on freelist?
1078 */
1079 static __inline int
1080 mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
1081 {
1082 request_t *lrq;
1083
1084 TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
1085 if (lrq == req) {
1086 return (1);
1087 }
1088 }
1089 return (0);
1090 }
1091
1092 /*
1093 * Is request on pending list?
1094 */
1095 static __inline int
1096 mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1097 {
1098 request_t *lrq;
1099
1100 TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1101 if (lrq == req) {
1102 return (1);
1103 }
1104 }
1105 return (0);
1106 }
1107
1108 /*
1109 * Make sure that req *is* part of one of the special lists
1110 */
1111 static __inline void
1112 mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1113 {
1114 int i;
1115 for (i = 0; i < mpt->els_cmds_allocated; i++) {
1116 if (req == mpt->els_cmd_ptrs[i]) {
1117 return;
1118 }
1119 }
1120 for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1121 if (req == mpt->tgt_cmd_ptrs[i]) {
1122 return;
1123 }
1124 }
1125 panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
1126 s, line, req, req->serno,
1127 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1128 }
1129
/*
 * Make sure that req is *not* part of one of the special lists.
 * Walks both the ELS and target-command backpointer arrays and
 * KASSERTs that 'req' appears in neither; 's' and 'line' identify
 * the call site for the assertion message.
 */
static __inline void
mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
{
	int i;
	for (i = 0; i < mpt->els_cmds_allocated; i++) {
		KASSERT(req != mpt->els_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
		KASSERT(req != mpt->tgt_cmd_ptrs[i],
		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
		    s, line, req, req->serno,
		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
	}
}
1150 #endif
1151
/*
 * Task Management Types, purely for internal consumption.
 * The base values (1234/5678) are out-of-band, presumably to avoid
 * accidental confusion with MPI protocol constants — verify.
 */
typedef enum {
	MPT_ABORT_TASK_SET=1234,
	MPT_CLEAR_TASK_SET,
	MPT_TARGET_RESET,
	MPT_CLEAR_ACA,
	MPT_TERMINATE_TASK,
	MPT_NIL_TMT_VALUE=5678	/* "no task management action" sentinel */
} mpt_task_mgmt_t;
1163
1164 /**************************** Unclassified Routines ***************************/
1165 void mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
1166 int mpt_recv_handshake_reply(struct mpt_softc *mpt,
1167 size_t reply_len, void *reply);
1168 int mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1169 mpt_req_state_t state, mpt_req_state_t mask,
1170 int sleep_ok, int time_ms);
1171 void mpt_enable_ints(struct mpt_softc *mpt);
1172 void mpt_disable_ints(struct mpt_softc *mpt);
1173 int mpt_attach(struct mpt_softc *mpt);
1174 int mpt_shutdown(struct mpt_softc *mpt);
1175 int mpt_detach(struct mpt_softc *mpt);
1176 int mpt_send_handshake_cmd(struct mpt_softc *mpt,
1177 size_t len, void *cmd);
1178 request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1179 void mpt_free_request(struct mpt_softc *mpt, request_t *req);
1180 void mpt_intr(void *arg);
1181 void mpt_check_doorbell(struct mpt_softc *mpt);
1182 void mpt_dump_reply_frame(struct mpt_softc *mpt,
1183 MSG_DEFAULT_REPLY *reply_frame);
1184
1185 void mpt_set_config_regs(struct mpt_softc *);
1186 int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1187 u_int /*Action*/, u_int /*PageVersion*/,
1188 u_int /*PageLength*/, u_int /*PageNumber*/,
1189 u_int /*PageType*/, uint32_t /*PageAddress*/,
1190 bus_addr_t /*addr*/, bus_size_t/*len*/,
1191 int /*sleep_ok*/, int /*timeout_ms*/);
1192 int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1193 int /*PageNumber*/,
1194 uint32_t /*PageAddress*/,
1195 CONFIG_PAGE_HEADER *,
1196 int /*sleep_ok*/, int /*timeout_ms*/);
1197 int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1198 uint32_t /*PageAddress*/,
1199 CONFIG_PAGE_HEADER *, size_t /*len*/,
1200 int /*sleep_ok*/, int /*timeout_ms*/);
1201 int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1202 uint32_t /*PageAddress*/,
1203 CONFIG_PAGE_HEADER *, size_t /*len*/,
1204 int /*sleep_ok*/, int /*timeout_ms*/);
/*
 * Convenience wrapper: read the CURRENT values of a configuration
 * page.  See mpt_read_cfg_page() for parameter semantics and the
 * returned error code.
 */
static __inline int
mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		      CONFIG_PAGE_HEADER *hdr, size_t len,
		      int sleep_ok, int timeout_ms)
{
	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				  PageAddress, hdr, len, sleep_ok, timeout_ms));
}
1213
/*
 * Convenience wrapper: write the CURRENT values of a configuration
 * page.  See mpt_write_cfg_page() for parameter semantics and the
 * returned error code.
 */
static __inline int
mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		       int timeout_ms)
{
	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
				   PageAddress, hdr, len, sleep_ok,
				   timeout_ms));
}
1223 /* mpt_debug.c functions */
1224 void mpt_print_reply(void *vmsg);
1225 void mpt_print_db(uint32_t mb);
1226 void mpt_print_config_reply(void *vmsg);
1227 char *mpt_ioc_diag(uint32_t diag);
1228 void mpt_req_state(mpt_req_state_t state);
1229 void mpt_print_config_request(void *vmsg);
1230 void mpt_print_request(void *vmsg);
1231 void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
1232 void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1233 #endif /* _MPT_H_ */
Cache object: 552d84ffcf376874c9f44b2d7ab54e3b
|