FreeBSD/Linux Kernel Cross Reference
sys/dev/ahci/ahci.c
1 /*-
2 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/module.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/endian.h>
37 #include <sys/malloc.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
41 #include <machine/stdarg.h>
42 #include <machine/resource.h>
43 #include <machine/bus.h>
44 #include <sys/rman.h>
45 #include "ahci.h"
46
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include <cam/cam_sim.h>
50 #include <cam/cam_xpt_sim.h>
51 #include <cam/cam_debug.h>
52
53 /* local prototypes */
54 static void ahci_intr(void *data);
55 static void ahci_intr_one(void *data);
56 static void ahci_intr_one_edge(void *data);
57 static int ahci_ch_init(device_t dev);
58 static int ahci_ch_deinit(device_t dev);
59 static int ahci_ch_suspend(device_t dev);
60 static int ahci_ch_resume(device_t dev);
61 static void ahci_ch_pm(void *arg);
62 static void ahci_ch_intr(void *arg);
63 static void ahci_ch_intr_direct(void *arg);
64 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
65 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb);
66 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
67 static void ahci_execute_transaction(struct ahci_slot *slot);
68 static void ahci_timeout(struct ahci_slot *slot);
69 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et);
70 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag);
71 static void ahci_dmainit(device_t dev);
72 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
73 static void ahci_dmafini(device_t dev);
74 static void ahci_slotsalloc(device_t dev);
75 static void ahci_slotsfree(device_t dev);
76 static void ahci_reset(struct ahci_channel *ch);
77 static void ahci_start(struct ahci_channel *ch, int fbs);
78 static void ahci_stop(struct ahci_channel *ch);
79 static void ahci_clo(struct ahci_channel *ch);
80 static void ahci_start_fr(struct ahci_channel *ch);
81 static void ahci_stop_fr(struct ahci_channel *ch);
82 static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr);
83 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val);
84
85 static int ahci_sata_connect(struct ahci_channel *ch);
86 static int ahci_sata_phy_reset(struct ahci_channel *ch);
87 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);
88
89 static void ahci_issue_recovery(struct ahci_channel *ch);
90 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
91 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);
92
93 static void ahciaction(struct cam_sim *sim, union ccb *ccb);
94 static void ahcipoll(struct cam_sim *sim);
95
96 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");
97
98 #define recovery_type spriv_field0
99 #define RECOVERY_NONE 0
100 #define RECOVERY_READ_LOG 1
101 #define RECOVERY_REQUEST_SENSE 2
102 #define recovery_slot spriv_field1
103
104 static uint32_t
105 ahci_ch_detval(struct ahci_channel *ch, uint32_t val)
106 {
107
108 return ch->disablephy ? ATA_SC_DET_DISABLE : val;
109 }
110
111 int
112 ahci_ctlr_setup(device_t dev)
113 {
114 struct ahci_controller *ctlr = device_get_softc(dev);
115 /* Clear interrupts */
116 ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
117 /* Configure CCC */
118 if (ctlr->ccc) {
119 ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
120 ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
121 (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
122 (4 << AHCI_CCCC_CC_SHIFT) |
123 AHCI_CCCC_EN);
124 ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
125 AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
126 if (bootverbose) {
127 device_printf(dev,
128 "CCC with %dms/4cmd enabled on vector %d\n",
129 ctlr->ccc, ctlr->cccv);
130 }
131 }
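	/*
	 * Roughly, the CCC programming above asks the HBA to coalesce
	 * completions and raise one interrupt after either ctlr->ccc
	 * milliseconds or 4 completed commands, whichever comes first.  The
	 * INT field read back into ctlr->cccv names the IS bit used for the
	 * coalesced interrupt; ahci_setup_interrupt() and ahci_intr() treat
	 * that vector specially.
	 */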
132 /* Enable AHCI interrupts */
133 ATA_OUTL(ctlr->r_mem, AHCI_GHC,
134 ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
135 return (0);
136 }
137
138 int
139 ahci_ctlr_reset(device_t dev)
140 {
141 struct ahci_controller *ctlr = device_get_softc(dev);
142 int timeout;
143
144 /* Enable AHCI mode */
145 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
146 /* Reset AHCI controller */
147 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
148 for (timeout = 1000; timeout > 0; timeout--) {
149 DELAY(1000);
150 if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
151 break;
152 }
153 if (timeout == 0) {
154 device_printf(dev, "AHCI controller reset failure\n");
155 return (ENXIO);
156 }
157 /* Reenable AHCI mode */
158 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
159
160 if (ctlr->quirks & AHCI_Q_RESTORE_CAP) {
161 /*
162 * Restore capability field.
  163 		 * This is a write to a read-only register to restore its state.
164 * On fully standard-compliant hardware this is not needed and
165 * this operation shall not take place. See ahci_pci.c for
166 * platforms using this quirk.
167 */
168 ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps);
169 }
170
171 return (0);
172 }
173
174
175 int
176 ahci_attach(device_t dev)
177 {
178 struct ahci_controller *ctlr = device_get_softc(dev);
179 int error, i, speed, unit;
180 uint32_t u, version;
181 device_t child;
182
183 ctlr->dev = dev;
184 ctlr->ccc = 0;
185 resource_int_value(device_get_name(dev),
186 device_get_unit(dev), "ccc", &ctlr->ccc);
187 mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF);
188
189 /* Setup our own memory management for channels. */
190 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
191 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
192 ctlr->sc_iomem.rm_type = RMAN_ARRAY;
193 ctlr->sc_iomem.rm_descr = "I/O memory addresses";
194 if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
195 ahci_free_mem(dev);
196 return (error);
197 }
198 if ((error = rman_manage_region(&ctlr->sc_iomem,
199 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
200 ahci_free_mem(dev);
201 rman_fini(&ctlr->sc_iomem);
202 return (error);
203 }
204 /* Get the HW capabilities */
205 version = ATA_INL(ctlr->r_mem, AHCI_VS);
206 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP);
207 if (version >= 0x00010200)
208 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2);
209 if (ctlr->caps & AHCI_CAP_EMS)
210 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);
211
212 if (ctlr->quirks & AHCI_Q_FORCE_PI) {
213 /*
214 * Enable ports.
215 * The spec says that BIOS sets up bits corresponding to
216 * available ports. On platforms where this information
217 * is missing, the driver can define available ports on its own.
218 */
219 int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1;
220 int nmask = (1 << nports) - 1;
221
222 ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask);
223 device_printf(dev, "Forcing PI to %d ports (mask = %x)\n",
224 nports, nmask);
225 }
226
227 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);
228
229 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */
230 if ((ctlr->quirks & AHCI_Q_ALTSIG) &&
231 (ctlr->caps & AHCI_CAP_SPM) == 0)
232 ctlr->quirks |= AHCI_Q_NOBSYRES;
233
234 if (ctlr->quirks & AHCI_Q_1CH) {
235 ctlr->caps &= ~AHCI_CAP_NPMASK;
236 ctlr->ichannels &= 0x01;
237 }
238 if (ctlr->quirks & AHCI_Q_2CH) {
239 ctlr->caps &= ~AHCI_CAP_NPMASK;
240 ctlr->caps |= 1;
241 ctlr->ichannels &= 0x03;
242 }
243 if (ctlr->quirks & AHCI_Q_4CH) {
244 ctlr->caps &= ~AHCI_CAP_NPMASK;
245 ctlr->caps |= 3;
246 ctlr->ichannels &= 0x0f;
247 }
248 ctlr->channels = MAX(flsl(ctlr->ichannels),
249 (ctlr->caps & AHCI_CAP_NPMASK) + 1);
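	/*
	 * For example, with PI = 0x0000000d (ports 0, 2 and 3 implemented)
	 * and CAP reporting 4 ports, flsl(PI) = 4, so four ahcich children
	 * are created below and the one for the unimplemented port 1 is
	 * simply disabled.
	 */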
250 if (ctlr->quirks & AHCI_Q_NOPMP)
251 ctlr->caps &= ~AHCI_CAP_SPM;
252 if (ctlr->quirks & AHCI_Q_NONCQ)
253 ctlr->caps &= ~AHCI_CAP_SNCQ;
254 if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
255 ctlr->ccc = 0;
256 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);
257
258 /* Create controller-wide DMA tag. */
259 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
260 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
261 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
262 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
263 0, NULL, NULL, &ctlr->dma_tag)) {
264 ahci_free_mem(dev);
265 rman_fini(&ctlr->sc_iomem);
266 return (ENXIO);
267 }
268
269 ahci_ctlr_setup(dev);
270
271 /* Setup interrupts. */
272 if ((error = ahci_setup_interrupt(dev)) != 0) {
273 bus_dma_tag_destroy(ctlr->dma_tag);
274 ahci_free_mem(dev);
275 rman_fini(&ctlr->sc_iomem);
276 return (error);
277 }
278
279 i = 0;
280 for (u = ctlr->ichannels; u != 0; u >>= 1)
281 i += (u & 1);
282 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
283 resource_int_value(device_get_name(dev), device_get_unit(dev),
284 "direct", &ctlr->direct);
285 /* Announce HW capabilities. */
286 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
287 device_printf(dev,
288 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n",
289 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f),
290 ((version >> 4) & 0xf0) + (version & 0x0f),
291 (ctlr->caps & AHCI_CAP_NPMASK) + 1,
292 ((speed == 1) ? "1.5":((speed == 2) ? "3":
293 ((speed == 3) ? "6":"?"))),
294 (ctlr->caps & AHCI_CAP_SPM) ?
295 "supported" : "not supported",
296 (ctlr->caps & AHCI_CAP_FBSS) ?
297 " with FBS" : "");
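	/*
	 * The version register is BCD-coded; e.g. AHCI_VS = 0x00010301 works
	 * out through the shifts above to major 1 and minor 0x31, printed as
	 * "AHCI v1.31".
	 */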
298 if (ctlr->quirks != 0) {
299 device_printf(dev, "quirks=0x%b\n", ctlr->quirks,
300 AHCI_Q_BIT_STRING);
301 }
302 if (bootverbose) {
303 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps",
304 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"",
305 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"",
306 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"",
307 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"",
308 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"",
309 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"",
310 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"",
311 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"",
312 ((speed == 1) ? "1.5":((speed == 2) ? "3":
313 ((speed == 3) ? "6":"?"))));
314 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n",
315 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"",
316 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"",
317 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"",
318 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"",
319 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"",
320 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"",
321 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1,
322 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"",
323 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"",
324 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"",
325 (ctlr->caps & AHCI_CAP_NPMASK) + 1);
326 }
327 if (bootverbose && version >= 0x00010200) {
328 device_printf(dev, "Caps2:%s%s%s%s%s%s\n",
329 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"",
330 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"",
331 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"",
332 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"",
333 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"",
334 (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":"");
335 }
336 /* Attach all channels on this controller */
337 for (unit = 0; unit < ctlr->channels; unit++) {
338 child = device_add_child(dev, "ahcich", -1);
339 if (child == NULL) {
340 device_printf(dev, "failed to add channel device\n");
341 continue;
342 }
343 device_set_ivars(child, (void *)(intptr_t)unit);
344 if ((ctlr->ichannels & (1 << unit)) == 0)
345 device_disable(child);
346 }
347 if (ctlr->caps & AHCI_CAP_EMS) {
348 child = device_add_child(dev, "ahciem", -1);
349 if (child == NULL)
350 device_printf(dev, "failed to add enclosure device\n");
351 else
352 device_set_ivars(child, (void *)(intptr_t)-1);
353 }
354 bus_generic_attach(dev);
355 return (0);
356 }
357
358 int
359 ahci_detach(device_t dev)
360 {
361 struct ahci_controller *ctlr = device_get_softc(dev);
362 int i;
363
364 /* Detach & delete all children */
365 device_delete_children(dev);
366
367 /* Free interrupts. */
368 for (i = 0; i < ctlr->numirqs; i++) {
369 if (ctlr->irqs[i].r_irq) {
370 bus_teardown_intr(dev, ctlr->irqs[i].r_irq,
371 ctlr->irqs[i].handle);
372 bus_release_resource(dev, SYS_RES_IRQ,
373 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
374 }
375 }
376 bus_dma_tag_destroy(ctlr->dma_tag);
377 /* Free memory. */
378 rman_fini(&ctlr->sc_iomem);
379 ahci_free_mem(dev);
380 mtx_destroy(&ctlr->ch_mtx);
381 return (0);
382 }
383
384 void
385 ahci_free_mem(device_t dev)
386 {
387 struct ahci_controller *ctlr = device_get_softc(dev);
388
389 /* Release memory resources */
390 if (ctlr->r_mem)
391 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
392 if (ctlr->r_msix_table)
393 bus_release_resource(dev, SYS_RES_MEMORY,
394 ctlr->r_msix_tab_rid, ctlr->r_msix_table);
395 if (ctlr->r_msix_pba)
396 bus_release_resource(dev, SYS_RES_MEMORY,
397 ctlr->r_msix_pba_rid, ctlr->r_msix_pba);
398
399 ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL;
400 }
401
402 int
403 ahci_setup_interrupt(device_t dev)
404 {
405 struct ahci_controller *ctlr = device_get_softc(dev);
406 int i;
407
408 /* Check for single MSI vector fallback. */
409 if (ctlr->numirqs > 1 &&
410 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) {
411 device_printf(dev, "Falling back to one MSI\n");
412 ctlr->numirqs = 1;
413 }
414
415 /* Ensure we don't overrun irqs. */
416 if (ctlr->numirqs > AHCI_MAX_IRQS) {
417 device_printf(dev, "Too many irqs %d > %d (clamping)\n",
418 ctlr->numirqs, AHCI_MAX_IRQS);
419 ctlr->numirqs = AHCI_MAX_IRQS;
420 }
421
422 /* Allocate all IRQs. */
423 for (i = 0; i < ctlr->numirqs; i++) {
424 ctlr->irqs[i].ctlr = ctlr;
425 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
426 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi)
427 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
428 else if (ctlr->numirqs == 1 || i >= ctlr->channels ||
429 (ctlr->ccc && i == ctlr->cccv))
430 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
431 else if (ctlr->channels > ctlr->numirqs &&
432 i == ctlr->numirqs - 1)
433 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER;
434 else
435 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
436 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
437 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
438 device_printf(dev, "unable to map interrupt\n");
439 return (ENXIO);
440 }
441 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
442 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
443 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
444 ahci_intr_one),
445 &ctlr->irqs[i], &ctlr->irqs[i].handle))) {
446 /* SOS XXX release r_irq */
447 device_printf(dev, "unable to setup interrupt\n");
448 return (ENXIO);
449 }
450 if (ctlr->numirqs > 1) {
451 bus_describe_intr(dev, ctlr->irqs[i].r_irq,
452 ctlr->irqs[i].handle,
453 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ?
454 "ch%d" : "%d", i);
455 }
456 }
457 return (0);
458 }
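
/*
 * In rough terms the vector modes chosen above mean: ONE -- the vector
 * serves exactly one channel and can use the simplified handlers; ALL --
 * the vector must scan the global IS register for every channel (single
 * vector, the CCC vector, or vectors past the last channel); AFTER -- the
 * last of too few vectors serves its own channel plus all higher-numbered
 * ones.
 */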
459
460 /*
461 * Common case interrupt handler.
462 */
463 static void
464 ahci_intr(void *data)
465 {
466 struct ahci_controller_irq *irq = data;
467 struct ahci_controller *ctlr = irq->ctlr;
468 u_int32_t is, ise = 0;
469 void *arg;
470 int unit;
471
472 if (irq->mode == AHCI_IRQ_MODE_ALL) {
473 unit = 0;
474 if (ctlr->ccc)
475 is = ctlr->ichannels;
476 else
477 is = ATA_INL(ctlr->r_mem, AHCI_IS);
478 } else { /* AHCI_IRQ_MODE_AFTER */
479 unit = irq->r_irq_rid - 1;
480 is = ATA_INL(ctlr->r_mem, AHCI_IS);
481 is &= (0xffffffff << unit);
482 }
483 /* CCC interrupt is edge triggered. */
484 if (ctlr->ccc)
485 ise = 1 << ctlr->cccv;
486 /* Some controllers have edge triggered IS. */
487 if (ctlr->quirks & AHCI_Q_EDGEIS)
488 ise |= is;
489 if (ise != 0)
490 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise);
491 for (; unit < ctlr->channels; unit++) {
492 if ((is & (1 << unit)) != 0 &&
493 (arg = ctlr->interrupt[unit].argument)) {
494 ctlr->interrupt[unit].function(arg);
495 }
496 }
497 /* AHCI declares level triggered IS. */
498 if (!(ctlr->quirks & AHCI_Q_EDGEIS))
499 ATA_OUTL(ctlr->r_mem, AHCI_IS, is);
500 ATA_RBL(ctlr->r_mem, AHCI_IS);
501 }
502
503 /*
504 * Simplified interrupt handler for multivector MSI mode.
505 */
506 static void
507 ahci_intr_one(void *data)
508 {
509 struct ahci_controller_irq *irq = data;
510 struct ahci_controller *ctlr = irq->ctlr;
511 void *arg;
512 int unit;
513
514 unit = irq->r_irq_rid - 1;
515 if ((arg = ctlr->interrupt[unit].argument))
516 ctlr->interrupt[unit].function(arg);
517 /* AHCI declares level triggered IS. */
518 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
519 ATA_RBL(ctlr->r_mem, AHCI_IS);
520 }
521
522 static void
523 ahci_intr_one_edge(void *data)
524 {
525 struct ahci_controller_irq *irq = data;
526 struct ahci_controller *ctlr = irq->ctlr;
527 void *arg;
528 int unit;
529
530 unit = irq->r_irq_rid - 1;
531 /* Some controllers have edge triggered IS. */
532 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
533 if ((arg = ctlr->interrupt[unit].argument))
534 ctlr->interrupt[unit].function(arg);
535 ATA_RBL(ctlr->r_mem, AHCI_IS);
536 }
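
/*
 * The only difference between the two single-vector handlers above is the
 * ordering: with edge-triggered IS the status bit is cleared before the
 * channel handler runs, presumably so an event arriving during processing
 * is not lost, while the level-triggered variant clears it only afterwards.
 */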
537
538 struct resource *
539 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid,
540 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
541 {
542 struct ahci_controller *ctlr = device_get_softc(dev);
543 struct resource *res;
544 rman_res_t st;
545 int offset, size, unit;
546
547 unit = (intptr_t)device_get_ivars(child);
548 res = NULL;
549 switch (type) {
550 case SYS_RES_MEMORY:
551 if (unit >= 0) {
552 offset = AHCI_OFFSET + (unit << 7);
553 size = 128;
554 } else if (*rid == 0) {
555 offset = AHCI_EM_CTL;
556 size = 4;
557 } else {
558 offset = (ctlr->emloc & 0xffff0000) >> 14;
559 size = (ctlr->emloc & 0x0000ffff) << 2;
560 if (*rid != 1) {
561 if (*rid == 2 && (ctlr->capsem &
562 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0)
563 offset += size;
564 else
565 break;
566 }
567 }
568 st = rman_get_start(ctlr->r_mem);
569 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset,
570 st + offset + size - 1, size, RF_ACTIVE, child);
571 if (res) {
572 bus_space_handle_t bsh;
573 bus_space_tag_t bst;
574 bsh = rman_get_bushandle(ctlr->r_mem);
575 bst = rman_get_bustag(ctlr->r_mem);
576 bus_space_subregion(bst, bsh, offset, 128, &bsh);
577 rman_set_bushandle(res, bsh);
578 rman_set_bustag(res, bst);
579 }
580 break;
581 case SYS_RES_IRQ:
582 if (*rid == ATA_IRQ_RID)
583 res = ctlr->irqs[0].r_irq;
584 break;
585 }
586 return (res);
587 }
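
/*
 * The carve-up above follows the AHCI register layout: per-port register
 * blocks start at offset 0x100 (AHCI_OFFSET) and are 0x80 bytes each, so
 * channel n is handed [0x100 + n*0x80, 0x100 + n*0x80 + 0x7f], while the
 * enclosure child gets either the EM_CTL register or the enclosure message
 * buffer described by AHCI_EM_LOC.
 */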
588
589 int
590 ahci_release_resource(device_t dev, device_t child, int type, int rid,
591 struct resource *r)
592 {
593
594 switch (type) {
595 case SYS_RES_MEMORY:
596 rman_release_resource(r);
597 return (0);
598 case SYS_RES_IRQ:
599 if (rid != ATA_IRQ_RID)
600 return (ENOENT);
601 return (0);
602 }
603 return (EINVAL);
604 }
605
606 int
607 ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
608 int flags, driver_filter_t *filter, driver_intr_t *function,
609 void *argument, void **cookiep)
610 {
611 struct ahci_controller *ctlr = device_get_softc(dev);
612 int unit = (intptr_t)device_get_ivars(child);
613
614 if (filter != NULL) {
615 printf("ahci.c: we cannot use a filter here\n");
616 return (EINVAL);
617 }
618 ctlr->interrupt[unit].function = function;
619 ctlr->interrupt[unit].argument = argument;
620 return (0);
621 }
622
623 int
624 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
625 void *cookie)
626 {
627 struct ahci_controller *ctlr = device_get_softc(dev);
628 int unit = (intptr_t)device_get_ivars(child);
629
630 ctlr->interrupt[unit].function = NULL;
631 ctlr->interrupt[unit].argument = NULL;
632 return (0);
633 }
634
635 int
636 ahci_print_child(device_t dev, device_t child)
637 {
638 int retval, channel;
639
640 retval = bus_print_child_header(dev, child);
641 channel = (int)(intptr_t)device_get_ivars(child);
642 if (channel >= 0)
643 retval += printf(" at channel %d", channel);
644 retval += bus_print_child_footer(dev, child);
645 return (retval);
646 }
647
648 int
649 ahci_child_location_str(device_t dev, device_t child, char *buf,
650 size_t buflen)
651 {
652 int channel;
653
654 channel = (int)(intptr_t)device_get_ivars(child);
655 if (channel >= 0)
656 snprintf(buf, buflen, "channel=%d", channel);
657 return (0);
658 }
659
660 bus_dma_tag_t
661 ahci_get_dma_tag(device_t dev, device_t child)
662 {
663 struct ahci_controller *ctlr = device_get_softc(dev);
664
665 return (ctlr->dma_tag);
666 }
667
668 void
669 ahci_attached(device_t dev, struct ahci_channel *ch)
670 {
671 struct ahci_controller *ctlr = device_get_softc(dev);
672
673 mtx_lock(&ctlr->ch_mtx);
674 ctlr->ch[ch->unit] = ch;
675 mtx_unlock(&ctlr->ch_mtx);
676 }
677
678 void
679 ahci_detached(device_t dev, struct ahci_channel *ch)
680 {
681 struct ahci_controller *ctlr = device_get_softc(dev);
682
683 mtx_lock(&ctlr->ch_mtx);
684 mtx_lock(&ch->mtx);
685 ctlr->ch[ch->unit] = NULL;
686 mtx_unlock(&ch->mtx);
687 mtx_unlock(&ctlr->ch_mtx);
688 }
689
690 struct ahci_channel *
691 ahci_getch(device_t dev, int n)
692 {
693 struct ahci_controller *ctlr = device_get_softc(dev);
694 struct ahci_channel *ch;
695
696 KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n));
697 mtx_lock(&ctlr->ch_mtx);
698 ch = ctlr->ch[n];
699 if (ch != NULL)
700 mtx_lock(&ch->mtx);
701 mtx_unlock(&ctlr->ch_mtx);
702 return (ch);
703 }
704
705 void
706 ahci_putch(struct ahci_channel *ch)
707 {
708
709 mtx_unlock(&ch->mtx);
710 }
711
712 static int
713 ahci_ch_probe(device_t dev)
714 {
715
716 device_set_desc_copy(dev, "AHCI channel");
717 return (BUS_PROBE_DEFAULT);
718 }
719
720 static int
721 ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS)
722 {
723 struct ahci_channel *ch;
724 int error, value;
725
726 ch = arg1;
727 value = ch->disablephy;
728 error = sysctl_handle_int(oidp, &value, 0, req);
729 if (error != 0 || req->newptr == NULL || (value != 0 && value != 1))
730 return (error);
731
732 mtx_lock(&ch->mtx);
733 ch->disablephy = value;
734 if (value) {
735 ahci_ch_deinit(ch->dev);
736 } else {
737 ahci_ch_init(ch->dev);
738 ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED);
739 }
740 mtx_unlock(&ch->mtx);
741
742 return (0);
743 }
744
745 static int
746 ahci_ch_attach(device_t dev)
747 {
748 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev));
749 struct ahci_channel *ch = device_get_softc(dev);
750 struct cam_devq *devq;
751 struct sysctl_ctx_list *ctx;
752 struct sysctl_oid *tree;
753 int rid, error, i, sata_rev = 0;
754 u_int32_t version;
755
756 ch->dev = dev;
757 ch->unit = (intptr_t)device_get_ivars(dev);
758 ch->caps = ctlr->caps;
759 ch->caps2 = ctlr->caps2;
760 ch->start = ctlr->ch_start;
761 ch->quirks = ctlr->quirks;
762 ch->vendorid = ctlr->vendorid;
763 ch->deviceid = ctlr->deviceid;
764 ch->subvendorid = ctlr->subvendorid;
765 ch->subdeviceid = ctlr->subdeviceid;
766 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1;
767 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
768 ch->pm_level = 0;
769 resource_int_value(device_get_name(dev),
770 device_get_unit(dev), "pm_level", &ch->pm_level);
771 STAILQ_INIT(&ch->doneq);
772 if (ch->pm_level > 3)
773 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
774 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
 775 	/* JMicron external ports (unit 0) are sometimes limited to SATA revision 1. */
776 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0)
777 sata_rev = 1;
778 if (ch->quirks & AHCI_Q_SATA2)
779 sata_rev = 2;
780 resource_int_value(device_get_name(dev),
781 device_get_unit(dev), "sata_rev", &sata_rev);
782 for (i = 0; i < 16; i++) {
783 ch->user[i].revision = sata_rev;
784 ch->user[i].mode = 0;
785 ch->user[i].bytecount = 8192;
786 ch->user[i].tags = ch->numslots;
787 ch->user[i].caps = 0;
788 ch->curr[i] = ch->user[i];
789 if (ch->pm_level) {
790 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
791 CTS_SATA_CAPS_H_APST |
792 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
793 }
794 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA |
795 CTS_SATA_CAPS_H_AN;
796 }
797 rid = 0;
798 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
799 &rid, RF_ACTIVE)))
800 return (ENXIO);
801 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD);
802 version = ATA_INL(ctlr->r_mem, AHCI_VS);
803 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS))
804 ch->chcaps |= AHCI_P_CMD_FBSCP;
805 if (ch->caps2 & AHCI_CAP2_SDS)
806 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP);
807 if (bootverbose) {
808 device_printf(dev, "Caps:%s%s%s%s%s%s\n",
809 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"",
810 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"",
811 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"",
812 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"",
813 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"",
814 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":"");
815 }
816 ahci_dmainit(dev);
817 ahci_slotsalloc(dev);
818 mtx_lock(&ch->mtx);
819 ahci_ch_init(dev);
820 rid = ATA_IRQ_RID;
821 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
822 &rid, RF_SHAREABLE | RF_ACTIVE))) {
823 device_printf(dev, "Unable to map interrupt\n");
824 error = ENXIO;
825 goto err0;
826 }
827 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
828 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
829 ch, &ch->ih))) {
830 device_printf(dev, "Unable to setup interrupt\n");
831 error = ENXIO;
832 goto err1;
833 }
834 /* Create the device queue for our SIM. */
835 devq = cam_simq_alloc(ch->numslots);
836 if (devq == NULL) {
837 device_printf(dev, "Unable to allocate simq\n");
838 error = ENOMEM;
839 goto err1;
840 }
841 /* Construct SIM entry */
842 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
843 device_get_unit(dev), (struct mtx *)&ch->mtx,
844 (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots),
845 (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
846 devq);
847 if (ch->sim == NULL) {
848 cam_simq_free(devq);
849 device_printf(dev, "unable to allocate sim\n");
850 error = ENOMEM;
851 goto err1;
852 }
853 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
854 device_printf(dev, "unable to register xpt bus\n");
855 error = ENXIO;
856 goto err2;
857 }
858 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
859 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
860 device_printf(dev, "unable to create path\n");
861 error = ENXIO;
862 goto err3;
863 }
864 if (ch->pm_level > 3) {
865 callout_reset(&ch->pm_timer,
866 (ch->pm_level == 4) ? hz / 1000 : hz / 8,
867 ahci_ch_pm, ch);
868 }
869 mtx_unlock(&ch->mtx);
870 ahci_attached(device_get_parent(dev), ch);
871 ctx = device_get_sysctl_ctx(dev);
872 tree = device_get_sysctl_tree(dev);
873 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy",
874 CTLFLAG_RW | CTLTYPE_UINT, ch, 0, ahci_ch_disablephy_proc, "IU",
875 "Disable PHY");
876 return (0);
877
878 err3:
879 xpt_bus_deregister(cam_sim_path(ch->sim));
880 err2:
881 cam_sim_free(ch->sim, /*free_devq*/TRUE);
882 err1:
883 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
884 err0:
885 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
886 mtx_unlock(&ch->mtx);
887 mtx_destroy(&ch->mtx);
888 return (error);
889 }
890
891 static int
892 ahci_ch_detach(device_t dev)
893 {
894 struct ahci_channel *ch = device_get_softc(dev);
895
896 ahci_detached(device_get_parent(dev), ch);
897 mtx_lock(&ch->mtx);
898 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
899 /* Forget about reset. */
900 if (ch->resetting) {
901 ch->resetting = 0;
902 xpt_release_simq(ch->sim, TRUE);
903 }
904 xpt_free_path(ch->path);
905 xpt_bus_deregister(cam_sim_path(ch->sim));
906 cam_sim_free(ch->sim, /*free_devq*/TRUE);
907 mtx_unlock(&ch->mtx);
908
909 if (ch->pm_level > 3)
910 callout_drain(&ch->pm_timer);
911 callout_drain(&ch->reset_timer);
912 bus_teardown_intr(dev, ch->r_irq, ch->ih);
913 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
914
915 ahci_ch_deinit(dev);
916 ahci_slotsfree(dev);
917 ahci_dmafini(dev);
918
919 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
920 mtx_destroy(&ch->mtx);
921 return (0);
922 }
923
924 static int
925 ahci_ch_init(device_t dev)
926 {
927 struct ahci_channel *ch = device_get_softc(dev);
928 uint64_t work;
929
930 /* Disable port interrupts */
931 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
932 /* Setup work areas */
933 work = ch->dma.work_bus + AHCI_CL_OFFSET;
934 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
935 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
936 work = ch->dma.rfis_bus;
937 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
938 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
939 /* Activate the channel and power/spin up device */
940 ATA_OUTL(ch->r_mem, AHCI_P_CMD,
941 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
942 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) |
943 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 )));
944 ahci_start_fr(ch);
945 ahci_start(ch, 1);
946 return (0);
947 }
948
949 static int
950 ahci_ch_deinit(device_t dev)
951 {
952 struct ahci_channel *ch = device_get_softc(dev);
953
954 /* Disable port interrupts. */
955 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
956 /* Reset command register. */
957 ahci_stop(ch);
958 ahci_stop_fr(ch);
959 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0);
960 /* Allow everything, including partial and slumber modes. */
961 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0);
962 /* Request slumber mode transition and give some time to get there. */
963 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER);
964 DELAY(100);
965 /* Disable PHY. */
966 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
967 return (0);
968 }
969
970 static int
971 ahci_ch_suspend(device_t dev)
972 {
973 struct ahci_channel *ch = device_get_softc(dev);
974
975 mtx_lock(&ch->mtx);
976 xpt_freeze_simq(ch->sim, 1);
977 /* Forget about reset. */
978 if (ch->resetting) {
979 ch->resetting = 0;
980 callout_stop(&ch->reset_timer);
981 xpt_release_simq(ch->sim, TRUE);
982 }
983 while (ch->oslots)
984 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100);
985 ahci_ch_deinit(dev);
986 mtx_unlock(&ch->mtx);
987 return (0);
988 }
989
990 static int
991 ahci_ch_resume(device_t dev)
992 {
993 struct ahci_channel *ch = device_get_softc(dev);
994
995 mtx_lock(&ch->mtx);
996 ahci_ch_init(dev);
997 ahci_reset(ch);
998 xpt_release_simq(ch->sim, TRUE);
999 mtx_unlock(&ch->mtx);
1000 return (0);
1001 }
1002
1003 devclass_t ahcich_devclass;
1004 static device_method_t ahcich_methods[] = {
1005 DEVMETHOD(device_probe, ahci_ch_probe),
1006 DEVMETHOD(device_attach, ahci_ch_attach),
1007 DEVMETHOD(device_detach, ahci_ch_detach),
1008 DEVMETHOD(device_suspend, ahci_ch_suspend),
1009 DEVMETHOD(device_resume, ahci_ch_resume),
1010 DEVMETHOD_END
1011 };
1012 static driver_t ahcich_driver = {
1013 "ahcich",
1014 ahcich_methods,
1015 sizeof(struct ahci_channel)
1016 };
1017 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL);
1018
1019 struct ahci_dc_cb_args {
1020 bus_addr_t maddr;
1021 int error;
1022 };
1023
1024 static void
1025 ahci_dmainit(device_t dev)
1026 {
1027 struct ahci_channel *ch = device_get_softc(dev);
1028 struct ahci_dc_cb_args dcba;
1029 size_t rfsize;
1030 int error;
1031
1032 /* Command area. */
1033 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
1034 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1035 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
1036 0, NULL, NULL, &ch->dma.work_tag);
1037 if (error != 0)
1038 goto error;
1039 error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
1040 BUS_DMA_ZERO, &ch->dma.work_map);
1041 if (error != 0)
1042 goto error;
1043 error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
1044 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
1045 if (error != 0 || (error = dcba.error) != 0) {
1046 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
1047 goto error;
1048 }
1049 ch->dma.work_bus = dcba.maddr;
1050 /* FIS receive area. */
1051 if (ch->chcaps & AHCI_P_CMD_FBSCP)
1052 rfsize = 4096;
1053 else
1054 rfsize = 256;
1055 error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
1056 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1057 NULL, NULL, rfsize, 1, rfsize,
1058 0, NULL, NULL, &ch->dma.rfis_tag);
1059 if (error != 0)
1060 goto error;
1061 error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0,
1062 &ch->dma.rfis_map);
1063 if (error != 0)
1064 goto error;
1065 error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis,
1066 rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT);
1067 if (error != 0 || (error = dcba.error) != 0) {
1068 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
1069 goto error;
1070 }
1071 ch->dma.rfis_bus = dcba.maddr;
1072 /* Data area. */
1073 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
1074 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1075 NULL, NULL,
1076 AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots,
1077 AHCI_SG_ENTRIES, AHCI_PRD_MAX,
1078 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag);
1079 if (error != 0)
1080 goto error;
1081 return;
1082
1083 error:
1084 device_printf(dev, "WARNING - DMA initialization failed, error %d\n",
1085 error);
1086 ahci_dmafini(dev);
1087 }
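
/*
 * The alignments requested above mirror the AHCI requirements: the command
 * list kept in the work area (together with the command tables) must be
 * 1KB-aligned, and the FIS receive area must be 256-byte aligned, or
 * 4KB-aligned when per-device receive slots are used for FIS-based
 * switching (FBSCP).
 */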
1088
1089 static void
1090 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
1091 {
1092 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc;
1093
1094 if (!(dcba->error = error))
1095 dcba->maddr = segs[0].ds_addr;
1096 }
1097
1098 static void
1099 ahci_dmafini(device_t dev)
1100 {
1101 struct ahci_channel *ch = device_get_softc(dev);
1102
1103 if (ch->dma.data_tag) {
1104 bus_dma_tag_destroy(ch->dma.data_tag);
1105 ch->dma.data_tag = NULL;
1106 }
1107 if (ch->dma.rfis_bus) {
1108 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map);
1109 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
1110 ch->dma.rfis_bus = 0;
1111 ch->dma.rfis = NULL;
1112 }
1113 if (ch->dma.work_bus) {
1114 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
1115 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
1116 ch->dma.work_bus = 0;
1117 ch->dma.work = NULL;
1118 }
1119 if (ch->dma.work_tag) {
1120 bus_dma_tag_destroy(ch->dma.work_tag);
1121 ch->dma.work_tag = NULL;
1122 }
1123 }
1124
1125 static void
1126 ahci_slotsalloc(device_t dev)
1127 {
1128 struct ahci_channel *ch = device_get_softc(dev);
1129 int i;
1130
1131 /* Alloc and setup command/dma slots */
1132 bzero(ch->slot, sizeof(ch->slot));
1133 for (i = 0; i < ch->numslots; i++) {
1134 struct ahci_slot *slot = &ch->slot[i];
1135
1136 slot->ch = ch;
1137 slot->slot = i;
1138 slot->state = AHCI_SLOT_EMPTY;
1139 slot->ccb = NULL;
1140 callout_init_mtx(&slot->timeout, &ch->mtx, 0);
1141
1142 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
1143 device_printf(ch->dev, "FAILURE - create data_map\n");
1144 }
1145 }
1146
1147 static void
1148 ahci_slotsfree(device_t dev)
1149 {
1150 struct ahci_channel *ch = device_get_softc(dev);
1151 int i;
1152
1153 /* Free all dma slots */
1154 for (i = 0; i < ch->numslots; i++) {
1155 struct ahci_slot *slot = &ch->slot[i];
1156
1157 callout_drain(&slot->timeout);
1158 if (slot->dma.data_map) {
1159 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map);
1160 slot->dma.data_map = NULL;
1161 }
1162 }
1163 }
1164
1165 static int
1166 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr)
1167 {
1168
1169 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) ||
1170 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) {
1171 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
1172 union ccb *ccb;
1173
1174 if (bootverbose) {
1175 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
1176 device_printf(ch->dev, "CONNECT requested\n");
1177 else
1178 device_printf(ch->dev, "DISCONNECT requested\n");
1179 }
1180 ahci_reset(ch);
1181 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1182 return (0);
1183 if (xpt_create_path(&ccb->ccb_h.path, NULL,
1184 cam_sim_path(ch->sim),
1185 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1186 xpt_free_ccb(ccb);
1187 return (0);
1188 }
1189 xpt_rescan(ccb);
1190 return (1);
1191 }
1192 return (0);
1193 }
1194
1195 static void
1196 ahci_cpd_check_events(struct ahci_channel *ch)
1197 {
1198 u_int32_t status;
1199 union ccb *ccb;
1200 device_t dev;
1201
1202 if (ch->pm_level == 0)
1203 return;
1204
1205 status = ATA_INL(ch->r_mem, AHCI_P_CMD);
1206 if ((status & AHCI_P_CMD_CPD) == 0)
1207 return;
1208
1209 if (bootverbose) {
1210 dev = ch->dev;
1211 if (status & AHCI_P_CMD_CPS) {
1212 device_printf(dev, "COLD CONNECT requested\n");
1213 } else
1214 device_printf(dev, "COLD DISCONNECT requested\n");
1215 }
1216 ahci_reset(ch);
1217 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
1218 return;
1219 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim),
1220 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1221 xpt_free_ccb(ccb);
1222 return;
1223 }
1224 xpt_rescan(ccb);
1225 }
1226
1227 static void
1228 ahci_notify_events(struct ahci_channel *ch, u_int32_t status)
1229 {
1230 struct cam_path *dpath;
1231 int i;
1232
1233 if (ch->caps & AHCI_CAP_SSNTF)
1234 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status);
1235 if (bootverbose)
1236 device_printf(ch->dev, "SNTF 0x%04x\n", status);
1237 for (i = 0; i < 16; i++) {
1238 if ((status & (1 << i)) == 0)
1239 continue;
1240 if (xpt_create_path(&dpath, NULL,
1241 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) {
1242 xpt_async(AC_SCSI_AEN, dpath, NULL);
1243 xpt_free_path(dpath);
1244 }
1245 }
1246 }
1247
1248 static void
1249 ahci_done(struct ahci_channel *ch, union ccb *ccb)
1250 {
1251
1252 mtx_assert(&ch->mtx, MA_OWNED);
1253 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
1254 ch->batch == 0) {
1255 xpt_done(ccb);
1256 return;
1257 }
1258
1259 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
1260 }
1261
1262 static void
1263 ahci_ch_intr(void *arg)
1264 {
1265 struct ahci_channel *ch = (struct ahci_channel *)arg;
1266 uint32_t istatus;
1267
1268 /* Read interrupt statuses. */
1269 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1270
1271 mtx_lock(&ch->mtx);
1272 ahci_ch_intr_main(ch, istatus);
1273 mtx_unlock(&ch->mtx);
1274 }
1275
1276 static void
1277 ahci_ch_intr_direct(void *arg)
1278 {
1279 struct ahci_channel *ch = (struct ahci_channel *)arg;
1280 struct ccb_hdr *ccb_h;
1281 uint32_t istatus;
1282 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq);
1283
1284 /* Read interrupt statuses. */
1285 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
1286
1287 mtx_lock(&ch->mtx);
1288 ch->batch = 1;
1289 ahci_ch_intr_main(ch, istatus);
1290 ch->batch = 0;
1291 /*
1292 * Prevent the possibility of issues caused by processing the queue
1293 * while unlocked below by moving the contents to a local queue.
1294 */
1295 STAILQ_CONCAT(&tmp_doneq, &ch->doneq);
1296 mtx_unlock(&ch->mtx);
1297 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) {
1298 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe);
1299 xpt_done_direct((union ccb *)ccb_h);
1300 }
1301 }
1302
1303 static void
1304 ahci_ch_pm(void *arg)
1305 {
1306 struct ahci_channel *ch = (struct ahci_channel *)arg;
1307 uint32_t work;
1308
1309 if (ch->numrslots != 0)
1310 return;
1311 work = ATA_INL(ch->r_mem, AHCI_P_CMD);
1312 if (ch->pm_level == 4)
1313 work |= AHCI_P_CMD_PARTIAL;
1314 else
1315 work |= AHCI_P_CMD_SLUMBER;
1316 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work);
1317 }
1318
1319 static void
1320 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
1321 {
1322 uint32_t cstatus, serr = 0, sntf = 0, ok, err;
1323 enum ahci_err_type et;
1324 int i, ccs, port, reset = 0;
1325
1326 /* Clear interrupt statuses. */
1327 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
1328 /* Read command statuses. */
1329 if (ch->numtslots != 0)
1330 cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1331 else
1332 cstatus = 0;
1333 if (ch->numrslots != ch->numtslots)
1334 cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
1335 /* Read SNTF in one of possible ways. */
1336 if ((istatus & AHCI_P_IX_SDB) &&
1337 (ch->pm_present || ch->curr[0].atapi != 0)) {
1338 if (ch->caps & AHCI_CAP_SSNTF)
1339 sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
1340 else if (ch->fbs_enabled) {
1341 u_int8_t *fis = ch->dma.rfis + 0x58;
1342
1343 for (i = 0; i < 16; i++) {
1344 if (fis[1] & 0x80) {
1345 fis[1] &= 0x7f;
1346 sntf |= 1 << i;
1347 }
1348 fis += 256;
1349 }
1350 } else {
1351 u_int8_t *fis = ch->dma.rfis + 0x58;
1352
1353 if (fis[1] & 0x80)
1354 sntf = (1 << (fis[1] & 0x0f));
1355 }
1356 }
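	/*
	 * (Offset 0x58 above is where a Set Device Bits FIS lands in the
	 * receive area; bit 7 of its second byte flags a pending
	 * notification.  With FBS every PMP device has its own 256-byte
	 * receive slot, hence the fis += 256 walk.)
	 */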
1357 /* Process PHY events */
1358 if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
1359 AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1360 serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
1361 if (serr) {
1362 ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
1363 reset = ahci_phy_check_events(ch, serr);
1364 }
1365 }
1366 /* Process cold presence detection events */
1367 if ((istatus & AHCI_P_IX_CPD) && !reset)
1368 ahci_cpd_check_events(ch);
1369 /* Process command errors */
1370 if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
1371 AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
1372 if (ch->quirks & AHCI_Q_NOCCS) {
1373 /*
1374 * ASMedia chips sometimes report failed commands as
1375 * completed. Count all running commands as failed.
1376 */
1377 cstatus |= ch->rslots;
1378
1379 /* They also report wrong CCS, so try to guess one. */
1380 ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
1381 } else {
1382 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
1383 AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
1384 }
1385 //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
1386 // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
1387 // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
1388 port = -1;
1389 if (ch->fbs_enabled) {
1390 uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
1391 if (fbs & AHCI_P_FBS_SDE) {
1392 port = (fbs & AHCI_P_FBS_DWE)
1393 >> AHCI_P_FBS_DWE_SHIFT;
1394 } else {
1395 for (i = 0; i < 16; i++) {
1396 if (ch->numrslotspd[i] == 0)
1397 continue;
1398 if (port == -1)
1399 port = i;
1400 else if (port != i) {
1401 port = -2;
1402 break;
1403 }
1404 }
1405 }
1406 }
1407 err = ch->rslots & cstatus;
1408 } else {
1409 ccs = 0;
1410 err = 0;
1411 port = -1;
1412 }
1413 /* Complete all successful commands. */
1414 ok = ch->rslots & ~cstatus;
1415 for (i = 0; i < ch->numslots; i++) {
1416 if ((ok >> i) & 1)
1417 ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
1418 }
1419 /* On error, complete the rest of commands with error statuses. */
1420 if (err) {
1421 if (ch->frozen) {
1422 union ccb *fccb = ch->frozen;
1423 ch->frozen = NULL;
1424 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1425 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1426 xpt_freeze_devq(fccb->ccb_h.path, 1);
1427 fccb->ccb_h.status |= CAM_DEV_QFRZN;
1428 }
1429 ahci_done(ch, fccb);
1430 }
1431 for (i = 0; i < ch->numslots; i++) {
1432 		/* XXX: requests in loading state. */
1433 if (((err >> i) & 1) == 0)
1434 continue;
1435 if (port >= 0 &&
1436 ch->slot[i].ccb->ccb_h.target_id != port)
1437 continue;
1438 if (istatus & AHCI_P_IX_TFE) {
1439 if (port != -2) {
1440 /* Task File Error */
1441 if (ch->numtslotspd[
1442 ch->slot[i].ccb->ccb_h.target_id] == 0) {
1443 /* Untagged operation. */
1444 if (i == ccs)
1445 et = AHCI_ERR_TFE;
1446 else
1447 et = AHCI_ERR_INNOCENT;
1448 } else {
1449 /* Tagged operation. */
1450 et = AHCI_ERR_NCQ;
1451 }
1452 } else {
1453 et = AHCI_ERR_TFE;
1454 ch->fatalerr = 1;
1455 }
1456 } else if (istatus & AHCI_P_IX_IF) {
1457 if (ch->numtslots == 0 && i != ccs && port != -2)
1458 et = AHCI_ERR_INNOCENT;
1459 else
1460 et = AHCI_ERR_SATA;
1461 } else
1462 et = AHCI_ERR_INVALID;
1463 ahci_end_transaction(&ch->slot[i], et);
1464 }
1465 /*
1466 		 * We can't reinit the port while some other commands are
1467 		 * active; use resume to complete them.
1468 */
1469 if (ch->rslots != 0 && !ch->recoverycmd)
1470 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC);
1471 }
1472 /* Process NOTIFY events */
1473 if (sntf)
1474 ahci_notify_events(ch, sntf);
1475 }
1476
1477 /* Must be called with channel locked. */
1478 static int
1479 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb)
1480 {
1481 int t = ccb->ccb_h.target_id;
1482
1483 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1484 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1485 /* Tagged command while we have no supported tag free. */
1486 if (((~ch->oslots) & (0xffffffff >> (32 -
1487 ch->curr[t].tags))) == 0)
1488 return (1);
1489 /* If we have FBS */
1490 if (ch->fbs_enabled) {
1491 /* Tagged command while untagged are active. */
1492 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0)
1493 return (1);
1494 } else {
1495 /* Tagged command while untagged are active. */
1496 if (ch->numrslots != 0 && ch->numtslots == 0)
1497 return (1);
1498 /* Tagged command while tagged to other target is active. */
1499 if (ch->numtslots != 0 &&
1500 ch->taggedtarget != ccb->ccb_h.target_id)
1501 return (1);
1502 }
1503 } else {
1504 /* If we have FBS */
1505 if (ch->fbs_enabled) {
1506 /* Untagged command while tagged are active. */
1507 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0)
1508 return (1);
1509 } else {
1510 /* Untagged command while tagged are active. */
1511 if (ch->numrslots != 0 && ch->numtslots != 0)
1512 return (1);
1513 }
1514 }
1515 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1516 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) {
1517 /* Atomic command while anything active. */
1518 if (ch->numrslots != 0)
1519 return (1);
1520 }
1521 /* We have some atomic command running. */
1522 if (ch->aslots != 0)
1523 return (1);
1524 return (0);
1525 }
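
/*
 * Roughly, the collision rules above exist because SATA forbids mixing NCQ
 * and non-NCQ commands, NCQ tags cannot span different PMP targets without
 * FIS-based switching, and "atomic" control/result commands need the
 * channel to themselves so their taskfile output is not clobbered.
 */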
1526
1527 /* Must be called with channel locked. */
1528 static void
1529 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb)
1530 {
1531 struct ahci_slot *slot;
1532 int tag, tags;
1533
1534 /* Choose empty slot. */
1535 tags = ch->numslots;
1536 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1537 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA))
1538 tags = ch->curr[ccb->ccb_h.target_id].tags;
1539 if (ch->lastslot + 1 < tags)
1540 tag = ffs(~(ch->oslots >> (ch->lastslot + 1)));
1541 else
1542 tag = 0;
1543 if (tag == 0 || tag + ch->lastslot >= tags)
1544 tag = ffs(~ch->oslots) - 1;
1545 else
1546 tag += ch->lastslot;
1547 ch->lastslot = tag;
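	/*
	 * E.g. with 32 tags, lastslot = 5 and oslots = 0x0000003f, the ffs()
	 * over ~(oslots >> 6) picks bit 0 and tag becomes 6: a round-robin
	 * scan that falls back to the lowest free slot when nothing is free
	 * above lastslot.
	 */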
1548 /* Occupy chosen slot. */
1549 slot = &ch->slot[tag];
1550 slot->ccb = ccb;
1551 /* Stop PM timer. */
1552 if (ch->numrslots == 0 && ch->pm_level > 3)
1553 callout_stop(&ch->pm_timer);
1554 /* Update channel stats. */
1555 ch->oslots |= (1 << tag);
1556 ch->numrslots++;
1557 ch->numrslotspd[ccb->ccb_h.target_id]++;
1558 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1559 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1560 ch->numtslots++;
1561 ch->numtslotspd[ccb->ccb_h.target_id]++;
1562 ch->taggedtarget = ccb->ccb_h.target_id;
1563 }
1564 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1565 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
1566 ch->aslots |= (1 << tag);
1567 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1568 slot->state = AHCI_SLOT_LOADING;
1569 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
1570 ahci_dmasetprd, slot, 0);
1571 } else {
1572 slot->dma.nsegs = 0;
1573 ahci_execute_transaction(slot);
1574 }
1575 }
1576
1577 /* Locked by busdma engine. */
1578 static void
1579 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1580 {
1581 struct ahci_slot *slot = arg;
1582 struct ahci_channel *ch = slot->ch;
1583 struct ahci_cmd_tab *ctp;
1584 struct ahci_dma_prd *prd;
1585 int i;
1586
1587 if (error) {
1588 device_printf(ch->dev, "DMA load error\n");
1589 ahci_end_transaction(slot, AHCI_ERR_INVALID);
1590 return;
1591 }
1592 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
1593 /* Get a piece of the workspace for this request */
1594 ctp = (struct ahci_cmd_tab *)
1595 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
1596 /* Fill S/G table */
1597 prd = &ctp->prd_tab[0];
1598 for (i = 0; i < nsegs; i++) {
1599 prd[i].dba = htole64(segs[i].ds_addr);
1600 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK);
1601 }
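	/*
	 * Each PRD's dbc field holds the byte count minus one, masked to the
	 * 22 bits AHCI allows, i.e. at most 4MB per scatter/gather entry.
	 */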
1602 slot->dma.nsegs = nsegs;
1603 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1604 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
1605 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1606 ahci_execute_transaction(slot);
1607 }
1608
1609 /* Must be called with channel locked. */
1610 static void
1611 ahci_execute_transaction(struct ahci_slot *slot)
1612 {
1613 struct ahci_channel *ch = slot->ch;
1614 struct ahci_cmd_tab *ctp;
1615 struct ahci_cmd_list *clp;
1616 union ccb *ccb = slot->ccb;
1617 int port = ccb->ccb_h.target_id & 0x0f;
1618 int fis_size, i, softreset;
1619 uint8_t *fis = ch->dma.rfis + 0x40;
1620 uint8_t val;
1621 uint16_t cmd_flags;
1622
1623 /* Get a piece of the workspace for this request */
1624 ctp = (struct ahci_cmd_tab *)
1625 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
1626 /* Setup the FIS for this request */
1627 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
1628 device_printf(ch->dev, "Setting up SATA FIS failed\n");
1629 ahci_end_transaction(slot, AHCI_ERR_INVALID);
1630 return;
1631 }
1632 /* Setup the command list entry */
1633 clp = (struct ahci_cmd_list *)
1634 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1635 cmd_flags =
1636 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) |
1637 (ccb->ccb_h.func_code == XPT_SCSI_IO ?
1638 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
1639 (fis_size / sizeof(u_int32_t)) |
1640 (port << 12);
1641 clp->prd_length = htole16(slot->dma.nsegs);
1642 /* Special handling for Soft Reset command. */
1643 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1644 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
1645 if (ccb->ataio.cmd.control & ATA_A_RESET) {
1646 softreset = 1;
1647 /* Kick controller into sane state */
1648 ahci_stop(ch);
1649 ahci_clo(ch);
1650 ahci_start(ch, 0);
1651 cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
1652 } else {
1653 softreset = 2;
1654 /* Prepare FIS receive area for check. */
1655 for (i = 0; i < 20; i++)
1656 fis[i] = 0xff;
1657 }
1658 } else
1659 softreset = 0;
1660 clp->bytecount = 0;
1661 clp->cmd_flags = htole16(cmd_flags);
1662 clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
1663 (AHCI_CT_SIZE * slot->slot));
1664 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1665 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1666 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1667 BUS_DMASYNC_PREREAD);
1668 /* Set ACTIVE bit for NCQ commands. */
1669 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1670 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1671 ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
1672 }
1673 /* If FBS is enabled, set PMP port. */
1674 if (ch->fbs_enabled) {
1675 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
1676 (port << AHCI_P_FBS_DEV_SHIFT));
1677 }
1678 /* Issue command to the controller. */
1679 slot->state = AHCI_SLOT_RUNNING;
1680 ch->rslots |= (1 << slot->slot);
1681 ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
1682 	/* Device reset commands don't interrupt. Poll them. */
1683 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1684 (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
1685 int count, timeout = ccb->ccb_h.timeout * 100;
1686 enum ahci_err_type et = AHCI_ERR_NONE;
1687
1688 for (count = 0; count < timeout; count++) {
1689 DELAY(10);
1690 if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
1691 break;
1692 if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
1693 softreset != 1) {
1694 #if 0
1695 device_printf(ch->dev,
1696 "Poll error on slot %d, TFD: %04x\n",
1697 slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
1698 #endif
1699 et = AHCI_ERR_TFE;
1700 break;
1701 }
1702 /* Workaround for ATI SB600/SB700 chipsets. */
1703 if (ccb->ccb_h.target_id == 15 &&
1704 (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
1705 (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
1706 et = AHCI_ERR_TIMEOUT;
1707 break;
1708 }
1709 }
1710
1711 /*
1712 * Marvell HBAs with non-RAID firmware do not wait for
1713 * readiness after soft reset, so we have to wait here.
1714 * Marvell RAIDs do not have this problem, but instead
1715 * sometimes forget to update FIS receive area, breaking
1716 * this wait.
1717 */
1718 if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
1719 (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
1720 softreset == 2 && et == AHCI_ERR_NONE) {
1721 for ( ; count < timeout; count++) {
1722 bus_dmamap_sync(ch->dma.rfis_tag,
1723 ch->dma.rfis_map, BUS_DMASYNC_POSTREAD);
1724 val = fis[2];
1725 bus_dmamap_sync(ch->dma.rfis_tag,
1726 ch->dma.rfis_map, BUS_DMASYNC_PREREAD);
1727 if ((val & ATA_S_BUSY) == 0)
1728 break;
1729 DELAY(10);
1730 }
1731 }
1732
1733 if (timeout && (count >= timeout)) {
1734 device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
1735 slot->slot, port);
1736 device_printf(ch->dev, "is %08x cs %08x ss %08x "
1737 "rs %08x tfd %02x serr %08x cmd %08x\n",
1738 ATA_INL(ch->r_mem, AHCI_P_IS),
1739 ATA_INL(ch->r_mem, AHCI_P_CI),
1740 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1741 ATA_INL(ch->r_mem, AHCI_P_TFD),
1742 ATA_INL(ch->r_mem, AHCI_P_SERR),
1743 ATA_INL(ch->r_mem, AHCI_P_CMD));
1744 et = AHCI_ERR_TIMEOUT;
1745 }
1746
1747 /* Kick controller into sane state and enable FBS. */
1748 if (softreset == 2)
1749 ch->eslots |= (1 << slot->slot);
1750 ahci_end_transaction(slot, et);
1751 return;
1752 }
1753 /* Start command execution timeout */
1754 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2,
1755 0, (timeout_t*)ahci_timeout, slot, 0);
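	/*
	 * Half the CCB timeout is used here because ahci_timeout() may fire
	 * one or more times merely to confirm that the slot has actually
	 * started executing, re-arming itself each time, before it declares
	 * a real timeout.
	 */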
1756 return;
1757 }
1758
1759 /* Must be called with channel locked. */
1760 static void
1761 ahci_process_timeout(struct ahci_channel *ch)
1762 {
1763 int i;
1764
1765 mtx_assert(&ch->mtx, MA_OWNED);
1766 /* Handle the rest of commands. */
1767 for (i = 0; i < ch->numslots; i++) {
1768 /* Do we have a running request on slot? */
1769 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1770 continue;
1771 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT);
1772 }
1773 }
1774
1775 /* Must be called with channel locked. */
1776 static void
1777 ahci_rearm_timeout(struct ahci_channel *ch)
1778 {
1779 int i;
1780
1781 mtx_assert(&ch->mtx, MA_OWNED);
1782 for (i = 0; i < ch->numslots; i++) {
1783 struct ahci_slot *slot = &ch->slot[i];
1784
1785 /* Do we have a running request on slot? */
1786 if (slot->state < AHCI_SLOT_RUNNING)
1787 continue;
1788 if ((ch->toslots & (1 << i)) == 0)
1789 continue;
1790 callout_reset_sbt(&slot->timeout,
1791 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1792 (timeout_t*)ahci_timeout, slot, 0);
1793 }
1794 }
1795
1796 /* Locked by callout mechanism. */
1797 static void
1798 ahci_timeout(struct ahci_slot *slot)
1799 {
1800 struct ahci_channel *ch = slot->ch;
1801 device_t dev = ch->dev;
1802 uint32_t sstatus;
1803 int ccs;
1804 int i;
1805
1806 /* Check for stale timeout. */
1807 if (slot->state < AHCI_SLOT_RUNNING)
1808 return;
1809
1810 /* Check if slot was not being executed last time we checked. */
1811 if (slot->state < AHCI_SLOT_EXECUTING) {
1812 /* Check if slot started executing. */
1813 sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
1814 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
1815 >> AHCI_P_CMD_CCS_SHIFT;
1816 if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
1817 ch->fbs_enabled || ch->wrongccs)
1818 slot->state = AHCI_SLOT_EXECUTING;
1819 else if ((ch->rslots & (1 << ccs)) == 0) {
1820 ch->wrongccs = 1;
1821 slot->state = AHCI_SLOT_EXECUTING;
1822 }
1823
1824 callout_reset_sbt(&slot->timeout,
1825 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
1826 (timeout_t*)ahci_timeout, slot, 0);
1827 return;
1828 }
1829
1830 device_printf(dev, "Timeout on slot %d port %d\n",
1831 slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
1832 device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
1833 "serr %08x cmd %08x\n",
1834 ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
1835 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
1836 ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
1837 ATA_INL(ch->r_mem, AHCI_P_CMD));
1838
1839 /* Handle frozen command. */
1840 if (ch->frozen) {
1841 union ccb *fccb = ch->frozen;
1842 ch->frozen = NULL;
1843 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1844 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1845 xpt_freeze_devq(fccb->ccb_h.path, 1);
1846 fccb->ccb_h.status |= CAM_DEV_QFRZN;
1847 }
1848 ahci_done(ch, fccb);
1849 }
1850 if (!ch->fbs_enabled && !ch->wrongccs) {
1851 /* Without FBS we know the real timeout source. */
1852 ch->fatalerr = 1;
1853 /* Handle command with timeout. */
1854 ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
1855 /* Handle the rest of the commands. */
1856 for (i = 0; i < ch->numslots; i++) {
1857 /* Do we have a running request on slot? */
1858 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
1859 continue;
1860 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
1861 }
1862 } else {
1863 /* With FBS we wait for the other commands to time out and pray. */
1864 if (ch->toslots == 0)
1865 xpt_freeze_simq(ch->sim, 1);
1866 ch->toslots |= (1 << slot->slot);
1867 if ((ch->rslots & ~ch->toslots) == 0)
1868 ahci_process_timeout(ch);
1869 else
1870 device_printf(dev, " ... waiting for slots %08x\n",
1871 ch->rslots & ~ch->toslots);
1872 }
1873 }
1874
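/*
 * Common completion path for a slot.  Syncs and unloads the DMA maps,
 * fetches the result task file from the received FIS area (status and
 * error come from PxTFD when FBS is disabled), computes the residual
 * from the command list byte count, translates the ahci_err_type into a
 * CAM status and frees the slot.  It also chains multi-step operations:
 * the second phase of a soft reset, READ LOG / REQUEST SENSE recovery,
 * and a port reset or reinit once the port becomes idle after an error.
 */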
1875 /* Must be called with channel locked. */
1876 static void
1877 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
1878 {
1879 struct ahci_channel *ch = slot->ch;
1880 union ccb *ccb = slot->ccb;
1881 struct ahci_cmd_list *clp;
1882 int lastto;
1883 uint32_t sig;
1884
1885 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
1886 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1887 clp = (struct ahci_cmd_list *)
1888 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
1889 /* Read the result registers into the result structure.
1890 * They may be incorrect if several commands finished at the same time,
1891 * so read them only when we are sure or have to.
1892 */
1893 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1894 struct ata_res *res = &ccb->ataio.res;
1895
1896 if ((et == AHCI_ERR_TFE) ||
1897 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
1898 u_int8_t *fis = ch->dma.rfis + 0x40;
1899
1900 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
1901 BUS_DMASYNC_POSTREAD);
1902 if (ch->fbs_enabled) {
1903 fis += ccb->ccb_h.target_id * 256;
1904 res->status = fis[2];
1905 res->error = fis[3];
1906 } else {
1907 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);
1908
1909 res->status = tfd;
1910 res->error = tfd >> 8;
1911 }
1912 res->lba_low = fis[4];
1913 res->lba_mid = fis[5];
1914 res->lba_high = fis[6];
1915 res->device = fis[7];
1916 res->lba_low_exp = fis[8];
1917 res->lba_mid_exp = fis[9];
1918 res->lba_high_exp = fis[10];
1919 res->sector_count = fis[12];
1920 res->sector_count_exp = fis[13];
1921
1922 /*
1923 * Some weird controllers do not return signature in
1924 * FIS receive area. Read it from PxSIG register.
1925 */
1926 if ((ch->quirks & AHCI_Q_ALTSIG) &&
1927 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1928 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
1929 sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
1930 res->lba_high = sig >> 24;
1931 res->lba_mid = sig >> 16;
1932 res->lba_low = sig >> 8;
1933 res->sector_count = sig;
1934 }
1935 } else
1936 bzero(res, sizeof(*res));
1937 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
1938 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1939 (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
1940 ccb->ataio.resid =
1941 ccb->ataio.dxfer_len - le32toh(clp->bytecount);
1942 }
1943 } else {
1944 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1945 (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
1946 ccb->csio.resid =
1947 ccb->csio.dxfer_len - le32toh(clp->bytecount);
1948 }
1949 }
1950 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1951 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1952 (ccb->ccb_h.flags & CAM_DIR_IN) ?
1953 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1954 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
1955 }
1956 if (et != AHCI_ERR_NONE)
1957 ch->eslots |= (1 << slot->slot);
1958 /* In case of error, freeze device for proper recovery. */
1959 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
1960 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1961 xpt_freeze_devq(ccb->ccb_h.path, 1);
1962 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1963 }
1964 /* Set proper result status. */
1965 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1966 switch (et) {
1967 case AHCI_ERR_NONE:
1968 ccb->ccb_h.status |= CAM_REQ_CMP;
1969 if (ccb->ccb_h.func_code == XPT_SCSI_IO)
1970 ccb->csio.scsi_status = SCSI_STATUS_OK;
1971 break;
1972 case AHCI_ERR_INVALID:
1973 ch->fatalerr = 1;
1974 ccb->ccb_h.status |= CAM_REQ_INVALID;
1975 break;
1976 case AHCI_ERR_INNOCENT:
1977 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1978 break;
1979 case AHCI_ERR_TFE:
1980 case AHCI_ERR_NCQ:
1981 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1982 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1983 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1984 } else {
1985 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1986 }
1987 break;
1988 case AHCI_ERR_SATA:
1989 ch->fatalerr = 1;
1990 if (!ch->recoverycmd) {
1991 xpt_freeze_simq(ch->sim, 1);
1992 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1993 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1994 }
1995 ccb->ccb_h.status |= CAM_UNCOR_PARITY;
1996 break;
1997 case AHCI_ERR_TIMEOUT:
1998 if (!ch->recoverycmd) {
1999 xpt_freeze_simq(ch->sim, 1);
2000 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2001 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2002 }
2003 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
2004 break;
2005 default:
2006 ch->fatalerr = 1;
2007 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
2008 }
2009 /* Free slot. */
2010 ch->oslots &= ~(1 << slot->slot);
2011 ch->rslots &= ~(1 << slot->slot);
2012 ch->aslots &= ~(1 << slot->slot);
2013 slot->state = AHCI_SLOT_EMPTY;
2014 slot->ccb = NULL;
2015 /* Update channel stats. */
2016 ch->numrslots--;
2017 ch->numrslotspd[ccb->ccb_h.target_id]--;
2018 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
2019 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
2020 ch->numtslots--;
2021 ch->numtslotspd[ccb->ccb_h.target_id]--;
2022 }
2023 /* Cancel timeout state if request completed normally. */
2024 if (et != AHCI_ERR_TIMEOUT) {
2025 lastto = (ch->toslots == (1 << slot->slot));
2026 ch->toslots &= ~(1 << slot->slot);
2027 if (lastto)
2028 xpt_release_simq(ch->sim, TRUE);
2029 }
2030 /* If it was the first request of a reset sequence and there was no error,
2031 * proceed to the second request. */
2032 if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
2033 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
2034 (ccb->ataio.cmd.control & ATA_A_RESET) &&
2035 et == AHCI_ERR_NONE) {
2036 ccb->ataio.cmd.control &= ~ATA_A_RESET;
2037 ahci_begin_transaction(ch, ccb);
2038 return;
2039 }
2040 /* If it was our READ LOG command - process it. */
2041 if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
2042 ahci_process_read_log(ch, ccb);
2043 /* If it was our REQUEST SENSE command - process it. */
2044 } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
2045 ahci_process_request_sense(ch, ccb);
2046 /* If it was NCQ or ATAPI command error, put result on hold. */
2047 } else if (et == AHCI_ERR_NCQ ||
2048 ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
2049 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
2050 ch->hold[slot->slot] = ccb;
2051 ch->numhslots++;
2052 } else
2053 ahci_done(ch, ccb);
2054 /* If we have no other active commands, ... */
2055 if (ch->rslots == 0) {
2056 /* if there were timeouts or a fatal error, reset the port. */
2057 if (ch->toslots != 0 || ch->fatalerr) {
2058 ahci_reset(ch);
2059 } else {
2060 /* if we have slots in error, we can reinit the port. */
2061 if (ch->eslots != 0) {
2062 ahci_stop(ch);
2063 ahci_clo(ch);
2064 ahci_start(ch, 1);
2065 }
2066 /* if there are commands on hold, we can do READ LOG. */
2067 if (!ch->recoverycmd && ch->numhslots)
2068 ahci_issue_recovery(ch);
2069 }
2070 /* If all the remaining commands have timed out, give them a chance. */
2071 } else if ((ch->rslots & ~ch->toslots) == 0 &&
2072 et != AHCI_ERR_TIMEOUT)
2073 ahci_rearm_timeout(ch);
2074 /* Unfreeze frozen command. */
2075 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
2076 union ccb *fccb = ch->frozen;
2077 ch->frozen = NULL;
2078 ahci_begin_transaction(ch, fccb);
2079 xpt_release_simq(ch->sim, TRUE);
2080 }
2081 /* Start PM timer. */
2082 if (ch->numrslots == 0 && ch->pm_level > 3 &&
2083 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
2084 callout_schedule(&ch->pm_timer,
2085 (ch->pm_level == 4) ? hz / 1000 : hz / 8);
2086 }
2087 }
2088
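/*
 * Build and issue a recovery command, reusing the header of one of the
 * held CCBs: READ LOG EXT of log page 0x10 (the NCQ command error log)
 * for ATA devices, or REQUEST SENSE for ATAPI.  The SIM queue is frozen
 * for the duration; if resources cannot be allocated, all held commands
 * are failed and the port is reset instead.
 */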
2089 static void
2090 ahci_issue_recovery(struct ahci_channel *ch)
2091 {
2092 union ccb *ccb;
2093 struct ccb_ataio *ataio;
2094 struct ccb_scsiio *csio;
2095 int i;
2096
2097 /* Find some held command. */
2098 for (i = 0; i < ch->numslots; i++) {
2099 if (ch->hold[i])
2100 break;
2101 }
2102 ccb = xpt_alloc_ccb_nowait();
2103 if (ccb == NULL) {
2104 device_printf(ch->dev, "Unable to allocate recovery command\n");
2105 completeall:
2106 /* We can't do anything -- complete held commands. */
2107 for (i = 0; i < ch->numslots; i++) {
2108 if (ch->hold[i] == NULL)
2109 continue;
2110 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2111 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
2112 ahci_done(ch, ch->hold[i]);
2113 ch->hold[i] = NULL;
2114 ch->numhslots--;
2115 }
2116 ahci_reset(ch);
2117 return;
2118 }
2119 ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */
2120 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2121 /* READ LOG */
2122 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
2123 ccb->ccb_h.func_code = XPT_ATA_IO;
2124 ccb->ccb_h.flags = CAM_DIR_IN;
2125 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
2126 ataio = &ccb->ataio;
2127 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT);
2128 if (ataio->data_ptr == NULL) {
2129 xpt_free_ccb(ccb);
2130 device_printf(ch->dev,
2131 "Unable to allocate memory for READ LOG command\n");
2132 goto completeall;
2133 }
2134 ataio->dxfer_len = 512;
2135 bzero(&ataio->cmd, sizeof(ataio->cmd));
2136 ataio->cmd.flags = CAM_ATAIO_48BIT;
2137 ataio->cmd.command = 0x2F; /* READ LOG EXT */
2138 ataio->cmd.sector_count = 1;
2139 ataio->cmd.sector_count_exp = 0;
2140 ataio->cmd.lba_low = 0x10;
2141 ataio->cmd.lba_mid = 0;
2142 ataio->cmd.lba_mid_exp = 0;
2143 } else {
2144 /* REQUEST SENSE */
2145 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
2146 ccb->ccb_h.recovery_slot = i;
2147 ccb->ccb_h.func_code = XPT_SCSI_IO;
2148 ccb->ccb_h.flags = CAM_DIR_IN;
2149 ccb->ccb_h.status = 0;
2150 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */
2151 csio = &ccb->csio;
2152 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
2153 csio->dxfer_len = ch->hold[i]->csio.sense_len;
2154 csio->cdb_len = 6;
2155 bzero(&csio->cdb_io, sizeof(csio->cdb_io));
2156 csio->cdb_io.cdb_bytes[0] = 0x03;
2157 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
2158 }
2159 /* Freeze SIM while doing recovery. */
2160 ch->recoverycmd = 1;
2161 xpt_freeze_simq(ch->sim, 1);
2162 ahci_begin_transaction(ch, ccb);
2163 }
2164
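/*
 * Complete the READ LOG EXT recovery command.  On success the first
 * byte of the log has bit 7 clear and its low five bits hold the tag of
 * the command that failed; that command gets its task file filled from
 * the log and the other held ATA commands are requeued.  If the log
 * could not be read, all held ATA commands are completed as they are.
 */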
2165 static void
2166 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb)
2167 {
2168 uint8_t *data;
2169 struct ata_res *res;
2170 int i;
2171
2172 ch->recoverycmd = 0;
2173
2174 data = ccb->ataio.data_ptr;
2175 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2176 (data[0] & 0x80) == 0) {
2177 for (i = 0; i < ch->numslots; i++) {
2178 if (!ch->hold[i])
2179 continue;
2180 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2181 continue;
2182 if ((data[0] & 0x1F) == i) {
2183 res = &ch->hold[i]->ataio.res;
2184 res->status = data[2];
2185 res->error = data[3];
2186 res->lba_low = data[4];
2187 res->lba_mid = data[5];
2188 res->lba_high = data[6];
2189 res->device = data[7];
2190 res->lba_low_exp = data[8];
2191 res->lba_mid_exp = data[9];
2192 res->lba_high_exp = data[10];
2193 res->sector_count = data[12];
2194 res->sector_count_exp = data[13];
2195 } else {
2196 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2197 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
2198 }
2199 ahci_done(ch, ch->hold[i]);
2200 ch->hold[i] = NULL;
2201 ch->numhslots--;
2202 }
2203 } else {
2204 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
2205 device_printf(ch->dev, "Error while READ LOG EXT\n");
2206 else if ((data[0] & 0x80) == 0) {
2207 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n");
2208 }
2209 for (i = 0; i < ch->numslots; i++) {
2210 if (!ch->hold[i])
2211 continue;
2212 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
2213 continue;
2214 ahci_done(ch, ch->hold[i]);
2215 ch->hold[i] = NULL;
2216 ch->numhslots--;
2217 }
2218 }
2219 free(ccb->ataio.data_ptr, M_AHCI);
2220 xpt_free_ccb(ccb);
2221 xpt_release_simq(ch->sim, TRUE);
2222 }
2223
2224 static void
2225 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb)
2226 {
2227 int i;
2228
2229 ch->recoverycmd = 0;
2230
2231 i = ccb->ccb_h.recovery_slot;
2232 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
2233 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID;
2234 } else {
2235 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
2236 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2237 }
2238 ahci_done(ch, ch->hold[i]);
2239 ch->hold[i] = NULL;
2240 ch->numhslots--;
2241 xpt_free_ccb(ccb);
2242 xpt_release_simq(ch->sim, TRUE);
2243 }
2244
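/*
 * (Re)start command processing on the port: clear PxSERR and PxIS,
 * program FIS-based switching when the port supports it, the caller
 * requests it and a port multiplier is reported present, then set
 * PxCMD.ST (with PxCMD.PMA reflecting port multiplier presence).
 */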
2245 static void
2246 ahci_start(struct ahci_channel *ch, int fbs)
2247 {
2248 u_int32_t cmd;
2249
2250 /* Run the channel start callback, if any. */
2251 if (ch->start)
2252 ch->start(ch);
2253
2254 /* Clear SATA error register */
2255 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF);
2256 /* Clear any interrupts pending on this channel */
2257 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF);
2258 /* Configure FIS-based switching if supported. */
2259 if (ch->chcaps & AHCI_P_CMD_FBSCP) {
2260 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0;
2261 ATA_OUTL(ch->r_mem, AHCI_P_FBS,
2262 ch->fbs_enabled ? AHCI_P_FBS_EN : 0);
2263 }
2264 /* Start operations on this channel */
2265 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2266 cmd &= ~AHCI_P_CMD_PMA;
2267 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST |
2268 (ch->pm_present ? AHCI_P_CMD_PMA : 0));
2269 }
2270
2271 static void
2272 ahci_stop(struct ahci_channel *ch)
2273 {
2274 u_int32_t cmd;
2275 int timeout;
2276
2277 /* Kill all activity on this channel */
2278 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2279 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST);
2280 /* Wait for activity to stop. */
2281 timeout = 0;
2282 do {
2283 DELAY(10);
2284 if (timeout++ > 50000) {
2285 device_printf(ch->dev, "stopping AHCI engine failed\n");
2286 break;
2287 }
2288 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR);
2289 ch->eslots = 0;
2290 }
2291
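/*
 * Issue a Command List Override (PxCMD.CLO) if the controller supports
 * it (CAP.SCLO).  Per the AHCI spec this clears the BSY and DRQ bits in
 * PxTFD so that new commands can be issued to a device that was left
 * busy, e.g. after a failed soft reset.  Waits up to ~500ms for the bit
 * to clear itself.
 */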
2292 static void
2293 ahci_clo(struct ahci_channel *ch)
2294 {
2295 u_int32_t cmd;
2296 int timeout;
2297
2298 /* Issue Command List Override if supported */
2299 if (ch->caps & AHCI_CAP_SCLO) {
2300 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2301 cmd |= AHCI_P_CMD_CLO;
2302 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
2303 timeout = 0;
2304 do {
2305 DELAY(10);
2306 if (timeout++ > 50000) {
2307 device_printf(ch->dev, "executing CLO failed\n");
2308 break;
2309 }
2310 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO);
2311 }
2312 }
2313
2314 static void
2315 ahci_stop_fr(struct ahci_channel *ch)
2316 {
2317 u_int32_t cmd;
2318 int timeout;
2319
2320 /* Kill all FIS reception on this channel */
2321 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2322 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);
2323 /* Wait for FIS reception to stop. */
2324 timeout = 0;
2325 do {
2326 DELAY(10);
2327 if (timeout++ > 50000) {
2328 device_printf(ch->dev, "stopping AHCI FR engine failed\n");
2329 break;
2330 }
2331 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR);
2332 }
2333
2334 static void
2335 ahci_start_fr(struct ahci_channel *ch)
2336 {
2337 u_int32_t cmd;
2338
2339 /* Start FIS reception on this channel */
2340 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
2341 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE);
2342 }
2343
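/*
 * Poll PxTFD once a millisecond until both BSY and DRQ are clear.
 * 't' is the allowed wait in milliseconds ('t' <= 0 gives up almost
 * immediately, silently when 't' is 0), while 't0' is the time already
 * spent in previous attempts and is only added to the printed messages.
 */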
2344 static int
2345 ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
2346 {
2347 int timeout = 0;
2348 uint32_t val;
2349
2350 while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) &
2351 (ATA_S_BUSY | ATA_S_DRQ)) {
2352 if (timeout > t) {
2353 if (t != 0) {
2354 device_printf(ch->dev,
2355 "AHCI reset: device not ready after %dms "
2356 "(tfd = %08x)\n",
2357 MAX(t, 0) + t0, val);
2358 }
2359 return (EBUSY);
2360 }
2361 DELAY(1000);
2362 timeout++;
2363 }
2364 if (bootverbose)
2365 device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
2366 timeout + t0);
2367 return (0);
2368 }
2369
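/*
 * Reset timeout callout.  ch->resetting is set to 310 by ahci_reset()
 * and decremented here every 100ms, so the device gets about 31 seconds
 * to clear BSY/DRQ without blocking the thread; on the last tick a CLO
 * is issued instead and the port is started anyway.
 */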
2370 static void
2371 ahci_reset_to(void *arg)
2372 {
2373 struct ahci_channel *ch = arg;
2374
2375 if (ch->resetting == 0)
2376 return;
2377 ch->resetting--;
2378 if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
2379 (310 - ch->resetting) * 100) == 0) {
2380 ch->resetting = 0;
2381 ahci_start(ch, 1);
2382 xpt_release_simq(ch->sim, TRUE);
2383 return;
2384 }
2385 if (ch->resetting == 0) {
2386 ahci_clo(ch);
2387 ahci_start(ch, 1);
2388 xpt_release_simq(ch->sim, TRUE);
2389 return;
2390 }
2391 callout_schedule(&ch->reset_timer, hz / 10);
2392 }
2393
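/*
 * Full port reset: cancel any reset already in progress, requeue the
 * frozen CCB, stop the command engine, complete all running commands as
 * innocent, finish the held ones, notify CAM with AC_BUS_RESET and
 * reset the PHY.  If a device is found, port interrupts are re-enabled
 * and the engine is restarted, either immediately (waiting synchronously
 * while dumping) or once the reset_timer state machine reports the
 * device ready.
 */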
2394 static void
2395 ahci_reset(struct ahci_channel *ch)
2396 {
2397 struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
2398 int i;
2399
2400 xpt_freeze_simq(ch->sim, 1);
2401 if (bootverbose)
2402 device_printf(ch->dev, "AHCI reset...\n");
2403 /* Forget about previous reset. */
2404 if (ch->resetting) {
2405 ch->resetting = 0;
2406 callout_stop(&ch->reset_timer);
2407 xpt_release_simq(ch->sim, TRUE);
2408 }
2409 /* Requeue the frozen command. */
2410 if (ch->frozen) {
2411 union ccb *fccb = ch->frozen;
2412 ch->frozen = NULL;
2413 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
2414 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
2415 xpt_freeze_devq(fccb->ccb_h.path, 1);
2416 fccb->ccb_h.status |= CAM_DEV_QFRZN;
2417 }
2418 ahci_done(ch, fccb);
2419 }
2420 /* Kill the engine and requeue all running commands. */
2421 ahci_stop(ch);
2422 for (i = 0; i < ch->numslots; i++) {
2423 /* Do we have a running request on slot? */
2424 if (ch->slot[i].state < AHCI_SLOT_RUNNING)
2425 continue;
2426 /* XXX: Commands in loading state. */
2427 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
2428 }
2429 for (i = 0; i < ch->numslots; i++) {
2430 if (!ch->hold[i])
2431 continue;
2432 ahci_done(ch, ch->hold[i]);
2433 ch->hold[i] = NULL;
2434 ch->numhslots--;
2435 }
2436 if (ch->toslots != 0)
2437 xpt_release_simq(ch->sim, TRUE);
2438 ch->eslots = 0;
2439 ch->toslots = 0;
2440 ch->wrongccs = 0;
2441 ch->fatalerr = 0;
2442 /* Tell the XPT about the event */
2443 xpt_async(AC_BUS_RESET, ch->path, NULL);
2444 /* Disable port interrupts */
2445 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
2446 /* Reset and reconnect the PHY. */
2447 if (!ahci_sata_phy_reset(ch)) {
2448 if (bootverbose)
2449 device_printf(ch->dev,
2450 "AHCI reset: device not found\n");
2451 ch->devices = 0;
2452 /* Enable wanted port interrupts */
2453 ATA_OUTL(ch->r_mem, AHCI_P_IE,
2454 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2455 AHCI_P_IX_PRC | AHCI_P_IX_PC));
2456 xpt_release_simq(ch->sim, TRUE);
2457 return;
2458 }
2459 if (bootverbose)
2460 device_printf(ch->dev, "AHCI reset: device found\n");
2461 /* Wait for the busy status to clear. */
2462 if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
2463 if (dumping)
2464 ahci_clo(ch);
2465 else
2466 ch->resetting = 310;
2467 }
2468 ch->devices = 1;
2469 /* Enable wanted port interrupts */
2470 ATA_OUTL(ch->r_mem, AHCI_P_IE,
2471 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
2472 AHCI_P_IX_TFE | AHCI_P_IX_HBF |
2473 AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
2474 ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
2475 AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
2476 AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
2477 if (ch->resetting)
2478 callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
2479 else {
2480 ahci_start(ch, 1);
2481 xpt_release_simq(ch->sim, TRUE);
2482 }
2483 }
2484
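/*
 * Build the 20-byte host-to-device register FIS in the command table:
 * byte 0 is the FIS type (0x27), byte 1 carries the PM port number plus
 * the C bit (0x80) for command FISes.  ATAPI commands are sent as an
 * ATA PACKET command with the CDB copied into ctp->acmd; NCQ commands
 * carry their tag in the sector count field (tag << 3); the optional
 * AUX field goes into bytes 16-19.
 */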
2485 static int
2486 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
2487 {
2488 u_int8_t *fis = &ctp->cfis[0];
2489
2490 bzero(fis, 20);
2491 fis[0] = 0x27; /* host to device */
2492 fis[1] = (ccb->ccb_h.target_id & 0x0f);
2493 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2494 fis[1] |= 0x80;
2495 fis[2] = ATA_PACKET_CMD;
2496 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
2497 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
2498 fis[3] = ATA_F_DMA;
2499 else {
2500 fis[5] = ccb->csio.dxfer_len;
2501 fis[6] = ccb->csio.dxfer_len >> 8;
2502 }
2503 fis[7] = ATA_D_LBA;
2504 fis[15] = ATA_A_4BIT;
2505 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2506 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
2507 ctp->acmd, ccb->csio.cdb_len);
2508 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len);
2509 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) {
2510 fis[1] |= 0x80;
2511 fis[2] = ccb->ataio.cmd.command;
2512 fis[3] = ccb->ataio.cmd.features;
2513 fis[4] = ccb->ataio.cmd.lba_low;
2514 fis[5] = ccb->ataio.cmd.lba_mid;
2515 fis[6] = ccb->ataio.cmd.lba_high;
2516 fis[7] = ccb->ataio.cmd.device;
2517 fis[8] = ccb->ataio.cmd.lba_low_exp;
2518 fis[9] = ccb->ataio.cmd.lba_mid_exp;
2519 fis[10] = ccb->ataio.cmd.lba_high_exp;
2520 fis[11] = ccb->ataio.cmd.features_exp;
2521 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
2522 fis[12] = tag << 3;
2523 } else {
2524 fis[12] = ccb->ataio.cmd.sector_count;
2525 }
2526 fis[13] = ccb->ataio.cmd.sector_count_exp;
2527 fis[15] = ATA_A_4BIT;
2528 } else {
2529 fis[15] = ccb->ataio.cmd.control;
2530 }
2531 if (ccb->ataio.ata_flags & ATA_FLAG_AUX) {
2532 fis[16] = ccb->ataio.aux & 0xff;
2533 fis[17] = (ccb->ataio.aux >> 8) & 0xff;
2534 fis[18] = (ccb->ataio.aux >> 16) & 0xff;
2535 fis[19] = (ccb->ataio.aux >> 24) & 0xff;
2536 }
2537 return (20);
2538 }
2539
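/*
 * Poll PxSSTS for up to 100ms, waiting for the PHY to report an
 * established device connection (DET online, a negotiated speed and the
 * interface active).  Gives up early if the PHY reports offline, or if
 * no device presence is seen within the first 10ms.
 */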
2540 static int
2541 ahci_sata_connect(struct ahci_channel *ch)
2542 {
2543 u_int32_t status;
2544 int timeout, found = 0;
2545
2546 /* Wait up to 100ms for the PHY to report a good connection. */
2547 for (timeout = 0; timeout < 1000 ; timeout++) {
2548 status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
2549 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
2550 found = 1;
2551 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) &&
2552 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) &&
2553 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE))
2554 break;
2555 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) {
2556 if (bootverbose) {
2557 device_printf(ch->dev, "SATA offline status=%08x\n",
2558 status);
2559 }
2560 return (0);
2561 }
2562 if (found == 0 && timeout >= 100)
2563 break;
2564 DELAY(100);
2565 }
2566 if (timeout >= 1000 || !found) {
2567 if (bootverbose) {
2568 device_printf(ch->dev,
2569 "SATA connect timeout time=%dus status=%08x\n",
2570 timeout * 100, status);
2571 }
2572 return (0);
2573 }
2574 if (bootverbose) {
2575 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n",
2576 timeout * 100, status);
2577 }
2578 /* Clear SATA error register */
2579 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff);
2580 return (1);
2581 }
2582
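/*
 * Perform a SATA COMRESET: spin the port up again if it was left in
 * listening mode, write DET=1 together with the configured speed limit
 * to PxSCTL, wait 1ms, then return DET to idle and wait for the link.
 * If no device appears, the port is spun down again when staggered
 * spin-up is supported, or the PHY is disabled when power management
 * is enabled.
 */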
2583 static int
2584 ahci_sata_phy_reset(struct ahci_channel *ch)
2585 {
2586 int sata_rev;
2587 uint32_t val, detval;
2588
2589 if (ch->listening) {
2590 val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2591 val |= AHCI_P_CMD_SUD;
2592 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2593 ch->listening = 0;
2594 }
2595 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
2596 if (sata_rev == 1)
2597 val = ATA_SC_SPD_SPEED_GEN1;
2598 else if (sata_rev == 2)
2599 val = ATA_SC_SPD_SPEED_GEN2;
2600 else if (sata_rev == 3)
2601 val = ATA_SC_SPD_SPEED_GEN3;
2602 else
2603 val = 0;
2604 detval = ahci_ch_detval(ch, ATA_SC_DET_RESET);
2605 ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2606 detval | val |
2607 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER);
2608 DELAY(1000);
2609 detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE);
2610 ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
2611 detval | val | ((ch->pm_level > 0) ? 0 :
2612 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)));
2613 if (!ahci_sata_connect(ch)) {
2614 if (ch->caps & AHCI_CAP_SSS) {
2615 val = ATA_INL(ch->r_mem, AHCI_P_CMD);
2616 val &= ~AHCI_P_CMD_SUD;
2617 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
2618 ch->listening = 1;
2619 } else if (ch->pm_level > 0)
2620 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
2621 return (0);
2622 }
2623 return (1);
2624 }
2625
2626 static int
2627 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb)
2628 {
2629
2630 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) {
2631 ccb->ccb_h.status = CAM_TID_INVALID;
2632 ahci_done(ch, ccb);
2633 return (-1);
2634 }
2635 if (ccb->ccb_h.target_lun != 0) {
2636 ccb->ccb_h.status = CAM_LUN_INVALID;
2637 ahci_done(ch, ccb);
2638 return (-1);
2639 }
2640 return (0);
2641 }
2642
2643 static void
2644 ahciaction(struct cam_sim *sim, union ccb *ccb)
2645 {
2646 struct ahci_channel *ch;
2647
2648 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n",
2649 ccb->ccb_h.func_code));
2650
2651 ch = (struct ahci_channel *)cam_sim_softc(sim);
2652 switch (ccb->ccb_h.func_code) {
2653 /* Common cases first */
2654 case XPT_ATA_IO: /* Execute the requested I/O operation */
2655 case XPT_SCSI_IO:
2656 if (ahci_check_ids(ch, ccb))
2657 return;
2658 if (ch->devices == 0 ||
2659 (ch->pm_present == 0 &&
2660 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
2661 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2662 break;
2663 }
2664 ccb->ccb_h.recovery_type = RECOVERY_NONE;
2665 /* Check for command collision. */
2666 if (ahci_check_collision(ch, ccb)) {
2667 /* Freeze command. */
2668 ch->frozen = ccb;
2669 /* We have only one frozen slot, so freeze simq also. */
2670 xpt_freeze_simq(ch->sim, 1);
2671 return;
2672 }
2673 ahci_begin_transaction(ch, ccb);
2674 return;
2675 case XPT_ABORT: /* Abort the specified CCB */
2676 /* XXX Implement */
2677 ccb->ccb_h.status = CAM_REQ_INVALID;
2678 break;
2679 case XPT_SET_TRAN_SETTINGS:
2680 {
2681 struct ccb_trans_settings *cts = &ccb->cts;
2682 struct ahci_device *d;
2683
2684 if (ahci_check_ids(ch, ccb))
2685 return;
2686 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2687 d = &ch->curr[ccb->ccb_h.target_id];
2688 else
2689 d = &ch->user[ccb->ccb_h.target_id];
2690 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
2691 d->revision = cts->xport_specific.sata.revision;
2692 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
2693 d->mode = cts->xport_specific.sata.mode;
2694 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
2695 d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
2696 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
2697 d->tags = min(ch->numslots, cts->xport_specific.sata.tags);
2698 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
2699 ch->pm_present = cts->xport_specific.sata.pm_present;
2700 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
2701 d->atapi = cts->xport_specific.sata.atapi;
2702 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
2703 d->caps = cts->xport_specific.sata.caps;
2704 ccb->ccb_h.status = CAM_REQ_CMP;
2705 break;
2706 }
2707 case XPT_GET_TRAN_SETTINGS:
2708 /* Get default/user set transfer settings for the target */
2709 {
2710 struct ccb_trans_settings *cts = &ccb->cts;
2711 struct ahci_device *d;
2712 uint32_t status;
2713
2714 if (ahci_check_ids(ch, ccb))
2715 return;
2716 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2717 d = &ch->curr[ccb->ccb_h.target_id];
2718 else
2719 d = &ch->user[ccb->ccb_h.target_id];
2720 cts->protocol = PROTO_UNSPECIFIED;
2721 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
2722 cts->transport = XPORT_SATA;
2723 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
2724 cts->proto_specific.valid = 0;
2725 cts->xport_specific.sata.valid = 0;
2726 if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
2727 (ccb->ccb_h.target_id == 15 ||
2728 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
2729 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK;
2730 if (status & 0x0f0) {
2731 cts->xport_specific.sata.revision =
2732 (status & 0x0f0) >> 4;
2733 cts->xport_specific.sata.valid |=
2734 CTS_SATA_VALID_REVISION;
2735 }
2736 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
2737 if (ch->pm_level) {
2738 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC))
2739 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
2740 if (ch->caps2 & AHCI_CAP2_APST)
2741 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST;
2742 }
2743 if ((ch->caps & AHCI_CAP_SNCQ) &&
2744 (ch->quirks & AHCI_Q_NOAA) == 0)
2745 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA;
2746 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN;
2747 cts->xport_specific.sata.caps &=
2748 ch->user[ccb->ccb_h.target_id].caps;
2749 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2750 } else {
2751 cts->xport_specific.sata.revision = d->revision;
2752 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
2753 cts->xport_specific.sata.caps = d->caps;
2754 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2755 }
2756 cts->xport_specific.sata.mode = d->mode;
2757 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
2758 cts->xport_specific.sata.bytecount = d->bytecount;
2759 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
2760 cts->xport_specific.sata.pm_present = ch->pm_present;
2761 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
2762 cts->xport_specific.sata.tags = d->tags;
2763 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
2764 cts->xport_specific.sata.atapi = d->atapi;
2765 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
2766 ccb->ccb_h.status = CAM_REQ_CMP;
2767 break;
2768 }
2769 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2770 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2771 ahci_reset(ch);
2772 ccb->ccb_h.status = CAM_REQ_CMP;
2773 break;
2774 case XPT_TERM_IO: /* Terminate the I/O process */
2775 /* XXX Implement */
2776 ccb->ccb_h.status = CAM_REQ_INVALID;
2777 break;
2778 case XPT_PATH_INQ: /* Path routing inquiry */
2779 {
2780 struct ccb_pathinq *cpi = &ccb->cpi;
2781
2782 cpi->version_num = 1; /* XXX??? */
2783 cpi->hba_inquiry = PI_SDTR_ABLE;
2784 if (ch->caps & AHCI_CAP_SNCQ)
2785 cpi->hba_inquiry |= PI_TAG_ABLE;
2786 if (ch->caps & AHCI_CAP_SPM)
2787 cpi->hba_inquiry |= PI_SATAPM;
2788 cpi->target_sprt = 0;
2789 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
2790 if ((ch->quirks & AHCI_Q_NOAUX) == 0)
2791 cpi->hba_misc |= PIM_ATA_EXT;
2792 cpi->hba_eng_cnt = 0;
2793 if (ch->caps & AHCI_CAP_SPM)
2794 cpi->max_target = 15;
2795 else
2796 cpi->max_target = 0;
2797 cpi->max_lun = 0;
2798 cpi->initiator_id = 0;
2799 cpi->bus_id = cam_sim_bus(sim);
2800 cpi->base_transfer_speed = 150000;
2801 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2802 strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
2803 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2804 cpi->unit_number = cam_sim_unit(sim);
2805 cpi->transport = XPORT_SATA;
2806 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
2807 cpi->protocol = PROTO_ATA;
2808 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
2809 cpi->maxio = MAXPHYS;
2810 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
2811 if (ch->quirks & AHCI_Q_MAXIO_64K)
2812 cpi->maxio = min(cpi->maxio, 128 * 512);
2813 cpi->hba_vendor = ch->vendorid;
2814 cpi->hba_device = ch->deviceid;
2815 cpi->hba_subvendor = ch->subvendorid;
2816 cpi->hba_subdevice = ch->subdeviceid;
2817 cpi->ccb_h.status = CAM_REQ_CMP;
2818 break;
2819 }
2820 default:
2821 ccb->ccb_h.status = CAM_REQ_INVALID;
2822 break;
2823 }
2824 ahci_done(ch, ccb);
2825 }
2826
2827 static void
2828 ahcipoll(struct cam_sim *sim)
2829 {
2830 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim);
2831 uint32_t istatus;
2832
2833 /* Read interrupt statuses and process if any. */
2834 istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
2835 if (istatus != 0)
2836 ahci_ch_intr_main(ch, istatus);
2837 if (ch->resetting != 0 &&
2838 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
2839 ch->resetpolldiv = 1000;
2840 ahci_reset_to(ch);
2841 }
2842 }
2843
2844 devclass_t ahci_devclass;
2845
2846 MODULE_VERSION(ahci, 1);
2847 MODULE_DEPEND(ahci, cam, 1, 1, 1);