1 /*-
2 * Copyright (c) 2006 Bernd Walter. All rights reserved.
3 * Copyright (c) 2006 M. Warner Losh. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bio.h>
32 #include <sys/bus.h>
33 #include <sys/conf.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/kthread.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/queue.h>
42 #include <sys/resource.h>
43 #include <sys/rman.h>
44 #include <sys/time.h>
45 #include <sys/timetc.h>
46 #include <sys/watchdog.h>
47
48 #include <machine/bus.h>
49 #include <machine/cpu.h>
50 #include <machine/cpufunc.h>
51 #include <machine/resource.h>
52 #include <machine/frame.h>
53 #include <machine/intr.h>
54 #include <arm/at91/at91rm92reg.h>
55 #include <arm/at91/at91var.h>
56 #include <arm/at91/at91_mcireg.h>
57 #include <arm/at91/at91_pdcreg.h>
58 #include <dev/mmc/bridge.h>
59 #include <dev/mmc/mmcreg.h>
60 #include <dev/mmc/mmcbrvar.h>
61
62 #include "mmcbr_if.h"
63
#define BBSZ	512	/* Bounce buffer size: one MMC/SD block (512 bytes) */

/*
 * Per-device state for the AT91 MCI host bridge.  The mutex protects the
 * request state machine (flags/req/curcmd) and bus_busy; one request is
 * processed at a time using a single DMA map.
 */
struct at91_mci_softc {
	void *intrhand;			/* Interrupt handle */
	device_t dev;
	int flags;			/* Request state machine flags */
#define CMD_STARTED	1		/* req->cmd has been issued */
#define STOP_STARTED	2		/* req->stop has been issued */
	struct resource *irq_res;	/* IRQ resource */
	struct resource *mem_res;	/* Memory resource */
	struct mtx sc_mtx;		/* Protects softc state */
	bus_dma_tag_t dmatag;		/* Tag for PDC data transfers */
	bus_dmamap_t map;		/* Single map; one transfer at a time */
	int mapped;			/* Non-zero while 'map' is loaded */
	struct mmc_host host;		/* Host state shared with mmc layer */
	int wire4;			/* Non-zero if 4-bit bus is wired up */
	int bus_busy;			/* Host claimed by an mmc client */
	struct mmc_request *req;	/* Request currently in progress */
	struct mmc_command *curcmd;	/* Command currently on the wire */
	char bounce_buffer[BBSZ];	/* Byte-swap staging area for writes */
};
85
86 static inline uint32_t
87 RD4(struct at91_mci_softc *sc, bus_size_t off)
88 {
89 return bus_read_4(sc->mem_res, off);
90 }
91
92 static inline void
93 WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val)
94 {
95 bus_write_4(sc->mem_res, off, val);
96 }
97
98 /* bus entry points */
99 static int at91_mci_probe(device_t dev);
100 static int at91_mci_attach(device_t dev);
101 static int at91_mci_detach(device_t dev);
102 static void at91_mci_intr(void *);
103
104 /* helper routines */
105 static int at91_mci_activate(device_t dev);
106 static void at91_mci_deactivate(device_t dev);
107
/* Softc lock helpers; the mutex serializes the request state machine. */
#define AT91_MCI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define AT91_MCI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define AT91_MCI_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "mci", MTX_DEF)
#define AT91_MCI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
#define AT91_MCI_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
116
/*
 * Quiesce the PDC (Peripheral DMA Controller) channels attached to the
 * MCI: disable both transfer directions, then clear every pointer and
 * counter register (current and next, RX and TX) so no stale transfer
 * can resume when the channels are re-enabled.
 */
static void
at91_mci_pdc_disable(struct at91_mci_softc *sc)
{
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	WR4(sc, PDC_RPR, 0);
	WR4(sc, PDC_RCR, 0);
	WR4(sc, PDC_RNPR, 0);
	WR4(sc, PDC_RNCR, 0);
	WR4(sc, PDC_TPR, 0);
	WR4(sc, PDC_TCR, 0);
	WR4(sc, PDC_TNPR, 0);
	WR4(sc, PDC_TNCR, 0);
}
130
/*
 * Bring the MCI controller to a known, enabled state: controller on,
 * all interrupts masked, a generous data timeout, and slot A selected
 * with a 1-bit bus.
 */
static void
at91_mci_init(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);

	WR4(sc, MCI_CR, MCI_CR_MCIEN);		/* Enable controller */
	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	/* Data timeout: multiplier 1M, cycles 1 (large timeout). */
	WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1);
	/* Mode register value taken verbatim from the Linux driver;
	 * encodes clock divider/block settings.  XXX GROSS HACK FROM LINUX */
	WR4(sc, MCI_MR, 0x834a);
	WR4(sc, MCI_SDCR, 0);			/* SLOT A, 1 bit bus */
}
142
/*
 * Shut the controller down: mask all interrupts, quiesce the PDC, then
 * disable the controller and assert software reset.
 */
static void
at91_mci_fini(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);

	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	at91_mci_pdc_disable(sc);
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* Put the device into reset */
}
152
/*
 * Newbus probe: this device is added explicitly by the at91 bus code,
 * so just set a description and claim it unconditionally.
 */
static int
at91_mci_probe(device_t dev)
{

	device_set_desc(dev, "MCI mmc/sd host bridge");
	return (0);
}
160
161 static int
162 at91_mci_attach(device_t dev)
163 {
164 struct at91_mci_softc *sc = device_get_softc(dev);
165 int err;
166 device_t child;
167
168 sc->dev = dev;
169 err = at91_mci_activate(dev);
170 if (err)
171 goto out;
172
173 AT91_MCI_LOCK_INIT(sc);
174
175 /*
176 * Allocate DMA tags and maps
177 */
178 err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
179 BUS_SPACE_MAXADDR, NULL, NULL, MAXPHYS, 1, MAXPHYS,
180 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dmatag);
181 if (err != 0)
182 goto out;
183
184 err = bus_dmamap_create(sc->dmatag, 0, &sc->map);
185 if (err != 0)
186 goto out;
187
188 at91_mci_fini(dev);
189 at91_mci_init(dev);
190
191 /*
192 * Activate the interrupt
193 */
194 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
195 at91_mci_intr, sc, &sc->intrhand);
196 if (err) {
197 AT91_MCI_LOCK_DESTROY(sc);
198 goto out;
199 }
200 sc->host.f_min = 375000;
201 sc->host.f_max = 30000000;
202 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
203 sc->host.caps = MMC_CAP_4_BIT_DATA;
204 child = device_add_child(dev, "mmc", 0);
205 device_set_ivars(dev, &sc->host);
206 err = bus_generic_attach(dev);
207 out:;
208 if (err)
209 at91_mci_deactivate(dev);
210 return (err);
211 }
212
/*
 * Newbus detach: shut the controller down and release resources, but
 * report EBUSY so the driver is never actually detached (no teardown of
 * in-flight requests is implemented).  XXX
 */
static int
at91_mci_detach(device_t dev)
{
	at91_mci_fini(dev);
	at91_mci_deactivate(dev);
	return (EBUSY); /* XXX */
}
220
221 static int
222 at91_mci_activate(device_t dev)
223 {
224 struct at91_mci_softc *sc;
225 int rid;
226
227 sc = device_get_softc(dev);
228 rid = 0;
229 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
230 RF_ACTIVE);
231 if (sc->mem_res == NULL)
232 goto errout;
233 rid = 0;
234 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
235 RF_ACTIVE);
236 if (sc->irq_res == NULL)
237 goto errout;
238 return (0);
239 errout:
240 at91_mci_deactivate(dev);
241 return (ENOMEM);
242 }
243
244 static void
245 at91_mci_deactivate(device_t dev)
246 {
247 struct at91_mci_softc *sc;
248
249 sc = device_get_softc(dev);
250 if (sc->intrhand)
251 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
252 sc->intrhand = 0;
253 bus_generic_detach(sc->dev);
254 if (sc->mem_res)
255 bus_release_resource(dev, SYS_RES_IOPORT,
256 rman_get_rid(sc->mem_res), sc->mem_res);
257 sc->mem_res = 0;
258 if (sc->irq_res)
259 bus_release_resource(dev, SYS_RES_IRQ,
260 rman_get_rid(sc->irq_res), sc->irq_res);
261 sc->irq_res = 0;
262 return;
263 }
264
265 static void
266 at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
267 {
268 if (error != 0)
269 return;
270 *(bus_addr_t *)arg = segs[0].ds_addr;
271 }
272
/*
 * mmcbr update_ios method: push the mmc layer's requested bus settings
 * (clock frequency, bus width) into the controller registers.
 *
 * The MCI clock is MCK / (2 * (CLKDIV + 1)).  When MCK divides evenly,
 * CLKDIV is computed exactly; otherwise the quotient without the -1
 * yields the next slower rate, so the card is never overclocked.
 */
static int
at91_mci_update_ios(device_t brdev, device_t reqdev)
{
	uint32_t at91_master_clock = AT91C_MASTER_CLOCK;
	struct at91_mci_softc *sc;
	struct mmc_host *host;
	struct mmc_ios *ios;
	uint32_t clkdiv;

	sc = device_get_softc(brdev);
	host = &sc->host;
	ios = &host->ios;
	// bus mode?  (open-drain vs push-pull is handled per-command in
	// at91_mci_start_cmd, not here)
	if (ios->clock == 0) {
		/* Clock of 0 means "stop the bus": disable the controller. */
		WR4(sc, MCI_CR, MCI_CR_MCIDIS);
		clkdiv = 0;
	} else {
		WR4(sc, MCI_CR, MCI_CR_MCIEN);
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;
	}
	/* Only honor a 4-bit request if the board actually wires 4 lines. */
	if (ios->bus_width == bus_width_4 && sc->wire4) {
		device_printf(sc->dev, "Setting controller bus width to 4\n");
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS);
	} else {
		device_printf(sc->dev, "Setting controller bus width to 1\n");
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS);
	}
	/* Merge the new divider into MR, preserving the other mode bits. */
	WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv);
#if 0
	/* Slot power control is not hooked up on this platform yet. */
	if (sc->vcc_pin) {
		if (sc->power_mode == MMC_POWER_OFF)
			gpio_set(sc->vcc_pin, 0);
		else
			gpio_set(sc->vcc_pin, 1);
	}
#endif
	return (0);
}
314
/*
 * Issue one MMC command on the wire.  Builds the CMDR word from the
 * command's opcode and response/transfer flags, and for data commands
 * sets up a single-segment PDC DMA transfer before writing ARGR/CMDR.
 * Completion is interrupt driven: the IER bits enabled at the end select
 * which status event (CMDRDY, ENDRX, or TXBUFE) fires at91_mci_intr().
 *
 * Locking: called with the softc mutex held (from at91_mci_start).
 */
static void
at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmdr, ier = 0, mr;
	uint32_t *src, *dst;
	int i;
	struct mmc_data *data;
	struct mmc_request *req;
	size_t block_size = 1 << 9; // Fixed, per mmc/sd spec for 2GB cards
	void *vaddr;
	bus_addr_t paddr;

	sc->curcmd = cmd;
	data = cmd->data;
	cmdr = cmd->opcode;
	req = cmd->mrq;
	if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
		cmdr |= MCI_CMDR_RSPTYP_NO;
	else {
		/* Allow big timeout for responses */
		cmdr |= MCI_CMDR_MAXLAT;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136;
		else
			cmdr |= MCI_CMDR_RSPTYP_48;
	}
	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= MCI_CMDR_TRCMD_STOP;
	if (sc->host.ios.bus_mode == opendrain)
		cmdr |= MCI_CMDR_OPDCMD;
	if (!data) {
		// The no data case is fairly simple: quiesce the PDC,
		// fire the command, and wait for CMDRDY (or an error).
		at91_mci_pdc_disable(sc);
		// printf("CMDR %x ARGR %x\n", cmdr, cmd->arg);
		WR4(sc, MCI_ARGR, cmd->arg);
		WR4(sc, MCI_CMDR, cmdr);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
		return;
	}
	if (data->flags & MMC_DATA_READ)
		cmdr |= MCI_CMDR_TRDIR;
	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
		cmdr |= MCI_CMDR_TRCMD_START;
	if (data->flags & MMC_DATA_STREAM)
		cmdr |= MCI_CMDR_TRTYP_STREAM;
	if (data->flags & MMC_DATA_MULTI)
		cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
	// Set block size and turn on PDC mode for dma xfer and disable
	// PDC until we're ready.
	mr = RD4(sc, MCI_MR) & ~MCI_MR_BLKLEN;
	WR4(sc, MCI_MR, mr | (block_size << 16) | MCI_MR_PDCMODE);
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
	if (cmdr & MCI_CMDR_TRCMD_START) {
		if (cmdr & MCI_CMDR_TRDIR)
			/* Read: DMA straight into the caller's buffer;
			 * the byte swap happens in at91_mci_read_done(). */
			vaddr = cmd->data->data;
		else {
			/* Write: only single 512-byte blocks supported;
			 * data is byte-swapped into the bounce buffer
			 * (the controller apparently moves 32-bit words
			 * in the wrong byte order -- NOTE(review):
			 * confirm against the RM9200 errata). */
			if (data->len != BBSZ)
				panic("Write multiblock write support");
			vaddr = sc->bounce_buffer;
			src = (uint32_t *)cmd->data->data;
			dst = (uint32_t *)vaddr;
			for (i = 0; i < data->len / 4; i++)
				dst[i] = bswap32(src[i]);
		}
		data->xfer_len = 0;
		if (bus_dmamap_load(sc->dmatag, sc->map, vaddr, data->len,
		    at91_mci_getaddr, &paddr, 0) != 0) {
			/* Map load failed: fail the request immediately. */
			if (req->cmd->flags & STOP_STARTED)
				req->stop->error = MMC_ERR_NO_MEMORY;
			else
				req->cmd->error = MMC_ERR_NO_MEMORY;
			sc->req = NULL;
			sc->curcmd = NULL;
			req->done(req);
			return;
		}
		sc->mapped++;
		/* PDC counters are in 32-bit words, hence len / 4. */
		if (cmdr & MCI_CMDR_TRDIR) {
			bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_PREREAD);
			WR4(sc, PDC_RPR, paddr);
			WR4(sc, PDC_RCR, data->len / 4);
			ier = MCI_SR_ENDRX;
		} else {
			bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_PREWRITE);
			WR4(sc, PDC_TPR, paddr);
			WR4(sc, PDC_TCR, data->len / 4);
			ier = MCI_SR_TXBUFE;
		}
	}
	// printf("CMDR %x ARGR %x with data\n", cmdr, cmd->arg);
	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	/* Enable the PDC channel only after the command is in flight. */
	if (cmdr & MCI_CMDR_TRCMD_START) {
		if (cmdr & MCI_CMDR_TRDIR)
			WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
		else
			WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
	}
	WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
}
415
/*
 * Advance the request state machine: first issue req->cmd, then (on the
 * next call, typically from the interrupt handler) req->stop if one is
 * present, and finally complete the request via req->done().
 *
 * Locking: must be called with the softc mutex held.
 */
static void
at91_mci_start(struct at91_mci_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;
	// assert locked
	if (!(sc->flags & CMD_STARTED)) {
		sc->flags |= CMD_STARTED;
		// printf("Starting CMD\n");
		at91_mci_start_cmd(sc, req->cmd);
		return;
	}
	if (!(sc->flags & STOP_STARTED) && req->stop) {
		// printf("Starting Stop\n");
		sc->flags |= STOP_STARTED;
		at91_mci_start_cmd(sc, req->stop);
		return;
	}
	/* We must be done -- bad idea to do this while locked? */
	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}
442
443 static int
444 at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req)
445 {
446 struct at91_mci_softc *sc = device_get_softc(brdev);
447
448 AT91_MCI_LOCK(sc);
449 // XXX do we want to be able to queue up multiple commands?
450 // XXX sounds like a good idea, but all protocols are sync, so
451 // XXX maybe the idea is naive...
452 if (sc->req != NULL) {
453 AT91_MCI_UNLOCK(sc);
454 return EBUSY;
455 }
456 sc->req = req;
457 sc->flags = 0;
458 at91_mci_start(sc);
459 AT91_MCI_UNLOCK(sc);
460 return (0);
461 }
462
/*
 * mmcbr get_ro method: -1 tells the mmc layer the write-protect state
 * is unknown (no write-protect switch is wired to this controller).
 */
static int
at91_mci_get_ro(device_t brdev, device_t reqdev)
{
	return (-1);
}
468
469 static int
470 at91_mci_acquire_host(device_t brdev, device_t reqdev)
471 {
472 struct at91_mci_softc *sc = device_get_softc(brdev);
473 int err = 0;
474
475 AT91_MCI_LOCK(sc);
476 while (sc->bus_busy)
477 msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5);
478 sc->bus_busy++;
479 AT91_MCI_UNLOCK(sc);
480 return (err);
481 }
482
/*
 * mmcbr release_host method: drop this client's claim on the host and
 * wake any thread sleeping in at91_mci_acquire_host().
 */
static int
at91_mci_release_host(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);

	AT91_MCI_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	AT91_MCI_UNLOCK(sc);
	return (0);
}
494
/*
 * ENDRX handler: the PDC has filled the read buffer.  Sync and unload
 * the DMA map, byte-swap the received 32-bit words in place (mirror of
 * the swap done for writes in at91_mci_start_cmd), then switch the
 * interrupt enable from ENDRX to RXBUFF to finish the transfer and
 * disable the PDC channels.
 *
 * Locking: called from at91_mci_intr() with the softc mutex held.
 */
static void
at91_mci_read_done(struct at91_mci_softc *sc)
{
	uint32_t *walker;
	struct mmc_command *cmd;
	int i, len;

	cmd = sc->curcmd;
	bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->dmatag, sc->map);
	sc->mapped--;
	walker = (uint32_t *)cmd->data->data;
	len = cmd->data->len / 4;
	for (i = 0; i < len; i++)
		walker[i] = bswap32(walker[i]);
	// Finish up the sequence...
	WR4(sc, MCI_IDR, MCI_SR_ENDRX);
	WR4(sc, MCI_IER, MCI_SR_RXBUFF);
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
}
515
/*
 * TXBUFE handler: the PDC has drained the write buffer.  Disable the
 * PDC channels, switch the interrupt enable from TXBUFE to NOTBUSY
 * (wait for the card to finish programming), and release the DMA map.
 *
 * Locking: called from at91_mci_intr() with the softc mutex held.
 */
static void
at91_mci_xmit_done(struct at91_mci_softc *sc)
{
	// Finish up the sequence...
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
	WR4(sc, MCI_IDR, MCI_SR_TXBUFE);
	WR4(sc, MCI_IER, MCI_SR_NOTBUSY);
	bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->dmatag, sc->map);
	sc->mapped--;
}
527
/*
 * Interrupt handler.  Reads the status register masked by the currently
 * enabled interrupts, classifies errors, and walks the data-transfer
 * completion chain (TXBUFE -> NOTBUSY -> CMDRDY for writes, ENDRX ->
 * RXBUFF -> CMDRDY for reads).  When the current command is complete,
 * its response words are captured and at91_mci_start() advances the
 * request state machine.
 *
 * NOTE(review): 'cmd' is dereferenced on the error path without a NULL
 * check; a spurious error interrupt with no command outstanding would
 * panic -- confirm whether that can happen on this hardware.
 */
static void
at91_mci_intr(void *arg)
{
	struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
	uint32_t sr;
	int i, done = 0;
	struct mmc_command *cmd;

	AT91_MCI_LOCK(sc);
	sr = RD4(sc, MCI_SR) & RD4(sc, MCI_IMR);
	// printf("i 0x%x\n", sr);
	cmd = sc->curcmd;
	if (sr & MCI_SR_ERROR) {
		// Ignore CRC errors on CMD1 (MMC_SEND_OP_COND) and ACMD41
		// (SD_SEND_OP_COND): per the MMC/SD specs, their R3
		// responses carry no valid CRC.
		if ((sr & MCI_SR_RCRCE) && (cmd->opcode == MMC_SEND_OP_COND ||
		    cmd->opcode == ACMD_SD_SEND_OP_COND))
			cmd->error = MMC_ERR_NONE;
		else if (sr & (MCI_SR_RTOE | MCI_SR_DTOE))
			cmd->error = MMC_ERR_TIMEOUT;
		else if (sr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
			cmd->error = MMC_ERR_BADCRC;
		else if (sr & (MCI_SR_OVRE | MCI_SR_UNRE))
			cmd->error = MMC_ERR_FIFO;
		else
			cmd->error = MMC_ERR_FAILED;
		done = 1;
		/* Drop any DMA mapping left loaded by the failed transfer. */
		if (sc->mapped && cmd->error) {
			bus_dmamap_unload(sc->dmatag, sc->map);
			sc->mapped--;
		}
	} else {
		if (sr & MCI_SR_TXBUFE) {
			// printf("TXBUFE\n");
			at91_mci_xmit_done(sc);
		}
		if (sr & MCI_SR_RXBUFF) {
			// printf("RXBUFF\n");
			WR4(sc, MCI_IDR, MCI_SR_RXBUFF);
			WR4(sc, MCI_IER, MCI_SR_CMDRDY);
		}
		if (sr & MCI_SR_ENDTX) {
			// printf("ENDTX\n");
		}
		if (sr & MCI_SR_ENDRX) {
			// printf("ENDRX\n");
			at91_mci_read_done(sc);
		}
		if (sr & MCI_SR_NOTBUSY) {
			// printf("NOTBUSY\n");
			WR4(sc, MCI_IDR, MCI_SR_NOTBUSY);
			WR4(sc, MCI_IER, MCI_SR_CMDRDY);
		}
		if (sr & MCI_SR_DTIP) {
			// printf("Data transfer in progress\n");
		}
		if (sr & MCI_SR_BLKE) {
			// printf("Block transfer end\n");
		}
		if (sr & MCI_SR_TXRDY) {
			// printf("Ready to transmit\n");
		}
		if (sr & MCI_SR_RXRDY) {
			// printf("Ready to receive\n");
		}
		if (sr & MCI_SR_CMDRDY) {
			// printf("Command ready\n");
			done = 1;
			cmd->error = MMC_ERR_NONE;
		}
	}
	if (done) {
		/* Mask everything; the next command re-enables what it needs. */
		WR4(sc, MCI_IDR, 0xffffffff);
		if (cmd != NULL && (cmd->flags & MMC_RSP_PRESENT)) {
			/* R2 (136-bit) responses span four RSPR words. */
			for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1);
			     i++) {
				cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
				// printf("RSPR[%d] = %x\n", i, cmd->resp[i]);
			}
		}
		at91_mci_start(sc);
	}
	AT91_MCI_UNLOCK(sc);
}
611
612 static int
613 at91_mci_read_ivar(device_t bus, device_t child, int which, u_char *result)
614 {
615 struct at91_mci_softc *sc = device_get_softc(bus);
616
617 switch (which) {
618 default:
619 return (EINVAL);
620 case MMCBR_IVAR_BUS_MODE:
621 *(int *)result = sc->host.ios.bus_mode;
622 break;
623 case MMCBR_IVAR_BUS_WIDTH:
624 *(int *)result = sc->host.ios.bus_width;
625 break;
626 case MMCBR_IVAR_CHIP_SELECT:
627 *(int *)result = sc->host.ios.chip_select;
628 break;
629 case MMCBR_IVAR_CLOCK:
630 *(int *)result = sc->host.ios.clock;
631 break;
632 case MMCBR_IVAR_F_MIN:
633 *(int *)result = sc->host.f_min;
634 break;
635 case MMCBR_IVAR_F_MAX:
636 *(int *)result = sc->host.f_max;
637 break;
638 case MMCBR_IVAR_HOST_OCR:
639 *(int *)result = sc->host.host_ocr;
640 break;
641 case MMCBR_IVAR_MODE:
642 *(int *)result = sc->host.mode;
643 break;
644 case MMCBR_IVAR_OCR:
645 *(int *)result = sc->host.ocr;
646 break;
647 case MMCBR_IVAR_POWER_MODE:
648 *(int *)result = sc->host.ios.power_mode;
649 break;
650 case MMCBR_IVAR_VDD:
651 *(int *)result = sc->host.ios.vdd;
652 break;
653 }
654 return (0);
655 }
656
657 static int
658 at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
659 {
660 struct at91_mci_softc *sc = device_get_softc(bus);
661
662 switch (which) {
663 default:
664 return (EINVAL);
665 case MMCBR_IVAR_BUS_MODE:
666 sc->host.ios.bus_mode = value;
667 break;
668 case MMCBR_IVAR_BUS_WIDTH:
669 sc->host.ios.bus_width = value;
670 break;
671 case MMCBR_IVAR_CHIP_SELECT:
672 sc->host.ios.chip_select = value;
673 break;
674 case MMCBR_IVAR_CLOCK:
675 sc->host.ios.clock = value;
676 break;
677 case MMCBR_IVAR_MODE:
678 sc->host.mode = value;
679 break;
680 case MMCBR_IVAR_OCR:
681 sc->host.ocr = value;
682 break;
683 case MMCBR_IVAR_POWER_MODE:
684 sc->host.ios.power_mode = value;
685 break;
686 case MMCBR_IVAR_VDD:
687 sc->host.ios.vdd = value;
688 break;
689 case MMCBR_IVAR_HOST_OCR:
690 case MMCBR_IVAR_F_MIN:
691 case MMCBR_IVAR_F_MAX:
692 return (EINVAL);
693 }
694 return (0);
695 }
696
/*
 * Newbus glue: device methods, the mmcbr bridge interface, and driver
 * registration on the atmelarm bus.
 */
static device_method_t at91_mci_methods[] = {
	/* device_if */
	DEVMETHOD(device_probe, at91_mci_probe),
	DEVMETHOD(device_attach, at91_mci_attach),
	DEVMETHOD(device_detach, at91_mci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, at91_mci_read_ivar),
	DEVMETHOD(bus_write_ivar, at91_mci_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios),
	DEVMETHOD(mmcbr_request, at91_mci_request),
	DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro),
	DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host),
	DEVMETHOD(mmcbr_release_host, at91_mci_release_host),

	{0, 0},
};

static driver_t at91_mci_driver = {
	"at91_mci",
	at91_mci_methods,
	sizeof(struct at91_mci_softc),
};
static devclass_t at91_mci_devclass;


DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, 0, 0);
/* Cache object: e432972123407c4a972ecc1ad9c46ac6 */