/*-
 * Copyright (c) 2015 Brian Fundakowski Feldman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include "opt_spi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/spigenio.h>
#include <sys/types.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <dev/spibus/spi.h>
#include <dev/spibus/spibusvar.h>

#ifdef FDT
#include <dev/ofw/ofw_bus_subr.h>

static struct ofw_compat_data compat_data[] = {
	{"freebsd,spigen", true},
	{NULL, false}
};

#endif

#include "spibus_if.h"

struct spigen_softc {
	device_t	sc_dev;
	struct cdev	*sc_cdev;
#ifdef SPIGEN_LEGACY_CDEVNAME
	struct cdev	*sc_adev;	/* alias device */
#endif
	struct mtx	sc_mtx;
};

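/*
 * Per-open state for SPIGENIOC_TRANSFER_MMAPPED: a wired buffer shared
 * with userland.  bufobj is the VM object the process maps, kvaddr is the
 * kernel mapping of the same pages and bufsize is the buffer size in
 * bytes (a multiple of PAGE_SIZE).
 */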
struct spigen_mmap {
	vm_object_t	bufobj;
	vm_offset_t	kvaddr;
	size_t		bufsize;
};

static int
spigen_probe(device_t dev)
{
	int rv;

	/*
	 * By default we only bid to attach if specifically added by our parent
	 * (usually via hint.spigen.#.at=busname).  On FDT systems we bid as the
	 * default driver based on being configured in the FDT data.
	 */
	rv = BUS_PROBE_NOWILDCARD;

#ifdef FDT
	if (ofw_bus_status_okay(dev) &&
	    ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		rv = BUS_PROBE_DEFAULT;
#endif

	device_set_desc(dev, "SPI Generic IO");

	return (rv);
}

static int spigen_open(struct cdev *, int, int, struct thread *);
static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
static int spigen_close(struct cdev *, int, int, struct thread *);
static d_mmap_single_t spigen_mmap_single;

static struct cdevsw spigen_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"spigen",
	.d_open =	spigen_open,
	.d_ioctl =	spigen_ioctl,
	.d_mmap_single = spigen_mmap_single,
	.d_close =	spigen_close
};

static int
spigen_attach(device_t dev)
{
	struct spigen_softc *sc;
	const int unit = device_get_unit(dev);
	int cs, res;
	struct make_dev_args mda;

	spibus_get_cs(dev, &cs);
	cs &= ~SPIBUS_CS_HIGH; /* trim 'cs high' bit */

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	make_dev_args_init(&mda);
	mda.mda_flags = MAKEDEV_WAITOK;
	mda.mda_devsw = &spigen_cdevsw;
	mda.mda_cr = NULL;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_OPERATOR;
	mda.mda_mode = 0660;
	mda.mda_unit = unit;
	mda.mda_si_drv1 = dev;

	res = make_dev_s(&mda, &(sc->sc_cdev), "spigen%d.%d",
	    device_get_unit(device_get_parent(dev)), cs);
	if (res) {
		return (res);
	}

#ifdef SPIGEN_LEGACY_CDEVNAME
	res = make_dev_alias_p(0, &sc->sc_adev, sc->sc_cdev, "spigen%d", unit);
	if (res) {
		if (sc->sc_cdev) {
			destroy_dev(sc->sc_cdev);
			sc->sc_cdev = NULL;
		}
		return (res);
	}
#endif

	return (0);
}

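/*
 * Keep the device busy while it is open so that it cannot be detached
 * underneath an active file descriptor; spigen_close() drops the
 * reference again.
 */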
static int
spigen_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	device_t dev;
	struct spigen_softc *sc;

	dev = cdev->si_drv1;
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	device_busy(sc->sc_dev);
	mtx_unlock(&sc->sc_mtx);

	return (0);
}

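/*
 * Handle SPIGENIOC_TRANSFER: bounce the caller's command and data buffers
 * through temporary kernel buffers.  SPI is full duplex, so transmit and
 * receive share the same storage and the bytes received on the wire
 * overwrite the bytes that were sent.
 */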
static int
spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	int error = 0;

#if 0
	device_printf(dev, "cmd %p %u data %p %u\n", st->st_command.iov_base,
	    st->st_command.iov_len, st->st_data.iov_base, st->st_data.iov_len);
#endif

	if (st->st_command.iov_len == 0)
		return (EINVAL);

	transfer.tx_cmd = transfer.rx_cmd = malloc(st->st_command.iov_len,
	    M_DEVBUF, M_WAITOK);
	if (st->st_data.iov_len > 0) {
		transfer.tx_data = transfer.rx_data = malloc(st->st_data.iov_len,
		    M_DEVBUF, M_WAITOK);
	} else
		transfer.tx_data = transfer.rx_data = NULL;

	error = copyin(st->st_command.iov_base, transfer.tx_cmd,
	    transfer.tx_cmd_sz = transfer.rx_cmd_sz = st->st_command.iov_len);
	if ((error == 0) && (st->st_data.iov_len > 0))
		error = copyin(st->st_data.iov_base, transfer.tx_data,
		    transfer.tx_data_sz = transfer.rx_data_sz =
		    st->st_data.iov_len);
	if (error == 0)
		error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
	if (error == 0) {
		error = copyout(transfer.rx_cmd, st->st_command.iov_base,
		    transfer.rx_cmd_sz);
		if ((error == 0) && (st->st_data.iov_len > 0))
			error = copyout(transfer.rx_data, st->st_data.iov_base,
			    transfer.rx_data_sz);
	}

	free(transfer.tx_cmd, M_DEVBUF);
	free(transfer.tx_data, M_DEVBUF);
	return (error);
}

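/*
 * Handle SPIGENIOC_TRANSFER_MMAPPED: run a transfer directly out of the
 * buffer previously shared with the process via mmap().  The command
 * occupies the first stm_command_length bytes of the buffer and the data
 * immediately follows it; received bytes are left in place for the caller
 * to read back, so no copyin/copyout is needed.
 */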
static int
spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
{
	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
	device_t dev = cdev->si_drv1;
	struct spigen_mmap *mmap;
	int error;

	if ((error = devfs_get_cdevpriv((void **)&mmap)) != 0)
		return (error);

	if (mmap->bufsize < stm->stm_command_length + stm->stm_data_length)
		return (E2BIG);

	transfer.tx_cmd = transfer.rx_cmd = (void *)((uintptr_t)mmap->kvaddr);
	transfer.tx_cmd_sz = transfer.rx_cmd_sz = stm->stm_command_length;
	transfer.tx_data = transfer.rx_data =
	    (void *)((uintptr_t)mmap->kvaddr + stm->stm_command_length);
	transfer.tx_data_sz = transfer.rx_data_sz = stm->stm_data_length;
	error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);

	return (error);
}

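/*
 * Illustrative userland sketch (not part of this driver): the ioctls
 * handled below are typically driven roughly as follows; the device path
 * and the command byte are made up for the example.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/spigenio.h>
 *	#include <sys/uio.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *
 *	uint8_t cmd[1] = { 0x9f };
 *	uint8_t data[8] = { 0 };
 *	struct spigen_transfer st;
 *	int fd, error;
 *
 *	fd = open("/dev/spigen0.0", O_RDWR);
 *	st.st_command.iov_base = cmd;
 *	st.st_command.iov_len = sizeof(cmd);
 *	st.st_data.iov_base = data;
 *	st.st_data.iov_len = sizeof(data);
 *	error = ioctl(fd, SPIGENIOC_TRANSFER, &st);
 *
 * On success, data[] holds the bytes received while cmd and data were
 * being clocked out.  Bus clock and mode can be read or changed the same
 * way via the SPIGENIOC_{GET,SET}_CLOCK_SPEED and
 * SPIGENIOC_{GET,SET}_SPI_MODE ioctls, which take a uint32_t argument.
 */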
static int
spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	device_t dev = cdev->si_drv1;
	int error;

	switch (cmd) {
	case SPIGENIOC_TRANSFER:
		error = spigen_transfer(cdev, (struct spigen_transfer *)data);
		break;
	case SPIGENIOC_TRANSFER_MMAPPED:
		error = spigen_transfer_mmapped(cdev,
		    (struct spigen_transfer_mmapped *)data);
		break;
	case SPIGENIOC_GET_CLOCK_SPEED:
		error = spibus_get_clock(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_CLOCK_SPEED:
		error = spibus_set_clock(dev, *(uint32_t *)data);
		break;
	case SPIGENIOC_GET_SPI_MODE:
		error = spibus_get_mode(dev, (uint32_t *)data);
		break;
	case SPIGENIOC_SET_SPI_MODE:
		error = spibus_set_mode(dev, *(uint32_t *)data);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

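/*
 * cdevpriv destructor: tear down the shared buffer when the file is
 * closed (or when mmap setup fails below), removing the kernel mapping
 * and dropping the reference on the backing VM object.
 */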
static void
spigen_mmap_cleanup(void *arg)
{
	struct spigen_mmap *mmap = arg;

	if (mmap->kvaddr != 0)
		pmap_qremove(mmap->kvaddr, mmap->bufsize / PAGE_SIZE);
	if (mmap->bufobj != NULL)
		vm_object_deallocate(mmap->bufobj);
	free(mmap, M_DEVBUF);
}

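/*
 * Back an mmap() request with wired, zeroed pages in an OBJT_PHYS object.
 * The pages are also entered into the kernel map at mmap->kvaddr so that
 * SPIGENIOC_TRANSFER_MMAPPED can run transfers directly out of the shared
 * buffer.  Only one mapping per open file is allowed, and it must be
 * read/write and non-executable.
 */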
static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	struct spigen_mmap *mmap;
	vm_page_t *m;
	size_t n, pages;
	int error;

	if (size == 0 ||
	    (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
	    != (PROT_READ | PROT_WRITE))
		return (EINVAL);
	size = roundup2(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (devfs_get_cdevpriv((void **)&mmap) == 0)
		return (EBUSY);

	mmap = malloc(sizeof(*mmap), M_DEVBUF, M_ZERO | M_WAITOK);
	if ((mmap->kvaddr = kva_alloc(size)) == 0) {
		spigen_mmap_cleanup(mmap);
		return (ENOMEM);
	}
	mmap->bufsize = size;
	mmap->bufobj = vm_pager_allocate(OBJT_PHYS, 0, size, nprot, 0,
	    curthread->td_ucred);

	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
	VM_OBJECT_WLOCK(mmap->bufobj);
	vm_object_reference_locked(mmap->bufobj); /* kernel and userland both */
	for (n = 0; n < pages; n++) {
		m[n] = vm_page_grab(mmap->bufobj, n,
		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
		vm_page_valid(m[n]);
		vm_page_xunbusy(m[n]);
	}
	VM_OBJECT_WUNLOCK(mmap->bufobj);
	pmap_qenter(mmap->kvaddr, m, pages);
	free(m, M_TEMP);

	if ((error = devfs_set_cdevpriv(mmap, spigen_mmap_cleanup)) != 0) {
		/* Two threads were racing through this code; we lost. */
		spigen_mmap_cleanup(mmap);
		return (error);
	}
	*offset = 0;
	*object = mmap->bufobj;

	return (0);
}

static int
spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = cdev->si_drv1;
	struct spigen_softc *sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mtx);
	device_unbusy(sc->sc_dev);
	mtx_unlock(&sc->sc_mtx);
	return (0);
}

static int
spigen_detach(device_t dev)
{
	struct spigen_softc *sc;

	sc = device_get_softc(dev);

#ifdef SPIGEN_LEGACY_CDEVNAME
	if (sc->sc_adev)
		destroy_dev(sc->sc_adev);
#endif

	if (sc->sc_cdev)
		destroy_dev(sc->sc_cdev);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static device_method_t spigen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		spigen_probe),
	DEVMETHOD(device_attach,	spigen_attach),
	DEVMETHOD(device_detach,	spigen_detach),
	{ 0, 0 }
};

static driver_t spigen_driver = {
	"spigen",
	spigen_methods,
	sizeof(struct spigen_softc),
};

DRIVER_MODULE(spigen, spibus, spigen_driver, 0, 0);
MODULE_DEPEND(spigen, spibus, 1, 1, 1);
#ifdef FDT
SIMPLEBUS_PNP_INFO(compat_data);
#endif