/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/unistd.h>

#include <geom/geom.h>

#include "nvme_private.h"

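/*
 * Kernel-side I/O benchmark support for nvme(4) namespaces.  nvme_ns_test()
 * drives two test modes:
 *
 *  - NVME_IO_TEST: worker threads submit reads/writes directly through
 *    nvme_ns_cmd_read()/nvme_ns_cmd_write(), re-issuing the next command
 *    from the completion callback.
 *
 *  - any other test command: worker threads build struct bio requests and
 *    push them through the namespace cdev's d_strategy routine, sleeping
 *    until each bio completes.
 *
 * Each worker runs for io_test->time seconds and reports its completed I/O
 * count through io_test->io_completed[].
 */
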
struct nvme_io_test_thread {
	uint32_t		idx;		/* worker index, assigned from td_idx */
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;		/* NVME_OPC_READ or NVME_OPC_WRITE */
	struct timeval		start;		/* test start time (uptime) */
	void			*buf;		/* per-thread I/O buffer */
	uint32_t		size;		/* I/O size in bytes */
	uint32_t		time;		/* test duration in seconds */
	uint64_t		io_completed;	/* number of I/Os completed */
};

struct nvme_io_test_internal {
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;		/* NVME_OPC_READ or NVME_OPC_WRITE */
	struct timeval		start;		/* test start time (uptime) */
	uint32_t		time;		/* test duration in seconds */
	uint32_t		size;		/* I/O size in bytes */
	uint32_t		td_active;	/* worker threads still running */
	uint32_t		td_idx;		/* next worker index to hand out */
	uint32_t		flags;		/* NVME_TEST_FLAG_* */
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

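/*
 * Completion handler for the bio-based test: wake up the worker thread
 * sleeping on this bio in nvme_ns_bio_test().
 */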
static void
nvme_ns_bio_test_cb(struct bio *bio)
{
	struct mtx *mtx;

	mtx = mtx_pool_find(mtxpool_sleep, bio);
	mtx_lock(mtx);
	wakeup(bio);
	mtx_unlock(mtx);
}

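/*
 * Worker thread body for the bio-based test.  Each worker starts at its own
 * offset within the namespace and issues one bio at a time through the
 * cdev's d_strategy routine, sleeping on the bio (via a pool mutex) until
 * nvme_ns_bio_test_cb() signals completion.  The loop stops on an error, a
 * short transfer, or once the configured test time has elapsed.
 */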
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal *io_test = arg;
	struct cdevsw *csw;
	struct mtx *mtx;
	struct bio *bio;
	struct cdev *dev;
	void *buf;
	struct timeval t;
	uint64_t io_completed = 0, offset;
	uint32_t idx;
	int ref;

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	/* Stagger the workers 2048 sectors apart so they do not overlap. */
	offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);

	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			csw = dev_refthread(dev, &ref);
		} else
			csw = dev->si_devsw;

		/*
		 * Submit the bio and sleep until the completion callback
		 * wakes us up.
		 */
		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			dev_relthread(dev, ref);
		}

		/* Stop on an error or a short transfer, freeing the bio first. */
		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0)) {
			g_destroy_bio(bio);
			break;
		}

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		/* Advance sequentially, wrapping to the start of the namespace. */
		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

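/*
 * Completion handler for the direct-command test.  On success it checks the
 * elapsed time and, if the test is still running, immediately issues the
 * next read or write so the queue stays busy; otherwise it wakes the worker
 * thread sleeping in nvme_ns_io_test().
 */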
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread *tth = arg;
	struct timeval t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size / nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size / nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}

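/*
 * Worker thread body for the direct-command test.  It primes the pump by
 * invoking the completion callback with a zeroed (successful) completion,
 * which submits the first command, then sleeps until the callback reports
 * that the test time has expired or an error occurred.
 */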
static void
nvme_ns_io_test(void *arg)
{
	struct nvme_io_test_internal *io_test = arg;
	struct nvme_io_test_thread *tth;
	struct nvme_completion cpl;
	int error;

	tth = malloc(sizeof(*tth), M_NVME, M_WAITOK | M_ZERO);
	tth->ns = io_test->ns;
	tth->opc = io_test->opc;
	memcpy(&tth->start, &io_test->start, sizeof(tth->start));
	tth->buf = malloc(io_test->size, M_NVME, M_WAITOK);
	tth->size = io_test->size;
	tth->time = io_test->time;
	tth->idx = atomic_fetchadd_int(&io_test->td_idx, 1);

	/* A zeroed completion reads as success and kicks off the first I/O. */
	memset(&cpl, 0, sizeof(cpl));

	nvme_ns_io_test_cb(tth, &cpl);

	/* Allow twice the test time before giving up on the callback chain. */
	error = tsleep(tth, 0, "test_wait", tth->time * hz * 2);

	if (error)
		printf("%s: error = %d\n", __func__, error);

	io_test->io_completed[tth->idx] = tth->io_completed;
	wakeup_one(io_test);

	free(tth->buf, M_NVME);
	free(tth, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

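/*
 * Ioctl entry point for the I/O tests.  Validates the request, spawns
 * io_test->num_threads worker threads (direct-command workers for
 * NVME_IO_TEST, bio-based workers otherwise), waits for them to finish,
 * and copies the per-thread completion counts back to the caller.
 */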
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test *io_test;
	struct nvme_io_test_internal *io_test_internal;
	void (*fn)(void *);
	int i;

	io_test = (struct nvme_io_test *)arg;

	/* Only reads and writes are supported. */
	if ((io_test->opc != NVME_OPC_READ) &&
	    (io_test->opc != NVME_OPC_WRITE))
		return;

	/* The I/O size must be a multiple of the namespace sector size. */
	if (io_test->size % nvme_ns_get_sector_size(ns))
		return;

	io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
	    M_WAITOK | M_ZERO);
	io_test_internal->opc = io_test->opc;
	io_test_internal->ns = ns;
	io_test_internal->td_active = io_test->num_threads;
	io_test_internal->time = io_test->time;
	io_test_internal->size = io_test->size;
	io_test_internal->flags = io_test->flags;

	if (cmd == NVME_IO_TEST)
		fn = nvme_ns_io_test;
	else
		fn = nvme_ns_bio_test;

	getmicrouptime(&io_test_internal->start);

	for (i = 0; i < io_test->num_threads; i++)
		kthread_add(fn, io_test_internal,
		    NULL, NULL, 0, 0, "nvme_io_test[%d]", i);

	tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);

	/* Wait for the last workers to drop td_active before freeing. */
	while (io_test_internal->td_active > 0)
		DELAY(10);

	memcpy(io_test->io_completed, io_test_internal->io_completed,
	    sizeof(io_test->io_completed));

	free(io_test_internal, M_NVME);
}
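
/*
 * Illustrative sketch only (not part of the driver): a minimal userland
 * caller for the NVME_IO_TEST ioctl.  It assumes a namespace device node
 * such as /dev/nvme0ns1 and the nvme_io_test definitions exported by the
 * driver's public header (<dev/nvme/nvme.h>); those names, the header path,
 * and the device path are assumptions, not confirmed by this file.
 *
 *	#include <sys/ioctl.h>
 *	#include <dev/nvme/nvme.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	run_io_test(void)
 *	{
 *		struct nvme_io_test io_test = { 0 };
 *		int fd, i;
 *
 *		fd = open("/dev/nvme0ns1", O_RDWR);	// assumed device node
 *		if (fd < 0)
 *			return (-1);
 *		io_test.opc = NVME_OPC_READ;	// read test
 *		io_test.size = 4096;		// bytes per I/O, multiple of sector size
 *		io_test.time = 10;		// seconds
 *		io_test.num_threads = 4;
 *		if (ioctl(fd, NVME_IO_TEST, &io_test) < 0) {
 *			close(fd);
 *			return (-1);
 *		}
 *		for (i = 0; i < 4; i++)
 *			printf("thread %d: %ju I/Os\n", i,
 *			    (uintmax_t)io_test.io_completed[i]);
 *		close(fd);
 *		return (0);
 *	}
 */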