/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

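/*
 * All of the query_* methods below follow the same pattern: build a
 * subnet management packet (SMP), execute it against the firmware
 * with mthca_MAD_IFC(), and decode the fields of interest from the
 * response.  init_query_mad() fills in the common header for a
 * LID-routed SubnGet() request.
 */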
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags = mdev->device_cap_flags;
	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = mdev->limits.page_size_cap;
	props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr = mdev->limits.max_wqes;
	props->max_sge = mdev->limits.max_sg;
	props->max_sge_rd = props->max_sge;
	props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe = mdev->limits.max_cqes;
	props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr = mdev->limits.max_srq_wqes;
	props->max_srq_sge = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
	props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = mdev->limits.pkey_table_len;
	props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;
	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
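	/*
	 * Worked example for the non-Sinai case: with 2^17 MPT
	 * entries, ilog2() gives 17, so an FMR can be remapped
	 * (1 << (32 - 17)) - 1 = 32767 times before it has to be
	 * unmapped and its key reused from scratch.
	 */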

	err = 0;
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

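/*
 * The byte offsets used below index into the 64-byte payload of the
 * PortInfo attribute as laid out by the IB spec; for example, the
 * 16-bit LID sits at bytes 16-17 of out_mad->data and the subnet
 * manager's LID at bytes 18-19.
 */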
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

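/*
 * A P_KeyTable SMP carries 32 table entries, so the table index is
 * split into an attribute modifier (index / 32) selecting a 32-entry
 * block and a slot (index % 32) within the returned block; index 40,
 * for example, reads slot 8 of block 1.
 */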
static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

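/*
 * A GID is assembled from two queries: bytes 8-15 of PortInfo give
 * the 64-bit subnet prefix, and the GuidInfo attribute (which holds
 * eight GUIDs per block, hence index / 8 and index % 8) gives the
 * 64-bit GUID forming the lower half.
 */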
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

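/*
 * Creating a userspace context allocates a UAR (user access region)
 * page for ringing doorbells and, on memfree HCAs, a per-context
 * table of doorbell records; the sizes userspace needs in order to
 * use these are passed back to libmthca through udata.
 */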
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct mthca_alloc_ucontext_resp uresp;
	struct mthca_ucontext *context;
	int err;

	if (!(to_mdev(ibdev)->active))
		return ERR_PTR(-EAGAIN);

	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(err);
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	context->reg_mr_warned = 0;

	return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
	kfree(to_mucontext(context));

	return 0;
}

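/*
 * The UAR is a single page of HCA register space, so it is mapped
 * into userspace uncached and the mapping must be requested with a
 * VMA of exactly one page.
 */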
static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr,
				     struct ib_udata *udata)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = NULL;
	struct mthca_srq *srq;
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (pd->uobject) {
		context = to_mucontext(pd->uobject->context);

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq);

	if (err && pd->uobject)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
	struct mthca_ucontext *context;

	if (srq->uobject) {
		context = to_mucontext(srq->uobject->context);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		struct mthca_ucontext *context;

		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index = ucmd.sq_db_index;
			qp->rq.db_index = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp);

		if (err && pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr = qp->sq.max;
	init_attr->cap.max_recv_wr = qp->rq.max;
	init_attr->cap.max_send_sge = qp->sq.max_gs;
	init_attr->cap.max_recv_sge = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	if (qp->uobject) {
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return ERR_PTR(-EINVAL);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return ERR_PTR(-EFAULT);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.set_db_index, ucmd.set_db_page);
		if (err)
			return ERR_PTR(err);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.arm_db_index, ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		err = -ENOMEM;
		goto err_unmap_arm;
	}

	if (context) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index = ucmd.set_db_index;
		cq->arm_db_index = ucmd.arm_db_index;
	}

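	/*
	 * Round the requested depth up to the next power of two
	 * strictly greater than "entries" (e.g. a request for 100
	 * CQEs allocates a 128-entry buffer): the hardware wants a
	 * power-of-two buffer, and one extra slot is needed because
	 * only nent - 1 entries are reported back as usable.
	 */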
	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent,
			    context ? to_mucontext(context) : NULL,
			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_free;
	}

	cq->resize_buf = NULL;

	return &cq->ibcq;

err_free:
	kfree(cq);

err_unmap_arm:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.set_db_index);

	return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

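/*
 * Resizing a kernel CQ happens in stages: allocate a new buffer and
 * mark it CQ_RESIZE_READY under the CQ lock, ask the firmware to
 * switch buffers with RESIZE_CQ, then copy any unpolled CQEs across
 * and free whichever buffer is no longer in use (the state may have
 * moved on if the buffers were already swapped by the time the
 * command completed).  Userspace CQs only need the firmware call,
 * with the new buffer's lkey supplied through udata.
 */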
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf = cq->buf;
			tcqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct scatterlist *sg;
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
		if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++to_mucontext(pd->uobject->context)->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);

	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	shift = ffs(mr->umem->page_size) - 1;
	n = mr->umem->nmap;

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

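	/*
	 * Note that the chunk size chosen above is bounded both by
	 * what a single firmware WRITE_MTT command can accept and by
	 * the one scratch page allocated for the address list
	 * (PAGE_SIZE / sizeof(u64), i.e. 512 entries with 4K pages).
	 */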
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				mr->umem->page_size * k;
			/*
			 * Be friendly to write_mtt and pass it chunks
			 * of appropriate size.
			 */
			if (i == write_mtt_size) {
				err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
				if (err)
					goto mtt_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	if (mmr->umem)
		ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

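/*
 * Unmapping a batch of FMRs only updates the driver's view of each
 * mapping; the wmb() orders those updates before SYNC_TPT tells the
 * firmware to flush its cached translations.  Since SYNC_TPT is a
 * per-device command, all FMRs in the list must belong to the same
 * device.
 */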
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev);
	return err;
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,   NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,   NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mthca_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

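/*
 * dev->fw_ver packs the firmware version as
 * (major << 32) | (minor << 16) | subminor, so a value of
 * 0x100060002 is reported as "1.6.2".
 */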
static void get_dev_fw_str(struct ib_device *device, char *str,
			   size_t str_len)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, str_len, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}

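/*
 * Registration wires up the verbs function table.  The fast-path
 * entry points (CQ arming, work request posting and, when FMRs are
 * enabled, FMR mapping) come in two flavors because Tavor-family
 * HCAs keep doorbells in device memory while memfree HCAs (native
 * Arbel and later) use doorbell records in host memory.
 */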
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dma_device = &dev->pdev->dev;
	dev->ib_dev.query_device = mthca_query_device;
	dev->ib_dev.query_port = mthca_query_port;
	dev->ib_dev.modify_device = mthca_modify_device;
	dev->ib_dev.modify_port = mthca_modify_port;
	dev->ib_dev.query_pkey = mthca_query_pkey;
	dev->ib_dev.query_gid = mthca_query_gid;
	dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
	dev->ib_dev.mmap = mthca_mmap_uar;
	dev->ib_dev.alloc_pd = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
	dev->ib_dev.create_ah = mthca_ah_create;
	dev->ib_dev.query_ah = mthca_ah_query;
	dev->ib_dev.destroy_ah = mthca_ah_destroy;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq = mthca_create_srq;
		dev->ib_dev.modify_srq = mthca_modify_srq;
		dev->ib_dev.query_srq = mthca_query_srq;
		dev->ib_dev.destroy_srq = mthca_destroy_srq;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
		else
			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	dev->ib_dev.create_qp = mthca_create_qp;
	dev->ib_dev.modify_qp = mthca_modify_qp;
	dev->ib_dev.query_qp = mthca_query_qp;
	dev->ib_dev.destroy_qp = mthca_destroy_qp;
	dev->ib_dev.create_cq = mthca_create_cq;
	dev->ib_dev.resize_cq = mthca_resize_cq;
	dev->ib_dev.destroy_cq = mthca_destroy_cq;
	dev->ib_dev.poll_cq = mthca_poll_cq;
	dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr = mthca_dereg_mr;
	dev->ib_dev.get_port_immutable = mthca_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send = mthca_arbel_post_send;
		dev->ib_dev.post_recv = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send = mthca_tavor_post_send;
		dev->ib_dev.post_recv = mthca_tavor_post_receive;
	}

	mutex_init(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 mthca_dev_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}