FreeBSD/Linux Kernel Cross Reference
sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c


/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>

#include <dev/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

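/*
 * CM MAD paravirtualization for SR-IOV.
 *
 * Slave (guest) communication IDs are only unique within a function, so
 * before a CM MAD leaves the host the slave-local comm ID
 * (slave_id, sl_cm_id) is mapped to a host-unique paravirtual comm ID
 * (pv_cm_id), and the mapping is reversed when a MAD arrives from the
 * wire.  Both directions share one entry type, indexed by an rb-tree
 * keyed on (sl_cm_id, slave_id) and an idr keyed on pv_cm_id.
 */
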
#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)

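/*
 * One slave<->paravirtual comm ID mapping.  Each entry is indexed both
 * by (sl_cm_id, slave_id) in the sl_id_map rb-tree and by pv_cm_id in
 * the pv_id_table idr, and is linked on the per-device cm_list;
 * "timeout" is the delayed work that reaps the entry after a DREQ.
 */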
struct id_map_entry {
        struct rb_node node;

        u32 sl_cm_id;
        u32 pv_cm_id;
        int slave_id;
        int scheduled_delete;
        struct mlx4_ib_dev *dev;

        struct list_head list;
        struct delayed_work timeout;
};

struct cm_generic_msg {
        struct ib_mad_hdr hdr;

        __be32 local_comm_id;
        __be32 remote_comm_id;
};

struct cm_sidr_generic_msg {
        struct ib_mad_hdr hdr;
        __be32 request_id;
};

struct cm_req_msg {
        unsigned char unused[0x60];
        union ib_gid primary_path_sgid;
};

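/*
 * Accessors for the comm ID fields of a CM MAD.  SIDR REQ/REP carry a
 * single request_id, so the SIDR variants of these helpers read and
 * write that field; all other CM attributes use the generic
 * local/remote comm ID layout.
 */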
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                msg->request_id = cpu_to_be32(cm_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                pr_err("trying to set local_comm_id in SIDR_REP\n");
                return;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                msg->local_comm_id = cpu_to_be32(cm_id);
        }
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                return be32_to_cpu(msg->request_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                pr_err("trying to get local_comm_id in SIDR_REP\n");
                return -1;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                return be32_to_cpu(msg->local_comm_id);
        }
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                msg->request_id = cpu_to_be32(cm_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                pr_err("trying to set remote_comm_id in SIDR_REQ\n");
                return;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                msg->remote_comm_id = cpu_to_be32(cm_id);
        }
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
        if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                struct cm_sidr_generic_msg *msg =
                        (struct cm_sidr_generic_msg *)mad;
                return be32_to_cpu(msg->request_id);
        } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                pr_err("trying to get remote_comm_id in SIDR_REQ\n");
                return -1;
        } else {
                struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
                return be32_to_cpu(msg->remote_comm_id);
        }
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
        struct cm_req_msg *msg = (struct cm_req_msg *)mad;

        return msg->primary_path_sgid;
}

/* The caller must hold sriov->id_map_lock. */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
        struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
        struct rb_node *node = sl_id_map->rb_node;

        while (node) {
                struct id_map_entry *id_map_entry =
                        rb_entry(node, struct id_map_entry, node);

                if (id_map_entry->sl_cm_id > sl_cm_id)
                        node = node->rb_left;
                else if (id_map_entry->sl_cm_id < sl_cm_id)
                        node = node->rb_right;
                else if (id_map_entry->slave_id > slave_id)
                        node = node->rb_left;
                else if (id_map_entry->slave_id < slave_id)
                        node = node->rb_right;
                else
                        return id_map_entry;
        }
        return NULL;
}

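/*
 * Delayed-work handler armed after a DREQ: once CM_CLEANUP_CACHE_TIMEOUT
 * has elapsed, drop the entry from the idr and (if it is still the one
 * indexed there) from the rb-tree, then free it.
 */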
static void id_map_ent_timeout(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
        struct id_map_entry *db_ent, *found_ent;
        struct mlx4_ib_dev *dev = ent->dev;
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        int pv_id = (int) ent->pv_cm_id;

        spin_lock(&sriov->id_map_lock);
        db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
        if (!db_ent)
                goto out;
        found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
        idr_remove(&sriov->pv_id_table, pv_id);

out:
        list_del(&ent->list);
        spin_unlock(&sriov->id_map_lock);
        kfree(ent);
}

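/*
 * Look up an entry by its paravirtual comm ID and unlink it from both
 * indexes.  The entry stays on cm_list; it is freed later by its
 * timeout work or by mlx4_ib_cm_paravirt_clean().
 */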
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        struct id_map_entry *ent, *found_ent;

        spin_lock(&sriov->id_map_lock);
        ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
        if (!ent)
                goto out;
        found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
        if (found_ent && found_ent == ent)
                rb_erase(&found_ent->node, sl_id_map);
        idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
        spin_unlock(&sriov->id_map_lock);
}

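/*
 * Insert a new entry into the sl_id_map rb-tree, keyed by
 * (sl_cm_id, slave_id).  If an entry with the same key already exists,
 * it is replaced in the tree.  The caller must hold
 * sriov->id_map_lock.
 */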
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
        struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
        struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
        struct id_map_entry *ent;
        int slave_id = new->slave_id;
        int sl_cm_id = new->sl_cm_id;

        ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
        if (ent) {
                pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
                         sl_cm_id);

                rb_replace_node(&ent->node, &new->node, sl_id_map);
                return;
        }

        /* Go to the bottom of the tree */
        while (*link) {
                parent = *link;
                ent = rb_entry(parent, struct id_map_entry, node);

                if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, sl_id_map);
}

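/*
 * Allocate a mapping for (slave_id, sl_cm_id): pick the next free
 * paravirtual comm ID from the idr, add the entry to both indexes and
 * to cm_list, and return it (or an ERR_PTR on failure).
 */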
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
        int ret;
        struct id_map_entry *ent;
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

        ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
        if (!ent) {
                mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
                return ERR_PTR(-ENOMEM);
        }

        ent->sl_cm_id = sl_cm_id;
        ent->slave_id = slave_id;
        ent->scheduled_delete = 0;
        ent->dev = to_mdev(ibdev);
        INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

        idr_preload(GFP_KERNEL);
        spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);

        ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
        if (ret >= 0) {
                ent->pv_cm_id = (u32)ret;
                sl_id_map_add(ibdev, ent);
                list_add_tail(&ent->list, &sriov->cm_list);
        }

        spin_unlock(&sriov->id_map_lock);
        idr_preload_end();

        if (ret >= 0)
                return ent;

        /* error flow */
        kfree(ent);
        mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
        return ERR_PTR(-ENOMEM);
}

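/*
 * Look up an existing mapping.  With *pv_cm_id == -1 the entry is found
 * by (slave_id, sl_cm_id) and *pv_cm_id is filled in; otherwise the
 * paravirtual comm ID is used directly as the idr key.
 */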
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
        struct id_map_entry *ent;
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

        spin_lock(&sriov->id_map_lock);
        if (*pv_cm_id == -1) {
                ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
                if (ent)
                        *pv_cm_id = (int) ent->pv_cm_id;
        } else
                ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
        spin_unlock(&sriov->id_map_lock);

        return ent;
}

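/*
 * Arm the cleanup timer for an entry, unless the device is going down,
 * in which case mlx4_ib_cm_paravirt_clean() will reap it instead.
 */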
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
        unsigned long flags;

        spin_lock(&sriov->id_map_lock);
        spin_lock_irqsave(&sriov->going_down_lock, flags);
        /* make sure that there is no schedule inside the scheduled work */
        if (!sriov->is_going_down) {
                id->scheduled_delete = 1;
                schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
        }
        spin_unlock_irqrestore(&sriov->going_down_lock, flags);
        spin_unlock(&sriov->id_map_lock);
}

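/*
 * Rewrite the local comm ID of a CM MAD sent from a slave toward the
 * wire: REQ/REP/SIDR_REQ allocate a new mapping, REJ/SIDR_REP pass
 * through unchanged, and everything else reuses an existing mapping.
 * A DREQ arms the cleanup timer; a DREP tears the mapping down.
 */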
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
                struct ib_mad *mad)
{
        struct id_map_entry *id;
        u32 sl_cm_id;
        int pv_cm_id = -1;

        if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
                        mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
                        mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                sl_cm_id = get_local_comm_id(mad);
                id = id_map_alloc(ibdev, slave_id, sl_cm_id);
                if (IS_ERR(id)) {
                        mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
                                __func__, slave_id, sl_cm_id);
                        return PTR_ERR(id);
                }
        } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
                   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                return 0;
        } else {
                sl_cm_id = get_local_comm_id(mad);
                id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
        }

        if (!id) {
                pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
                         slave_id, sl_cm_id);
                return -EINVAL;
        }

        set_local_comm_id(mad, id->pv_cm_id);

        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
                schedule_delayed(ibdev, id);
        else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
                id_map_find_del(ibdev, pv_cm_id);

        return 0;
}

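/*
 * Rewrite the comm ID of a CM MAD arriving from the wire toward a
 * slave: REQ/SIDR_REQ are routed by the GID in the request, everything
 * else by looking up the paravirtual comm ID and restoring the
 * slave-local one.  A DREQ arms the cleanup timer; a REJ or DREP tears
 * the mapping down.
 */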
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                             struct ib_mad *mad)
{
        u32 pv_cm_id;
        struct id_map_entry *id;

        if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
            mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                union ib_gid gid;

                if (!slave)
                        return 0;

                gid = gid_from_req_msg(ibdev, mad);
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
                                        (unsigned long long)gid.global.interface_id);
                        return -ENOENT;
                }
                return 0;
        }

        pv_cm_id = get_remote_comm_id(mad);
        id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

        if (!id) {
                pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
                return -ENOENT;
        }

        if (slave)
                *slave = id->slave_id;
        set_remote_comm_id(mad, id->sl_cm_id);

        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
                schedule_delayed(ibdev, id);
        else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
                        mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
                id_map_find_del(ibdev, (int) pv_cm_id);
        }

        return 0;
}

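/*
 * Initialize the per-device paravirt CM state: the lock, the list of
 * live entries, and the two indexes.
 */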
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
        spin_lock_init(&dev->sriov.id_map_lock);
        INIT_LIST_HEAD(&dev->sriov.cm_list);
        dev->sriov.sl_id_map = RB_ROOT;
        idr_init(&dev->sriov.pv_id_table);
}

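/*
 * Tear-down path: cancel pending timeout work (flushing any handler that
 * is already running), then unlink and free every remaining entry owned
 * by the given slave.
 */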
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct rb_root *sl_id_map = &sriov->sl_id_map;
        struct list_head lh;
        struct rb_node *nd;
        int need_flush = 0;
        struct id_map_entry *map, *tmp_map;

        /* cancel all delayed work queue entries */
        INIT_LIST_HEAD(&lh);
        spin_lock(&sriov->id_map_lock);
        list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
                if (slave < 0 || slave == map->slave_id) {
                        if (map->scheduled_delete)
                                need_flush |= !cancel_delayed_work(&map->timeout);
                }
        }

        spin_unlock(&sriov->id_map_lock);

        if (need_flush)
                flush_scheduled_work(); /* make sure all timers were flushed */

        /* now, remove all leftover entries from databases */
        spin_lock(&sriov->id_map_lock);
        if (slave < 0) {
                while (rb_first(sl_id_map)) {
                        struct id_map_entry *ent =
                                rb_entry(rb_first(sl_id_map),
                                         struct id_map_entry, node);

                        rb_erase(&ent->node, sl_id_map);
                        idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
                }
                list_splice_init(&dev->sriov.cm_list, &lh);
        } else {
                /* first, move nodes belonging to slave to db remove list */
                nd = rb_first(sl_id_map);
                while (nd) {
                        struct id_map_entry *ent =
                                rb_entry(nd, struct id_map_entry, node);
                        nd = rb_next(nd);
                        if (ent->slave_id == slave)
                                list_move_tail(&ent->list, &lh);
                }
                /* remove those nodes from databases */
                list_for_each_entry_safe(map, tmp_map, &lh, list) {
                        rb_erase(&map->node, sl_id_map);
                        idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
                }

                /* add remaining nodes from cm_list */
                list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
                        if (slave == map->slave_id)
                                list_move_tail(&map->list, &lh);
                }
        }

        spin_unlock(&sriov->id_map_lock);

        /* free any map entries left behind due to cancel_delayed_work above */
        list_for_each_entry_safe(map, tmp_map, &lh, list) {
                list_del(&map->list);
                kfree(map);
        }
}
