/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"

#define OCRDMA_VID_PCP_SHIFT	0xD

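/* Map the L3 header type programmed in the AH to the protocol number
 * (EtherType) carried in the Ethernet/VLAN header: ETH_P_IBOE for a raw
 * GRH (RoCEv1), or the IPv4/IPv6 EtherTypes for RoCEv2 traffic.
 */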
static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
{
	switch (hdr_type) {
	case OCRDMA_L3_TYPE_IB_GRH:
		return (u16)ETH_P_IBOE;
	case OCRDMA_L3_TYPE_IPV4:
		return (u16)0x0800;
	case OCRDMA_L3_TYPE_IPV6:
		return (u16)0x86dd;
	default:
		pr_err("ocrdma%d: Invalid network header\n", devid);
		return 0;
	}
}

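/* Fill in the address vector (AV) for this AH: an Ethernet header (with an
 * 802.1Q tag when a VLAN or PFC is in use), followed by either an IPv4
 * header (RoCEv2 over IPv4) or a GRH, built from the ah_attr and the
 * resolved destination MAC.
 */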
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			      struct rdma_ah_attr *attr,
			      const union ib_gid *sgid,
			      int pdid, bool *isvlan, u16 vlan_tag)
{
	int status;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;
	u16 proto_num = 0;
	u8 nxthdr = 0x11;
	struct iphdr ipv4;
	const struct ib_global_route *ib_grh;
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* Protocol Number */
	proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
	if (!proto_num)
		return -EINVAL;
	nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
	/* VLAN */
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;
	if (vlan_tag || dev->pfc_state) {
		if (!vlan_tag) {
			pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
			       dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
			       dev->id);
		}
		eth.eth_type = cpu_to_be16(0x8100);
		eth.roce_eth_type = cpu_to_be16(proto_num);
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		*isvlan = true;
	} else {
		eth.eth_type = cpu_to_be16(proto_num);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	/* MAC */
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ib_grh = rdma_ah_read_grh(attr);
	ah->sgid_index = ib_grh->sgid_index;
	/* Eth HDR */
	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	if (ah->hdr_type == RDMA_NETWORK_IPV4) {
		*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
					   ib_grh->traffic_class);
		ipv4.id = cpu_to_be16(pdid);
		ipv4.frag_off = htons(IP_DF);
		ipv4.tot_len = htons(0);
		ipv4.ttl = ib_grh->hop_limit;
		ipv4.protocol = nxthdr;
		rdma_gid2ip(&sgid_addr._sockaddr, sgid);
		ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
		rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
		ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
		memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
	} else {
		memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
		grh.tclass_flow = cpu_to_be32((6 << 28) |
					      (ib_grh->traffic_class << 24) |
					      ib_grh->flow_label);
		memcpy(&grh.dgid[0], ib_grh->dgid.raw,
		       sizeof(ib_grh->dgid.raw));
		grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
						(nxthdr << 8) |
						ib_grh->hop_limit);
		memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	}
	if (*isvlan)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}

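/* Create an address handle: allocate an AV for the device, derive the
 * network header type and VLAN from the source GID attribute, program the
 * AV via set_av_attr(), and for user PDs pass the ah_id (plus header-type
 * and VLAN flags) to user space through the mapped ah_tbl.
 */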
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
			       struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	const struct ib_gid_attr *sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
	    !(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	sgid_attr = attr->grh.sgid_attr;
	if (is_vlan_dev(sgid_attr->ndev))
		vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);

	/* Get network header type for this GID */
	ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);

	status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
			     &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}

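/* Destroy an address handle and release its AV entry. */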
int ocrdma_destroy_ah(struct ib_ah *ibah)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	ocrdma_free_av(dev, ah);
	kfree(ah);
	return 0;
}

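/* Report the AH attributes by decoding them back out of the programmed AV:
 * the service level from the VLAN PCP bits, and the traffic class, flow
 * label, hop limit and DGID from the GRH portion.
 */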
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	attr->type = ibah->type;
	if (ah->av->valid & OCRDMA_AV_VALID) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_vlan));
		rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >> 13);
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_basic));
		rdma_ah_set_sl(attr, 0);
	}
	rdma_ah_set_grh(attr, NULL,
			be32_to_cpu(grh->tclass_flow) & 0xffffffff,
			ah->sgid_index,
			be32_to_cpu(grh->pdid_hoplimit) & 0xff,
			be32_to_cpu(grh->tclass_flow) >> 24);
	rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
	return 0;
}

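/* MAD handler: only performance-management MADs get special treatment (the
 * reply is built from the driver's PMA counters when available); all other
 * classes are simply accepted without a reply.
 */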
int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       const struct ib_wc *in_wc,
		       const struct ib_grh *in_grh,
		       const struct ib_mad_hdr *in, size_t in_mad_size,
		       struct ib_mad_hdr *out, size_t *out_mad_size,
		       u16 *out_mad_pkey_index)
{
	int status;
	struct ocrdma_dev *dev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_PERF_MGMT:
		dev = get_ocrdma_dev(ibdev);
		if (!ocrdma_pma_counters(dev, out_mad))
			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		else
			status = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		status = IB_MAD_RESULT_SUCCESS;
		break;
	}
	return status;
}