drivers/net/mlx4/cq.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

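/*
 * Host-side image of the hardware CQ context.  It is built in a
 * command mailbox and handed to the device by SW2HW_CQ/MODIFY_CQ
 * below; multi-byte fields are big-endian as seen by the device.
 */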
struct mlx4_cq_context {
	__be32	flags;
	u16	reserved1[3];
	__be16	page_offset;
	__be32	logsize_usrpage;
	__be16	cq_period;
	__be16	cq_max_count;
	u8	reserved2[3];
	u8	comp_eqn;
	u8	log_page_size;
	u8	reserved3[2];
	u8	mtt_base_addr_h;
	__be32	mtt_base_addr_l;
	__be32	last_notified_index;
	__be32	solicit_producer_index;
	__be32	consumer_index;
	__be32	producer_index;
	u32	reserved4[2];
	__be64	db_rec_addr;
};

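/*
 * Status, flag and state encodings for the CQ context flags word.
 * MLX4_CQ_FLAG_CC corresponds to bit 18, the same bit mlx4_cq_alloc()
 * sets when a collapsed CQ is requested.
 */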
#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 << 8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 << 8)
#define MLX4_EQ_STATE_FIRED		(10 << 8)

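/*
 * Completion event dispatch: look up the CQ by number, bump its arm
 * sequence number and call its completion handler.  Events for
 * unknown CQNs only trigger a warning.
 */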
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}

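/*
 * Asynchronous (error) event dispatch: take a reference under the
 * table lock so the CQ cannot disappear while its event handler runs,
 * then drop the reference, completing ->free for a concurrent
 * mlx4_cq_free() if it was the last one.
 */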
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A);
}

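/*
 * Update the moderation parameters of a live CQ (maximum coalesced
 * completion count and coalescing period) via MODIFY_CQ with opcode
 * modifier 1.
 */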
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

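/*
 * Point a live CQ at a resized buffer: program the new entry count
 * and MTT address via MODIFY_CQ with opcode modifier 0.
 */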
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

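/*
 * Create a CQ: allocate a CQ number, reserve its ICM entries, insert
 * it into the radix tree used by the event dispatchers above, and
 * hand the CQ context to the hardware with SW2HW_CQ.  On success the
 * CQ holds one reference, dropped by mlx4_cq_free().
 */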
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (cq->cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
	if (err)
		goto err_put;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn = 1;
	cq->uar = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);

err_put:
	mlx4_table_put(dev, &cq_table->table, cq->cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

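/*
 * Tear-down counterpart of mlx4_cq_alloc(): return the CQ to software
 * ownership with HW2SW_CQ, make sure no event handler still holds a
 * reference (synchronize_irq plus the refcount/completion handshake),
 * then release the ICM entry and the CQ number.
 */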
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_table_put(dev, &cq_table->table, cq->cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

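/*
 * Driver init: set up the CQN radix tree and the bitmap allocator,
 * keeping the firmware-reserved CQs out of the allocatable range.
 */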
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}