[SCSI] be2iscsi: Fix memory leak in control path of driver
drivers/scsi/be2iscsi/be_main.c
/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			 struct device_attribute *attr, const char *buf,\
			 size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events		: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling		: 0x08\n"
		"\t\t\t\tIO Path Events		: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n");

struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

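/**
 * beiscsi_eh_abort - iSCSI eh_abort_handler for be2iscsi
 * @sc: SCSI command to be aborted
 *
 * Builds a single-entry invalidate table for the command's ICD,
 * issues mgmt_invalidate_icds() to the adapter and then hands the
 * command off to iscsi_eh_abort().
 */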
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

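/**
 * beiscsi_eh_device_reset - iSCSI eh_device_reset_handler for be2iscsi
 * @sc: SCSI command that triggered the LUN reset
 *
 * Collects every outstanding ICD for the LUN on the lead connection
 * into the invalidate table, asks the adapter to invalidate them and
 * then hands off to iscsi_eh_device_reset().
 */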
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}

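/**
 * beiscsi_show_boot_tgt_info - show boot target attributes via iscsi_boot_sysfs
 * @data: beiscsi_hba pointer registered with the boot kset
 * @type: ISCSI_BOOT_TGT_* attribute being queried
 * @buf: buffer to format the value into
 */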
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			    (int)strlen(boot_sess->target_name),
			    (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				(char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				(char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

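/**
 * beiscsi_hba_alloc - allocate and register the Scsi_Host for this adapter
 * @pcidev: PCI device of the adapter
 *
 * Returns the driver-private beiscsi_hba embedded in the new host,
 * or NULL on failure.
 */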
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

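/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and PCI config BARs
 * @phba: The hba being initialized
 * @pcidev: PCI device of the adapter
 */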
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

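/**
 * be_ctrl_init - set up the controller mailbox and its locks
 * @phba: The hba being initialized
 * @pdev: PCI device of the adapter
 *
 * Maps the PCI BARs and allocates the DMA-able mailbox memory used
 * for MBOX/MCC commands.
 */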
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

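/**
 * beiscsi_get_params - derive driver resource limits from the FW config
 * @phba: The hba whose params are being filled in
 */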
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	    + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				? 1024 : phba->params.num_eq_entries;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : phba->params.num_eq_entries=%d\n",
		    phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	    + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}

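/**
 * hwi_ring_eq_db - ring the event queue doorbell
 * @phba: The hba pointer
 * @id: EQ ring id
 * @clr_interrupt: whether to clear the interrupt
 * @num_processed: number of EQEs consumed
 * @rearm: whether to re-arm the EQ
 * @event: whether to enable event delivery
 */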
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}

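/**
 * beiscsi_init_irqs - register the MSI-X (or legacy INTx) interrupt handlers
 * @phba: The hba pointer
 *
 * One vector per CPU is used for I/O EQs plus one extra vector for the
 * MCC EQ when MSI-X is enabled; otherwise a single shared IRQ is used.
 */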
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

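/**
 * hwi_ring_cq_db - ring the completion queue doorbell
 * @phba: The hba pointer
 * @id: CQ ring id
 * @num_processed: number of CQEs consumed
 * @rearm: whether to re-arm the CQ
 * @event: not used
 */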
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

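/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: The connection the PDU arrived on
 * @phba: The hba pointer
 * @cid: Connection id
 * @ppdu: PDU header received from the adapter
 * @pdu_len: Length of the PDU header
 * @pbuffer: Data buffer, if any
 * @buf_len: Length of the data buffer
 */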
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
			    (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}

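/**
 * alloc_io_sgl_handle - pick the next free SGL handle from the IO pool
 * @phba: The hba pointer
 *
 * Returns NULL when the pool is exhausted.
 */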
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
					(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

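/**
 * alloc_mgmt_sgl_handle - pick the next free SGL handle from the eh/mgmt pool
 * @phba: The hba pointer
 *
 * Returns NULL when the pool is exhausted.
 */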
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

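/**
 * hwi_complete_drvr_msgs - reclaim resources for a driver-generated WRB
 * @beiscsi_conn: The connection the completion belongs to
 * @phba: The hba pointer
 * @psol: Solicited CQE describing the completed WRB
 *
 * Frees the management SGL handle and WRB handle used by the task.
 */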
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock_bh(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock_bh(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
		WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, psol);
		else
			be_complete_tmf(beiscsi_conn, task, psol);

		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    ((psol->dw[offsetof(struct amap_iscsi_wrb,
			    type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			    ((psol->dw[offsetof(struct amap_sol_cqe,
			    cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

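/**
 * hwi_get_async_handle - look up the async PDU handle for a default PDU CQE
 * @phba: The hba pointer
 * @beiscsi_conn: The connection the CQE belongs to
 * @pasync_ctx: Async PDU context of the controller
 * @pdpdu_cqe: Default PDU CQE posted by the adapter
 * @pcq_index: Returns the ring index reported in the CQE
 */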
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;

	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		break;
	default:
		pbusy_list = NULL;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unexpected code=%d\n",
			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			    code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
			     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}

static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
			   struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
			    cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

static void hwi_free_async_msg(struct beiscsi_hba *phba,
			       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (pasync_handle->is_header) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

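/**
 * hwi_post_async_buffers - replenish the default PDU header/data rings
 * @phba: The hba pointer
 * @is_header: Nonzero to post header buffers, zero for data buffers
 */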
1679static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1680 unsigned int is_header)
1681{
1682 struct hwi_controller *phwi_ctrlr;
1683 struct hwi_async_pdu_context *pasync_ctx;
1684 struct async_pdu_handle *pasync_handle;
1685 struct list_head *pfree_link, *pbusy_list;
1686 struct phys_addr *pasync_sge;
1687 unsigned int ring_id, num_entries;
1688 unsigned int host_write_num;
1689 unsigned int writables;
1690 unsigned int i = 0;
1691 u32 doorbell = 0;
1692
1693 phwi_ctrlr = phba->phwi_ctrlr;
1694 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1695 num_entries = pasync_ctx->num_entries;
1696 
1697 if (is_header) {
1698 writables = min(pasync_ctx->async_header.writables,
1699 pasync_ctx->async_header.free_entries);
1700 pfree_link = pasync_ctx->async_header.free_list.next;
1701 host_write_num = pasync_ctx->async_header.host_write_ptr;
1702 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1703 } else {
1704 writables = min(pasync_ctx->async_data.writables,
1705 pasync_ctx->async_data.free_entries);
1706 pfree_link = pasync_ctx->async_data.free_list.next;
1707 host_write_num = pasync_ctx->async_data.host_write_ptr;
1708 ring_id = phwi_ctrlr->default_pdu_data.id;
1709 }
1710
1711 writables = (writables / 8) * 8;
1712 if (writables) {
1713 for (i = 0; i < writables; i++) {
1714 pbusy_list =
1715 hwi_get_async_busy_list(pasync_ctx, is_header,
1716 host_write_num);
1717 pasync_handle =
1718 list_entry(pfree_link, struct async_pdu_handle,
1719 link);
1720 WARN_ON(!pasync_handle);
1721 pasync_handle->consumed = 0;
1722
1723 pfree_link = pfree_link->next;
1724
1725 pasync_sge = hwi_get_ring_address(pasync_ctx,
1726 is_header, host_write_num);
1727
1728 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1729 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1730
1731 list_move(&pasync_handle->link, pbusy_list);
1732
1733 host_write_num++;
1734 host_write_num = host_write_num % num_entries;
1735 }
1736
1737 if (is_header) {
1738 pasync_ctx->async_header.host_write_ptr =
1739 host_write_num;
1740 pasync_ctx->async_header.free_entries -= writables;
1741 pasync_ctx->async_header.writables -= writables;
1742 pasync_ctx->async_header.busy_entries += writables;
1743 } else {
1744 pasync_ctx->async_data.host_write_ptr = host_write_num;
1745 pasync_ctx->async_data.free_entries -= writables;
1746 pasync_ctx->async_data.writables -= writables;
1747 pasync_ctx->async_data.busy_entries += writables;
1748 }
1749
1750 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1751 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1752 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1753 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1754 << DB_DEF_PDU_CQPROC_SHIFT;
1755
1756 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1757 }
1758}
1759
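/**
 * hwi_flush_default_pdu_buffer()- Drop a default PDU data buffer
 * @phba: pointer to the HBA
 * @beiscsi_conn: connection on which the buffer arrived
 * @pdpdu_cqe: completion entry for the buffer being discarded
 *
 * Updates the writables count if the entry was not yet consumed, frees
 * the partial message queued for the CRI and re-posts the buffers.
 **/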
1760static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1761 struct beiscsi_conn *beiscsi_conn,
1762 struct i_t_dpdu_cqe *pdpdu_cqe)
1763{
1764 struct hwi_controller *phwi_ctrlr;
1765 struct hwi_async_pdu_context *pasync_ctx;
1766 struct async_pdu_handle *pasync_handle = NULL;
1767 unsigned int cq_index = -1;
1768
1769 phwi_ctrlr = phba->phwi_ctrlr;
1770 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1771
1772 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1773 pdpdu_cqe, &cq_index);
1774 BUG_ON(pasync_handle->is_header != 0);
1775 if (pasync_handle->consumed == 0)
1776 hwi_update_async_writables(phba, pasync_ctx,
1777 pasync_handle->is_header, cq_index);
1778 
1779 hwi_free_async_msg(phba, pasync_handle->cri);
1780 hwi_post_async_buffers(phba, pasync_handle->is_header);
1781 }
1782
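/**
 * hwi_fwd_async_msg()- Hand a completed async PDU to the iSCSI layer
 * @beiscsi_conn: connection the PDU belongs to
 * @phba: pointer to the HBA
 * @pasync_ctx: async PDU context
 * @cri: connection resource index
 *
 * Takes the header from the first queued handle, copies the remaining
 * data buffers into the first data buffer and passes the assembled PDU
 * to beiscsi_process_async_pdu() before freeing the handles.
 **/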
1783static unsigned int
1784hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1785 struct beiscsi_hba *phba,
1786 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1787{
1788 struct list_head *plist;
1789 struct async_pdu_handle *pasync_handle;
1790 void *phdr = NULL;
1791 unsigned int hdr_len = 0, buf_len = 0;
1792 unsigned int status, index = 0, offset = 0;
1793 void *pfirst_buffer = NULL;
1794 unsigned int num_buf = 0;
1795
1796 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1797
1798 list_for_each_entry(pasync_handle, plist, link) {
1799 if (index == 0) {
1800 phdr = pasync_handle->pbuffer;
1801 hdr_len = pasync_handle->buffer_len;
1802 } else {
1803 buf_len = pasync_handle->buffer_len;
1804 if (!num_buf) {
1805 pfirst_buffer = pasync_handle->pbuffer;
1806 num_buf++;
1807 }
1808 memcpy(pfirst_buffer + offset,
1809 pasync_handle->pbuffer, buf_len);
1810 offset += buf_len;
1811 }
1812 index++;
1813 }
1814 
1815 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1816 (beiscsi_conn->beiscsi_conn_cid -
1817 phba->fw_config.iscsi_cid_start),
1818 phdr, hdr_len, pfirst_buffer,
1819 offset);
1820 
1821 hwi_free_async_msg(phba, cri);
1822 return 0;
1823 }
1824
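/**
 * hwi_gather_async_pdu()- Account for a received header or data buffer
 * @beiscsi_conn: connection the buffer belongs to
 * @phba: pointer to the HBA
 * @pasync_handle: handle for the buffer just completed
 *
 * For a header, records the expected data length and forwards the PDU
 * immediately when no data is expected; for data, accumulates the bytes
 * received and forwards the PDU once all expected bytes have arrived.
 **/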
1825static unsigned int
1826hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1827 struct beiscsi_hba *phba,
1828 struct async_pdu_handle *pasync_handle)
1829{
1830 struct hwi_async_pdu_context *pasync_ctx;
1831 struct hwi_controller *phwi_ctrlr;
1832 unsigned int bytes_needed = 0, status = 0;
1833 unsigned short cri = pasync_handle->cri;
1834 struct pdu_base *ppdu;
1835
1836 phwi_ctrlr = phba->phwi_ctrlr;
1837 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1838
1839 list_del(&pasync_handle->link);
1840 if (pasync_handle->is_header) {
1841 pasync_ctx->async_header.busy_entries--;
1842 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1843 hwi_free_async_msg(phba, cri);
1844 BUG();
1845 }
1846
1847 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1848 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1849 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1850 (unsigned short)pasync_handle->buffer_len;
1851 list_add_tail(&pasync_handle->link,
1852 &pasync_ctx->async_entry[cri].wait_queue.list);
1853
1854 ppdu = pasync_handle->pbuffer;
1855 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1856 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1857 0xFFFF0000) | ((be16_to_cpu((ppdu->
1858 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1859 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1860
1861 if (status == 0) {
1862 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1863 bytes_needed;
1864
1865 if (bytes_needed == 0)
1866 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1867 pasync_ctx, cri);
1868 }
1869 } else {
1870 pasync_ctx->async_data.busy_entries--;
1871 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1872 list_add_tail(&pasync_handle->link,
1873 &pasync_ctx->async_entry[cri].wait_queue.
1874 list);
1875 pasync_ctx->async_entry[cri].wait_queue.
1876 bytes_received +=
1877 (unsigned short)pasync_handle->buffer_len;
1878
1879 if (pasync_ctx->async_entry[cri].wait_queue.
1880 bytes_received >=
1881 pasync_ctx->async_entry[cri].wait_queue.
1882 bytes_needed)
1883 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1884 pasync_ctx, cri);
1885 }
1886 }
1887 return status;
1888}
1889
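/**
 * hwi_process_default_pdu_ring()- Handle an unsolicited PDU completion
 * @beiscsi_conn: connection the completion belongs to
 * @phba: pointer to the HBA
 * @pdpdu_cqe: the default PDU completion entry
 *
 * Resolves the async handle for the CQE, updates the writables count if
 * the entry was not yet consumed, gathers the PDU and re-posts buffers.
 **/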
1890static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1891 struct beiscsi_hba *phba,
1892 struct i_t_dpdu_cqe *pdpdu_cqe)
1893{
1894 struct hwi_controller *phwi_ctrlr;
1895 struct hwi_async_pdu_context *pasync_ctx;
1896 struct async_pdu_handle *pasync_handle = NULL;
1897 unsigned int cq_index = -1;
1898
1899 phwi_ctrlr = phba->phwi_ctrlr;
1900 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1901 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1902 pdpdu_cqe, &cq_index);
1903
1904 if (pasync_handle->consumed == 0)
1905 hwi_update_async_writables(phba, pasync_ctx,
1906 pasync_handle->is_header, cq_index);
1907 
1908 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1909 hwi_post_async_buffers(phba, pasync_handle->is_header);
1910}
1911
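/**
 * beiscsi_process_mcc_isr()- Drain the MCC completion queue
 * @phba: pointer to the HBA
 *
 * Processes async link-state events and MCC command completions,
 * ringing the CQ doorbell every 32 entries and re-arming it at the end.
 **/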
1912static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1913{
1914 struct be_queue_info *mcc_cq;
1915 struct be_mcc_compl *mcc_compl;
1916 unsigned int num_processed = 0;
1917
1918 mcc_cq = &phba->ctrl.mcc_obj.cq;
1919 mcc_compl = queue_tail_node(mcc_cq);
1920 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1921 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1922
1923 if (num_processed >= 32) {
1924 hwi_ring_cq_db(phba, mcc_cq->id,
1925 num_processed, 0, 0);
1926 num_processed = 0;
1927 }
1928 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1929 /* Interpret flags as an async trailer */
1930 if (is_link_state_evt(mcc_compl->flags))
1931 /* Interpret compl as an async link evt */
1932 beiscsi_async_link_state_process(phba,
1933 (struct be_async_event_link_state *) mcc_compl);
1934 else
1935 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
1936 "BM_%d : Unsupported Async Event, flags"
1937 " = 0x%08x\n",
1938 mcc_compl->flags);
1939 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1940 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1941 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1942 }
1943
1944 mcc_compl->flags = 0;
1945 queue_tail_inc(mcc_cq);
1946 mcc_compl = queue_tail_node(mcc_cq);
1947 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1948 num_processed++;
1949 }
1950
1951 if (num_processed > 0)
1952 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1953
1954}
1955 
1956 /**
1957  * beiscsi_process_cq()- Process the Completion Queue
1958  * @pbe_eq: Event Q on which the Completion has come
1959  *
1960  * return
1961  *     Number of Completion Entries processed.
1962  **/
1963 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1964 {
1965 struct be_queue_info *cq;
1966 struct sol_cqe *sol;
1967 struct dmsg_cqe *dmsg;
1968 unsigned int num_processed = 0;
1969 unsigned int tot_nump = 0;
1970 unsigned short code = 0, cid = 0;
1971 struct beiscsi_conn *beiscsi_conn;
1972 struct beiscsi_endpoint *beiscsi_ep;
1973 struct iscsi_endpoint *ep;
1974 struct beiscsi_hba *phba;
1975 
1976 cq = pbe_eq->cq;
1977 sol = queue_tail_node(cq);
1978 phba = pbe_eq->phba;
1979 
1980 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1981 CQE_VALID_MASK) {
1982 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1983
0a513dd8
JSJ
1984 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1985 CQE_CID_MASK) >> 6);
1986 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1987 CQE_CODE_MASK);
1988 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
32951dd8 1989
c2462288
JK
1990 beiscsi_ep = ep->dd_data;
1991 beiscsi_conn = beiscsi_ep->conn;
756d29c8 1992
6733b39a 1993 if (num_processed >= 32) {
bfead3b2 1994 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
1995 num_processed, 0, 0);
1996 tot_nump += num_processed;
1997 num_processed = 0;
1998 }
1999
0a513dd8 2000 switch (code) {
6733b39a
JK
2001 case SOL_CMD_COMPLETE:
2002 hwi_complete_cmd(beiscsi_conn, phba, sol);
2003 break;
2004 case DRIVERMSG_NOTIFY:
99bc5d55
JSJ
2005 beiscsi_log(phba, KERN_INFO,
2006 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2007 "BM_%d : Received %s[%d] on CID : %d\n",
2008 cqe_desc[code], code, cid);
99bc5d55 2009
6733b39a
JK
2010 dmsg = (struct dmsg_cqe *)sol;
2011 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2012 break;
2013 case UNSOL_HDR_NOTIFY:
99bc5d55
JSJ
2014 beiscsi_log(phba, KERN_INFO,
2015 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2016 "BM_%d : Received %s[%d] on CID : %d\n",
2017 cqe_desc[code], code, cid);
99bc5d55 2018
bfead3b2
JK
2019 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2020 (struct i_t_dpdu_cqe *)sol);
2021 break;
6733b39a 2022 case UNSOL_DATA_NOTIFY:
99bc5d55
JSJ
2023 beiscsi_log(phba, KERN_INFO,
2024 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2025 "BM_%d : Received %s[%d] on CID : %d\n",
2026 cqe_desc[code], code, cid);
99bc5d55 2027
6733b39a
JK
2028 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2029 (struct i_t_dpdu_cqe *)sol);
2030 break;
2031 case CXN_INVALIDATE_INDEX_NOTIFY:
2032 case CMD_INVALIDATED_NOTIFY:
2033 case CXN_INVALIDATE_NOTIFY:
99bc5d55
JSJ
2034 beiscsi_log(phba, KERN_ERR,
2035 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2036 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2037 cqe_desc[code], code, cid);
6733b39a
JK
2038 break;
2039 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2040 case CMD_KILLED_INVALID_STATSN_RCVD:
2041 case CMD_KILLED_INVALID_R2T_RCVD:
2042 case CMD_CXN_KILLED_LUN_INVALID:
2043 case CMD_CXN_KILLED_ICD_INVALID:
2044 case CMD_CXN_KILLED_ITT_INVALID:
2045 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2046 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
99bc5d55
JSJ
2047 beiscsi_log(phba, KERN_ERR,
2048 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2049 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2050 cqe_desc[code], code, cid);
6733b39a
JK
2051 break;
2052 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
99bc5d55
JSJ
2053 beiscsi_log(phba, KERN_ERR,
2054 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2055 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2056 cqe_desc[code], code, cid);
6733b39a
JK
2057 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2058 (struct i_t_dpdu_cqe *) sol);
2059 break;
2060 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2061 case CXN_KILLED_BURST_LEN_MISMATCH:
2062 case CXN_KILLED_AHS_RCVD:
2063 case CXN_KILLED_HDR_DIGEST_ERR:
2064 case CXN_KILLED_UNKNOWN_HDR:
2065 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2066 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2067 case CXN_KILLED_TIMED_OUT:
2068 case CXN_KILLED_FIN_RCVD:
6763daae
JSJ
2069 case CXN_KILLED_RST_SENT:
2070 case CXN_KILLED_RST_RCVD:
6733b39a
JK
2071 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2072 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2073 case CXN_KILLED_OVER_RUN_RESIDUAL:
2074 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2075 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
99bc5d55
JSJ
2076 beiscsi_log(phba, KERN_ERR,
2077 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2078 "BM_%d : Event %s[%d] received on CID : %d\n",
2079 cqe_desc[code], code, cid);
0a513dd8
JSJ
2080 if (beiscsi_conn)
2081 iscsi_conn_failure(beiscsi_conn->conn,
2082 ISCSI_ERR_CONN_FAILED);
6733b39a
JK
2083 break;
2084 default:
2085 beiscsi_log(phba, KERN_ERR,
2086 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2087 "BM_%d : Invalid CQE Event Received Code : %d "
2088 "CID 0x%x...\n",
2089 code, cid);
6733b39a
JK
2090 break;
2091 }
2092
2093 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2094 queue_tail_inc(cq);
2095 sol = queue_tail_node(cq);
2096 num_processed++;
2097 }
2098
2099 if (num_processed > 0) {
2100 tot_nump += num_processed;
bfead3b2 2101 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
2102 }
2103 return tot_nump;
2104}
2105
756d29c8 2106void beiscsi_process_all_cqs(struct work_struct *work)
6733b39a
JK
2107{
2108 unsigned long flags;
bfead3b2
JK
2109 struct hwi_controller *phwi_ctrlr;
2110 struct hwi_context_memory *phwi_context;
2111 struct be_eq_obj *pbe_eq;
6733b39a
JK
2112 struct beiscsi_hba *phba =
2113 container_of(work, struct beiscsi_hba, work_cqs);
2114
bfead3b2
JK
2115 phwi_ctrlr = phba->phwi_ctrlr;
2116 phwi_context = phwi_ctrlr->phwi_ctxt;
2117 if (phba->msix_enabled)
2118 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
2119 else
2120 pbe_eq = &phwi_context->be_eq[0];
2121
6733b39a
JK
2122 if (phba->todo_mcc_cq) {
2123 spin_lock_irqsave(&phba->isr_lock, flags);
2124 phba->todo_mcc_cq = 0;
2125 spin_unlock_irqrestore(&phba->isr_lock, flags);
756d29c8 2126 beiscsi_process_mcc_isr(phba);
6733b39a
JK
2127 }
2128
2129 if (phba->todo_cq) {
2130 spin_lock_irqsave(&phba->isr_lock, flags);
2131 phba->todo_cq = 0;
2132 spin_unlock_irqrestore(&phba->isr_lock, flags);
bfead3b2 2133 beiscsi_process_cq(pbe_eq);
6733b39a
JK
2134 }
2135}
2136
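/**
 * be_iopoll()- blk_iopoll callback for the iSCSI completion queue
 * @iop: iopoll structure embedded in the EQ object
 * @budget: maximum number of completions to process
 *
 * Processes the CQ bound to this EQ and re-arms the EQ once fewer
 * entries than the budget were consumed.
 **/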
2137 static int be_iopoll(struct blk_iopoll *iop, int budget)
2138 {
2139 unsigned int ret;
2140 struct beiscsi_hba *phba;
2141 struct be_eq_obj *pbe_eq;
2142 
2143 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2144 ret = beiscsi_process_cq(pbe_eq);
6733b39a 2145 if (ret < budget) {
bfead3b2 2146 phba = pbe_eq->phba;
6733b39a 2147 blk_iopoll_complete(iop);
99bc5d55
JSJ
2148 beiscsi_log(phba, KERN_INFO,
2149 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2150 "BM_%d : rearm pbe_eq->q.id =%d\n",
2151 pbe_eq->q.id);
bfead3b2 2152 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2153 }
2154 return ret;
2155}
2156
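/**
 * hwi_write_sgl()- Populate the WRB and SGL for a SCSI I/O task
 * @pwrb: ptr to the WRB entry
 * @sg: scatterlist of the I/O
 * @num_sg: number of mapped scatterlist elements
 * @io_task: the be2iscsi task being issued
 *
 * The first two SG elements are written directly into the WRB
 * (sge0/sge1); the full scatterlist is then written into the SGL
 * fragment page and the final SGE is marked as the last one.
 **/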
2157static void
2158hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2159 unsigned int num_sg, struct beiscsi_io_task *io_task)
2160{
2161 struct iscsi_sge *psgl;
58ff4bd0 2162 unsigned int sg_len, index;
6733b39a
JK
2163 unsigned int sge_len = 0;
2164 unsigned long long addr;
2165 struct scatterlist *l_sg;
2166 unsigned int offset;
2167
2168 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2169 io_task->bhs_pa.u.a32.address_lo);
2170 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2171 io_task->bhs_pa.u.a32.address_hi);
2172
2173 l_sg = sg;
48bd86cf
JK
2174 for (index = 0; (index < num_sg) && (index < 2); index++,
2175 sg = sg_next(sg)) {
6733b39a
JK
2176 if (index == 0) {
2177 sg_len = sg_dma_len(sg);
2178 addr = (u64) sg_dma_address(sg);
2179 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2180 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2181 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2182 ((u32)(addr >> 32)));
6733b39a
JK
2183 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2184 sg_len);
2185 sge_len = sg_len;
6733b39a 2186 } else {
6733b39a
JK
2187 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2188 pwrb, sge_len);
2189 sg_len = sg_dma_len(sg);
2190 addr = (u64) sg_dma_address(sg);
2191 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
457ff3b7 2192 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2193 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
457ff3b7 2194 ((u32)(addr >> 32)));
6733b39a
JK
2195 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2196 sg_len);
2197 }
2198 }
2199 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2200 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2201
2202 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2203
2204 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2205 io_task->bhs_pa.u.a32.address_hi);
2206 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2207 io_task->bhs_pa.u.a32.address_lo);
2208
caf818f1
JK
2209 if (num_sg == 1) {
2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2211 1);
2212 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2213 0);
2214 } else if (num_sg == 2) {
2215 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2216 0);
2217 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2218 1);
2219 } else {
2220 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2221 0);
2222 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2223 0);
2224 }
6733b39a
JK
2225 sg = l_sg;
2226 psgl++;
2227 psgl++;
2228 offset = 0;
48bd86cf 2229 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
6733b39a
JK
2230 sg_len = sg_dma_len(sg);
2231 addr = (u64) sg_dma_address(sg);
2232 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2233 (addr & 0xFFFFFFFF));
2234 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2235 (addr >> 32));
2236 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2237 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2238 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2239 offset += sg_len;
2240 }
2241 psgl--;
2242 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2243}
2244
2245/**
2246 * hwi_write_buffer()- Populate the WRB with task info
2247 * @pwrb: ptr to the WRB entry
2248 * @task: iscsi task which is to be executed
2249 **/
2250static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2251{
2252 struct iscsi_sge *psgl;
6733b39a
JK
2253 struct beiscsi_io_task *io_task = task->dd_data;
2254 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2255 struct beiscsi_hba *phba = beiscsi_conn->phba;
2256
2257 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2258 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2259 io_task->bhs_pa.u.a32.address_lo);
2260 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2261 io_task->bhs_pa.u.a32.address_hi);
2262
2263 if (task->data) {
2264 if (task->data_count) {
2265 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
d629c471
JSJ
2266 io_task->mtask_addr = pci_map_single(phba->pcidev,
2267 task->data,
2268 task->data_count,
2269 PCI_DMA_TODEVICE);
2270
2271 io_task->mtask_data_count = task->data_count;
6733b39a
JK
2272 } else {
2273 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2274 io_task->mtask_addr = 0;
6733b39a
JK
2275 }
2276 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
d629c471 2277 lower_32_bits(io_task->mtask_addr));
6733b39a 2278 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
d629c471 2279 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2280 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2281 task->data_count);
2282
2283 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2284 } else {
2285 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2286 io_task->mtask_addr = 0;
6733b39a
JK
2287 }
2288
2289 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2290
2291 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2292
2293 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2294 io_task->bhs_pa.u.a32.address_hi);
2295 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2296 io_task->bhs_pa.u.a32.address_lo);
2297 if (task->data) {
2298 psgl++;
2299 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2300 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2301 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2302 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2303 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2304 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2305
2306 psgl++;
2307 if (task->data) {
2308 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
d629c471 2309 lower_32_bits(io_task->mtask_addr));
6733b39a 2310 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
d629c471 2311 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2312 }
2313 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2314 }
2315 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2316}
2317
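/**
 * beiscsi_find_mem_req()- Compute the driver's memory requirements
 * @phba: pointer to the HBA
 *
 * Fills phba->mem_req[] with the size of every region that will be
 * allocated: WRBs and their handles, SGL handles and SGEs, and the
 * default PDU header/data buffers, rings and handles.
 **/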
2318static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2319{
bfead3b2 2320 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2321 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2322 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2323
2324 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2325 sizeof(struct sol_cqe));
6733b39a
JK
2326 num_async_pdu_buf_pages =
2327 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2328 phba->params.defpdu_hdr_sz);
2329 num_async_pdu_buf_sgl_pages =
2330 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2331 sizeof(struct phys_addr));
2332 num_async_pdu_data_pages =
2333 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2334 phba->params.defpdu_data_sz);
2335 num_async_pdu_data_sgl_pages =
2336 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2337 sizeof(struct phys_addr));
2338
2339 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2340
2341 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2342 BE_ISCSI_PDU_HEADER_SIZE;
2343 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2344 sizeof(struct hwi_context_memory);
2345
6733b39a
JK
2346
2347 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2348 * (phba->params.wrbs_per_cxn)
2349 * phba->params.cxns_per_ctrl;
2350 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2351 (phba->params.wrbs_per_cxn);
2352 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2353 phba->params.cxns_per_ctrl);
2354
2355 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2356 phba->params.icds_per_ctrl;
2357 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2358 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2359
2360 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2361 num_async_pdu_buf_pages * PAGE_SIZE;
2362 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2363 num_async_pdu_data_pages * PAGE_SIZE;
2364 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2365 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2366 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2367 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2368 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2369 phba->params.asyncpdus_per_ctrl *
2370 sizeof(struct async_pdu_handle);
2371 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2372 phba->params.asyncpdus_per_ctrl *
2373 sizeof(struct async_pdu_handle);
2374 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2375 sizeof(struct hwi_async_pdu_context) +
2376 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2377}
2378
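/**
 * beiscsi_alloc_mem()- Allocate the regions described by mem_req[]
 * @phba: pointer to the HBA
 *
 * Allocates the HWI workspace and the init_mem descriptor table, then
 * DMA-allocates each region, falling back to progressively smaller
 * fragments when a large contiguous allocation fails. All allocations
 * are unwound on error.
 **/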
2379static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2380{
2381 struct be_mem_descriptor *mem_descr;
2382 dma_addr_t bus_add;
2383 struct mem_array *mem_arr, *mem_arr_orig;
2384 unsigned int i, j, alloc_size, curr_alloc_size;
2385
3ec78271 2386 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2387 if (!phba->phwi_ctrlr)
2388 return -ENOMEM;
2389
2390 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2391 GFP_KERNEL);
2392 if (!phba->init_mem) {
2393 kfree(phba->phwi_ctrlr);
2394 return -ENOMEM;
2395 }
2396
2397 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2398 GFP_KERNEL);
2399 if (!mem_arr_orig) {
2400 kfree(phba->init_mem);
2401 kfree(phba->phwi_ctrlr);
2402 return -ENOMEM;
2403 }
2404
2405 mem_descr = phba->init_mem;
2406 for (i = 0; i < SE_MEM_MAX; i++) {
2407 j = 0;
2408 mem_arr = mem_arr_orig;
2409 alloc_size = phba->mem_req[i];
2410 memset(mem_arr, 0, sizeof(struct mem_array) *
2411 BEISCSI_MAX_FRAGS_INIT);
2412 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2413 do {
2414 mem_arr->virtual_address = pci_alloc_consistent(
2415 phba->pcidev,
2416 curr_alloc_size,
2417 &bus_add);
2418 if (!mem_arr->virtual_address) {
2419 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2420 goto free_mem;
2421 if (curr_alloc_size -
2422 rounddown_pow_of_two(curr_alloc_size))
2423 curr_alloc_size = rounddown_pow_of_two
2424 (curr_alloc_size);
2425 else
2426 curr_alloc_size = curr_alloc_size / 2;
2427 } else {
2428 mem_arr->bus_address.u.
2429 a64.address = (__u64) bus_add;
2430 mem_arr->size = curr_alloc_size;
2431 alloc_size -= curr_alloc_size;
2432 curr_alloc_size = min(be_max_phys_size *
2433 1024, alloc_size);
2434 j++;
2435 mem_arr++;
2436 }
2437 } while (alloc_size);
2438 mem_descr->num_elements = j;
2439 mem_descr->size_in_bytes = phba->mem_req[i];
2440 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2441 GFP_KERNEL);
2442 if (!mem_descr->mem_array)
2443 goto free_mem;
2444
2445 memcpy(mem_descr->mem_array, mem_arr_orig,
2446 sizeof(struct mem_array) * j);
2447 mem_descr++;
2448 }
2449 kfree(mem_arr_orig);
2450 return 0;
2451free_mem:
2452 mem_descr->num_elements = j;
2453 while ((i) || (j)) {
2454 for (j = mem_descr->num_elements; j > 0; j--) {
2455 pci_free_consistent(phba->pcidev,
2456 mem_descr->mem_array[j - 1].size,
2457 mem_descr->mem_array[j - 1].
2458 virtual_address,
457ff3b7
JK
2459 (unsigned long)mem_descr->
2460 mem_array[j - 1].
6733b39a
JK
2461 bus_address.u.a64.address);
2462 }
2463 if (i) {
2464 i--;
2465 kfree(mem_descr->mem_array);
2466 mem_descr--;
2467 }
2468 }
2469 kfree(mem_arr_orig);
2470 kfree(phba->init_mem);
2471 kfree(phba->phwi_ctrlr);
2472 return -ENOMEM;
2473}
2474
2475static int beiscsi_get_memory(struct beiscsi_hba *phba)
2476{
2477 beiscsi_find_mem_req(phba);
2478 return beiscsi_alloc_mem(phba);
2479}
2480
2481static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2482{
2483 struct pdu_data_out *pdata_out;
2484 struct pdu_nop_out *pnop_out;
2485 struct be_mem_descriptor *mem_descr;
2486
2487 mem_descr = phba->init_mem;
2488 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2489 pdata_out =
2490 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2491 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2492
2493 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2494 IIOC_SCSI_DATA);
2495
2496 pnop_out =
2497 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2498 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2499
2500 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2501 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2502 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2503 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2504}
2505
3ec78271 2506static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2507{
2508 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
3ec78271 2509 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2510 struct hwi_controller *phwi_ctrlr;
2511 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2512 struct iscsi_wrb *pwrb = NULL;
2513 unsigned int num_cxn_wrbh = 0;
2514 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2515
2516 mem_descr_wrbh = phba->init_mem;
2517 mem_descr_wrbh += HWI_MEM_WRBH;
2518
2519 mem_descr_wrb = phba->init_mem;
2520 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2521 phwi_ctrlr = phba->phwi_ctrlr;
2522
2523 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2524 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2525 pwrb_context->pwrb_handle_base =
2526 kzalloc(sizeof(struct wrb_handle *) *
2527 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2528 if (!pwrb_context->pwrb_handle_base) {
99bc5d55
JSJ
2529 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2530 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2531 goto init_wrb_hndl_failed;
2532 }
6733b39a
JK
2533 pwrb_context->pwrb_handle_basestd =
2534 kzalloc(sizeof(struct wrb_handle *) *
2535 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2536 if (!pwrb_context->pwrb_handle_basestd) {
99bc5d55
JSJ
2537 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2538 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2539 goto init_wrb_hndl_failed;
2540 }
2541 if (!num_cxn_wrbh) {
2542 pwrb_handle =
2543 mem_descr_wrbh->mem_array[idx].virtual_address;
2544 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2545 ((sizeof(struct wrb_handle)) *
2546 phba->params.wrbs_per_cxn));
2547 idx++;
2548 }
2549 pwrb_context->alloc_index = 0;
2550 pwrb_context->wrb_handles_available = 0;
2551 pwrb_context->free_index = 0;
2552
6733b39a 2553 if (num_cxn_wrbh) {
6733b39a
JK
2554 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2555 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2556 pwrb_context->pwrb_handle_basestd[j] =
2557 pwrb_handle;
2558 pwrb_context->wrb_handles_available++;
bfead3b2 2559 pwrb_handle->wrb_index = j;
6733b39a
JK
2560 pwrb_handle++;
2561 }
6733b39a
JK
2562 num_cxn_wrbh--;
2563 }
2564 }
2565 idx = 0;
ed58ea2a 2566 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
6733b39a 2567 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2568 if (!num_cxn_wrb) {
6733b39a 2569 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2570 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2571 ((sizeof(struct iscsi_wrb) *
2572 phba->params.wrbs_per_cxn));
2573 idx++;
2574 }
2575
2576 if (num_cxn_wrb) {
6733b39a
JK
2577 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2578 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2579 pwrb_handle->pwrb = pwrb;
2580 pwrb++;
2581 }
2582 num_cxn_wrb--;
2583 }
2584 }
3ec78271
JK
2585 return 0;
2586init_wrb_hndl_failed:
2587 for (j = index; j > 0; j--) {
2588 pwrb_context = &phwi_ctrlr->wrb_context[j];
2589 kfree(pwrb_context->pwrb_handle_base);
2590 kfree(pwrb_context->pwrb_handle_basestd);
2591 }
2592 return -ENOMEM;
6733b39a
JK
2593}
2594
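/**
 * hwi_init_async_pdu_ctx()- Set up the default PDU (async) context
 * @phba: pointer to the HBA
 *
 * Carves the async PDU context out of init_mem, wires up the header and
 * data buffer, ring and handle areas, and builds the free lists of
 * header/data handles with their virtual and physical addresses.
 **/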
2595static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2596{
2597 struct hwi_controller *phwi_ctrlr;
2598 struct hba_parameters *p = &phba->params;
2599 struct hwi_async_pdu_context *pasync_ctx;
2600 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
dc63aac6 2601 unsigned int index, idx, num_per_mem, num_async_data;
6733b39a
JK
2602 struct be_mem_descriptor *mem_descr;
2603
2604 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2605 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2606
2607 phwi_ctrlr = phba->phwi_ctrlr;
2608 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2609 mem_descr->mem_array[0].virtual_address;
2610 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2611 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2612
dc63aac6
JK
2613 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2614 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
6733b39a
JK
2615
2616 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2617 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2618 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2619 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2620 "BM_%d : hwi_init_async_pdu_ctx"
2621 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2622 mem_descr->mem_array[0].virtual_address);
6733b39a 2623 } else
99bc5d55
JSJ
2624 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2625 "BM_%d : No Virtual address\n");
6733b39a
JK
2626
2627 pasync_ctx->async_header.va_base =
2628 mem_descr->mem_array[0].virtual_address;
2629
2630 pasync_ctx->async_header.pa_base.u.a64.address =
2631 mem_descr->mem_array[0].bus_address.u.a64.address;
2632
2633 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2634 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2635 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2636 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2637 "BM_%d : hwi_init_async_pdu_ctx"
2638 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2639 mem_descr->mem_array[0].virtual_address);
6733b39a 2640 } else
99bc5d55
JSJ
2641 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2642 "BM_%d : No Virtual address\n");
2643
6733b39a
JK
2644 pasync_ctx->async_header.ring_base =
2645 mem_descr->mem_array[0].virtual_address;
2646
2647 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2648 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2649 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2650 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2651 "BM_%d : hwi_init_async_pdu_ctx"
2652 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2653 mem_descr->mem_array[0].virtual_address);
6733b39a 2654 } else
99bc5d55
JSJ
2655 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2656 "BM_%d : No Virtual address\n");
6733b39a
JK
2657
2658 pasync_ctx->async_header.handle_base =
2659 mem_descr->mem_array[0].virtual_address;
2660 pasync_ctx->async_header.writables = 0;
2661 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2662
6733b39a
JK
2663
2664 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2665 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2666 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2667 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2668 "BM_%d : hwi_init_async_pdu_ctx"
2669 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2670 mem_descr->mem_array[0].virtual_address);
6733b39a 2671 } else
99bc5d55
JSJ
2672 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2673 "BM_%d : No Virtual address\n");
6733b39a
JK
2674
2675 pasync_ctx->async_data.ring_base =
2676 mem_descr->mem_array[0].virtual_address;
2677
2678 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2679 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2680 if (!mem_descr->mem_array[0].virtual_address)
99bc5d55
JSJ
2681 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2682 "BM_%d : No Virtual address\n");
6733b39a
JK
2683
2684 pasync_ctx->async_data.handle_base =
2685 mem_descr->mem_array[0].virtual_address;
2686 pasync_ctx->async_data.writables = 0;
2687 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2688
2689 pasync_header_h =
2690 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2691 pasync_data_h =
2692 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2693
dc63aac6
JK
2694 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2695 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2696 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2697 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2698 "BM_%d : hwi_init_async_pdu_ctx"
2699 " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2700 mem_descr->mem_array[0].virtual_address);
dc63aac6 2701 } else
99bc5d55
JSJ
2702 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2703 "BM_%d : No Virtual address\n");
2704
dc63aac6
JK
2705 idx = 0;
2706 pasync_ctx->async_data.va_base =
2707 mem_descr->mem_array[idx].virtual_address;
2708 pasync_ctx->async_data.pa_base.u.a64.address =
2709 mem_descr->mem_array[idx].bus_address.u.a64.address;
2710
2711 num_async_data = ((mem_descr->mem_array[idx].size) /
2712 phba->params.defpdu_data_sz);
2713 num_per_mem = 0;
2714
6733b39a
JK
2715 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2716 pasync_header_h->cri = -1;
2717 pasync_header_h->index = (char)index;
2718 INIT_LIST_HEAD(&pasync_header_h->link);
2719 pasync_header_h->pbuffer =
2720 (void *)((unsigned long)
2721 (pasync_ctx->async_header.va_base) +
2722 (p->defpdu_hdr_sz * index));
2723
2724 pasync_header_h->pa.u.a64.address =
2725 pasync_ctx->async_header.pa_base.u.a64.address +
2726 (p->defpdu_hdr_sz * index);
2727
2728 list_add_tail(&pasync_header_h->link,
2729 &pasync_ctx->async_header.free_list);
2730 pasync_header_h++;
2731 pasync_ctx->async_header.free_entries++;
2732 pasync_ctx->async_header.writables++;
2733
2734 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2735 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2736 header_busy_list);
2737 pasync_data_h->cri = -1;
2738 pasync_data_h->index = (char)index;
2739 INIT_LIST_HEAD(&pasync_data_h->link);
dc63aac6
JK
2740
2741 if (!num_async_data) {
2742 num_per_mem = 0;
2743 idx++;
2744 pasync_ctx->async_data.va_base =
2745 mem_descr->mem_array[idx].virtual_address;
2746 pasync_ctx->async_data.pa_base.u.a64.address =
2747 mem_descr->mem_array[idx].
2748 bus_address.u.a64.address;
2749
2750 num_async_data = ((mem_descr->mem_array[idx].size) /
2751 phba->params.defpdu_data_sz);
2752 }
6733b39a
JK
2753 pasync_data_h->pbuffer =
2754 (void *)((unsigned long)
2755 (pasync_ctx->async_data.va_base) +
dc63aac6 2756 (p->defpdu_data_sz * num_per_mem));
6733b39a
JK
2757
2758 pasync_data_h->pa.u.a64.address =
2759 pasync_ctx->async_data.pa_base.u.a64.address +
dc63aac6
JK
2760 (p->defpdu_data_sz * num_per_mem);
2761 num_per_mem++;
2762 num_async_data--;
6733b39a
JK
2763
2764 list_add_tail(&pasync_data_h->link,
2765 &pasync_ctx->async_data.free_list);
2766 pasync_data_h++;
2767 pasync_ctx->async_data.free_entries++;
2768 pasync_ctx->async_data.writables++;
2769
2770 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2771 }
2772
2773 pasync_ctx->async_header.host_write_ptr = 0;
2774 pasync_ctx->async_header.ep_read_ptr = -1;
2775 pasync_ctx->async_data.host_write_ptr = 0;
2776 pasync_ctx->async_data.ep_read_ptr = -1;
2777}
2778
2779static int
2780be_sgl_create_contiguous(void *virtual_address,
2781 u64 physical_address, u32 length,
2782 struct be_dma_mem *sgl)
2783{
2784 WARN_ON(!virtual_address);
2785 WARN_ON(!physical_address);
2786 WARN_ON(length == 0);
2787 WARN_ON(!sgl);
2788 
2789 sgl->va = virtual_address;
2790 sgl->dma = (unsigned long)physical_address;
2791 sgl->size = length;
2792 
2793 return 0;
2794 }
2795
2796static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2797{
2798 memset(sgl, 0, sizeof(*sgl));
2799}
2800
2801static void
2802hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2803 struct mem_array *pmem, struct be_dma_mem *sgl)
2804{
2805 if (sgl->va)
2806 be_sgl_destroy_contiguous(sgl);
2807
2808 be_sgl_create_contiguous(pmem->virtual_address,
2809 pmem->bus_address.u.a64.address,
2810 pmem->size, sgl);
2811}
2812
2813static void
2814hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2815 struct mem_array *pmem, struct be_dma_mem *sgl)
2816{
2817 if (sgl->va)
2818 be_sgl_destroy_contiguous(sgl);
2819
2820 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2821 pmem->bus_address.u.a64.address,
2822 pmem->size, sgl);
2823}
2824
2825static int be_fill_queue(struct be_queue_info *q,
2826 u16 len, u16 entry_size, void *vaddress)
2827{
2828 struct be_dma_mem *mem = &q->dma_mem;
2829
2830 memset(q, 0, sizeof(*q));
2831 q->len = len;
2832 q->entry_size = entry_size;
2833 mem->size = len * entry_size;
2834 mem->va = vaddress;
2835 if (!mem->va)
2836 return -ENOMEM;
2837 memset(mem->va, 0, mem->size);
2838 return 0;
2839}
2840
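/**
 * beiscsi_create_eqs()- Create the event queues
 * @phba: pointer to the HBA
 * @phwi_context: HWI context holding the EQ objects
 *
 * Allocates DMA memory for one EQ per CPU (plus one for MCC when MSI-X
 * is enabled) and asks the adapter to create them; the queues are freed
 * on failure.
 **/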
bfead3b2 2841static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
2842 struct hwi_context_memory *phwi_context)
2843{
bfead3b2 2844 unsigned int i, num_eq_pages;
99bc5d55 2845 int ret = 0, eq_for_mcc;
6733b39a
JK
2846 struct be_queue_info *eq;
2847 struct be_dma_mem *mem;
6733b39a 2848 void *eq_vaddress;
bfead3b2 2849 dma_addr_t paddr;
6733b39a 2850
bfead3b2
JK
2851 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2852 sizeof(struct be_eq_entry));
6733b39a 2853
bfead3b2
JK
2854 if (phba->msix_enabled)
2855 eq_for_mcc = 1;
2856 else
2857 eq_for_mcc = 0;
2858 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2859 eq = &phwi_context->be_eq[i].q;
2860 mem = &eq->dma_mem;
2861 phwi_context->be_eq[i].phba = phba;
2862 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2863 num_eq_pages * PAGE_SIZE,
2864 &paddr);
2865 if (!eq_vaddress)
2866 goto create_eq_error;
2867
2868 mem->va = eq_vaddress;
2869 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2870 sizeof(struct be_eq_entry), eq_vaddress);
2871 if (ret) {
99bc5d55
JSJ
2872 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2873 "BM_%d : be_fill_queue Failed for EQ\n");
bfead3b2
JK
2874 goto create_eq_error;
2875 }
6733b39a 2876
bfead3b2
JK
2877 mem->dma = paddr;
2878 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2879 phwi_context->cur_eqd);
2880 if (ret) {
2881 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2882 "BM_%d : beiscsi_cmd_eq_create "
2883 "Failed for EQ\n");
2884 goto create_eq_error;
2885 }
99bc5d55
JSJ
2886
2887 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2888 "BM_%d : eqid = %d\n",
2889 phwi_context->be_eq[i].q.id);
6733b39a 2890 }
6733b39a 2891 return 0;
bfead3b2
JK
2892create_eq_error:
2893 for (i = 0; i < (phba->num_cpus + 1); i++) {
2894 eq = &phwi_context->be_eq[i].q;
2895 mem = &eq->dma_mem;
2896 if (mem->va)
2897 pci_free_consistent(phba->pcidev, num_eq_pages
2898 * PAGE_SIZE,
2899 mem->va, mem->dma);
2900 }
2901 return ret;
6733b39a
JK
2902}
2903
bfead3b2 2904static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
2905 struct hwi_context_memory *phwi_context)
2906{
bfead3b2 2907 unsigned int i, num_cq_pages;
99bc5d55 2908 int ret = 0;
6733b39a
JK
2909 struct be_queue_info *cq, *eq;
2910 struct be_dma_mem *mem;
bfead3b2 2911 struct be_eq_obj *pbe_eq;
6733b39a 2912 void *cq_vaddress;
bfead3b2 2913 dma_addr_t paddr;
6733b39a 2914
bfead3b2
JK
2915 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2916 sizeof(struct sol_cqe));
6733b39a 2917
bfead3b2
JK
2918 for (i = 0; i < phba->num_cpus; i++) {
2919 cq = &phwi_context->be_cq[i];
2920 eq = &phwi_context->be_eq[i].q;
2921 pbe_eq = &phwi_context->be_eq[i];
2922 pbe_eq->cq = cq;
2923 pbe_eq->phba = phba;
2924 mem = &cq->dma_mem;
2925 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2926 num_cq_pages * PAGE_SIZE,
2927 &paddr);
2928 if (!cq_vaddress)
2929 goto create_cq_error;
7da50879 2930 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
2931 sizeof(struct sol_cqe), cq_vaddress);
2932 if (ret) {
99bc5d55
JSJ
2933 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2934 "BM_%d : be_fill_queue Failed "
2935 "for ISCSI CQ\n");
bfead3b2
JK
2936 goto create_cq_error;
2937 }
2938
2939 mem->dma = paddr;
2940 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2941 false, 0);
2942 if (ret) {
2943 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2944 "BM_%d : beiscsi_cmd_cq_create "
2945 "Failed for ISCSI CQ\n");
2946 goto create_cq_error;
2947 }
99bc5d55
JSJ
2948 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2949 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
2950 "iSCSI CQ CREATED\n", cq->id, eq->id);
6733b39a 2951 }
6733b39a 2952 return 0;
bfead3b2
JK
2953
2954create_cq_error:
2955 for (i = 0; i < phba->num_cpus; i++) {
2956 cq = &phwi_context->be_cq[i];
2957 mem = &cq->dma_mem;
2958 if (mem->va)
2959 pci_free_consistent(phba->pcidev, num_cq_pages
2960 * PAGE_SIZE,
2961 mem->va, mem->dma);
2962 }
2963 return ret;
2964
6733b39a
JK
2965}
2966
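/**
 * beiscsi_create_def_hdr()- Create the default PDU header queue
 * @phba: pointer to the HBA
 * @phwi_context: HWI context holding the queue
 * @phwi_ctrlr: HWI controller tracking the ring id
 * @def_pdu_ring_sz: size of the default PDU ring in bytes
 *
 * Builds the queue on top of HWI_MEM_ASYNC_HEADER_RING, asks the
 * adapter to create it, records its id and posts the initial header
 * buffers.
 **/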
2967static int
2968beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2969 struct hwi_context_memory *phwi_context,
2970 struct hwi_controller *phwi_ctrlr,
2971 unsigned int def_pdu_ring_sz)
2972{
2973 unsigned int idx;
2974 int ret;
2975 struct be_queue_info *dq, *cq;
2976 struct be_dma_mem *mem;
2977 struct be_mem_descriptor *mem_descr;
2978 void *dq_vaddress;
2979
2980 idx = 0;
2981 dq = &phwi_context->be_def_hdrq;
bfead3b2 2982 cq = &phwi_context->be_cq[0];
6733b39a
JK
2983 mem = &dq->dma_mem;
2984 mem_descr = phba->init_mem;
2985 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2986 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2987 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2988 sizeof(struct phys_addr),
2989 sizeof(struct phys_addr), dq_vaddress);
2990 if (ret) {
99bc5d55
JSJ
2991 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2992 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
6733b39a
JK
2993 return ret;
2994 }
457ff3b7
JK
2995 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2996 bus_address.u.a64.address;
6733b39a
JK
2997 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2998 def_pdu_ring_sz,
2999 phba->params.defpdu_hdr_sz);
3000 if (ret) {
99bc5d55
JSJ
3001 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3002 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
6733b39a
JK
3003 return ret;
3004 }
3005 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
99bc5d55
JSJ
3006 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3007 "BM_%d : iscsi def pdu id is %d\n",
3008 phwi_context->be_def_hdrq.id);
3009
6733b39a
JK
3010 hwi_post_async_buffers(phba, 1);
3011 return 0;
3012}
3013
3014static int
3015beiscsi_create_def_data(struct beiscsi_hba *phba,
3016 struct hwi_context_memory *phwi_context,
3017 struct hwi_controller *phwi_ctrlr,
3018 unsigned int def_pdu_ring_sz)
3019{
3020 unsigned int idx;
3021 int ret;
3022 struct be_queue_info *dataq, *cq;
3023 struct be_dma_mem *mem;
3024 struct be_mem_descriptor *mem_descr;
3025 void *dq_vaddress;
3026
3027 idx = 0;
3028 dataq = &phwi_context->be_def_dataq;
bfead3b2 3029 cq = &phwi_context->be_cq[0];
6733b39a
JK
3030 mem = &dataq->dma_mem;
3031 mem_descr = phba->init_mem;
3032 mem_descr += HWI_MEM_ASYNC_DATA_RING;
3033 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3034 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3035 sizeof(struct phys_addr),
3036 sizeof(struct phys_addr), dq_vaddress);
3037 if (ret) {
99bc5d55
JSJ
3038 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3039 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
6733b39a
JK
3040 return ret;
3041 }
457ff3b7
JK
3042 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3043 bus_address.u.a64.address;
6733b39a
JK
3044 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3045 def_pdu_ring_sz,
3046 phba->params.defpdu_data_sz);
3047 if (ret) {
3048 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3049 "BM_%d : be_cmd_create_default_pdu_queue"
3050 " Failed for DEF PDU DATA\n");
3051 return ret;
3052 }
3053 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
99bc5d55
JSJ
3054 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3055 "BM_%d : iscsi def data id is %d\n",
3056 phwi_context->be_def_dataq.id);
3057
6733b39a 3058 hwi_post_async_buffers(phba, 0);
99bc5d55
JSJ
3059 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3060 "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3061
6733b39a
JK
3062 return 0;
3063}
3064
3065static int
3066beiscsi_post_pages(struct beiscsi_hba *phba)
3067{
3068 struct be_mem_descriptor *mem_descr;
3069 struct mem_array *pm_arr;
3070 unsigned int page_offset, i;
3071 struct be_dma_mem sgl;
3072 int status;
3073
3074 mem_descr = phba->init_mem;
3075 mem_descr += HWI_MEM_SGE;
3076 pm_arr = mem_descr->mem_array;
3077
3078 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3079 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
3080 for (i = 0; i < mem_descr->num_elements; i++) {
3081 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3082 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3083 page_offset,
3084 (pm_arr->size / PAGE_SIZE));
3085 page_offset += pm_arr->size / PAGE_SIZE;
3086 if (status != 0) {
99bc5d55
JSJ
3087 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3088 "BM_%d : post sgl failed.\n");
6733b39a
JK
3089 return status;
3090 }
3091 pm_arr++;
3092 }
99bc5d55
JSJ
3093 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3094 "BM_%d : POSTED PAGES\n");
6733b39a
JK
3095 return 0;
3096}
3097
3098static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3099{
3100 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 3101 if (mem->va) {
bfead3b2
JK
3102 pci_free_consistent(phba->pcidev, mem->size,
3103 mem->va, mem->dma);
c8b25598
JK
3104 mem->va = NULL;
3105 }
bfead3b2
JK
3106}
3107
3108static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3109 u16 len, u16 entry_size)
3110{
3111 struct be_dma_mem *mem = &q->dma_mem;
3112
3113 memset(q, 0, sizeof(*q));
3114 q->len = len;
3115 q->entry_size = entry_size;
3116 mem->size = len * entry_size;
3117 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3118 if (!mem->va)
d3ad2bb3 3119 return -ENOMEM;
bfead3b2
JK
3120 memset(mem->va, 0, mem->size);
3121 return 0;
3122}
3123
3124static int
3125beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3126 struct hwi_context_memory *phwi_context,
3127 struct hwi_controller *phwi_ctrlr)
3128{
3129 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3130 u64 pa_addr_lo;
3131 unsigned int idx, num, i;
3132 struct mem_array *pwrb_arr;
3133 void *wrb_vaddr;
3134 struct be_dma_mem sgl;
3135 struct be_mem_descriptor *mem_descr;
3136 int status;
3137
3138 idx = 0;
3139 mem_descr = phba->init_mem;
3140 mem_descr += HWI_MEM_WRB;
3141 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3142 GFP_KERNEL);
3143 if (!pwrb_arr) {
99bc5d55
JSJ
3144 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3145 "BM_%d : Memory alloc failed in create wrb ring.\n");
6733b39a
JK
3146 return -ENOMEM;
3147 }
3148 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3149 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3150 num_wrb_rings = mem_descr->mem_array[idx].size /
3151 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3152
3153 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3154 if (num_wrb_rings) {
3155 pwrb_arr[num].virtual_address = wrb_vaddr;
3156 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3157 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3158 sizeof(struct iscsi_wrb);
3159 wrb_vaddr += pwrb_arr[num].size;
3160 pa_addr_lo += pwrb_arr[num].size;
3161 num_wrb_rings--;
3162 } else {
3163 idx++;
3164 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3165 pa_addr_lo = mem_descr->mem_array[idx].\
3166 bus_address.u.a64.address;
3167 num_wrb_rings = mem_descr->mem_array[idx].size /
3168 (phba->params.wrbs_per_cxn *
3169 sizeof(struct iscsi_wrb));
3170 pwrb_arr[num].virtual_address = wrb_vaddr;
3171 pwrb_arr[num].bus_address.u.a64.address\
3172 = pa_addr_lo;
3173 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3174 sizeof(struct iscsi_wrb);
3175 wrb_vaddr += pwrb_arr[num].size;
3176 pa_addr_lo += pwrb_arr[num].size;
3177 num_wrb_rings--;
3178 }
3179 }
3180 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3181 wrb_mem_index = 0;
3182 offset = 0;
3183 size = 0;
3184
3185 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3186 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3187 &phwi_context->be_wrbq[i]);
3188 if (status != 0) {
3189 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3190 "BM_%d : wrbq create failed.\n");
3191 kfree(pwrb_arr);
3192 return status;
3193 }
7da50879
JK
3194 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3195 id;
6733b39a
JK
3196 }
3197 kfree(pwrb_arr);
3198 return 0;
3199}
3200
3201static void free_wrb_handles(struct beiscsi_hba *phba)
3202{
3203 unsigned int index;
3204 struct hwi_controller *phwi_ctrlr;
3205 struct hwi_wrb_context *pwrb_context;
3206
3207 phwi_ctrlr = phba->phwi_ctrlr;
3208 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3209 pwrb_context = &phwi_ctrlr->wrb_context[index];
3210 kfree(pwrb_context->pwrb_handle_base);
3211 kfree(pwrb_context->pwrb_handle_basestd);
3212 }
3213}
3214
3215static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3216{
3217 struct be_queue_info *q;
3218 struct be_ctrl_info *ctrl = &phba->ctrl;
3219
3220 q = &phba->ctrl.mcc_obj.q;
3221 if (q->created)
3222 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3223 be_queue_free(phba, q);
3224
3225 q = &phba->ctrl.mcc_obj.cq;
3226 if (q->created)
3227 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3228 be_queue_free(phba, q);
3229}
3230
3231static void hwi_cleanup(struct beiscsi_hba *phba)
3232{
3233 struct be_queue_info *q;
3234 struct be_ctrl_info *ctrl = &phba->ctrl;
3235 struct hwi_controller *phwi_ctrlr;
3236 struct hwi_context_memory *phwi_context;
bfead3b2 3237 int i, eq_num;
6733b39a
JK
3238
3239 phwi_ctrlr = phba->phwi_ctrlr;
3240 phwi_context = phwi_ctrlr->phwi_ctxt;
3241 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3242 q = &phwi_context->be_wrbq[i];
3243 if (q->created)
3244 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3245 }
6733b39a
JK
3246 free_wrb_handles(phba);
3247
3248 q = &phwi_context->be_def_hdrq;
3249 if (q->created)
3250 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3251
3252 q = &phwi_context->be_def_dataq;
3253 if (q->created)
3254 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3255
3256 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3257
bfead3b2
JK
3258 for (i = 0; i < (phba->num_cpus); i++) {
3259 q = &phwi_context->be_cq[i];
3260 if (q->created)
3261 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3262 }
3263 if (phba->msix_enabled)
3264 eq_num = 1;
3265 else
3266 eq_num = 0;
3267 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3268 q = &phwi_context->be_eq[i].q;
3269 if (q->created)
3270 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3271 }
3272 be_mcc_queues_destroy(phba);
3273}
6733b39a 3274
3275static int be_mcc_queues_create(struct beiscsi_hba *phba,
3276 struct hwi_context_memory *phwi_context)
3277{
3278 struct be_queue_info *q, *cq;
3279 struct be_ctrl_info *ctrl = &phba->ctrl;
3280
3281 /* Alloc MCC compl queue */
3282 cq = &phba->ctrl.mcc_obj.cq;
3283 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3284 sizeof(struct be_mcc_compl)))
3285 goto err;
3286 /* Ask BE to create MCC compl queue; */
3287 if (phba->msix_enabled) {
3288 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3289 [phba->num_cpus].q, false, true, 0))
3290 goto mcc_cq_free;
3291 } else {
3292 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3293 false, true, 0))
3294 goto mcc_cq_free;
3295 }
3296
3297 /* Alloc MCC queue */
3298 q = &phba->ctrl.mcc_obj.q;
3299 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3300 goto mcc_cq_destroy;
3301
3302 /* Ask BE to create MCC queue */
35e66019 3303 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3304 goto mcc_q_free;
3305
3306 return 0;
3307
3308mcc_q_free:
3309 be_queue_free(phba, q);
3310mcc_cq_destroy:
3311 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3312mcc_cq_free:
3313 be_queue_free(phba, cq);
3314err:
d3ad2bb3 3315 return -ENOMEM;
bfead3b2
JK
3316}
3317
3318static int find_num_cpus(void)
3319{
3320 int num_cpus = 0;
3321
3322 num_cpus = num_online_cpus();
3323 if (num_cpus >= MAX_CPUS)
3324 num_cpus = MAX_CPUS - 1;
3325
bfead3b2 3326 return num_cpus;
6733b39a
JK
3327}
3328
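/**
 * hwi_init_port()- Bring up the adapter queues
 * @phba: pointer to the HBA
 *
 * Initializes the firmware, then creates the EQs and MCC queues, checks
 * the firmware version, creates the CQs, default PDU header/data rings,
 * posts the SGL pages and creates the WRB rings; hwi_cleanup() is called
 * if any step fails.
 **/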
3329static int hwi_init_port(struct beiscsi_hba *phba)
3330{
3331 struct hwi_controller *phwi_ctrlr;
3332 struct hwi_context_memory *phwi_context;
3333 unsigned int def_pdu_ring_sz;
3334 struct be_ctrl_info *ctrl = &phba->ctrl;
3335 int status;
3336
3337 def_pdu_ring_sz =
3338 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3339 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3340 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3341 phwi_context->max_eqd = 0;
3342 phwi_context->min_eqd = 0;
3343 phwi_context->cur_eqd = 64;
6733b39a 3344 be_cmd_fw_initialize(&phba->ctrl);
bfead3b2
JK
3345
3346 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3347 if (status != 0) {
99bc5d55
JSJ
3348 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3349 "BM_%d : EQ not created\n");
6733b39a
JK
3350 goto error;
3351 }
3352
bfead3b2
JK
3353 status = be_mcc_queues_create(phba, phwi_context);
3354 if (status != 0)
3355 goto error;
3356
3357 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a 3358 if (status != 0) {
99bc5d55
JSJ
3359 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3360 "BM_%d : Unsupported fw version\n");
6733b39a
JK
3361 goto error;
3362 }
3363
bfead3b2 3364 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a 3365 if (status != 0) {
99bc5d55
JSJ
3366 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3367 "BM_%d : CQ not created\n");
6733b39a
JK
3368 goto error;
3369 }
3370
3371 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3372 def_pdu_ring_sz);
3373 if (status != 0) {
99bc5d55
JSJ
3374 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3375 "BM_%d : Default Header not created\n");
6733b39a
JK
3376 goto error;
3377 }
3378
3379 status = beiscsi_create_def_data(phba, phwi_context,
3380 phwi_ctrlr, def_pdu_ring_sz);
3381 if (status != 0) {
99bc5d55
JSJ
3382 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3383 "BM_%d : Default Data not created\n");
6733b39a
JK
3384 goto error;
3385 }
3386
3387 status = beiscsi_post_pages(phba);
3388 if (status != 0) {
99bc5d55
JSJ
3389 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3390 "BM_%d : Post SGL Pages Failed\n");
6733b39a
JK
3391 goto error;
3392 }
3393
3394 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3395 if (status != 0) {
99bc5d55
JSJ
3396 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3397 "BM_%d : WRB Rings not created\n");
6733b39a
JK
3398 goto error;
3399 }
3400
99bc5d55
JSJ
3401 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3402 "BM_%d : hwi_init_port success\n");
6733b39a
JK
3403 return 0;
3404
3405error:
99bc5d55
JSJ
3406 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3407		    "BM_%d : hwi_init_port failed\n");
6733b39a 3408 hwi_cleanup(phba);
a49e06d5 3409 return status;
6733b39a
JK
3410}
3411
6733b39a
JK
3412static int hwi_init_controller(struct beiscsi_hba *phba)
3413{
3414 struct hwi_controller *phwi_ctrlr;
3415
3416 phwi_ctrlr = phba->phwi_ctrlr;
3417 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3418 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3419 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
99bc5d55
JSJ
3420 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3421 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3422 phwi_ctrlr->phwi_ctxt);
6733b39a 3423 } else {
99bc5d55
JSJ
3424 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3425 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3426		    "than one element. Failing to load\n");
6733b39a
JK
3427 return -ENOMEM;
3428 }
3429
3430 iscsi_init_global_templates(phba);
3ec78271
JK
3431 if (beiscsi_init_wrb_handle(phba))
3432 return -ENOMEM;
3433
6733b39a
JK
3434 hwi_init_async_pdu_ctx(phba);
3435 if (hwi_init_port(phba) != 0) {
99bc5d55
JSJ
3436 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3437 "BM_%d : hwi_init_controller failed\n");
3438
6733b39a
JK
3439 return -ENOMEM;
3440 }
3441 return 0;
3442}
3443
3444static void beiscsi_free_mem(struct beiscsi_hba *phba)
3445{
3446 struct be_mem_descriptor *mem_descr;
3447 int i, j;
3448
3449 mem_descr = phba->init_mem;
3450 i = 0;
3451 j = 0;
3452 for (i = 0; i < SE_MEM_MAX; i++) {
3453 for (j = mem_descr->num_elements; j > 0; j--) {
3454 pci_free_consistent(phba->pcidev,
3455 mem_descr->mem_array[j - 1].size,
3456 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3457 (unsigned long)mem_descr->mem_array[j - 1].
3458 bus_address.u.a64.address);
6733b39a
JK
3459 }
3460 kfree(mem_descr->mem_array);
3461 mem_descr++;
3462 }
3463 kfree(phba->init_mem);
3464 kfree(phba->phwi_ctrlr);
3465}
3466
3467static int beiscsi_init_controller(struct beiscsi_hba *phba)
3468{
3469 int ret = -ENOMEM;
3470
3471 ret = beiscsi_get_memory(phba);
3472 if (ret < 0) {
99bc5d55
JSJ
3473 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3474			    "BM_%d : beiscsi_dev_probe - "
3475 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3476 return ret;
3477 }
3478
3479 ret = hwi_init_controller(phba);
3480 if (ret)
3481 goto free_init;
99bc5d55
JSJ
3482 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3483		    "BM_%d : Return success from beiscsi_init_controller\n");
3484
6733b39a
JK
3485 return 0;
3486
3487free_init:
3488 beiscsi_free_mem(phba);
a49e06d5 3489 return ret;
6733b39a
JK
3490}
3491
3492static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3493{
3494 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3495 struct sgl_handle *psgl_handle;
3496 struct iscsi_sge *pfrag;
3497 unsigned int arr_index, i, idx;
3498
3499 phba->io_sgl_hndl_avbl = 0;
3500 phba->eh_sgl_hndl_avbl = 0;
bfead3b2 3501
6733b39a
JK
3502 mem_descr_sglh = phba->init_mem;
3503 mem_descr_sglh += HWI_MEM_SGLH;
3504 if (1 == mem_descr_sglh->num_elements) {
3505 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3506 phba->params.ios_per_ctrl,
3507 GFP_KERNEL);
3508 if (!phba->io_sgl_hndl_base) {
99bc5d55
JSJ
3509 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3510 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3511 return -ENOMEM;
3512 }
3513 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3514 (phba->params.icds_per_ctrl -
3515 phba->params.ios_per_ctrl),
3516 GFP_KERNEL);
3517 if (!phba->eh_sgl_hndl_base) {
3518 kfree(phba->io_sgl_hndl_base);
99bc5d55
JSJ
3519 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3520 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3521 return -ENOMEM;
3522 }
3523 } else {
99bc5d55
JSJ
3524 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3525			    "BM_%d : HWI_MEM_SGLH is more than one element. "
3526 "Failing to load\n");
6733b39a
JK
3527 return -ENOMEM;
3528 }
3529
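	/*
	 * Split the SGL handles: the first ios_per_ctrl handles back SCSI
	 * I/O, the remaining handles form the eh/mgmt pool used for login
	 * and other non-I/O tasks.
	 */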
3530 arr_index = 0;
3531 idx = 0;
3532 while (idx < mem_descr_sglh->num_elements) {
3533 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3534
3535 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3536 sizeof(struct sgl_handle)); i++) {
3537 if (arr_index < phba->params.ios_per_ctrl) {
3538 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3539 phba->io_sgl_hndl_avbl++;
3540 arr_index++;
3541 } else {
3542 phba->eh_sgl_hndl_base[arr_index -
3543 phba->params.ios_per_ctrl] =
3544 psgl_handle;
3545 arr_index++;
3546 phba->eh_sgl_hndl_avbl++;
3547 }
3548 psgl_handle++;
3549 }
3550 idx++;
3551 }
99bc5d55
JSJ
3552 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3553		    "BM_%d : phba->io_sgl_hndl_avbl=%d "
3554 "phba->eh_sgl_hndl_avbl=%d\n",
3555 phba->io_sgl_hndl_avbl,
3556 phba->eh_sgl_hndl_avbl);
3557
6733b39a
JK
3558 mem_descr_sg = phba->init_mem;
3559 mem_descr_sg += HWI_MEM_SGE;
99bc5d55
JSJ
3560 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3561		    "BM_%d : mem_descr_sg->num_elements=%d\n",
3562 mem_descr_sg->num_elements);
3563
6733b39a
JK
3564 arr_index = 0;
3565 idx = 0;
3566 while (idx < mem_descr_sg->num_elements) {
3567 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3568
3569 for (i = 0;
3570 i < (mem_descr_sg->mem_array[idx].size) /
3571 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3572 i++) {
3573 if (arr_index < phba->params.ios_per_ctrl)
3574 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3575 else
3576 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3577 phba->params.ios_per_ctrl];
3578 psgl_handle->pfrag = pfrag;
3579 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3580 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3581 pfrag += phba->params.num_sge_per_io;
3582 psgl_handle->sgl_index =
7da50879 3583 phba->fw_config.iscsi_icd_start + arr_index++;
6733b39a
JK
3584 }
3585 idx++;
3586 }
3587 phba->io_sgl_free_index = 0;
3588 phba->io_sgl_alloc_index = 0;
3589 phba->eh_sgl_free_index = 0;
3590 phba->eh_sgl_alloc_index = 0;
3591 return 0;
3592}
3593
3594static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3595{
3596 int i, new_cid;
3597
c2462288 3598 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
6733b39a
JK
3599 GFP_KERNEL);
3600 if (!phba->cid_array) {
99bc5d55
JSJ
3601 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3602 "BM_%d : Failed to allocate memory in "
3603 "hba_setup_cid_tbls\n");
6733b39a
JK
3604 return -ENOMEM;
3605 }
c2462288 3606 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
6733b39a
JK
3607 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3608 if (!phba->ep_array) {
99bc5d55
JSJ
3609 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3610 "BM_%d : Failed to allocate memory in "
3611 "hba_setup_cid_tbls\n");
6733b39a
JK
3612 kfree(phba->cid_array);
3613 return -ENOMEM;
3614 }
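	/*
	 * Hand out every other CID, starting at the first CID the firmware
	 * reported for this function.
	 */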
7da50879 3615 new_cid = phba->fw_config.iscsi_cid_start;
6733b39a
JK
3616 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3617 phba->cid_array[i] = new_cid;
3618 new_cid += 2;
3619 }
3620 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3621 return 0;
3622}
3623
238f6b72 3624static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
3625{
3626 struct be_ctrl_info *ctrl = &phba->ctrl;
3627 struct hwi_controller *phwi_ctrlr;
3628 struct hwi_context_memory *phwi_context;
3629 struct be_queue_info *eq;
3630 u8 __iomem *addr;
bfead3b2 3631 u32 reg, i;
6733b39a
JK
3632 u32 enabled;
3633
3634 phwi_ctrlr = phba->phwi_ctrlr;
3635 phwi_context = phwi_ctrlr->phwi_ctxt;
3636
6733b39a
JK
3637 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3638 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3639 reg = ioread32(addr);
6733b39a
JK
3640
3641 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3642 if (!enabled) {
3643 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
99bc5d55
JSJ
3644 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3645			    "BM_%d : reg = 0x%08x addr=%p\n", reg, addr);
6733b39a 3646 iowrite32(reg, addr);
665d6d94
JK
3647 }
3648
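	/*
	 * Re-arm the event queue doorbells so the adapter can raise
	 * interrupts: only EQ 0 in INTx mode, every EQ (including the MCC
	 * EQ) with MSI-X.
	 */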
3649 if (!phba->msix_enabled) {
3650 eq = &phwi_context->be_eq[0].q;
99bc5d55
JSJ
3651 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3652 "BM_%d : eq->id=%d\n", eq->id);
3653
665d6d94
JK
3654 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3655 } else {
3656 for (i = 0; i <= phba->num_cpus; i++) {
3657 eq = &phwi_context->be_eq[i].q;
99bc5d55
JSJ
3658 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3659 "BM_%d : eq->id=%d\n", eq->id);
bfead3b2
JK
3660 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3661 }
c03af1ae 3662 }
6733b39a
JK
3663}
3664
3665static void hwi_disable_intr(struct beiscsi_hba *phba)
3666{
3667 struct be_ctrl_info *ctrl = &phba->ctrl;
3668
3669 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3670 u32 reg = ioread32(addr);
3671
3672 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3673 if (enabled) {
3674 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3675 iowrite32(reg, addr);
3676 } else
99bc5d55
JSJ
3677 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3678 "BM_%d : In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
3679}
3680
9aef4200
JSJ
3681/**
3682 * beiscsi_get_boot_info() - Get the boot session info
3683 * @phba: The device priv structure instance
3684 *
3685 * Get the boot target info and store in driver priv structure
3686 *
3687 * return values
3688 * Success: 0
3689 * Failure: Non-Zero Value
3690 **/
c7acc5b8
JK
3691static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3692{
0e43895e 3693 struct be_cmd_get_session_resp *session_resp;
c7acc5b8
JK
3694 struct be_mcc_wrb *wrb;
3695 struct be_dma_mem nonemb_cmd;
3696 unsigned int tag, wrb_num;
3697 unsigned short status, extd_status;
9aef4200 3698 unsigned int s_handle;
c7acc5b8 3699 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
f457a46f 3700 int ret = -ENOMEM;
c7acc5b8 3701
9aef4200
JSJ
3702 /* Get the session handle of the boot target */
3703 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3704 if (ret) {
99bc5d55
JSJ
3705 beiscsi_log(phba, KERN_ERR,
3706 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3707 "BM_%d : No boot session\n");
9aef4200 3708 return ret;
c7acc5b8 3709 }
c7acc5b8
JK
3710 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3711 sizeof(*session_resp),
3712 &nonemb_cmd.dma);
3713 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
3714 beiscsi_log(phba, KERN_ERR,
3715 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3716			    "BM_%d : Failed to allocate memory for "
3717 "beiscsi_get_session_info\n");
3718
c7acc5b8
JK
3719 return -ENOMEM;
3720 }
3721
3722 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
9aef4200 3723 tag = mgmt_get_session_info(phba, s_handle,
0e43895e 3724 &nonemb_cmd);
c7acc5b8 3725 if (!tag) {
99bc5d55
JSJ
3726 beiscsi_log(phba, KERN_ERR,
3727 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3728 "BM_%d : beiscsi_get_session_info"
3729 " Failed\n");
3730
c7acc5b8
JK
3731 goto boot_freemem;
3732 } else
3733 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3734 phba->ctrl.mcc_numtag[tag]);
3735
3736 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3737 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3738 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3739 if (status || extd_status) {
99bc5d55
JSJ
3740 beiscsi_log(phba, KERN_ERR,
3741 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3742 "BM_%d : beiscsi_get_session_info Failed"
3743 " status = %d extd_status = %d\n",
3744 status, extd_status);
3745
c7acc5b8
JK
3746 free_mcc_tag(&phba->ctrl, tag);
3747 goto boot_freemem;
3748 }
3749 wrb = queue_get_wrb(mccq, wrb_num);
3750 free_mcc_tag(&phba->ctrl, tag);
3751	session_resp = nonemb_cmd.va;
f457a46f 3752
c7acc5b8
JK
3753 memcpy(&phba->boot_sess, &session_resp->session_info,
3754 sizeof(struct mgmt_session_info));
f457a46f
MC
3755 ret = 0;
3756
c7acc5b8
JK
3757boot_freemem:
3758 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3759 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
3760 return ret;
3761}
3762
3763static void beiscsi_boot_release(void *data)
3764{
3765 struct beiscsi_hba *phba = data;
3766
3767 scsi_host_put(phba->shost);
3768}
3769
3770static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3771{
3772 struct iscsi_boot_kobj *boot_kobj;
3773
3774 /* get boot info using mgmt cmd */
3775 if (beiscsi_get_boot_info(phba))
3776 /* Try to see if we can carry on without this */
3777 return 0;
3778
3779 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3780 if (!phba->boot_kset)
3781 return -ENOMEM;
3782
3783 /* get a ref because the show function will ref the phba */
3784 if (!scsi_host_get(phba->shost))
3785 goto free_kset;
3786 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3787 beiscsi_show_boot_tgt_info,
3788 beiscsi_tgt_get_attr_visibility,
3789 beiscsi_boot_release);
3790 if (!boot_kobj)
3791 goto put_shost;
3792
3793 if (!scsi_host_get(phba->shost))
3794 goto free_kset;
3795 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3796 beiscsi_show_boot_ini_info,
3797 beiscsi_ini_get_attr_visibility,
3798 beiscsi_boot_release);
3799 if (!boot_kobj)
3800 goto put_shost;
3801
3802 if (!scsi_host_get(phba->shost))
3803 goto free_kset;
3804 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3805 beiscsi_show_boot_eth_info,
3806 beiscsi_eth_get_attr_visibility,
3807 beiscsi_boot_release);
3808 if (!boot_kobj)
3809 goto put_shost;
3810 return 0;
3811
3812put_shost:
3813 scsi_host_put(phba->shost);
3814free_kset:
3815 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
3816 return -ENOMEM;
3817}
3818
6733b39a
JK
3819static int beiscsi_init_port(struct beiscsi_hba *phba)
3820{
3821 int ret;
3822
3823 ret = beiscsi_init_controller(phba);
3824 if (ret < 0) {
99bc5d55
JSJ
3825 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3826			    "BM_%d : beiscsi_dev_probe - Failed in "
3827 "beiscsi_init_controller\n");
6733b39a
JK
3828 return ret;
3829 }
3830 ret = beiscsi_init_sgl_handle(phba);
3831 if (ret < 0) {
99bc5d55
JSJ
3832 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3833			    "BM_%d : beiscsi_dev_probe - Failed in "
3834 "beiscsi_init_sgl_handle\n");
6733b39a
JK
3835 goto do_cleanup_ctrlr;
3836 }
3837
3838 if (hba_setup_cid_tbls(phba)) {
99bc5d55
JSJ
3839 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3840 "BM_%d : Failed in hba_setup_cid_tbls\n");
6733b39a
JK
3841 kfree(phba->io_sgl_hndl_base);
3842 kfree(phba->eh_sgl_hndl_base);
3843 goto do_cleanup_ctrlr;
3844 }
3845
3846 return ret;
3847
3848do_cleanup_ctrlr:
3849 hwi_cleanup(phba);
3850 return ret;
3851}
3852
3853static void hwi_purge_eq(struct beiscsi_hba *phba)
3854{
3855 struct hwi_controller *phwi_ctrlr;
3856 struct hwi_context_memory *phwi_context;
3857 struct be_queue_info *eq;
3858 struct be_eq_entry *eqe = NULL;
bfead3b2 3859 int i, eq_msix;
756d29c8 3860 unsigned int num_processed;
6733b39a
JK
3861
3862 phwi_ctrlr = phba->phwi_ctrlr;
3863 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3864 if (phba->msix_enabled)
3865 eq_msix = 1;
3866 else
3867 eq_msix = 0;
6733b39a 3868
bfead3b2
JK
3869 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3870 eq = &phwi_context->be_eq[i].q;
6733b39a 3871 eqe = queue_tail_node(eq);
756d29c8 3872 num_processed = 0;
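		/*
		 * Consume any entries still marked valid and acknowledge
		 * them on the EQ doorbell before the queues are destroyed.
		 */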
bfead3b2
JK
3873 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3874 & EQE_VALID_MASK) {
3875 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3876 queue_tail_inc(eq);
3877 eqe = queue_tail_node(eq);
756d29c8 3878 num_processed++;
bfead3b2 3879 }
756d29c8
JK
3880
3881 if (num_processed)
3882 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
3883 }
3884}
3885
3886static void beiscsi_clean_port(struct beiscsi_hba *phba)
3887{
03a12310 3888 int mgmt_status;
6733b39a
JK
3889
3890 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3891 if (mgmt_status)
99bc5d55
JSJ
3892 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3893 "BM_%d : mgmt_epfw_cleanup FAILED\n");
756d29c8 3894
6733b39a 3895 hwi_purge_eq(phba);
756d29c8 3896 hwi_cleanup(phba);
6733b39a
JK
3897 kfree(phba->io_sgl_hndl_base);
3898 kfree(phba->eh_sgl_hndl_base);
3899 kfree(phba->cid_array);
3900 kfree(phba->ep_array);
3901}
3902
d629c471
JSJ
3903/**
3904 * beiscsi_cleanup_task() - Free driver resources of the task
3905 * @task: ptr to the iscsi task
3906 *
3907 **/
1282ab76
MC
3908static void beiscsi_cleanup_task(struct iscsi_task *task)
3909{
3910 struct beiscsi_io_task *io_task = task->dd_data;
3911 struct iscsi_conn *conn = task->conn;
3912 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3913 struct beiscsi_hba *phba = beiscsi_conn->phba;
3914 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3915 struct hwi_wrb_context *pwrb_context;
3916 struct hwi_controller *phwi_ctrlr;
3917
3918 phwi_ctrlr = phba->phwi_ctrlr;
3919 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3920 - phba->fw_config.iscsi_cid_start];
3921
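	/*
	 * I/O tasks (task->sc set) return their SGL handle to the io pool;
	 * non-I/O tasks give theirs back to the mgmt pool once login has
	 * finished.
	 */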
3922 if (io_task->cmd_bhs) {
3923 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3924 io_task->bhs_pa.u.a64.address);
3925 io_task->cmd_bhs = NULL;
3926 }
3927
3928 if (task->sc) {
3929 if (io_task->pwrb_handle) {
3930 free_wrb_handle(phba, pwrb_context,
3931 io_task->pwrb_handle);
3932 io_task->pwrb_handle = NULL;
3933 }
3934
3935 if (io_task->psgl_handle) {
3936 spin_lock(&phba->io_sgl_lock);
3937 free_io_sgl_handle(phba, io_task->psgl_handle);
3938 spin_unlock(&phba->io_sgl_lock);
3939 io_task->psgl_handle = NULL;
3940 }
3941 } else {
3942 if (!beiscsi_conn->login_in_progress) {
3943 if (io_task->pwrb_handle) {
3944 free_wrb_handle(phba, pwrb_context,
3945 io_task->pwrb_handle);
3946 io_task->pwrb_handle = NULL;
3947 }
3948 if (io_task->psgl_handle) {
3949 spin_lock(&phba->mgmt_sgl_lock);
3950 free_mgmt_sgl_handle(phba,
3951 io_task->psgl_handle);
3952 spin_unlock(&phba->mgmt_sgl_lock);
3953 io_task->psgl_handle = NULL;
3954 }
d629c471
JSJ
3955 if (io_task->mtask_addr) {
3956 pci_unmap_single(phba->pcidev,
3957 io_task->mtask_addr,
3958 io_task->mtask_data_count,
3959 PCI_DMA_TODEVICE);
3960 io_task->mtask_addr = 0;
3961 }
1282ab76
MC
3962 }
3963 }
3964}
3965
6733b39a
JK
3966void
3967beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3968 struct beiscsi_offload_params *params)
3969{
3970 struct wrb_handle *pwrb_handle;
3971 struct iscsi_target_context_update_wrb *pwrb = NULL;
3972 struct be_mem_descriptor *mem_descr;
3973 struct beiscsi_hba *phba = beiscsi_conn->phba;
1282ab76
MC
3974 struct iscsi_task *task = beiscsi_conn->task;
3975 struct iscsi_session *session = task->conn->session;
6733b39a
JK
3976 u32 doorbell = 0;
3977
3978 /*
3979 * We can always use 0 here because it is reserved by libiscsi for
3980 * login/startup related tasks.
3981 */
1282ab76
MC
3982 beiscsi_conn->login_in_progress = 0;
3983 spin_lock_bh(&session->lock);
3984 beiscsi_cleanup_task(task);
3985 spin_unlock_bh(&session->lock);
3986
7da50879 3987 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
d5431488 3988 phba->fw_config.iscsi_cid_start));
6733b39a
JK
3989 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3990 memset(pwrb, 0, sizeof(*pwrb));
3991 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3992 max_burst_length, pwrb, params->dw[offsetof
3993 (struct amap_beiscsi_offload_params,
3994 max_burst_length) / 32]);
3995 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3996 max_send_data_segment_length, pwrb,
3997 params->dw[offsetof(struct amap_beiscsi_offload_params,
3998 max_send_data_segment_length) / 32]);
3999 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4000 first_burst_length,
4001 pwrb,
4002 params->dw[offsetof(struct amap_beiscsi_offload_params,
4003 first_burst_length) / 32]);
4004
4005 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
4006 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4007 erl) / 32] & OFFLD_PARAMS_ERL));
4008 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
4009 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4010 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
4011 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
4012 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4013 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
4014 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
4015 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4016 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
4017 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
4018 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4019 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
4020 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
4021 pwrb,
4022 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4023 exp_statsn) / 32] + 1));
4024 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
4025 0x7);
4026 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
4027 pwrb, pwrb_handle->wrb_index);
4028 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
4029 pwrb, pwrb_handle->nxt_wrb_index);
4030 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4031 session_state, pwrb, 0);
4032 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
4033 pwrb, 1);
4034 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
4035 pwrb, 0);
4036 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
4037 0);
4038
4039 mem_descr = phba->init_mem;
4040 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
4041
4042 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4043 pad_buffer_addr_hi, pwrb,
4044 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
4045 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4046 pad_buffer_addr_lo, pwrb,
4047 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
4048
4049 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
4050
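	/*
	 * Ring the WRB doorbell: connection CID in the low bits, the WRB
	 * index, and a posted count of one.
	 */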
4051 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4052 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
bfead3b2 4053 << DB_DEF_PDU_WRB_INDEX_SHIFT;
6733b39a
JK
4054 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4055
4056 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4057}
4058
4059static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4060 int *index, int *age)
4061{
bfead3b2 4062 *index = (int)itt;
6733b39a
JK
4063 if (age)
4064 *age = conn->session->age;
4065}
4066
4067/**
4068 * beiscsi_alloc_pdu - allocates pdu and related resources
4069 * @task: libiscsi task
4070 * @opcode: opcode of pdu for task
4071 *
4072 * This is called with the session lock held. It will allocate
4073 * the wrb and sgl if needed for the command. And it will prep
4074 * the pdu's itt. beiscsi_parse_pdu will later translate
4075 * the pdu itt to the libiscsi task itt.
4076 */
4077static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4078{
4079 struct beiscsi_io_task *io_task = task->dd_data;
4080 struct iscsi_conn *conn = task->conn;
4081 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4082 struct beiscsi_hba *phba = beiscsi_conn->phba;
4083 struct hwi_wrb_context *pwrb_context;
4084 struct hwi_controller *phwi_ctrlr;
4085 itt_t itt;
2afc95bf
JK
4086 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4087 dma_addr_t paddr;
6733b39a 4088
2afc95bf 4089 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 4090 GFP_ATOMIC, &paddr);
2afc95bf
JK
4091 if (!io_task->cmd_bhs)
4092 return -ENOMEM;
2afc95bf 4093 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 4094 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
4095 io_task->conn = beiscsi_conn;
4096
4097 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4098 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 4099 io_task->psgl_handle = NULL;
3ec78271 4100 io_task->pwrb_handle = NULL;
6733b39a
JK
4101
4102 if (task->sc) {
4103 spin_lock(&phba->io_sgl_lock);
4104 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4105 spin_unlock(&phba->io_sgl_lock);
2afc95bf
JK
4106 if (!io_task->psgl_handle)
4107 goto free_hndls;
d2cecf0d
JK
4108 io_task->pwrb_handle = alloc_wrb_handle(phba,
4109 beiscsi_conn->beiscsi_conn_cid -
4110 phba->fw_config.iscsi_cid_start);
4111 if (!io_task->pwrb_handle)
4112 goto free_io_hndls;
6733b39a
JK
4113 } else {
4114 io_task->scsi_cmnd = NULL;
d7aea67b 4115 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
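			/*
			 * Login PDUs reuse a single mgmt SGL/WRB handle per
			 * connection: allocate it on the first login PDU and
			 * share it for the rest of the login phase.
			 */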
6733b39a
JK
4116 if (!beiscsi_conn->login_in_progress) {
4117 spin_lock(&phba->mgmt_sgl_lock);
4118 io_task->psgl_handle = (struct sgl_handle *)
4119 alloc_mgmt_sgl_handle(phba);
4120 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4121 if (!io_task->psgl_handle)
4122 goto free_hndls;
4123
6733b39a
JK
4124 beiscsi_conn->login_in_progress = 1;
4125 beiscsi_conn->plogin_sgl_handle =
4126 io_task->psgl_handle;
d2cecf0d
JK
4127 io_task->pwrb_handle =
4128 alloc_wrb_handle(phba,
4129 beiscsi_conn->beiscsi_conn_cid -
4130 phba->fw_config.iscsi_cid_start);
4131 if (!io_task->pwrb_handle)
4132 goto free_io_hndls;
4133 beiscsi_conn->plogin_wrb_handle =
4134 io_task->pwrb_handle;
4135
6733b39a
JK
4136 } else {
4137 io_task->psgl_handle =
4138 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
4139 io_task->pwrb_handle =
4140 beiscsi_conn->plogin_wrb_handle;
6733b39a 4141 }
1282ab76 4142 beiscsi_conn->task = task;
6733b39a
JK
4143 } else {
4144 spin_lock(&phba->mgmt_sgl_lock);
4145 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4146 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4147 if (!io_task->psgl_handle)
4148 goto free_hndls;
d2cecf0d
JK
4149 io_task->pwrb_handle =
4150 alloc_wrb_handle(phba,
4151 beiscsi_conn->beiscsi_conn_cid -
4152 phba->fw_config.iscsi_cid_start);
4153 if (!io_task->pwrb_handle)
4154 goto free_mgmt_hndls;
4155
6733b39a
JK
4156 }
4157 }
bfead3b2
JK
4158 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4159 wrb_index << 16) | (unsigned int)
4160 (io_task->psgl_handle->sgl_index));
32951dd8 4161 io_task->pwrb_handle->pio_handle = task;
bfead3b2 4162
6733b39a
JK
4163 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4164 return 0;
2afc95bf 4165
d2cecf0d
JK
4166free_io_hndls:
4167 spin_lock(&phba->io_sgl_lock);
4168 free_io_sgl_handle(phba, io_task->psgl_handle);
4169 spin_unlock(&phba->io_sgl_lock);
4170 goto free_hndls;
4171free_mgmt_hndls:
4172 spin_lock(&phba->mgmt_sgl_lock);
4173 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4174 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4175free_hndls:
4176 phwi_ctrlr = phba->phwi_ctrlr;
7da50879
JK
4177 pwrb_context = &phwi_ctrlr->wrb_context[
4178 beiscsi_conn->beiscsi_conn_cid -
4179 phba->fw_config.iscsi_cid_start];
d2cecf0d
JK
4180 if (io_task->pwrb_handle)
4181 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
4182 io_task->pwrb_handle = NULL;
4183 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4184 io_task->bhs_pa.u.a64.address);
1282ab76 4185 io_task->cmd_bhs = NULL;
99bc5d55
JSJ
4186 beiscsi_log(phba, KERN_ERR,
4187 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4188 "BM_%d : Alloc of SGL_ICD Failed\n");
2afc95bf 4189 return -ENOMEM;
6733b39a
JK
4190}
4191
6733b39a
JK
4192static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4193 unsigned int num_sg, unsigned int xferlen,
4194 unsigned int writedir)
4195{
4196
4197 struct beiscsi_io_task *io_task = task->dd_data;
4198 struct iscsi_conn *conn = task->conn;
4199 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4200 struct beiscsi_hba *phba = beiscsi_conn->phba;
4201 struct iscsi_wrb *pwrb = NULL;
4202 unsigned int doorbell = 0;
4203
4204 pwrb = io_task->pwrb_handle->pwrb;
6733b39a
JK
4205 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4206 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4207
4208 if (writedir) {
32951dd8
JK
4209 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4210 INI_WR_CMD);
6733b39a 4211 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
6733b39a 4212 } else {
32951dd8
JK
4213 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4214 INI_RD_CMD);
6733b39a
JK
4215 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4216 }
6733b39a
JK
4217
4218 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
dc63aac6
JK
4219 cpu_to_be16(*(unsigned short *)
4220 &io_task->cmd_bhs->iscsi_hdr.lun));
6733b39a
JK
4221 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4222 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4223 io_task->pwrb_handle->wrb_index);
4224 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4225 be32_to_cpu(task->cmdsn));
4226 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4227 io_task->psgl_handle->sgl_index);
4228
4229 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4230
4231 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4232 io_task->pwrb_handle->nxt_wrb_index);
4233 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4234
4235 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4236 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4237 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4238 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4239
4240 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4241 return 0;
4242}
4243
4244static int beiscsi_mtask(struct iscsi_task *task)
4245{
dafab8e0 4246 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
4247 struct iscsi_conn *conn = task->conn;
4248 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4249 struct beiscsi_hba *phba = beiscsi_conn->phba;
4250 struct iscsi_wrb *pwrb = NULL;
4251 unsigned int doorbell = 0;
dafab8e0 4252 unsigned int cid;
6733b39a 4253
bfead3b2 4254 cid = beiscsi_conn->beiscsi_conn_cid;
6733b39a 4255 pwrb = io_task->pwrb_handle->pwrb;
caf818f1 4256 memset(pwrb, 0, sizeof(*pwrb));
6733b39a
JK
4257 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4258 be32_to_cpu(task->cmdsn));
4259 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4260 io_task->pwrb_handle->wrb_index);
4261 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4262 io_task->psgl_handle->sgl_index);
dafab8e0 4263
6733b39a
JK
4264 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4265 case ISCSI_OP_LOGIN:
32951dd8
JK
4266 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4267 TGT_DM_CMD);
6733b39a
JK
4268 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4269 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4270 hwi_write_buffer(pwrb, task);
4271 break;
4272 case ISCSI_OP_NOOP_OUT:
1390b01b
JK
4273 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4274 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4275 TGT_DM_CMD);
4276 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4277 pwrb, 0);
685e16fd 4278 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
1390b01b
JK
4279 } else {
4280 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4281 INI_RD_CMD);
685e16fd 4282 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
1390b01b 4283 }
6733b39a
JK
4284 hwi_write_buffer(pwrb, task);
4285 break;
4286 case ISCSI_OP_TEXT:
32951dd8 4287 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
b30c6dab 4288 TGT_DM_CMD);
0ecb0b45 4289 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
6733b39a
JK
4290 hwi_write_buffer(pwrb, task);
4291 break;
4292 case ISCSI_OP_SCSI_TMFUNC:
32951dd8
JK
4293 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4294 INI_TMF_CMD);
6733b39a
JK
4295 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4296 hwi_write_buffer(pwrb, task);
4297 break;
4298 case ISCSI_OP_LOGOUT:
4299 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4300 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
dafab8e0 4301 HWH_TYPE_LOGOUT);
6733b39a
JK
4302 hwi_write_buffer(pwrb, task);
4303 break;
4304
4305 default:
99bc5d55
JSJ
4306 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4307			"BM_%d : opcode = %d Not supported\n",
4308 task->hdr->opcode & ISCSI_OPCODE_MASK);
4309
6733b39a
JK
4310 return -EINVAL;
4311 }
4312
4313 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
51a46250 4314 task->data_count);
6733b39a
JK
4315 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4316 io_task->pwrb_handle->nxt_wrb_index);
4317 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4318
bfead3b2 4319 doorbell |= cid & DB_WRB_POST_CID_MASK;
32951dd8 4320 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4321 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4322 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4323 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4324 return 0;
4325}
4326
4327static int beiscsi_task_xmit(struct iscsi_task *task)
4328{
6733b39a
JK
4329 struct beiscsi_io_task *io_task = task->dd_data;
4330 struct scsi_cmnd *sc = task->sc;
6733b39a
JK
4331 struct scatterlist *sg;
4332 int num_sg;
4333 unsigned int writedir = 0, xferlen = 0;
4334
6733b39a
JK
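	/*
	 * A task without a scsi_cmnd is a management PDU; everything else
	 * is DMA-mapped and sent down the I/O WRB path.
	 */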
4335 if (!sc)
4336 return beiscsi_mtask(task);
4337
4338 io_task->scsi_cmnd = sc;
4339 num_sg = scsi_dma_map(sc);
4340 if (num_sg < 0) {
99bc5d55
JSJ
4341 struct iscsi_conn *conn = task->conn;
4342 struct beiscsi_hba *phba = NULL;
4343
4344 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4345 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4346 "BM_%d : scsi_dma_map Failed\n");
4347
6733b39a
JK
4348 return num_sg;
4349 }
6733b39a
JK
4350 xferlen = scsi_bufflen(sc);
4351 sg = scsi_sglist(sc);
99bc5d55 4352 if (sc->sc_data_direction == DMA_TO_DEVICE)
6733b39a 4353 writedir = 1;
99bc5d55 4354 else
6733b39a 4355 writedir = 0;
99bc5d55 4356
6733b39a
JK
4357 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4358}
4359
ffce3e2e
JK
4360/**
4361 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4362 * @job: job to handle
4363 */
4364static int beiscsi_bsg_request(struct bsg_job *job)
4365{
4366 struct Scsi_Host *shost;
4367 struct beiscsi_hba *phba;
4368 struct iscsi_bsg_request *bsg_req = job->request;
4369 int rc = -EINVAL;
4370 unsigned int tag;
4371 struct be_dma_mem nonemb_cmd;
4372 struct be_cmd_resp_hdr *resp;
4373 struct iscsi_bsg_reply *bsg_reply = job->reply;
4374 unsigned short status, extd_status;
4375
4376 shost = iscsi_job_to_shost(job);
4377 phba = iscsi_host_priv(shost);
4378
4379 switch (bsg_req->msgcode) {
4380 case ISCSI_BSG_HST_VENDOR:
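		/*
		 * Vendor pass-through: issue the command on the MCC, wait
		 * for its tag to complete, then copy the response into the
		 * bsg reply payload.
		 */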
4381 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4382 job->request_payload.payload_len,
4383 &nonemb_cmd.dma);
4384 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
4385 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4386 "BM_%d : Failed to allocate memory for "
4387 "beiscsi_bsg_request\n");
ffce3e2e
JK
4388 return -EIO;
4389 }
4390 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4391 &nonemb_cmd);
4392 if (!tag) {
99bc5d55
JSJ
4393 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4394				    "BM_%d : mgmt_vendor_specific_fw_cmd Failed\n");
4395
ffce3e2e
JK
4396 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4397 nonemb_cmd.va, nonemb_cmd.dma);
4398 return -EAGAIN;
4399 } else
4400 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4401 phba->ctrl.mcc_numtag[tag]);
4402 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4403 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4404 free_mcc_tag(&phba->ctrl, tag);
4405 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4406 sg_copy_from_buffer(job->reply_payload.sg_list,
4407 job->reply_payload.sg_cnt,
4408 nonemb_cmd.va, (resp->response_length
4409 + sizeof(*resp)));
4410 bsg_reply->reply_payload_rcv_len = resp->response_length;
4411 bsg_reply->result = status;
4412 bsg_job_done(job, bsg_reply->result,
4413 bsg_reply->reply_payload_rcv_len);
4414 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4415 nonemb_cmd.va, nonemb_cmd.dma);
4416 if (status || extd_status) {
99bc5d55
JSJ
4417 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4418				    "BM_%d : mgmt_vendor_specific_fw_cmd Failed"
4419 " status = %d extd_status = %d\n",
4420 status, extd_status);
4421
ffce3e2e
JK
4422 return -EIO;
4423 }
4424 break;
4425
4426 default:
99bc5d55
JSJ
4427 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4428 "BM_%d : Unsupported bsg command: 0x%x\n",
4429 bsg_req->msgcode);
ffce3e2e
JK
4430 break;
4431 }
4432
4433 return rc;
4434}
4435
99bc5d55
JSJ
4436void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4437{
4438 /* Set the logging parameter */
4439 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4440}
4441
25602c97 4442static void beiscsi_quiesce(struct beiscsi_hba *phba)
6733b39a 4443{
bfead3b2
JK
4444 struct hwi_controller *phwi_ctrlr;
4445 struct hwi_context_memory *phwi_context;
4446 struct be_eq_obj *pbe_eq;
4447 unsigned int i, msix_vec;
e9b91193
JK
4448 u8 *real_offset = 0;
4449 u32 value = 0;
6733b39a 4450
bfead3b2
JK
4451 phwi_ctrlr = phba->phwi_ctrlr;
4452 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 4453 hwi_disable_intr(phba);
bfead3b2
JK
4454 if (phba->msix_enabled) {
4455 for (i = 0; i <= phba->num_cpus; i++) {
4456 msix_vec = phba->msix_entries[i].vector;
4457 free_irq(msix_vec, &phwi_context->be_eq[i]);
8fcfb210 4458 kfree(phba->msi_name[i]);
bfead3b2
JK
4459 }
4460 } else
4461 if (phba->pcidev->irq)
4462 free_irq(phba->pcidev->irq, phba);
4463 pci_disable_msix(phba->pcidev);
6733b39a
JK
4464 destroy_workqueue(phba->wq);
4465 if (blk_iopoll_enabled)
bfead3b2
JK
4466 for (i = 0; i < phba->num_cpus; i++) {
4467 pbe_eq = &phwi_context->be_eq[i];
4468 blk_iopoll_disable(&pbe_eq->iopoll);
4469 }
6733b39a
JK
4470
4471 beiscsi_clean_port(phba);
4472 beiscsi_free_mem(phba);
e9b91193
JK
4473 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4474
4475 value = readl((void *)real_offset);
4476
4477 if (value & 0x00010000) {
4478 value &= 0xfffeffff;
4479 writel(value, (void *)real_offset);
4480 }
6733b39a
JK
4481 beiscsi_unmap_pci_function(phba);
4482 pci_free_consistent(phba->pcidev,
4483 phba->ctrl.mbox_mem_alloced.size,
4484 phba->ctrl.mbox_mem_alloced.va,
4485 phba->ctrl.mbox_mem_alloced.dma);
25602c97
JK
4486}
4487
4488static void beiscsi_remove(struct pci_dev *pcidev)
4489{
4490
4491 struct beiscsi_hba *phba = NULL;
4492
4493 phba = pci_get_drvdata(pcidev);
4494 if (!phba) {
4495 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4496 return;
4497 }
4498
0e43895e 4499 beiscsi_destroy_def_ifaces(phba);
25602c97 4500 beiscsi_quiesce(phba);
9d045163 4501 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
4502 iscsi_host_remove(phba->shost);
4503 pci_dev_put(phba->pcidev);
4504 iscsi_host_free(phba->shost);
8dce69ff 4505 pci_disable_device(pcidev);
6733b39a
JK
4506}
4507
25602c97
JK
4508static void beiscsi_shutdown(struct pci_dev *pcidev)
4509{
4510
4511 struct beiscsi_hba *phba = NULL;
4512
4513 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4514 if (!phba) {
4515 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4516 return;
4517 }
4518
4519 beiscsi_quiesce(phba);
8dce69ff 4520 pci_disable_device(pcidev);
25602c97
JK
4521}
4522
bfead3b2
JK
4523static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4524{
4525 int i, status;
4526
4527 for (i = 0; i <= phba->num_cpus; i++)
4528 phba->msix_entries[i].entry = i;
4529
4530 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4531 (phba->num_cpus + 1));
4532 if (!status)
4533 phba->msix_enabled = true;
4534
4535 return;
4536}
4537
6733b39a
JK
4538static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4539 const struct pci_device_id *id)
4540{
4541 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
4542 struct hwi_controller *phwi_ctrlr;
4543 struct hwi_context_memory *phwi_context;
4544 struct be_eq_obj *pbe_eq;
238f6b72 4545 int ret, num_cpus, i;
e9b91193
JK
4546 u8 *real_offset = 0;
4547 u32 value = 0;
6733b39a
JK
4548
4549 ret = beiscsi_enable_pci(pcidev);
4550 if (ret < 0) {
99bc5d55
JSJ
4551 dev_err(&pcidev->dev,
4552 "beiscsi_dev_probe - Failed to enable pci device\n");
6733b39a
JK
4553 return ret;
4554 }
4555
4556 phba = beiscsi_hba_alloc(pcidev);
4557 if (!phba) {
99bc5d55
JSJ
4558 dev_err(&pcidev->dev,
4559 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
6733b39a
JK
4560 goto disable_pci;
4561 }
4562
99bc5d55
JSJ
4563 /* Initialize Driver configuration Paramters */
4564 beiscsi_hba_attrs_init(phba);
4565
f98c96b0
JK
4566 switch (pcidev->device) {
4567 case BE_DEVICE_ID1:
4568 case OC_DEVICE_ID1:
4569 case OC_DEVICE_ID2:
4570 phba->generation = BE_GEN2;
4571 break;
4572 case BE_DEVICE_ID2:
4573 case OC_DEVICE_ID3:
4574 phba->generation = BE_GEN3;
4575 break;
4576 default:
4577 phba->generation = 0;
4578 }
4579
bfead3b2
JK
4580 if (enable_msix)
4581 num_cpus = find_num_cpus();
4582 else
4583 num_cpus = 1;
4584 phba->num_cpus = num_cpus;
99bc5d55
JSJ
4585 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4586 "BM_%d : num_cpus = %d\n",
4587 phba->num_cpus);
bfead3b2 4588
b547f2d6 4589 if (enable_msix) {
bfead3b2 4590 beiscsi_msix_enable(phba);
b547f2d6
JK
4591 if (!phba->msix_enabled)
4592 phba->num_cpus = 1;
4593 }
6733b39a
JK
4594 ret = be_ctrl_init(phba, pcidev);
4595 if (ret) {
99bc5d55
JSJ
4596 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4597			    "BM_%d : beiscsi_dev_probe - "
4598 "Failed in be_ctrl_init\n");
6733b39a
JK
4599 goto hba_free;
4600 }
4601
e9b91193
JK
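	/*
	 * Only the first function checks the EP semaphore: if the
	 * crash-dump bit is already set, reset the firmware and wait for
	 * the reset to complete; otherwise claim the bit for this load.
	 */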
4602 if (!num_hba) {
4603 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4604 value = readl((void *)real_offset);
4605 if (value & 0x00010000) {
4606 gcrashmode++;
99bc5d55
JSJ
4607 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4608 "BM_%d : Loading Driver in crashdump mode\n");
e5285860 4609 ret = beiscsi_cmd_reset_function(phba);
e9b91193 4610 if (ret) {
99bc5d55
JSJ
4611 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4612 "BM_%d : Reset Failed. Aborting Crashdump\n");
e9b91193
JK
4613 goto hba_free;
4614 }
4615 ret = be_chk_reset_complete(phba);
4616 if (ret) {
99bc5d55
JSJ
4617 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4618 "BM_%d : Failed to get out of reset."
4619 "Aborting Crashdump\n");
e9b91193
JK
4620 goto hba_free;
4621 }
4622 } else {
4623 value |= 0x00010000;
4624 writel(value, (void *)real_offset);
4625 num_hba++;
4626 }
4627 }
4628
6733b39a
JK
4629 spin_lock_init(&phba->io_sgl_lock);
4630 spin_lock_init(&phba->mgmt_sgl_lock);
4631 spin_lock_init(&phba->isr_lock);
7da50879
JK
4632 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4633 if (ret != 0) {
99bc5d55
JSJ
4634 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4635 "BM_%d : Error getting fw config\n");
7da50879
JK
4636 goto free_port;
4637 }
4638 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
6733b39a 4639 beiscsi_get_params(phba);
aa874f07 4640 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
4641 ret = beiscsi_init_port(phba);
4642 if (ret < 0) {
99bc5d55
JSJ
4643 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4644			    "BM_%d : beiscsi_dev_probe - "
4645 "Failed in beiscsi_init_port\n");
6733b39a
JK
4646 goto free_port;
4647 }
4648
756d29c8
JK
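	/*
	 * Seed the MCC tag pool: tags 1..MAX_MCC_CMD start out free with
	 * their completion status cleared.
	 */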
4649	for (i = 0; i < MAX_MCC_CMD; i++) {
4650 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4651 phba->ctrl.mcc_tag[i] = i + 1;
4652 phba->ctrl.mcc_numtag[i + 1] = 0;
4653 phba->ctrl.mcc_tag_available++;
4654 }
4655
4656 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4657
6733b39a
JK
4658 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4659 phba->shost->host_no);
278274d5 4660 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
6733b39a 4661 if (!phba->wq) {
99bc5d55
JSJ
4662 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4663			    "BM_%d : beiscsi_dev_probe - "
4664 "Failed to allocate work queue\n");
6733b39a
JK
4665 goto free_twq;
4666 }
4667
4668 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
4669
bfead3b2
JK
4670 phwi_ctrlr = phba->phwi_ctrlr;
4671 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 4672 if (blk_iopoll_enabled) {
bfead3b2
JK
4673 for (i = 0; i < phba->num_cpus; i++) {
4674 pbe_eq = &phwi_context->be_eq[i];
4675 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4676 be_iopoll);
4677 blk_iopoll_enable(&pbe_eq->iopoll);
4678 }
6733b39a 4679 }
6733b39a
JK
4680 ret = beiscsi_init_irqs(phba);
4681 if (ret < 0) {
99bc5d55
JSJ
4682 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4683			    "BM_%d : beiscsi_dev_probe - "
4684			    "Failed in beiscsi_init_irqs\n");
6733b39a
JK
4685 goto free_blkenbld;
4686 }
238f6b72 4687 hwi_enable_intr(phba);
f457a46f
MC
4688
4689 if (beiscsi_setup_boot_info(phba))
4690 /*
4691 * log error but continue, because we may not be using
4692 * iscsi boot.
4693 */
99bc5d55
JSJ
4694 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4695 "BM_%d : Could not set up "
4696 "iSCSI boot info.\n");
f457a46f 4697
0e43895e 4698 beiscsi_create_def_ifaces(phba);
99bc5d55
JSJ
4699 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4700 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
4701 return 0;
4702
6733b39a
JK
4703free_blkenbld:
4704 destroy_workqueue(phba->wq);
4705 if (blk_iopoll_enabled)
bfead3b2
JK
4706 for (i = 0; i < phba->num_cpus; i++) {
4707 pbe_eq = &phwi_context->be_eq[i];
4708 blk_iopoll_disable(&pbe_eq->iopoll);
4709 }
6733b39a
JK
4710free_twq:
4711 beiscsi_clean_port(phba);
4712 beiscsi_free_mem(phba);
4713free_port:
e9b91193
JK
4714 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4715
4716 value = readl((void *)real_offset);
4717
4718 if (value & 0x00010000) {
4719 value &= 0xfffeffff;
4720 writel(value, (void *)real_offset);
4721 }
4722
6733b39a
JK
4723 pci_free_consistent(phba->pcidev,
4724 phba->ctrl.mbox_mem_alloced.size,
4725 phba->ctrl.mbox_mem_alloced.va,
4726 phba->ctrl.mbox_mem_alloced.dma);
4727 beiscsi_unmap_pci_function(phba);
4728hba_free:
238f6b72
JK
4729 if (phba->msix_enabled)
4730 pci_disable_msix(phba->pcidev);
6733b39a
JK
4731 iscsi_host_remove(phba->shost);
4732 pci_dev_put(phba->pcidev);
4733 iscsi_host_free(phba->shost);
4734disable_pci:
4735 pci_disable_device(pcidev);
4736 return ret;
4737}
4738
4739struct iscsi_transport beiscsi_iscsi_transport = {
4740 .owner = THIS_MODULE,
4741 .name = DRV_NAME,
9db0fb3a 4742 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
6733b39a 4743 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
6733b39a
JK
4744 .create_session = beiscsi_session_create,
4745 .destroy_session = beiscsi_session_destroy,
4746 .create_conn = beiscsi_conn_create,
4747 .bind_conn = beiscsi_conn_bind,
4748 .destroy_conn = iscsi_conn_teardown,
3128c6c7 4749 .attr_is_visible = be2iscsi_attr_is_visible,
0e43895e
MC
4750 .set_iface_param = be2iscsi_iface_set_param,
4751 .get_iface_param = be2iscsi_iface_get_param,
6733b39a 4752 .set_param = beiscsi_set_param,
c7f7fd5b 4753 .get_conn_param = iscsi_conn_get_param,
6733b39a
JK
4754 .get_session_param = iscsi_session_get_param,
4755 .get_host_param = beiscsi_get_host_param,
4756 .start_conn = beiscsi_conn_start,
fa95d206 4757 .stop_conn = iscsi_conn_stop,
6733b39a
JK
4758 .send_pdu = iscsi_conn_send_pdu,
4759 .xmit_task = beiscsi_task_xmit,
4760 .cleanup_task = beiscsi_cleanup_task,
4761 .alloc_pdu = beiscsi_alloc_pdu,
4762 .parse_pdu_itt = beiscsi_parse_pdu,
4763 .get_stats = beiscsi_conn_get_stats,
c7f7fd5b 4764 .get_ep_param = beiscsi_ep_get_param,
6733b39a
JK
4765 .ep_connect = beiscsi_ep_connect,
4766 .ep_poll = beiscsi_ep_poll,
4767 .ep_disconnect = beiscsi_ep_disconnect,
4768 .session_recovery_timedout = iscsi_session_recovery_timedout,
ffce3e2e 4769 .bsg_request = beiscsi_bsg_request,
6733b39a
JK
4770};
4771
4772static struct pci_driver beiscsi_pci_driver = {
4773 .name = DRV_NAME,
4774 .probe = beiscsi_dev_probe,
4775 .remove = beiscsi_remove,
25602c97 4776 .shutdown = beiscsi_shutdown,
6733b39a
JK
4777 .id_table = beiscsi_pci_id_table
4778};
4779
bfead3b2 4780
6733b39a
JK
4781static int __init beiscsi_module_init(void)
4782{
4783 int ret;
4784
4785 beiscsi_scsi_transport =
4786 iscsi_register_transport(&beiscsi_iscsi_transport);
4787 if (!beiscsi_scsi_transport) {
99bc5d55
JSJ
4788 printk(KERN_ERR
4789 "beiscsi_module_init - Unable to register beiscsi transport.\n");
f55a24f2 4790 return -ENOMEM;
6733b39a 4791 }
99bc5d55
JSJ
4792 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4793 &beiscsi_iscsi_transport);
6733b39a
JK
4794
4795 ret = pci_register_driver(&beiscsi_pci_driver);
4796 if (ret) {
99bc5d55
JSJ
4797 printk(KERN_ERR
4798 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
6733b39a
JK
4799 goto unregister_iscsi_transport;
4800 }
4801 return 0;
4802
4803unregister_iscsi_transport:
4804 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4805 return ret;
4806}
4807
4808static void __exit beiscsi_module_exit(void)
4809{
4810 pci_unregister_driver(&beiscsi_pci_driver);
4811 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4812}
4813
4814module_init(beiscsi_module_init);
4815module_exit(beiscsi_module_exit);