drivers/scsi/pm8001/pm8001_init.c
/*
 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"

static struct scsi_transport_template *pm8001_stt;

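/* Per-chip phy count and dispatch (ops) table, indexed by chip id. */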
static const struct pm8001_chip_info pm8001_chips[] = {
        [chip_8001] = { 8, &pm8001_8001_dispatch,},
};
static int pm8001_id;

LIST_HEAD(hba_list);

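/* Driver-wide workqueue; allocated in pm8001_init() and flushed before teardown. */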
struct workqueue_struct *pm8001_wq;

/**
 * The main scsi_host_template which the LLDD must register with the SCSI core.
 */
static struct scsi_host_template pm8001_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = sas_slave_configure,
        .scan_finished          = pm8001_scan_finished,
        .scan_start             = pm8001_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .change_queue_type      = sas_change_queue_type,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .cmd_per_lun            = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_device_reset_handler = sas_eh_device_reset_handler,
        .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = pm8001_host_attrs,
};

/**
 * The SAS layer calls these functions to execute specific tasks.
 */
static struct sas_domain_function_template pm8001_transport_ops = {
        .lldd_dev_found         = pm8001_dev_found,
        .lldd_dev_gone          = pm8001_dev_gone,

        .lldd_execute_task      = pm8001_queue_command,
        .lldd_control_phy       = pm8001_phy_control,

        .lldd_abort_task        = pm8001_abort_task,
        .lldd_abort_task_set    = pm8001_abort_task_set,
        .lldd_clear_aca         = pm8001_clear_aca,
        .lldd_clear_task_set    = pm8001_clear_task_set,
        .lldd_I_T_nexus_reset   = pm8001_I_T_nexus_reset,
        .lldd_lu_reset          = pm8001_lu_reset,
        .lldd_query_task        = pm8001_query_task,
};

/**
 * pm8001_phy_init - initialize the adapter phys
 * @pm8001_ha: our hba structure.
 * @phy_id: phy id.
 */
static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
        struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        phy->phy_state = 0;
        phy->pm8001_ha = pm8001_ha;
        sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->type = PHY_TYPE_PHYSICAL;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
        sas_phy->id = phy_id;
        sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
        sas_phy->lldd_phy = phy;
}

/**
 * pm8001_free - free hba
 * @pm8001_ha: our hba structure.
 */
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
        int i;

        if (!pm8001_ha)
                return;

        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
                        pci_free_consistent(pm8001_ha->pdev,
                                (pm8001_ha->memoryMap.region[i].total_len +
                                pm8001_ha->memoryMap.region[i].alignment),
                                pm8001_ha->memoryMap.region[i].virt_ptr,
                                pm8001_ha->memoryMap.region[i].phys_addr);
                }
        }
        PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
        if (pm8001_ha->shost)
                scsi_host_put(pm8001_ha->shost);
        flush_workqueue(pm8001_wq);
        kfree(pm8001_ha->tags);
        kfree(pm8001_ha);
}

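/*
 * When PM8001_USE_TASKLET is defined, the hard-IRQ handler only schedules
 * this tasklet and the chip ISR runs later in softirq context.
 */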
#ifdef PM8001_USE_TASKLET
static void pm8001_tasklet(unsigned long opaque)
{
        struct pm8001_hba_info *pm8001_ha;
        pm8001_ha = (struct pm8001_hba_info *)opaque;
        if (unlikely(!pm8001_ha))
                BUG_ON(1);
        PM8001_CHIP_DISP->isr(pm8001_ha);
}
#endif

/**
 * pm8001_interrupt - when the HBA raises an interrupt, we invoke this
 * dispatcher to handle each case.
 * @irq: irq number.
 * @opaque: the passed general host adapter struct
 */
static irqreturn_t pm8001_interrupt(int irq, void *opaque)
{
        struct pm8001_hba_info *pm8001_ha;
        irqreturn_t ret = IRQ_HANDLED;
        struct sas_ha_struct *sha = opaque;
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
        if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
                return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet);
#else
        ret = PM8001_CHIP_DISP->isr(pm8001_ha);
#endif
        return ret;
}

/**
 * pm8001_alloc - initialize our hba structure and its DMA memory regions.
 * @pm8001_ha: our hba structure.
 */
static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
        int i;
        spin_lock_init(&pm8001_ha->lock);
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_phy_init(pm8001_ha, i);
                pm8001_ha->port[i].wide_port_phymap = 0;
                pm8001_ha->port[i].port_attached = 0;
                pm8001_ha->port[i].port_state = 0;
                INIT_LIST_HEAD(&pm8001_ha->port[i].list);
        }

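        /* Allocate the tag set used to track outstanding CCBs; it is
         * initialized by pm8001_tag_init() below. */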
        pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
        if (!pm8001_ha->tags)
                goto err_out;
        /* MPI Memory region 1 for AAP Event Log for fw */
        pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
        pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[AAP1].alignment = 32;

        /* MPI Memory region 2 for IOP Event Log for fw */
        pm8001_ha->memoryMap.region[IOP].num_elements = 1;
        pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].alignment = 32;

        /* MPI Memory region 3 for consumer Index of inbound queues */
        pm8001_ha->memoryMap.region[CI].num_elements = 1;
        pm8001_ha->memoryMap.region[CI].element_size = 4;
        pm8001_ha->memoryMap.region[CI].total_len = 4;
        pm8001_ha->memoryMap.region[CI].alignment = 4;

        /* MPI Memory region 4 for producer Index of outbound queues */
        pm8001_ha->memoryMap.region[PI].num_elements = 1;
        pm8001_ha->memoryMap.region[PI].element_size = 4;
        pm8001_ha->memoryMap.region[PI].total_len = 4;
        pm8001_ha->memoryMap.region[PI].alignment = 4;

        /* MPI Memory region 5 inbound queues */
        pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
        pm8001_ha->memoryMap.region[IB].element_size = 64;
        pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
        pm8001_ha->memoryMap.region[IB].alignment = 64;

        /* MPI Memory region 6 outbound queues */
        pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
        pm8001_ha->memoryMap.region[OB].element_size = 64;
        pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
        pm8001_ha->memoryMap.region[OB].alignment = 64;

        /* Memory region write DMA */
        pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
        pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
        pm8001_ha->memoryMap.region[NVMD].total_len = 4096;

        /* Memory region for devices */
        pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
        pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
                sizeof(struct pm8001_device);
        pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
                sizeof(struct pm8001_device);

        /* Memory region for ccb_info */
        pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
        pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);
        pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);

        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
                        &pm8001_ha->memoryMap.region[i].phys_addr,
                        &pm8001_ha->memoryMap.region[i].phys_addr_hi,
                        &pm8001_ha->memoryMap.region[i].phys_addr_lo,
                        pm8001_ha->memoryMap.region[i].total_len,
                        pm8001_ha->memoryMap.region[i].alignment) != 0) {
                        PM8001_FAIL_DBG(pm8001_ha,
                                pm8001_printk("Mem%d alloc failed\n", i));
                        goto err_out;
                }
        }

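        /* Carve up the device and CCB DMA regions allocated above. */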
        pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_DEVICES; i++) {
                pm8001_ha->devices[i].dev_type = NO_DEVICE;
                pm8001_ha->devices[i].id = i;
                pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
                pm8001_ha->devices[i].running_req = 0;
        }
        pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_CCB; i++) {
                pm8001_ha->ccb_info[i].ccb_dma_handle =
                        pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
                        i * sizeof(struct pm8001_ccb_info);
                pm8001_ha->ccb_info[i].task = NULL;
                pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
                pm8001_ha->ccb_info[i].device = NULL;
                ++pm8001_ha->tags_num;
        }
        pm8001_ha->flags = PM8001F_INIT_TIME;
        /* Initialize tags */
        pm8001_tag_init(pm8001_ha);
        return 0;
err_out:
        return 1;
}

/**
 * pm8001_ioremap - remap the PCI high physical addresses to kernel virtual
 * addresses so that we can access them.
 * @pm8001_ha: our hba structure.
 */
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
        u32 bar;
        u32 logicalBar = 0;
        struct pci_dev *pdev;

        pdev = pm8001_ha->pdev;
        /* map pci mem (PMC pci base 0-3) */
        for (bar = 0; bar < 6; bar++) {
                /*
                 * logical BARs for SPC:
                 * bar 0 and 1 - logical BAR0
                 * bar 2 and 3 - logical BAR1
                 * bar 4 - logical BAR2
                 * bar 5 - logical BAR3
                 * Skip the appropriate assignments:
                 */
                if ((bar == 1) || (bar == 3))
                        continue;
                if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                        pm8001_ha->io_mem[logicalBar].membase =
                                pci_resource_start(pdev, bar);
                        pm8001_ha->io_mem[logicalBar].membase &=
                                (u32)PCI_BASE_ADDRESS_MEM_MASK;
                        pm8001_ha->io_mem[logicalBar].memsize =
                                pci_resource_len(pdev, bar);
                        pm8001_ha->io_mem[logicalBar].memvirtaddr =
                                ioremap(pm8001_ha->io_mem[logicalBar].membase,
                                        pm8001_ha->io_mem[logicalBar].memsize);
                        PM8001_INIT_DBG(pm8001_ha,
                                pm8001_printk("PCI: bar %d, logicalBar %d "
                                "virt_addr=%lx,len=%d\n", bar, logicalBar,
                                (unsigned long)
                                pm8001_ha->io_mem[logicalBar].memvirtaddr,
                                pm8001_ha->io_mem[logicalBar].memsize));
                } else {
                        pm8001_ha->io_mem[logicalBar].membase = 0;
                        pm8001_ha->io_mem[logicalBar].memsize = 0;
                        pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
                }
                logicalBar++;
        }
        return 0;
}

/**
 * pm8001_pci_alloc - initialize our ha card structure
 * @pdev: pci device.
 * @chip_id: chip id.
 * @shost: scsi host struct which has been initialized before.
 */
static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
                                u32 chip_id,
                                struct Scsi_Host *shost)
{
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        pm8001_ha = sha->lldd_ha;
        if (!pm8001_ha)
                return NULL;

        pm8001_ha->pdev = pdev;
        pm8001_ha->dev = &pdev->dev;
        pm8001_ha->chip_id = chip_id;
        pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
        pm8001_ha->irq = pdev->irq;
        pm8001_ha->sas = sha;
        pm8001_ha->shost = shost;
        pm8001_ha->id = pm8001_id++;
        pm8001_ha->logging_level = 0x01;
        sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
                (unsigned long)pm8001_ha);
#endif
        pm8001_ioremap(pm8001_ha);
        if (!pm8001_alloc(pm8001_ha))
                return pm8001_ha;
        pm8001_free(pm8001_ha);
        return NULL;
}

/**
 * pci_go_44 - pm8001-specific: the SPC's DMA addressing is 44 bit rather
 * than 64 bit.
 * @pdev: pci device.
 */
static int pci_go_44(struct pci_dev *pdev)
{
        int rc;

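        /* Try a 44-bit DMA mask first; fall back to 32 bit if that fails. */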
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev,
                                DMA_BIT_MASK(32));
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                        "44-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }
        return rc;
}

/**
 * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and
 * initialize it.
 * @shost: scsi host which has been allocated outside.
 * @chip_info: our ha struct.
 */
static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
                                const struct pm8001_chip_info *chip_info)
{
        int phy_nr, port_nr;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        phy_nr = chip_info->n_phy;
        port_nr = phy_nr;
        memset(sha, 0x00, sizeof(*sha));
        arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy)
                goto exit;
        arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_port)
                goto exit_free2;

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;
        sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
        if (!sha->lldd_ha)
                goto exit_free1;

        shost->transportt = pm8001_stt;
        shost->max_id = PM8001_MAX_DEVICES;
        shost->max_lun = 8;
        shost->max_channel = 0;
        shost->unique_id = pm8001_id;
        shost->max_cmd_len = 16;
        shost->can_queue = PM8001_CAN_QUEUE;
        shost->cmd_per_lun = 32;
        return 0;
exit_free1:
        kfree(arr_port);
exit_free2:
        kfree(arr_phy);
exit:
        return -1;
}

/**
 * pm8001_post_sas_ha_init - initialize the general hba struct defined in libsas
 * @shost: scsi host which has been allocated outside
 * @chip_info: our ha struct.
 */
static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
                                const struct pm8001_chip_info *chip_info)
{
        int i = 0;
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        pm8001_ha = sha->lldd_ha;
        for (i = 0; i < chip_info->n_phy; i++) {
                sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
                sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
        }
        sha->sas_ha_name = DRV_NAME;
        sha->dev = pm8001_ha->dev;

        sha->lldd_module = THIS_MODULE;
        sha->sas_addr = &pm8001_ha->sas_addr[0];
        sha->num_phys = chip_info->n_phy;
        sha->lldd_max_execute_num = 1;
        sha->lldd_queue_size = PM8001_CAN_QUEUE;
        sha->core.shost = shost;
}

/**
 * pm8001_init_sas_add - initialize the SAS address
 * @pm8001_ha: our hba structure.
 *
 * Currently we just set a fixed SAS address for our HBA; for production it
 * should be read from the EEPROM.
 */
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
        u8 i;
#ifdef PM8001_READ_VPD
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
        pm8001_ha->nvmd_completion = &completion;
        payload.minor_function = 0;
        payload.length = 128;
        payload.func_specific = kzalloc(128, GFP_KERNEL);
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
                        SAS_ADDR_SIZE);
                PM8001_INIT_DBG(pm8001_ha,
                        pm8001_printk("phy %d sas_addr = %016llx \n", i,
                        pm8001_ha->phy[i].dev_sas_addr));
        }
#else
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
                pm8001_ha->phy[i].dev_sas_addr =
                        cpu_to_be64((u64)
                        (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
        }
        memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
                SAS_ADDR_SIZE);
#endif
}

#ifdef PM8001_USE_MSIX
/**
 * pm8001_setup_msix - enable MSI-X interrupt
 * @pm8001_ha: our hba structure.
 * @irq_handler: irq_handler
 */
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
        irq_handler_t irq_handler)
{
        u32 i = 0, j = 0;
        u32 number_of_intr = 1;
        int flag = 0;
        u32 max_entry;
        int rc;
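        /* Only a single MSI-X vector is requested here (number_of_intr == 1),
         * even though all msix_entries slots are initialized. */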
        max_entry = sizeof(pm8001_ha->msix_entries) /
                sizeof(pm8001_ha->msix_entries[0]);
        flag |= IRQF_DISABLED;
        for (i = 0; i < max_entry; i++)
                pm8001_ha->msix_entries[i].entry = i;
        rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
                number_of_intr);
        pm8001_ha->number_of_intr = number_of_intr;
        if (!rc) {
                for (i = 0; i < number_of_intr; i++) {
                        if (request_irq(pm8001_ha->msix_entries[i].vector,
                                irq_handler, flag, DRV_NAME,
                                SHOST_TO_SAS_HA(pm8001_ha->shost))) {
                                for (j = 0; j < i; j++)
                                        free_irq(
                                        pm8001_ha->msix_entries[j].vector,
                                        SHOST_TO_SAS_HA(pm8001_ha->shost));
                                pci_disable_msix(pm8001_ha->pdev);
                                break;
                        }
                }
        }
        return rc;
}
#endif

/**
 * pm8001_request_irq - register interrupt
 * @pm8001_ha: our hba structure.
 */
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
        struct pci_dev *pdev;
        irq_handler_t irq_handler = pm8001_interrupt;
        int rc;

        pdev = pm8001_ha->pdev;

#ifdef PM8001_USE_MSIX
        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
                return pm8001_setup_msix(pm8001_ha, irq_handler);
        else
                goto intx;
#endif

intx:
        /* initialize the INT-X interrupt */
        rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
                SHOST_TO_SAS_HA(pm8001_ha->shost));
        return rc;
}

/**
 * pm8001_pci_probe - probe supported device
 * @pdev: pci device which the kernel has prepared for us.
 * @ent: pci device id
 *
 * This is the main initialization function: it is invoked for each matching
 * PCI device when the driver is registered. All structure and hardware
 * initialization is done here, and the interrupt is registered as well.
 */
static int pm8001_pci_probe(struct pci_dev *pdev,
        const struct pci_device_id *ent)
{
        unsigned int rc;
        u32 pci_reg;
        struct pm8001_hba_info *pm8001_ha;
        struct Scsi_Host *shost = NULL;
        const struct pm8001_chip_info *chip;

        dev_printk(KERN_INFO, &pdev->dev,
                "pm8001: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;
        pci_set_master(pdev);
        /*
         * Enable pci slot busmaster by setting pci command register.
         * This is required by FW for Cyclone card.
         */
        pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
        pci_reg |= 0x157;
        pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_disable;
        rc = pci_go_44(pdev);
        if (rc)
                goto err_out_regions;

        shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
        if (!shost) {
                rc = -ENOMEM;
                goto err_out_regions;
        }
        chip = &pm8001_chips[ent->driver_data];
        SHOST_TO_SAS_HA(shost) =
                kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
        if (!SHOST_TO_SAS_HA(shost)) {
                rc = -ENOMEM;
                goto err_out_free_host;
        }

        rc = pm8001_prep_sas_ha_init(shost, chip);
        if (rc) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
        pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
        if (!pm8001_ha) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_ha_free;

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha_free;
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_shost;

        PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        pm8001_init_sas_add(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
                goto err_out_shost;
        scsi_scan_host(pm8001_ha->shost);
        return 0;

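        /* Error unwinding: undo the setup steps above in reverse order. */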
err_out_shost:
        scsi_remove_host(pm8001_ha->shost);
err_out_ha_free:
        pm8001_free(pm8001_ha);
err_out_free:
        kfree(SHOST_TO_SAS_HA(shost));
err_out_free_host:
        kfree(shost);
err_out_regions:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_enable:
        return rc;
}

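/**
 * pm8001_pci_remove - release everything that pm8001_pci_probe set up
 * @pdev: PCI device struct
 */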
static void pm8001_pci_remove(struct pci_dev *pdev)
{
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int i;
        pm8001_ha = sha->lldd_ha;
        pci_set_drvdata(pdev, NULL);
        sas_unregister_ha(sha);
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        scsi_remove_host(pm8001_ha->shost);
        PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);

#ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                free_irq(pm8001_ha->msix_entries[i].vector, sha);
        pci_disable_msix(pdev);
#else
        free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
        tasklet_kill(&pm8001_ha->tasklet);
#endif
        pm8001_free(pm8001_ha);
        kfree(sha->sas_phy);
        kfree(sha->sas_port);
        kfree(sha);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

/**
 * pm8001_pci_suspend - power management suspend main entry point
 * @pdev: PCI device struct
 * @state: PM state change to (usually PCI_D3)
 *
 * Returns 0 on success, anything else on error.
 */
static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int i, pos;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        flush_workqueue(pm8001_wq);
        scsi_block_requests(pm8001_ha->shost);
        pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pos == 0) {
                printk(KERN_ERR " PCI PM not supported\n");
                return -ENODEV;
        }
        PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
#ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                free_irq(pm8001_ha->msix_entries[i].vector, sha);
        pci_disable_msix(pdev);
#else
        free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
        tasklet_kill(&pm8001_ha->tasklet);
#endif
        device_state = pci_choose_state(pdev, state);
        pm8001_printk("pdev=0x%p, slot=%s, entering "
                "operating state [D%d]\n", pdev,
                pm8001_ha->name, device_state);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, device_state);
        return 0;
}

/**
 * pm8001_pci_resume - power management resume main entry point
 * @pdev: PCI device struct
 *
 * Returns 0 on success, anything else on error.
 */
static int pm8001_pci_resume(struct pci_dev *pdev)
{
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        device_state = pdev->current_state;

        pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
                "operating state [D%d]\n", pdev, pm8001_ha->name, device_state);

        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
        pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc) {
                pm8001_printk("slot=%s Enable device failed during resume\n",
                        pm8001_ha->name);
                goto err_out_enable;
        }

        pci_set_master(pdev);
        rc = pci_go_44(pdev);
        if (rc)
                goto err_out_disable;

        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_disable;
        PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
#ifdef PM8001_USE_TASKLET
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
                (unsigned long)pm8001_ha);
#endif
        PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        scsi_unblock_requests(pm8001_ha->shost);
        return 0;

err_out_disable:
        scsi_remove_host(pm8001_ha->shost);
        pci_disable_device(pdev);
err_out_enable:
        return rc;
}

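/* PCI ids claimed by this driver; driver_data selects the entry in pm8001_chips. */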
static struct pci_device_id pm8001_pci_table[] = {
        {
                PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
        },
        {
                PCI_DEVICE(0x117c, 0x0042),
                .driver_data = chip_8001
        },
        {} /* terminate list */
};

static struct pci_driver pm8001_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = pm8001_pci_table,
        .probe          = pm8001_pci_probe,
        .remove         = pm8001_pci_remove,
        .suspend        = pm8001_pci_suspend,
        .resume         = pm8001_pci_resume,
};

/**
 * pm8001_init - initialize scsi transport template
 */
static int __init pm8001_init(void)
{
        int rc = -ENOMEM;

        pm8001_wq = alloc_workqueue("pm8001", 0, 0);
        if (!pm8001_wq)
                goto err;

        pm8001_id = 0;
        pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
        if (!pm8001_stt)
                goto err_wq;
        rc = pci_register_driver(&pm8001_pci_driver);
        if (rc)
                goto err_tp;
        return 0;

err_tp:
        sas_release_transport(pm8001_stt);
err_wq:
        destroy_workqueue(pm8001_wq);
err:
        return rc;
}

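/**
 * pm8001_exit - unregister the pci driver and release the transport template
 */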
static void __exit pm8001_exit(void)
{
        pci_unregister_driver(&pm8001_pci_driver);
        sas_release_transport(pm8001_stt);
        destroy_workqueue(pm8001_wq);
}

module_init(pm8001_init);
module_exit(pm8001_exit);

MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);