[deliverable/linux.git] / drivers / scsi / megaraid / megaraid_sas_base.c
1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <asm/uaccess.h>
46 #include <linux/fs.h>
47 #include <linux/compat.h>
48 #include <linux/blkdev.h>
49 #include <linux/mutex.h>
50 #include <linux/poll.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_tcq.h>
57 #include "megaraid_sas_fusion.h"
58 #include "megaraid_sas.h"
59
60 /*
61 * Number of sectors per IO command
62 * Will be set in megasas_init_mfi if user does not provide
63 */
64 static unsigned int max_sectors;
65 module_param_named(max_sectors, max_sectors, int, 0);
66 MODULE_PARM_DESC(max_sectors,
67 "Maximum number of sectors per IO command");
68
69 static int msix_disable;
70 module_param(msix_disable, int, S_IRUGO);
71 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
72
73 static unsigned int msix_vectors;
74 module_param(msix_vectors, int, S_IRUGO);
75 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
76
77 static int allow_vf_ioctls;
78 module_param(allow_vf_ioctls, int, S_IRUGO);
79 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
80
81 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
82 module_param(throttlequeuedepth, int, S_IRUGO);
83 MODULE_PARM_DESC(throttlequeuedepth,
84 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
85
86 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
87 module_param(resetwaittime, int, S_IRUGO);
88 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
89 "before resetting adapter. Default: 180");
90
91 int smp_affinity_enable = 1;
92 module_param(smp_affinity_enable, int, S_IRUGO);
93 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
94
95 int rdpq_enable = 1;
96 module_param(rdpq_enable, int, S_IRUGO);
97 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
98
99 unsigned int dual_qdepth_disable;
100 module_param(dual_qdepth_disable, int, S_IRUGO);
101 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
102
103 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
104 module_param(scmd_timeout, int, S_IRUGO);
105 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
106
107 MODULE_LICENSE("GPL");
108 MODULE_VERSION(MEGASAS_VERSION);
109 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
110 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
111
112 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
113 static int megasas_get_pd_list(struct megasas_instance *instance);
114 static int megasas_ld_list_query(struct megasas_instance *instance,
115 u8 query_type);
116 static int megasas_issue_init_mfi(struct megasas_instance *instance);
117 static int megasas_register_aen(struct megasas_instance *instance,
118 u32 seq_num, u32 class_locale_word);
119 static int
120 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
121 /*
122 * PCI ID table for all supported controllers
123 */
124 static struct pci_device_id megasas_pci_table[] = {
125
126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
127 /* xscale IOP */
128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
129 /* ppc IOP */
130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
131 /* ppc IOP */
132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
133 /* gen2*/
134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
135 /* gen2*/
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
137 /* skinny*/
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
139 /* skinny*/
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
141 /* xscale IOP, vega */
142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
143 /* xscale IOP */
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
145 /* Fusion */
146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
147 /* Plasma */
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
149 /* Invader */
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
151 /* Fury */
152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
153 /* Intruder */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
155 /* Intruder 24 port*/
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
158 {}
159 };
160
161 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
162
163 static int megasas_mgmt_majorno;
164 struct megasas_mgmt_info megasas_mgmt_info;
165 static struct fasync_struct *megasas_async_queue;
166 static DEFINE_MUTEX(megasas_async_queue_mutex);
167
168 static int megasas_poll_wait_aen;
169 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
170 static u32 support_poll_for_event;
171 u32 megasas_dbg_lvl;
172 static u32 support_device_change;
173
174 /* define lock for aen poll */
175 spinlock_t poll_aen_lock;
176
177 void
178 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
179 u8 alt_status);
180 static u32
181 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
182 static int
183 megasas_adp_reset_gen2(struct megasas_instance *instance,
184 struct megasas_register_set __iomem *reg_set);
185 static irqreturn_t megasas_isr(int irq, void *devp);
186 static u32
187 megasas_init_adapter_mfi(struct megasas_instance *instance);
188 u32
189 megasas_build_and_issue_cmd(struct megasas_instance *instance,
190 struct scsi_cmnd *scmd);
191 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
192 void
193 megasas_release_fusion(struct megasas_instance *instance);
194 int
195 megasas_ioc_init_fusion(struct megasas_instance *instance);
196 void
197 megasas_free_cmds_fusion(struct megasas_instance *instance);
198 u8
199 megasas_get_map_info(struct megasas_instance *instance);
200 int
201 megasas_sync_map_info(struct megasas_instance *instance);
202 int
203 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
204 int seconds);
205 void megasas_reset_reply_desc(struct megasas_instance *instance);
206 void megasas_fusion_ocr_wq(struct work_struct *work);
207 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
208 int initial);
209 int megasas_check_mpio_paths(struct megasas_instance *instance,
210 struct scsi_cmnd *scmd);
211
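/**
 * megasas_issue_dcmd -	Issues an MFI frame to the firmware
 * @instance:		Adapter soft state
 * @cmd:		MFI command to be posted
 *
 * Hands the command's frame physical address to the controller-specific
 * fire_cmd method; completion is handled separately by the caller, either
 * through the ISR wait queues or by polling.
 */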
212 int
213 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
214 {
215 instance->instancet->fire_cmd(instance,
216 cmd->frame_phys_addr, 0, instance->reg_set);
217 return 0;
218 }
219
220 /**
221 * megasas_get_cmd - Get a command from the free pool
222 * @instance: Adapter soft state
223 *
224 * Returns a free command from the pool
225 */
226 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
227 *instance)
228 {
229 unsigned long flags;
230 struct megasas_cmd *cmd = NULL;
231
232 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
233
234 if (!list_empty(&instance->cmd_pool)) {
235 cmd = list_entry((&instance->cmd_pool)->next,
236 struct megasas_cmd, list);
237 list_del_init(&cmd->list);
238 } else {
239 dev_err(&instance->pdev->dev, "Command pool empty!\n");
240 }
241
242 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
243 return cmd;
244 }
245
246 /**
247 * megasas_return_cmd - Return a cmd to free command pool
248 * @instance: Adapter soft state
249 * @cmd: Command packet to be returned to free command pool
250 */
251 inline void
252 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
253 {
254 unsigned long flags;
255 u32 blk_tags;
256 struct megasas_cmd_fusion *cmd_fusion;
257 struct fusion_context *fusion = instance->ctrl_context;
258
259 /* This flag is used only for fusion adapter.
260 * Wait for Interrupt for Polled mode DCMD
261 */
262 if (cmd->flags & DRV_DCMD_POLLED_MODE)
263 return;
264
265 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
266
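	/*
	 * On Fusion adapters each internal MFI command is paired with a
	 * fusion command indexed above the SCSI command range
	 * (max_scsi_cmds + cmd->index); return that shadow command as well.
	 */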
267 if (fusion) {
268 blk_tags = instance->max_scsi_cmds + cmd->index;
269 cmd_fusion = fusion->cmd_list[blk_tags];
270 megasas_return_cmd_fusion(instance, cmd_fusion);
271 }
272 cmd->scmd = NULL;
273 cmd->frame_count = 0;
274 cmd->flags = 0;
275 if (!fusion && reset_devices)
276 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
277 list_add(&cmd->list, (&instance->cmd_pool)->next);
278
279 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
280
281 }
282
283 static const char *
284 format_timestamp(uint32_t timestamp)
285 {
286 static char buffer[32];
287
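	/*
	 * A timestamp with the top byte set to 0xff encodes seconds since
	 * controller boot in its low 24 bits; anything else is printed as a
	 * plain seconds value.
	 */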
288 if ((timestamp & 0xff000000) == 0xff000000)
289 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
290 0x00ffffff);
291 else
292 snprintf(buffer, sizeof(buffer), "%us", timestamp);
293 return buffer;
294 }
295
296 static const char *
297 format_class(int8_t class)
298 {
299 static char buffer[6];
300
301 switch (class) {
302 case MFI_EVT_CLASS_DEBUG:
303 return "debug";
304 case MFI_EVT_CLASS_PROGRESS:
305 return "progress";
306 case MFI_EVT_CLASS_INFO:
307 return "info";
308 case MFI_EVT_CLASS_WARNING:
309 return "WARN";
310 case MFI_EVT_CLASS_CRITICAL:
311 return "CRIT";
312 case MFI_EVT_CLASS_FATAL:
313 return "FATAL";
314 case MFI_EVT_CLASS_DEAD:
315 return "DEAD";
316 default:
317 snprintf(buffer, sizeof(buffer), "%d", class);
318 return buffer;
319 }
320 }
321
322 /**
323 * megasas_decode_evt: Decode FW AEN event and print critical event
324 * for information.
325 * @instance: Adapter soft state
326 */
327 static void
328 megasas_decode_evt(struct megasas_instance *instance)
329 {
330 struct megasas_evt_detail *evt_detail = instance->evt_detail;
331 union megasas_evt_class_locale class_locale;
332 class_locale.word = le32_to_cpu(evt_detail->cl.word);
333
334 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
335 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
336 le32_to_cpu(evt_detail->seq_num),
337 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
338 (class_locale.members.locale),
339 format_class(class_locale.members.class),
340 evt_detail->description);
341 }
342
343 /**
344 * The following functions are defined for xscale
345 * (deviceid : 1064R, PERC5) controllers
346 */
347
348 /**
349 * megasas_enable_intr_xscale - Enables interrupts
350 * @instance:	Adapter soft state
351 */
352 static inline void
353 megasas_enable_intr_xscale(struct megasas_instance *instance)
354 {
355 struct megasas_register_set __iomem *regs;
356
357 regs = instance->reg_set;
358 writel(0, &(regs)->outbound_intr_mask);
359
360 /* Dummy readl to force pci flush */
361 readl(&regs->outbound_intr_mask);
362 }
363
364 /**
365 * megasas_disable_intr_xscale -Disables interrupt
366 * @instance:	Adapter soft state
367 */
368 static inline void
369 megasas_disable_intr_xscale(struct megasas_instance *instance)
370 {
371 struct megasas_register_set __iomem *regs;
372 u32 mask = 0x1f;
373
374 regs = instance->reg_set;
375 writel(mask, &regs->outbound_intr_mask);
376 /* Dummy readl to force pci flush */
377 readl(&regs->outbound_intr_mask);
378 }
379
380 /**
381 * megasas_read_fw_status_reg_xscale - returns the current FW status value
382 * @regs: MFI register set
383 */
384 static u32
385 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
386 {
387 return readl(&(regs)->outbound_msg_0);
388 }
389 /**
390 * megasas_clear_intr_xscale -	Check & clear interrupt
391 * @regs: MFI register set
392 */
393 static int
394 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
395 {
396 u32 status;
397 u32 mfiStatus = 0;
398
399 /*
400 * Check if it is our interrupt
401 */
402 status = readl(&regs->outbound_intr_status);
403
404 if (status & MFI_OB_INTR_STATUS_MASK)
405 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
406 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
407 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
408
409 /*
410 * Clear the interrupt by writing back the same value
411 */
412 if (mfiStatus)
413 writel(status, &regs->outbound_intr_status);
414
415 /* Dummy readl to force pci flush */
416 readl(&regs->outbound_intr_status);
417
418 return mfiStatus;
419 }
420
421 /**
422 * megasas_fire_cmd_xscale - Sends command to the FW
423 * @frame_phys_addr : Physical address of cmd
424 * @frame_count : Number of frames for the command
425 * @regs : MFI register set
426 */
427 static inline void
428 megasas_fire_cmd_xscale(struct megasas_instance *instance,
429 dma_addr_t frame_phys_addr,
430 u32 frame_count,
431 struct megasas_register_set __iomem *regs)
432 {
433 unsigned long flags;
434
435 spin_lock_irqsave(&instance->hba_lock, flags);
436 writel((frame_phys_addr >> 3)|(frame_count),
437 &(regs)->inbound_queue_port);
438 spin_unlock_irqrestore(&instance->hba_lock, flags);
439 }
440
441 /**
442 * megasas_adp_reset_xscale - For controller reset
443 * @regs: MFI register set
444 */
445 static int
446 megasas_adp_reset_xscale(struct megasas_instance *instance,
447 struct megasas_register_set __iomem *regs)
448 {
449 u32 i;
450 u32 pcidata;
451
452 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
453
454 for (i = 0; i < 3; i++)
455 msleep(1000); /* sleep for 3 secs */
456 pcidata = 0;
457 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
458 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
459 if (pcidata & 0x2) {
460 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
461 pcidata &= ~0x2;
462 pci_write_config_dword(instance->pdev,
463 MFI_1068_PCSR_OFFSET, pcidata);
464
465 for (i = 0; i < 2; i++)
466 msleep(1000); /* need to wait 2 secs again */
467
468 pcidata = 0;
469 pci_read_config_dword(instance->pdev,
470 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
471 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
472 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
473 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
474 pcidata = 0;
475 pci_write_config_dword(instance->pdev,
476 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
477 }
478 }
479 return 0;
480 }
481
482 /**
483 * megasas_check_reset_xscale - For controller reset check
484 * @regs: MFI register set
485 */
486 static int
487 megasas_check_reset_xscale(struct megasas_instance *instance,
488 struct megasas_register_set __iomem *regs)
489 {
490 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
491 (le32_to_cpu(*instance->consumer) ==
492 MEGASAS_ADPRESET_INPROG_SIGN))
493 return 1;
494 return 0;
495 }
496
497 static struct megasas_instance_template megasas_instance_template_xscale = {
498
499 .fire_cmd = megasas_fire_cmd_xscale,
500 .enable_intr = megasas_enable_intr_xscale,
501 .disable_intr = megasas_disable_intr_xscale,
502 .clear_intr = megasas_clear_intr_xscale,
503 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
504 .adp_reset = megasas_adp_reset_xscale,
505 .check_reset = megasas_check_reset_xscale,
506 .service_isr = megasas_isr,
507 .tasklet = megasas_complete_cmd_dpc,
508 .init_adapter = megasas_init_adapter_mfi,
509 .build_and_issue_cmd = megasas_build_and_issue_cmd,
510 .issue_dcmd = megasas_issue_dcmd,
511 };
512
513 /**
514 * This is the end of set of functions & definitions specific
515 * to xscale (deviceid : 1064R, PERC5) controllers
516 */
517
518 /**
519 * The following functions are defined for ppc (deviceid : 0x60)
520 * controllers
521 */
522
523 /**
524 * megasas_enable_intr_ppc - Enables interrupts
525 * @instance:	Adapter soft state
526 */
527 static inline void
528 megasas_enable_intr_ppc(struct megasas_instance *instance)
529 {
530 struct megasas_register_set __iomem *regs;
531
532 regs = instance->reg_set;
533 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
534
535 writel(~0x80000000, &(regs)->outbound_intr_mask);
536
537 /* Dummy readl to force pci flush */
538 readl(&regs->outbound_intr_mask);
539 }
540
541 /**
542 * megasas_disable_intr_ppc - Disable interrupt
543 * @instance:	Adapter soft state
544 */
545 static inline void
546 megasas_disable_intr_ppc(struct megasas_instance *instance)
547 {
548 struct megasas_register_set __iomem *regs;
549 u32 mask = 0xFFFFFFFF;
550
551 regs = instance->reg_set;
552 writel(mask, &regs->outbound_intr_mask);
553 /* Dummy readl to force pci flush */
554 readl(&regs->outbound_intr_mask);
555 }
556
557 /**
558 * megasas_read_fw_status_reg_ppc - returns the current FW status value
559 * @regs: MFI register set
560 */
561 static u32
562 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
563 {
564 return readl(&(regs)->outbound_scratch_pad);
565 }
566
567 /**
568 * megasas_clear_intr_ppc -	Check & clear interrupt
569 * @regs: MFI register set
570 */
571 static int
572 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
573 {
574 u32 status, mfiStatus = 0;
575
576 /*
577 * Check if it is our interrupt
578 */
579 status = readl(&regs->outbound_intr_status);
580
581 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
582 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
583
584 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
585 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
586
587 /*
588 * Clear the interrupt by writing back the same value
589 */
590 writel(status, &regs->outbound_doorbell_clear);
591
592 /* Dummy readl to force pci flush */
593 readl(&regs->outbound_doorbell_clear);
594
595 return mfiStatus;
596 }
597
598 /**
599 * megasas_fire_cmd_ppc - Sends command to the FW
600 * @frame_phys_addr : Physical address of cmd
601 * @frame_count : Number of frames for the command
602 * @regs : MFI register set
603 */
604 static inline void
605 megasas_fire_cmd_ppc(struct megasas_instance *instance,
606 dma_addr_t frame_phys_addr,
607 u32 frame_count,
608 struct megasas_register_set __iomem *regs)
609 {
610 unsigned long flags;
611
612 spin_lock_irqsave(&instance->hba_lock, flags);
613 writel((frame_phys_addr | (frame_count<<1))|1,
614 &(regs)->inbound_queue_port);
615 spin_unlock_irqrestore(&instance->hba_lock, flags);
616 }
617
618 /**
619 * megasas_check_reset_ppc - For controller reset check
620 * @regs: MFI register set
621 */
622 static int
623 megasas_check_reset_ppc(struct megasas_instance *instance,
624 struct megasas_register_set __iomem *regs)
625 {
626 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
627 return 1;
628
629 return 0;
630 }
631
632 static struct megasas_instance_template megasas_instance_template_ppc = {
633
634 .fire_cmd = megasas_fire_cmd_ppc,
635 .enable_intr = megasas_enable_intr_ppc,
636 .disable_intr = megasas_disable_intr_ppc,
637 .clear_intr = megasas_clear_intr_ppc,
638 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
639 .adp_reset = megasas_adp_reset_xscale,
640 .check_reset = megasas_check_reset_ppc,
641 .service_isr = megasas_isr,
642 .tasklet = megasas_complete_cmd_dpc,
643 .init_adapter = megasas_init_adapter_mfi,
644 .build_and_issue_cmd = megasas_build_and_issue_cmd,
645 .issue_dcmd = megasas_issue_dcmd,
646 };
647
648 /**
649 * megasas_enable_intr_skinny - Enables interrupts
650 * @instance:	Adapter soft state
651 */
652 static inline void
653 megasas_enable_intr_skinny(struct megasas_instance *instance)
654 {
655 struct megasas_register_set __iomem *regs;
656
657 regs = instance->reg_set;
658 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
659
660 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
661
662 /* Dummy readl to force pci flush */
663 readl(&regs->outbound_intr_mask);
664 }
665
666 /**
667 * megasas_disable_intr_skinny - Disables interrupt
668 * @instance:	Adapter soft state
669 */
670 static inline void
671 megasas_disable_intr_skinny(struct megasas_instance *instance)
672 {
673 struct megasas_register_set __iomem *regs;
674 u32 mask = 0xFFFFFFFF;
675
676 regs = instance->reg_set;
677 writel(mask, &regs->outbound_intr_mask);
678 /* Dummy readl to force pci flush */
679 readl(&regs->outbound_intr_mask);
680 }
681
682 /**
683 * megasas_read_fw_status_reg_skinny - returns the current FW status value
684 * @regs: MFI register set
685 */
686 static u32
687 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
688 {
689 return readl(&(regs)->outbound_scratch_pad);
690 }
691
692 /**
693 * megasas_clear_intr_skinny -	Check & clear interrupt
694 * @regs: MFI register set
695 */
696 static int
697 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
698 {
699 u32 status;
700 u32 mfiStatus = 0;
701
702 /*
703 * Check if it is our interrupt
704 */
705 status = readl(&regs->outbound_intr_status);
706
707 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
708 return 0;
709 }
710
711 /*
712 * Check whether the firmware has moved into a FAULT state
713 */
714 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
715 MFI_STATE_FAULT) {
716 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
717 } else
718 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
719
720 /*
721 * Clear the interrupt by writing back the same value
722 */
723 writel(status, &regs->outbound_intr_status);
724
725 /*
726 * dummy read to flush PCI
727 */
728 readl(&regs->outbound_intr_status);
729
730 return mfiStatus;
731 }
732
733 /**
734 * megasas_fire_cmd_skinny - Sends command to the FW
735 * @frame_phys_addr : Physical address of cmd
736 * @frame_count : Number of frames for the command
737 * @regs : MFI register set
738 */
739 static inline void
740 megasas_fire_cmd_skinny(struct megasas_instance *instance,
741 dma_addr_t frame_phys_addr,
742 u32 frame_count,
743 struct megasas_register_set __iomem *regs)
744 {
745 unsigned long flags;
746
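	/*
	 * Post the 64-bit frame address as two 32-bit writes (high word
	 * first); the frame count is packed into the low word along with
	 * bit 0 set.
	 */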
747 spin_lock_irqsave(&instance->hba_lock, flags);
748 writel(upper_32_bits(frame_phys_addr),
749 &(regs)->inbound_high_queue_port);
750 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
751 &(regs)->inbound_low_queue_port);
752 mmiowb();
753 spin_unlock_irqrestore(&instance->hba_lock, flags);
754 }
755
756 /**
757 * megasas_check_reset_skinny - For controller reset check
758 * @regs: MFI register set
759 */
760 static int
761 megasas_check_reset_skinny(struct megasas_instance *instance,
762 struct megasas_register_set __iomem *regs)
763 {
764 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
765 return 1;
766
767 return 0;
768 }
769
770 static struct megasas_instance_template megasas_instance_template_skinny = {
771
772 .fire_cmd = megasas_fire_cmd_skinny,
773 .enable_intr = megasas_enable_intr_skinny,
774 .disable_intr = megasas_disable_intr_skinny,
775 .clear_intr = megasas_clear_intr_skinny,
776 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
777 .adp_reset = megasas_adp_reset_gen2,
778 .check_reset = megasas_check_reset_skinny,
779 .service_isr = megasas_isr,
780 .tasklet = megasas_complete_cmd_dpc,
781 .init_adapter = megasas_init_adapter_mfi,
782 .build_and_issue_cmd = megasas_build_and_issue_cmd,
783 .issue_dcmd = megasas_issue_dcmd,
784 };
785
786
787 /**
788 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
789 * controllers
790 */
791
792 /**
793 * megasas_enable_intr_gen2 - Enables interrupts
794 * @instance:	Adapter soft state
795 */
796 static inline void
797 megasas_enable_intr_gen2(struct megasas_instance *instance)
798 {
799 struct megasas_register_set __iomem *regs;
800
801 regs = instance->reg_set;
802 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
803
804 /* write ~0x00000005 (i.e. unmask bits 2 and 0) to the intr mask */
805 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
806
807 /* Dummy readl to force pci flush */
808 readl(&regs->outbound_intr_mask);
809 }
810
811 /**
812 * megasas_disable_intr_gen2 - Disables interrupt
813 * @instance:	Adapter soft state
814 */
815 static inline void
816 megasas_disable_intr_gen2(struct megasas_instance *instance)
817 {
818 struct megasas_register_set __iomem *regs;
819 u32 mask = 0xFFFFFFFF;
820
821 regs = instance->reg_set;
822 writel(mask, &regs->outbound_intr_mask);
823 /* Dummy readl to force pci flush */
824 readl(&regs->outbound_intr_mask);
825 }
826
827 /**
828 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
829 * @regs: MFI register set
830 */
831 static u32
832 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
833 {
834 return readl(&(regs)->outbound_scratch_pad);
835 }
836
837 /**
838 * megasas_clear_intr_gen2 -	Check & clear interrupt
839 * @regs: MFI register set
840 */
841 static int
842 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
843 {
844 u32 status;
845 u32 mfiStatus = 0;
846
847 /*
848 * Check if it is our interrupt
849 */
850 status = readl(&regs->outbound_intr_status);
851
852 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
853 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
854 }
855 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
856 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
857 }
858
859 /*
860 * Clear the interrupt by writing back the same value
861 */
862 if (mfiStatus)
863 writel(status, &regs->outbound_doorbell_clear);
864
865 /* Dummy readl to force pci flush */
866 readl(&regs->outbound_intr_status);
867
868 return mfiStatus;
869 }
870 /**
871 * megasas_fire_cmd_gen2 - Sends command to the FW
872 * @frame_phys_addr : Physical address of cmd
873 * @frame_count : Number of frames for the command
874 * @regs : MFI register set
875 */
876 static inline void
877 megasas_fire_cmd_gen2(struct megasas_instance *instance,
878 dma_addr_t frame_phys_addr,
879 u32 frame_count,
880 struct megasas_register_set __iomem *regs)
881 {
882 unsigned long flags;
883
884 spin_lock_irqsave(&instance->hba_lock, flags);
885 writel((frame_phys_addr | (frame_count<<1))|1,
886 &(regs)->inbound_queue_port);
887 spin_unlock_irqrestore(&instance->hba_lock, flags);
888 }
889
890 /**
891 * megasas_adp_reset_gen2 - For controller reset
892 * @regs: MFI register set
893 */
894 static int
895 megasas_adp_reset_gen2(struct megasas_instance *instance,
896 struct megasas_register_set __iomem *reg_set)
897 {
898 u32 retry = 0;
899 u32 HostDiag;
900 u32 __iomem *seq_offset = &reg_set->seq_offset;
901 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
902
903 if (instance->instancet == &megasas_instance_template_skinny) {
904 seq_offset = &reg_set->fusion_seq_offset;
905 hostdiag_offset = &reg_set->fusion_host_diag;
906 }
907
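	/*
	 * Write the key sequence that unlocks the host diagnostic register
	 * for writing; the loop below waits for the controller to report
	 * DIAG_WRITE_ENABLE before the reset bit is set.
	 */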
908 writel(0, seq_offset);
909 writel(4, seq_offset);
910 writel(0xb, seq_offset);
911 writel(2, seq_offset);
912 writel(7, seq_offset);
913 writel(0xd, seq_offset);
914
915 msleep(1000);
916
917 HostDiag = (u32)readl(hostdiag_offset);
918
919 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
920 msleep(100);
921 HostDiag = (u32)readl(hostdiag_offset);
922 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
923 retry, HostDiag);
924
925 if (retry++ >= 100)
926 return 1;
927
928 }
929
930 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
931
932 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
933
934 ssleep(10);
935
936 HostDiag = (u32)readl(hostdiag_offset);
937 while (HostDiag & DIAG_RESET_ADAPTER) {
938 msleep(100);
939 HostDiag = (u32)readl(hostdiag_offset);
940 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
941 retry, HostDiag);
942
943 if (retry++ >= 1000)
944 return 1;
945
946 }
947 return 0;
948 }
949
950 /**
951 * megasas_check_reset_gen2 - For controller reset check
952 * @regs: MFI register set
953 */
954 static int
955 megasas_check_reset_gen2(struct megasas_instance *instance,
956 struct megasas_register_set __iomem *regs)
957 {
958 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
959 return 1;
960
961 return 0;
962 }
963
964 static struct megasas_instance_template megasas_instance_template_gen2 = {
965
966 .fire_cmd = megasas_fire_cmd_gen2,
967 .enable_intr = megasas_enable_intr_gen2,
968 .disable_intr = megasas_disable_intr_gen2,
969 .clear_intr = megasas_clear_intr_gen2,
970 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
971 .adp_reset = megasas_adp_reset_gen2,
972 .check_reset = megasas_check_reset_gen2,
973 .service_isr = megasas_isr,
974 .tasklet = megasas_complete_cmd_dpc,
975 .init_adapter = megasas_init_adapter_mfi,
976 .build_and_issue_cmd = megasas_build_and_issue_cmd,
977 .issue_dcmd = megasas_issue_dcmd,
978 };
979
980 /**
981 * This is the end of set of functions & definitions
982 * specific to gen2 (deviceid : 0x78, 0x79) controllers
983 */
984
985 /*
986 * Template added for TB (Fusion)
987 */
988 extern struct megasas_instance_template megasas_instance_template_fusion;
989
990 /**
991 * megasas_issue_polled - Issues a polling command
992 * @instance: Adapter soft state
993 * @cmd: Command packet to be issued
994 *
995 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
996 */
997 int
998 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
999 {
1000 struct megasas_header *frame_hdr = &cmd->frame->hdr;
1001
1002 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1003 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1004
1005 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1006 (instance->instancet->issue_dcmd(instance, cmd))) {
1007 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1008 __func__, __LINE__);
1009 return DCMD_NOT_FIRED;
1010 }
1011
1012 return wait_and_poll(instance, cmd, instance->requestorId ?
1013 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1014 }
1015
1016 /**
1017 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1018 * @instance: Adapter soft state
1019 * @cmd: Command to be issued
1020 * @timeout: Timeout in seconds
1021 *
1022 * This function waits on an event for the command to be completed by the ISR.
1023 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1024 * Used to issue ioctl commands.
1025 */
1026 int
1027 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1028 struct megasas_cmd *cmd, int timeout)
1029 {
1030 int ret = 0;
1031 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1032
1033 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1034 (instance->instancet->issue_dcmd(instance, cmd))) {
1035 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1036 __func__, __LINE__);
1037 return DCMD_NOT_FIRED;
1038 }
1039
1040 if (timeout) {
1041 ret = wait_event_timeout(instance->int_cmd_wait_q,
1042 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1043 if (!ret) {
1044 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1045 __func__, __LINE__);
1046 return DCMD_TIMEOUT;
1047 }
1048 } else
1049 wait_event(instance->int_cmd_wait_q,
1050 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1051
1052 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1053 DCMD_SUCCESS : DCMD_FAILED;
1054 }
1055
1056 /**
1057 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1058 * @instance: Adapter soft state
1059 * @cmd_to_abort: Previously issued cmd to be aborted
1060 * @timeout: Timeout in seconds
1061 *
1062 * MFI firmware can abort a previously issued AEN command (automatic event
1063 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1064 * cmd and waits for return status.
1065 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1066 */
1067 static int
1068 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1069 struct megasas_cmd *cmd_to_abort, int timeout)
1070 {
1071 struct megasas_cmd *cmd;
1072 struct megasas_abort_frame *abort_fr;
1073 int ret = 0;
1074
1075 cmd = megasas_get_cmd(instance);
1076
1077 if (!cmd)
1078 return -1;
1079
1080 abort_fr = &cmd->frame->abort;
1081
1082 /*
1083 * Prepare and issue the abort frame
1084 */
1085 abort_fr->cmd = MFI_CMD_ABORT;
1086 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1087 abort_fr->flags = cpu_to_le16(0);
1088 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1089 abort_fr->abort_mfi_phys_addr_lo =
1090 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1091 abort_fr->abort_mfi_phys_addr_hi =
1092 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1093
1094 cmd->sync_cmd = 1;
1095 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1096
1097 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1098 (instance->instancet->issue_dcmd(instance, cmd))) {
1099 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1100 __func__, __LINE__);
1101 return DCMD_NOT_FIRED;
1102 }
1103
1104 if (timeout) {
1105 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1106 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1107 if (!ret) {
1108 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1109 __func__, __LINE__);
1110 return DCMD_TIMEOUT;
1111 }
1112 } else
1113 wait_event(instance->abort_cmd_wait_q,
1114 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1115
1116 	cmd->sync_cmd = 0;
1117 	ret = (cmd->cmd_status_drv == MFI_STAT_OK) ?
1118 			DCMD_SUCCESS : DCMD_FAILED;
1119 	megasas_return_cmd(instance, cmd);
1120 	return ret;
1121 }
1122
1123 /**
1124 * megasas_make_sgl32 - Prepares 32-bit SGL
1125 * @instance: Adapter soft state
1126 * @scp: SCSI command from the mid-layer
1127 * @mfi_sgl: SGL to be filled in
1128 *
1129 * If successful, this function returns the number of SG elements. Otherwise,
1130 * it returns -1.
1131 */
1132 static int
1133 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1134 union megasas_sgl *mfi_sgl)
1135 {
1136 int i;
1137 int sge_count;
1138 struct scatterlist *os_sgl;
1139
1140 sge_count = scsi_dma_map(scp);
1141 BUG_ON(sge_count < 0);
1142
1143 if (sge_count) {
1144 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1145 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1146 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1147 }
1148 }
1149 return sge_count;
1150 }
1151
1152 /**
1153 * megasas_make_sgl64 - Prepares 64-bit SGL
1154 * @instance: Adapter soft state
1155 * @scp: SCSI command from the mid-layer
1156 * @mfi_sgl: SGL to be filled in
1157 *
1158 * If successful, this function returns the number of SG elements. Otherwise,
1159 * it returns -1.
1160 */
1161 static int
1162 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1163 union megasas_sgl *mfi_sgl)
1164 {
1165 int i;
1166 int sge_count;
1167 struct scatterlist *os_sgl;
1168
1169 sge_count = scsi_dma_map(scp);
1170 BUG_ON(sge_count < 0);
1171
1172 if (sge_count) {
1173 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1174 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1175 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1176 }
1177 }
1178 return sge_count;
1179 }
1180
1181 /**
1182 * megasas_make_sgl_skinny - Prepares IEEE SGL
1183 * @instance: Adapter soft state
1184 * @scp: SCSI command from the mid-layer
1185 * @mfi_sgl: SGL to be filled in
1186 *
1187 * If successful, this function returns the number of SG elements. Otherwise,
1188 * it returns -1.
1189 */
1190 static int
1191 megasas_make_sgl_skinny(struct megasas_instance *instance,
1192 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1193 {
1194 int i;
1195 int sge_count;
1196 struct scatterlist *os_sgl;
1197
1198 sge_count = scsi_dma_map(scp);
1199
1200 if (sge_count) {
1201 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1202 mfi_sgl->sge_skinny[i].length =
1203 cpu_to_le32(sg_dma_len(os_sgl));
1204 mfi_sgl->sge_skinny[i].phys_addr =
1205 cpu_to_le64(sg_dma_address(os_sgl));
1206 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1207 }
1208 }
1209 return sge_count;
1210 }
1211
1212 /**
1213 * megasas_get_frame_count - Computes the number of frames
1214 * @frame_type : type of frame- io or pthru frame
1215 * @sge_count : number of sg elements
1216 *
1217 * Returns the number of frames required for the given number of SGEs (sge_count)
1218 */
1219
1220 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1221 u8 sge_count, u8 frame_type)
1222 {
1223 int num_cnt;
1224 int sge_bytes;
1225 u32 sge_sz;
1226 u32 frame_count = 0;
1227
1228 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1229 sizeof(struct megasas_sge32);
1230
1231 if (instance->flag_ieee) {
1232 sge_sz = sizeof(struct megasas_sge_skinny);
1233 }
1234
1235 /*
1236 * Main frame can contain 2 SGEs for 64-bit SGLs and
1237 * 3 SGEs for 32-bit SGLs for ldio &
1238 * 1 SGEs for 64-bit SGLs and
1239 * 2 SGEs for 32-bit SGLs for pthru frame
1240 */
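	/*
	 * Worked example (assuming 12-byte 64-bit SGEs and 64-byte MFI
	 * frames): an ldio with 10 SGEs has 8 SGEs that do not fit in the
	 * main frame, i.e. 96 bytes, which rounds up to 2 extra frames and
	 * a frame_count of 3.
	 */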
1241 if (unlikely(frame_type == PTHRU_FRAME)) {
1242 if (instance->flag_ieee == 1) {
1243 num_cnt = sge_count - 1;
1244 } else if (IS_DMA64)
1245 num_cnt = sge_count - 1;
1246 else
1247 num_cnt = sge_count - 2;
1248 } else {
1249 if (instance->flag_ieee == 1) {
1250 num_cnt = sge_count - 1;
1251 } else if (IS_DMA64)
1252 num_cnt = sge_count - 2;
1253 else
1254 num_cnt = sge_count - 3;
1255 }
1256
1257 if (num_cnt > 0) {
1258 sge_bytes = sge_sz * num_cnt;
1259
1260 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1261 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1262 }
1263 /* Main frame */
1264 frame_count += 1;
1265
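	/* The driver never uses more than 8 frames per command */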
1266 if (frame_count > 7)
1267 frame_count = 8;
1268 return frame_count;
1269 }
1270
1271 /**
1272 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1273 * @instance: Adapter soft state
1274 * @scp: SCSI command
1275 * @cmd: Command to be prepared in
1276 *
1277 * This function prepares CDB commands. These are typically pass-through
1278 * commands to the devices.
1279 */
1280 static int
1281 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1282 struct megasas_cmd *cmd)
1283 {
1284 u32 is_logical;
1285 u32 device_id;
1286 u16 flags = 0;
1287 struct megasas_pthru_frame *pthru;
1288
1289 is_logical = MEGASAS_IS_LOGICAL(scp);
1290 device_id = MEGASAS_DEV_INDEX(scp);
1291 pthru = (struct megasas_pthru_frame *)cmd->frame;
1292
1293 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1294 flags = MFI_FRAME_DIR_WRITE;
1295 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1296 flags = MFI_FRAME_DIR_READ;
1297 else if (scp->sc_data_direction == PCI_DMA_NONE)
1298 flags = MFI_FRAME_DIR_NONE;
1299
1300 if (instance->flag_ieee == 1) {
1301 flags |= MFI_FRAME_IEEE;
1302 }
1303
1304 /*
1305 * Prepare the DCDB frame
1306 */
1307 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1308 pthru->cmd_status = 0x0;
1309 pthru->scsi_status = 0x0;
1310 pthru->target_id = device_id;
1311 pthru->lun = scp->device->lun;
1312 pthru->cdb_len = scp->cmd_len;
1313 pthru->timeout = 0;
1314 pthru->pad_0 = 0;
1315 pthru->flags = cpu_to_le16(flags);
1316 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1317
1318 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1319
1320 /*
1321 * If the command is for the tape device, set the
1322 * pthru timeout to the os layer timeout value.
1323 */
1324 if (scp->device->type == TYPE_TAPE) {
1325 if ((scp->request->timeout / HZ) > 0xFFFF)
1326 pthru->timeout = cpu_to_le16(0xFFFF);
1327 else
1328 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1329 }
1330
1331 /*
1332 * Construct SGL
1333 */
1334 if (instance->flag_ieee == 1) {
1335 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1336 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1337 &pthru->sgl);
1338 } else if (IS_DMA64) {
1339 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1340 pthru->sge_count = megasas_make_sgl64(instance, scp,
1341 &pthru->sgl);
1342 } else
1343 pthru->sge_count = megasas_make_sgl32(instance, scp,
1344 &pthru->sgl);
1345
1346 if (pthru->sge_count > instance->max_num_sge) {
1347 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1348 pthru->sge_count);
1349 return 0;
1350 }
1351
1352 /*
1353 * Sense info specific
1354 */
1355 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1356 pthru->sense_buf_phys_addr_hi =
1357 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1358 pthru->sense_buf_phys_addr_lo =
1359 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1360
1361 /*
1362 * Compute the total number of frames this command consumes. FW uses
1363 * this number to pull sufficient number of frames from host memory.
1364 */
1365 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1366 PTHRU_FRAME);
1367
1368 return cmd->frame_count;
1369 }
1370
1371 /**
1372 * megasas_build_ldio - Prepares IOs to logical devices
1373 * @instance: Adapter soft state
1374 * @scp: SCSI command
1375 * @cmd: Command to be prepared
1376 *
1377 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1378 */
1379 static int
1380 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1381 struct megasas_cmd *cmd)
1382 {
1383 u32 device_id;
1384 u8 sc = scp->cmnd[0];
1385 u16 flags = 0;
1386 struct megasas_io_frame *ldio;
1387
1388 device_id = MEGASAS_DEV_INDEX(scp);
1389 ldio = (struct megasas_io_frame *)cmd->frame;
1390
1391 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1392 flags = MFI_FRAME_DIR_WRITE;
1393 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1394 flags = MFI_FRAME_DIR_READ;
1395
1396 if (instance->flag_ieee == 1) {
1397 flags |= MFI_FRAME_IEEE;
1398 }
1399
1400 /*
1401 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1402 */
1403 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1404 ldio->cmd_status = 0x0;
1405 ldio->scsi_status = 0x0;
1406 ldio->target_id = device_id;
1407 ldio->timeout = 0;
1408 ldio->reserved_0 = 0;
1409 ldio->pad_0 = 0;
1410 ldio->flags = cpu_to_le16(flags);
1411 ldio->start_lba_hi = 0;
1412 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1413
1414 /*
1415 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1416 */
1417 if (scp->cmd_len == 6) {
1418 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1419 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1420 ((u32) scp->cmnd[2] << 8) |
1421 (u32) scp->cmnd[3]);
1422
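		/* A 6-byte CDB carries only a 21-bit LBA; mask off the rest */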
1423 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1424 }
1425
1426 /*
1427 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1428 */
1429 else if (scp->cmd_len == 10) {
1430 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1431 ((u32) scp->cmnd[7] << 8));
1432 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1433 ((u32) scp->cmnd[3] << 16) |
1434 ((u32) scp->cmnd[4] << 8) |
1435 (u32) scp->cmnd[5]);
1436 }
1437
1438 /*
1439 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1440 */
1441 else if (scp->cmd_len == 12) {
1442 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1443 ((u32) scp->cmnd[7] << 16) |
1444 ((u32) scp->cmnd[8] << 8) |
1445 (u32) scp->cmnd[9]);
1446
1447 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1448 ((u32) scp->cmnd[3] << 16) |
1449 ((u32) scp->cmnd[4] << 8) |
1450 (u32) scp->cmnd[5]);
1451 }
1452
1453 /*
1454 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1455 */
1456 else if (scp->cmd_len == 16) {
1457 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1458 ((u32) scp->cmnd[11] << 16) |
1459 ((u32) scp->cmnd[12] << 8) |
1460 (u32) scp->cmnd[13]);
1461
1462 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1463 ((u32) scp->cmnd[7] << 16) |
1464 ((u32) scp->cmnd[8] << 8) |
1465 (u32) scp->cmnd[9]);
1466
1467 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1468 ((u32) scp->cmnd[3] << 16) |
1469 ((u32) scp->cmnd[4] << 8) |
1470 (u32) scp->cmnd[5]);
1471
1472 }
1473
1474 /*
1475 * Construct SGL
1476 */
1477 if (instance->flag_ieee) {
1478 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1479 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1480 &ldio->sgl);
1481 } else if (IS_DMA64) {
1482 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1483 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1484 } else
1485 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1486
1487 if (ldio->sge_count > instance->max_num_sge) {
1488 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1489 ldio->sge_count);
1490 return 0;
1491 }
1492
1493 /*
1494 * Sense info specific
1495 */
1496 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1497 ldio->sense_buf_phys_addr_hi = 0;
1498 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1499
1500 /*
1501 * Compute the total number of frames this command consumes. FW uses
1502 * this number to pull sufficient number of frames from host memory.
1503 */
1504 cmd->frame_count = megasas_get_frame_count(instance,
1505 ldio->sge_count, IO_FRAME);
1506
1507 return cmd->frame_count;
1508 }
1509
1510 /**
1511 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1512 * and whether it's RW or non RW
1513 * @cmd: SCSI command
1514 *
1515 */
1516 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1517 {
1518 int ret;
1519
1520 switch (cmd->cmnd[0]) {
1521 case READ_10:
1522 case WRITE_10:
1523 case READ_12:
1524 case WRITE_12:
1525 case READ_6:
1526 case WRITE_6:
1527 case READ_16:
1528 case WRITE_16:
1529 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1530 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1531 break;
1532 default:
1533 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1534 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1535 }
1536 return ret;
1537 }
1538
1539 /**
1540 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1541 * in FW
1542 * @instance: Adapter soft state
1543 */
1544 static inline void
1545 megasas_dump_pending_frames(struct megasas_instance *instance)
1546 {
1547 struct megasas_cmd *cmd;
1548 int i, n;
1549 union megasas_sgl *mfi_sgl;
1550 struct megasas_io_frame *ldio;
1551 struct megasas_pthru_frame *pthru;
1552 u32 sgcount;
1553 u32 max_cmd = instance->max_fw_cmds;
1554
1555 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1556 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1557 if (IS_DMA64)
1558 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1559 else
1560 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1561
1562 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1563 for (i = 0; i < max_cmd; i++) {
1564 cmd = instance->cmd_list[i];
1565 if (!cmd->scmd)
1566 continue;
1567 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1568 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1569 ldio = (struct megasas_io_frame *)cmd->frame;
1570 mfi_sgl = &ldio->sgl;
1571 sgcount = ldio->sge_count;
1572 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1573 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1574 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1575 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1576 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1577 } else {
1578 pthru = (struct megasas_pthru_frame *) cmd->frame;
1579 mfi_sgl = &pthru->sgl;
1580 sgcount = pthru->sge_count;
1581 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1582 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1583 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1584 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1585 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1586 }
1587 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1588 for (n = 0; n < sgcount; n++) {
1589 if (IS_DMA64)
1590 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1591 le32_to_cpu(mfi_sgl->sge64[n].length),
1592 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1593 else
1594 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1595 le32_to_cpu(mfi_sgl->sge32[n].length),
1596 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1597 }
1598 }
1599 } /*for max_cmd*/
1600 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1601 for (i = 0; i < max_cmd; i++) {
1602
1603 cmd = instance->cmd_list[i];
1604
1605 if (cmd->sync_cmd == 1)
1606 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1607 }
1608 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1609 }
1610
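/**
 * megasas_build_and_issue_cmd -	Builds an MFI frame for a SCSI command
 *					and posts it to the firmware
 * @instance:				Adapter soft state
 * @scmd:				SCSI command from the mid-layer
 *
 * Returns 0 on success, or SCSI_MLQUEUE_HOST_BUSY if no free MFI command
 * is available or the frame could not be built.
 */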
1611 u32
1612 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1613 struct scsi_cmnd *scmd)
1614 {
1615 struct megasas_cmd *cmd;
1616 u32 frame_count;
1617
1618 cmd = megasas_get_cmd(instance);
1619 if (!cmd)
1620 return SCSI_MLQUEUE_HOST_BUSY;
1621
1622 /*
1623 * Logical drive command
1624 */
1625 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1626 frame_count = megasas_build_ldio(instance, scmd, cmd);
1627 else
1628 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1629
1630 if (!frame_count)
1631 goto out_return_cmd;
1632
1633 cmd->scmd = scmd;
1634 scmd->SCp.ptr = (char *)cmd;
1635
1636 /*
1637 * Issue the command to the FW
1638 */
1639 atomic_inc(&instance->fw_outstanding);
1640
1641 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1642 cmd->frame_count-1, instance->reg_set);
1643
1644 return 0;
1645 out_return_cmd:
1646 megasas_return_cmd(instance, cmd);
1647 return SCSI_MLQUEUE_HOST_BUSY;
1648 }
1649
1650
1651 /**
1652 * megasas_queue_command - Queue entry point
1653 * @scmd: SCSI command to be queued
1654 * @done: Callback entry point
1655 */
1656 static int
1657 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1658 {
1659 struct megasas_instance *instance;
1660 struct MR_PRIV_DEVICE *mr_device_priv_data;
1661
1662 instance = (struct megasas_instance *)
1663 scmd->device->host->hostdata;
1664
1665 if (instance->unload == 1) {
1666 scmd->result = DID_NO_CONNECT << 16;
1667 scmd->scsi_done(scmd);
1668 return 0;
1669 }
1670
1671 if (instance->issuepend_done == 0)
1672 return SCSI_MLQUEUE_HOST_BUSY;
1673
1674
1675 /* Check for an mpio path and adjust behavior */
1676 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1677 if (megasas_check_mpio_paths(instance, scmd) ==
1678 (DID_RESET << 16)) {
1679 return SCSI_MLQUEUE_HOST_BUSY;
1680 } else {
1681 scmd->result = DID_NO_CONNECT << 16;
1682 scmd->scsi_done(scmd);
1683 return 0;
1684 }
1685 }
1686
1687 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1688 scmd->result = DID_NO_CONNECT << 16;
1689 scmd->scsi_done(scmd);
1690 return 0;
1691 }
1692
1693 mr_device_priv_data = scmd->device->hostdata;
1694 if (!mr_device_priv_data) {
1695 scmd->result = DID_NO_CONNECT << 16;
1696 scmd->scsi_done(scmd);
1697 return 0;
1698 }
1699
1700 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1701 return SCSI_MLQUEUE_HOST_BUSY;
1702
1703 if (mr_device_priv_data->tm_busy)
1704 return SCSI_MLQUEUE_DEVICE_BUSY;
1705
1706
1707 scmd->result = 0;
1708
1709 if (MEGASAS_IS_LOGICAL(scmd) &&
1710 (scmd->device->id >= instance->fw_supported_vd_count ||
1711 scmd->device->lun)) {
1712 scmd->result = DID_BAD_TARGET << 16;
1713 goto out_done;
1714 }
1715
1716 switch (scmd->cmnd[0]) {
1717 case SYNCHRONIZE_CACHE:
1718 /*
1719 * FW takes care of flush cache on its own
1720 * No need to send it down
1721 */
1722 scmd->result = DID_OK << 16;
1723 goto out_done;
1724 default:
1725 break;
1726 }
1727
1728 return instance->instancet->build_and_issue_cmd(instance, scmd);
1729
1730 out_done:
1731 scmd->scsi_done(scmd);
1732 return 0;
1733 }
1734
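/*
 * megasas_lookup_instance -	Map a SCSI host number to its adapter soft
 *				state; returns NULL if no match is found.
 */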
1735 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1736 {
1737 int i;
1738
1739 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1740
1741 if ((megasas_mgmt_info.instance[i]) &&
1742 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1743 return megasas_mgmt_info.instance[i];
1744 }
1745
1746 return NULL;
1747 }
1748
1749 /*
1750 * megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
1751 *
1752 * @sdev: OS provided scsi device
1753 *
1754 * Returns void
1755 */
1756 void megasas_update_sdev_properties(struct scsi_device *sdev)
1757 {
1758 u16 pd_index = 0;
1759 u32 device_id, ld;
1760 struct megasas_instance *instance;
1761 struct fusion_context *fusion;
1762 struct MR_PRIV_DEVICE *mr_device_priv_data;
1763 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1764 struct MR_LD_RAID *raid;
1765 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1766
1767 instance = megasas_lookup_instance(sdev->host->host_no);
1768 fusion = instance->ctrl_context;
1769 mr_device_priv_data = sdev->hostdata;
1770
1771 if (!fusion)
1772 return;
1773
1774 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1775 instance->use_seqnum_jbod_fp) {
1776 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1777 sdev->id;
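		/*
		 * pd_seq_sync is double buffered; (pd_seq_map_id - 1) & 1
		 * picks the most recently populated copy.
		 */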
1778 pd_sync = (void *)fusion->pd_seq_sync
1779 [(instance->pd_seq_map_id - 1) & 1];
1780 mr_device_priv_data->is_tm_capable =
1781 pd_sync->seq[pd_index].capability.tmCapable;
1782 } else {
1783 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1784 + sdev->id;
1785 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1786 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1787 raid = MR_LdRaidGet(ld, local_map_ptr);
1788
1789 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1790 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1791 mr_device_priv_data->is_tm_capable =
1792 raid->capability.tmCapable;
1793 }
1794 }
1795
1796 static void megasas_set_device_queue_depth(struct scsi_device *sdev)
1797 {
1798 u16 pd_index = 0;
1799 int ret = DCMD_FAILED;
1800 struct megasas_instance *instance;
1801
1802 instance = megasas_lookup_instance(sdev->host->host_no);
1803
1804 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1805 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1806
1807 if (instance->pd_info) {
1808 mutex_lock(&instance->hba_mutex);
1809 ret = megasas_get_pd_info(instance, pd_index);
1810 mutex_unlock(&instance->hba_mutex);
1811 }
1812
1813 if (ret != DCMD_SUCCESS)
1814 return;
1815
1816 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
1817
1818 switch (instance->pd_list[pd_index].interface) {
1819 case SAS_PD:
1820 scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
1821 break;
1822
1823 case SATA_PD:
1824 scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
1825 break;
1826
1827 default:
1828 scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
1829 }
1830 }
1831 }
1832 }
1833
1834
1835 static int megasas_slave_configure(struct scsi_device *sdev)
1836 {
1837 u16 pd_index = 0;
1838 struct megasas_instance *instance;
1839
1840 instance = megasas_lookup_instance(sdev->host->host_no);
1841 if (instance->allow_fw_scan) {
1842 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1843 sdev->type == TYPE_DISK) {
1844 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1845 sdev->id;
1846 if (instance->pd_list[pd_index].driveState !=
1847 MR_PD_STATE_SYSTEM)
1848 return -ENXIO;
1849 }
1850 }
1851 megasas_set_device_queue_depth(sdev);
1852 megasas_update_sdev_properties(sdev);
1853
1854 /*
1855 * The RAID firmware may require extended timeouts.
1856 */
1857 blk_queue_rq_timeout(sdev->request_queue,
1858 scmd_timeout * HZ);
1859
1860 return 0;
1861 }
1862
1863 static int megasas_slave_alloc(struct scsi_device *sdev)
1864 {
1865 u16 pd_index = 0;
1866 struct megasas_instance *instance;
1867 struct MR_PRIV_DEVICE *mr_device_priv_data;
1868
1869 instance = megasas_lookup_instance(sdev->host->host_no);
1870 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1871 /*
1872 * Expose only SYSTEM class PDs to the OS scan
1873 */
1874 pd_index =
1875 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1876 sdev->id;
1877 if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
1878 MR_PD_STATE_SYSTEM)) {
1879 goto scan_target;
1880 }
1881 return -ENXIO;
1882 }
1883
1884 scan_target:
1885 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1886 GFP_KERNEL);
1887 if (!mr_device_priv_data)
1888 return -ENOMEM;
1889 sdev->hostdata = mr_device_priv_data;
1890 return 0;
1891 }
1892
1893 static void megasas_slave_destroy(struct scsi_device *sdev)
1894 {
1895 kfree(sdev->hostdata);
1896 sdev->hostdata = NULL;
1897 }
1898
1899 /*
1900 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
1901 * kill adapter
1902 * @instance: Adapter soft state
1903 *
1904 */
1905 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1906 {
1907 int i;
1908 struct megasas_cmd *cmd_mfi;
1909 struct megasas_cmd_fusion *cmd_fusion;
1910 struct fusion_context *fusion = instance->ctrl_context;
1911
1912 /* Find all outstanding ioctls */
1913 if (fusion) {
1914 for (i = 0; i < instance->max_fw_cmds; i++) {
1915 cmd_fusion = fusion->cmd_list[i];
1916 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1917 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1918 if (cmd_mfi->sync_cmd &&
1919 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1920 megasas_complete_cmd(instance,
1921 cmd_mfi, DID_OK);
1922 }
1923 }
1924 } else {
1925 for (i = 0; i < instance->max_fw_cmds; i++) {
1926 cmd_mfi = instance->cmd_list[i];
1927 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
1928 MFI_CMD_ABORT)
1929 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
1930 }
1931 }
1932 }
1933
1934
1935 void megaraid_sas_kill_hba(struct megasas_instance *instance)
1936 {
1937 /* Set critical error to block I/O & ioctls in case caller didn't */
1938 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
1939 /* Wait 1 second to ensure IOs or ioctls being built have been posted */
1940 msleep(1000);
1941 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1942 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1943 (instance->ctrl_context)) {
1944 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1945 /* Flush */
1946 readl(&instance->reg_set->doorbell);
1947 if (instance->requestorId && instance->peerIsPresent)
1948 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
1949 } else {
1950 writel(MFI_STOP_ADP,
1951 &instance->reg_set->inbound_doorbell);
1952 }
1953 /* Complete outstanding ioctls when adapter is killed */
1954 megasas_complete_outstanding_ioctls(instance);
1955 }
1956
1957 /**
1958 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
1959 * restored to max value
1960 * @instance: Adapter soft state
1961 *
1962 */
1963 void
1964 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
1965 {
1966 unsigned long flags;
1967
1968 if (instance->flag & MEGASAS_FW_BUSY
1969 && time_after(jiffies, instance->last_time + 5 * HZ)
1970 && atomic_read(&instance->fw_outstanding) <
1971 instance->throttlequeuedepth + 1) {
1972
1973 spin_lock_irqsave(instance->host->host_lock, flags);
1974 instance->flag &= ~MEGASAS_FW_BUSY;
1975
1976 instance->host->can_queue = instance->cur_can_queue;
1977 spin_unlock_irqrestore(instance->host->host_lock, flags);
1978 }
1979 }
1980
1981 /**
1982 * megasas_complete_cmd_dpc - Completes outstanding commands from the reply queue
1983 * @instance_addr: Address of adapter soft state
1984 *
1985 * Tasklet to complete cmds
1986 */
1987 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1988 {
1989 u32 producer;
1990 u32 consumer;
1991 u32 context;
1992 struct megasas_cmd *cmd;
1993 struct megasas_instance *instance =
1994 (struct megasas_instance *)instance_addr;
1995 unsigned long flags;
1996
1997 /* If we have already declared adapter dead, do not complete cmds */
1998 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
1999 return;
2000
2001 spin_lock_irqsave(&instance->completion_lock, flags);
2002
2003 producer = le32_to_cpu(*instance->producer);
2004 consumer = le32_to_cpu(*instance->consumer);
2005
2006 while (consumer != producer) {
2007 context = le32_to_cpu(instance->reply_queue[consumer]);
2008 if (context >= instance->max_fw_cmds) {
2009 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2010 context);
2011 BUG();
2012 }
2013
2014 cmd = instance->cmd_list[context];
2015
2016 megasas_complete_cmd(instance, cmd, DID_OK);
2017
2018 consumer++;
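/* The reply ring holds max_fw_cmds + 1 entries, so wrap the consumer
 * index at that boundary. */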
2019 if (consumer == (instance->max_fw_cmds + 1)) {
2020 consumer = 0;
2021 }
2022 }
2023
2024 *instance->consumer = cpu_to_le32(producer);
2025
2026 spin_unlock_irqrestore(&instance->completion_lock, flags);
2027
2028 /*
2029 * Check if we can restore can_queue
2030 */
2031 megasas_check_and_restore_queue_depth(instance);
2032 }
2033
2034 /**
2035 * megasas_start_timer - Initializes a timer object
2036 * @instance: Adapter soft state
2037 * @timer: timer object to be initialized
2038 * @fn: timer function
2039 * @interval: time interval between timer function call
2040 *
2041 */
2042 void megasas_start_timer(struct megasas_instance *instance,
2043 struct timer_list *timer,
2044 void *fn, unsigned long interval)
2045 {
2046 init_timer(timer);
2047 timer->expires = jiffies + interval;
2048 timer->data = (unsigned long)instance;
2049 timer->function = fn;
2050 add_timer(timer);
2051 }
2052
2053 static void
2054 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2055
2056 static void
2057 process_fw_state_change_wq(struct work_struct *work);
2058
2059 void megasas_do_ocr(struct megasas_instance *instance)
2060 {
2061 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2062 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2063 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2064 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2065 }
2066 instance->instancet->disable_intr(instance);
2067 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2068 instance->issuepend_done = 0;
2069
2070 atomic_set(&instance->fw_outstanding, 0);
2071 megasas_internal_reset_defer_cmds(instance);
2072 process_fw_state_change_wq(&instance->work_init);
2073 }
2074
2075 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2076 int initial)
2077 {
2078 struct megasas_cmd *cmd;
2079 struct megasas_dcmd_frame *dcmd;
2080 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2081 dma_addr_t new_affiliation_111_h;
2082 int ld, retval = 0;
2083 u8 thisVf;
2084
2085 cmd = megasas_get_cmd(instance);
2086
2087 if (!cmd) {
2088 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2089 "Failed to get cmd for scsi%d\n",
2090 instance->host->host_no);
2091 return -ENOMEM;
2092 }
2093
2094 dcmd = &cmd->frame->dcmd;
2095
2096 if (!instance->vf_affiliation_111) {
2097 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2098 "affiliation for scsi%d\n", instance->host->host_no);
2099 megasas_return_cmd(instance, cmd);
2100 return -ENOMEM;
2101 }
2102
2103 if (initial)
2104 memset(instance->vf_affiliation_111, 0,
2105 sizeof(struct MR_LD_VF_AFFILIATION_111));
2106 else {
2107 new_affiliation_111 =
2108 pci_alloc_consistent(instance->pdev,
2109 sizeof(struct MR_LD_VF_AFFILIATION_111),
2110 &new_affiliation_111_h);
2111 if (!new_affiliation_111) {
2112 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2113 "memory for new affiliation for scsi%d\n",
2114 instance->host->host_no);
2115 megasas_return_cmd(instance, cmd);
2116 return -ENOMEM;
2117 }
2118 memset(new_affiliation_111, 0,
2119 sizeof(struct MR_LD_VF_AFFILIATION_111));
2120 }
2121
2122 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2123
2124 dcmd->cmd = MFI_CMD_DCMD;
2125 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2126 dcmd->sge_count = 1;
2127 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2128 dcmd->timeout = 0;
2129 dcmd->pad_0 = 0;
2130 dcmd->data_xfer_len =
2131 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2132 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2133
2134 if (initial)
2135 dcmd->sgl.sge32[0].phys_addr =
2136 cpu_to_le32(instance->vf_affiliation_111_h);
2137 else
2138 dcmd->sgl.sge32[0].phys_addr =
2139 cpu_to_le32(new_affiliation_111_h);
2140
2141 dcmd->sgl.sge32[0].length = cpu_to_le32(
2142 sizeof(struct MR_LD_VF_AFFILIATION_111));
2143
2144 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2145 "scsi%d\n", instance->host->host_no);
2146
2147 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2148 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2149 " failed with status 0x%x for scsi%d\n",
2150 dcmd->cmd_status, instance->host->host_no);
2151 retval = 1; /* Do a scan if we couldn't get affiliation */
2152 goto out;
2153 }
2154
2155 if (!initial) {
2156 thisVf = new_affiliation_111->thisVf;
2157 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2158 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2159 new_affiliation_111->map[ld].policy[thisVf]) {
2160 dev_warn(&instance->pdev->dev, "SR-IOV: "
2161 "Got new LD/VF affiliation for scsi%d\n",
2162 instance->host->host_no);
2163 memcpy(instance->vf_affiliation_111,
2164 new_affiliation_111,
2165 sizeof(struct MR_LD_VF_AFFILIATION_111));
2166 retval = 1;
2167 goto out;
2168 }
2169 }
2170 out:
2171 if (new_affiliation_111) {
2172 pci_free_consistent(instance->pdev,
2173 sizeof(struct MR_LD_VF_AFFILIATION_111),
2174 new_affiliation_111,
2175 new_affiliation_111_h);
2176 }
2177
2178 megasas_return_cmd(instance, cmd);
2179
2180 return retval;
2181 }
2182
2183 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2184 int initial)
2185 {
2186 struct megasas_cmd *cmd;
2187 struct megasas_dcmd_frame *dcmd;
2188 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2189 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2190 dma_addr_t new_affiliation_h;
2191 int i, j, retval = 0, found = 0, doscan = 0;
2192 u8 thisVf;
2193
2194 cmd = megasas_get_cmd(instance);
2195
2196 if (!cmd) {
2197 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2198 "Failed to get cmd for scsi%d\n",
2199 instance->host->host_no);
2200 return -ENOMEM;
2201 }
2202
2203 dcmd = &cmd->frame->dcmd;
2204
2205 if (!instance->vf_affiliation) {
2206 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2207 "affiliation for scsi%d\n", instance->host->host_no);
2208 megasas_return_cmd(instance, cmd);
2209 return -ENOMEM;
2210 }
2211
2212 if (initial)
2213 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2214 sizeof(struct MR_LD_VF_AFFILIATION));
2215 else {
2216 new_affiliation =
2217 pci_alloc_consistent(instance->pdev,
2218 (MAX_LOGICAL_DRIVES + 1) *
2219 sizeof(struct MR_LD_VF_AFFILIATION),
2220 &new_affiliation_h);
2221 if (!new_affiliation) {
2222 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2223 "memory for new affiliation for scsi%d\n",
2224 instance->host->host_no);
2225 megasas_return_cmd(instance, cmd);
2226 return -ENOMEM;
2227 }
2228 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2229 sizeof(struct MR_LD_VF_AFFILIATION));
2230 }
2231
2232 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2233
2234 dcmd->cmd = MFI_CMD_DCMD;
2235 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2236 dcmd->sge_count = 1;
2237 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2238 dcmd->timeout = 0;
2239 dcmd->pad_0 = 0;
2240 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2241 sizeof(struct MR_LD_VF_AFFILIATION));
2242 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2243
2244 if (initial)
2245 dcmd->sgl.sge32[0].phys_addr =
2246 cpu_to_le32(instance->vf_affiliation_h);
2247 else
2248 dcmd->sgl.sge32[0].phys_addr =
2249 cpu_to_le32(new_affiliation_h);
2250
2251 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2252 sizeof(struct MR_LD_VF_AFFILIATION));
2253
2254 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2255 "scsi%d\n", instance->host->host_no);
2256
2257
2258 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2259 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2260 " failed with status 0x%x for scsi%d\n",
2261 dcmd->cmd_status, instance->host->host_no);
2262 retval = 1; /* Do a scan if we couldn't get affiliation */
2263 goto out;
2264 }
2265
2266 if (!initial) {
2267 if (!new_affiliation->ldCount) {
2268 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2269 "affiliation for passive path for scsi%d\n",
2270 instance->host->host_no);
2271 retval = 1;
2272 goto out;
2273 }
2274 newmap = new_affiliation->map;
2275 savedmap = instance->vf_affiliation->map;
2276 thisVf = new_affiliation->thisVf;
2277 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2278 found = 0;
2279 for (j = 0; j < instance->vf_affiliation->ldCount;
2280 j++) {
2281 if (newmap->ref.targetId ==
2282 savedmap->ref.targetId) {
2283 found = 1;
2284 if (newmap->policy[thisVf] !=
2285 savedmap->policy[thisVf]) {
2286 doscan = 1;
2287 goto out;
2288 }
2289 }
2290 savedmap = (struct MR_LD_VF_MAP *)
2291 ((unsigned char *)savedmap +
2292 savedmap->size);
2293 }
2294 if (!found && newmap->policy[thisVf] !=
2295 MR_LD_ACCESS_HIDDEN) {
2296 doscan = 1;
2297 goto out;
2298 }
2299 newmap = (struct MR_LD_VF_MAP *)
2300 ((unsigned char *)newmap + newmap->size);
2301 }
2302
2303 newmap = new_affiliation->map;
2304 savedmap = instance->vf_affiliation->map;
2305
2306 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2307 found = 0;
2308 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2309 if (savedmap->ref.targetId ==
2310 newmap->ref.targetId) {
2311 found = 1;
2312 if (savedmap->policy[thisVf] !=
2313 newmap->policy[thisVf]) {
2314 doscan = 1;
2315 goto out;
2316 }
2317 }
2318 newmap = (struct MR_LD_VF_MAP *)
2319 ((unsigned char *)newmap +
2320 newmap->size);
2321 }
2322 if (!found && savedmap->policy[thisVf] !=
2323 MR_LD_ACCESS_HIDDEN) {
2324 doscan = 1;
2325 goto out;
2326 }
2327 savedmap = (struct MR_LD_VF_MAP *)
2328 ((unsigned char *)savedmap +
2329 savedmap->size);
2330 }
2331 }
2332 out:
2333 if (doscan) {
2334 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2335 "affiliation for scsi%d\n", instance->host->host_no);
2336 memcpy(instance->vf_affiliation, new_affiliation,
2337 new_affiliation->size);
2338 retval = 1;
2339 }
2340
2341 if (new_affiliation)
2342 pci_free_consistent(instance->pdev,
2343 (MAX_LOGICAL_DRIVES + 1) *
2344 sizeof(struct MR_LD_VF_AFFILIATION),
2345 new_affiliation, new_affiliation_h);
2346 megasas_return_cmd(instance, cmd);
2347
2348 return retval;
2349 }
2350
2351 /* This function will get the current SR-IOV LD/VF affiliation */
2352 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2353 int initial)
2354 {
2355 int retval;
2356
2357 if (instance->PlasmaFW111)
2358 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2359 else
2360 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2361 return retval;
2362 }
2363
2364 /* This function will tell FW to start the SR-IOV heartbeat */
2365 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2366 int initial)
2367 {
2368 struct megasas_cmd *cmd;
2369 struct megasas_dcmd_frame *dcmd;
2370 int retval = 0;
2371
2372 cmd = megasas_get_cmd(instance);
2373
2374 if (!cmd) {
2375 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2376 "Failed to get cmd for scsi%d\n",
2377 instance->host->host_no);
2378 return -ENOMEM;
2379 }
2380
2381 dcmd = &cmd->frame->dcmd;
2382
2383 if (initial) {
2384 instance->hb_host_mem =
2385 pci_zalloc_consistent(instance->pdev,
2386 sizeof(struct MR_CTRL_HB_HOST_MEM),
2387 &instance->hb_host_mem_h);
2388 if (!instance->hb_host_mem) {
2389 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2390 " memory for heartbeat host memory for scsi%d\n",
2391 instance->host->host_no);
2392 retval = -ENOMEM;
2393 goto out;
2394 }
2395 }
2396
2397 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2398
2399 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2400 dcmd->cmd = MFI_CMD_DCMD;
2401 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2402 dcmd->sge_count = 1;
2403 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2404 dcmd->timeout = 0;
2405 dcmd->pad_0 = 0;
2406 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2407 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2408 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2409 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2410
2411 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2412 instance->host->host_no);
2413
2414 if (instance->ctrl_context && !instance->mask_interrupts)
2415 retval = megasas_issue_blocked_cmd(instance, cmd,
2416 MEGASAS_ROUTINE_WAIT_TIME_VF);
2417 else
2418 retval = megasas_issue_polled(instance, cmd);
2419
2420 if (retval) {
2421 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2422 "_MEM_ALLOC DCMD %s for scsi%d\n",
2423 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2424 "timed out" : "failed", instance->host->host_no);
2425 retval = 1;
2426 }
2427
2428 out:
2429 megasas_return_cmd(instance, cmd);
2430
2431 return retval;
2432 }
2433
2434 /* Handler for SR-IOV heartbeat */
2435 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2436 {
2437 struct megasas_instance *instance =
2438 (struct megasas_instance *)instance_addr;
2439
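/* The FW advances fwCounter each heartbeat interval. If it has moved since
 * the last check, mirror it into driverCounter (which the FW presumably
 * reads as proof the driver is alive) and re-arm the timer; if it has not
 * moved, the FW missed a heartbeat and recovery work is scheduled. */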
2440 if (instance->hb_host_mem->HB.fwCounter !=
2441 instance->hb_host_mem->HB.driverCounter) {
2442 instance->hb_host_mem->HB.driverCounter =
2443 instance->hb_host_mem->HB.fwCounter;
2444 mod_timer(&instance->sriov_heartbeat_timer,
2445 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2446 } else {
2447 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2448 "completed for scsi%d\n", instance->host->host_no);
2449 schedule_work(&instance->work_init);
2450 }
2451 }
2452
2453 /**
2454 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2455 * @instance: Adapter soft state
2456 *
2457 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2458 * complete all its outstanding commands. Returns error if one or more IOs
2459 * are pending after this time period. It also marks the controller dead.
2460 */
2461 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2462 {
2463 int i, sl, outstanding;
2464 u32 reset_index;
2465 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2466 unsigned long flags;
2467 struct list_head clist_local;
2468 struct megasas_cmd *reset_cmd;
2469 u32 fw_state;
2470
2471 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2472 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2473 __func__, __LINE__);
2474 return FAILED;
2475 }
2476
2477 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2478
2479 INIT_LIST_HEAD(&clist_local);
2480 spin_lock_irqsave(&instance->hba_lock, flags);
2481 list_splice_init(&instance->internal_reset_pending_q,
2482 &clist_local);
2483 spin_unlock_irqrestore(&instance->hba_lock, flags);
2484
2485 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2486 for (i = 0; i < wait_time; i++) {
2487 msleep(1000);
2488 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2489 break;
2490 }
2491
2492 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2493 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2494 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2495 return FAILED;
2496 }
2497
2498 reset_index = 0;
2499 while (!list_empty(&clist_local)) {
2500 reset_cmd = list_entry((&clist_local)->next,
2501 struct megasas_cmd, list);
2502 list_del_init(&reset_cmd->list);
2503 if (reset_cmd->scmd) {
2504 reset_cmd->scmd->result = DID_RESET << 16;
2505 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2506 reset_index, reset_cmd,
2507 reset_cmd->scmd->cmnd[0]);
2508
2509 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2510 megasas_return_cmd(instance, reset_cmd);
2511 } else if (reset_cmd->sync_cmd) {
2512 dev_notice(&instance->pdev->dev, "%p synch cmds "
2513 "reset queue\n",
2514 reset_cmd);
2515
2516 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2517 instance->instancet->fire_cmd(instance,
2518 reset_cmd->frame_phys_addr,
2519 0, instance->reg_set);
2520 } else {
2521 dev_notice(&instance->pdev->dev, "%p unexpected "
2522 "cmds list\n",
2523 reset_cmd);
2524 }
2525 reset_index++;
2526 }
2527
2528 return SUCCESS;
2529 }
2530
2531 for (i = 0; i < resetwaittime; i++) {
2532 outstanding = atomic_read(&instance->fw_outstanding);
2533
2534 if (!outstanding)
2535 break;
2536
2537 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2538 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2539 "commands to complete\n", i, outstanding);
2540 /*
2541 * Call cmd completion routine. Cmds are
2542 * completed directly without depending on the isr.
2543 */
2544 megasas_complete_cmd_dpc((unsigned long)instance);
2545 }
2546
2547 msleep(1000);
2548 }
2549
2550 i = 0;
2551 outstanding = atomic_read(&instance->fw_outstanding);
2552 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2553
2554 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2555 goto no_outstanding;
2556
2557 if (instance->disableOnlineCtrlReset)
2558 goto kill_hba_and_failed;
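/* Attempt OCR up to three times below; if the fault or outstanding I/O
 * persists on the fourth pass (i == 3), the adapter is killed instead. */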
2559 do {
2560 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2561 dev_info(&instance->pdev->dev,
2562 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2563 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2564 if (i == 3)
2565 goto kill_hba_and_failed;
2566 megasas_do_ocr(instance);
2567
2568 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2569 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2570 __func__, __LINE__);
2571 return FAILED;
2572 }
2573 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2574 __func__, __LINE__);
2575
2576 for (sl = 0; sl < 10; sl++)
2577 msleep(500);
2578
2579 outstanding = atomic_read(&instance->fw_outstanding);
2580
2581 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2582 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2583 goto no_outstanding;
2584 }
2585 i++;
2586 } while (i <= 3);
2587
2588 no_outstanding:
2589
2590 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2591 __func__, __LINE__);
2592 return SUCCESS;
2593
2594 kill_hba_and_failed:
2595
2596 /* Reset not supported, kill adapter */
2597 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2598 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2599 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2600 atomic_read(&instance->fw_outstanding));
2601 megasas_dump_pending_frames(instance);
2602 megaraid_sas_kill_hba(instance);
2603
2604 return FAILED;
2605 }
2606
2607 /**
2608 * megasas_generic_reset - Generic reset routine
2609 * @scmd: Mid-layer SCSI command
2610 *
2611 * This routine implements a generic reset handler for device, bus and host
2612 * reset requests. Device, bus and host specific reset handlers can use this
2613 * function after they do their specific tasks.
2614 */
2615 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2616 {
2617 int ret_val;
2618 struct megasas_instance *instance;
2619
2620 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2621
2622 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2623 scmd->cmnd[0], scmd->retries);
2624
2625 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2626 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2627 return FAILED;
2628 }
2629
2630 ret_val = megasas_wait_for_outstanding(instance);
2631 if (ret_val == SUCCESS)
2632 dev_notice(&instance->pdev->dev, "reset successful\n");
2633 else
2634 dev_err(&instance->pdev->dev, "failed to do reset\n");
2635
2636 return ret_val;
2637 }
2638
2639 /**
2640 * megasas_reset_timer - quiesce the adapter if required
2641 * @scmd: scsi cmnd
2642 *
2643 * Sets the FW busy flag and reduces the host->can_queue if the
2644 * cmd has not been completed within the timeout period.
2645 */
2646 static enum
2647 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2648 {
2649 struct megasas_instance *instance;
2650 unsigned long flags;
2651
2652 if (time_after(jiffies, scmd->jiffies_at_alloc +
2653 (scmd_timeout * 2) * HZ)) {
2654 return BLK_EH_NOT_HANDLED;
2655 }
2656
2657 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2658 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2659 /* A command timed out; mark the FW busy and throttle IO */
2660 spin_lock_irqsave(instance->host->host_lock, flags);
2661
2662 instance->host->can_queue = instance->throttlequeuedepth;
2663 instance->last_time = jiffies;
2664 instance->flag |= MEGASAS_FW_BUSY;
2665
2666 spin_unlock_irqrestore(instance->host->host_lock, flags);
2667 }
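/* The throttle set above is lifted by megasas_check_and_restore_queue_depth()
 * once fw_outstanding drops below throttlequeuedepth and ~5 seconds pass. */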
2668 return BLK_EH_RESET_TIMER;
2669 }
2670
2671 /**
2672 * megasas_reset_device - Device reset handler entry point
2673 */
2674 static int megasas_reset_device(struct scsi_cmnd *scmd)
2675 {
2676 /*
2677 * First wait for all commands to complete
2678 */
2679 return megasas_generic_reset(scmd);
2680 }
2681
2682 /**
2683 * megasas_reset_bus_host - Bus & host reset handler entry point
2684 */
2685 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2686 {
2687 int ret;
2688 struct megasas_instance *instance;
2689
2690 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2691
2692 /*
2693 * First wait for all commands to complete
2694 */
2695 if (instance->ctrl_context)
2696 ret = megasas_reset_fusion(scmd->device->host, 1);
2697 else
2698 ret = megasas_generic_reset(scmd);
2699
2700 return ret;
2701 }
2702
2703 /**
2704 * megasas_bios_param - Returns disk geometry for a disk
2705 * @sdev: device handle
2706 * @bdev: block device
2707 * @capacity: drive capacity
2708 * @geom: geometry parameters
2709 */
2710 static int
2711 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2712 sector_t capacity, int geom[])
2713 {
2714 int heads;
2715 int sectors;
2716 sector_t cylinders;
2717 unsigned long tmp;
2718
2719 /* Default heads (64) & sectors (32) */
2720 heads = 64;
2721 sectors = 32;
2722
2723 tmp = heads * sectors;
2724 cylinders = capacity;
2725
2726 sector_div(cylinders, tmp);
2727
2728 /*
2729 * Handle extended translation size for logical drives > 1Gb
2730 */
2731
2732 if (capacity >= 0x200000) {
2733 heads = 255;
2734 sectors = 63;
2735 tmp = heads*sectors;
2736 cylinders = capacity;
2737 sector_div(cylinders, tmp);
2738 }
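/* Worked example: a drive of exactly 0x200000 sectors (1 GiB) reports
 * 255 heads, 63 sectors and 2097152 / (255 * 63) ~= 130 cylinders. */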
2739
2740 geom[0] = heads;
2741 geom[1] = sectors;
2742 geom[2] = cylinders;
2743
2744 return 0;
2745 }
2746
2747 static void megasas_aen_polling(struct work_struct *work);
2748
2749 /**
2750 * megasas_service_aen - Processes an event notification
2751 * @instance: Adapter soft state
2752 * @cmd: AEN command completed by the ISR
2753 *
2754 * For AEN, the driver sends a command down to the FW, which holds it until an
2755 * event occurs. When an event of interest occurs, the FW completes the command
2756 * that it was previously holding.
2757 *
2758 * This routine sends a SIGIO signal to processes that have registered with the
2759 * driver for AEN.
2760 */
2761 static void
2762 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2763 {
2764 unsigned long flags;
2765
2766 /*
2767 * Don't signal the app if this is just the abort of a previously registered AEN
2768 */
2769 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2770 spin_lock_irqsave(&poll_aen_lock, flags);
2771 megasas_poll_wait_aen = 1;
2772 spin_unlock_irqrestore(&poll_aen_lock, flags);
2773 wake_up(&megasas_poll_wait);
2774 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2775 }
2776 else
2777 cmd->abort_aen = 0;
2778
2779 instance->aen_cmd = NULL;
2780
2781 megasas_return_cmd(instance, cmd);
2782
2783 if ((instance->unload == 0) &&
2784 ((instance->issuepend_done == 1))) {
2785 struct megasas_aen_event *ev;
2786
2787 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2788 if (!ev) {
2789 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2790 } else {
2791 ev->instance = instance;
2792 instance->ev = ev;
2793 INIT_DELAYED_WORK(&ev->hotplug_work,
2794 megasas_aen_polling);
2795 schedule_delayed_work(&ev->hotplug_work, 0);
2796 }
2797 }
2798 }
2799
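/*
 * sysfs plumbing for retrieving the FW crash dump: fw_crash_buffer_size
 * reports the dump size in PAGE_SIZE units, an application writes a byte
 * offset to fw_crash_buffer and reads the dump back in slices from that
 * offset, and writing COPIED or COPY_ERROR to fw_crash_state releases the
 * host crash buffer.
 */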
2800 static ssize_t
2801 megasas_fw_crash_buffer_store(struct device *cdev,
2802 struct device_attribute *attr, const char *buf, size_t count)
2803 {
2804 struct Scsi_Host *shost = class_to_shost(cdev);
2805 struct megasas_instance *instance =
2806 (struct megasas_instance *) shost->hostdata;
2807 int val = 0;
2808 unsigned long flags;
2809
2810 if (kstrtoint(buf, 0, &val) != 0)
2811 return -EINVAL;
2812
2813 spin_lock_irqsave(&instance->crashdump_lock, flags);
2814 instance->fw_crash_buffer_offset = val;
2815 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2816 return strlen(buf);
2817 }
2818
2819 static ssize_t
2820 megasas_fw_crash_buffer_show(struct device *cdev,
2821 struct device_attribute *attr, char *buf)
2822 {
2823 struct Scsi_Host *shost = class_to_shost(cdev);
2824 struct megasas_instance *instance =
2825 (struct megasas_instance *) shost->hostdata;
2826 u32 size;
2827 unsigned long buff_addr;
2828 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2829 unsigned long src_addr;
2830 unsigned long flags;
2831 u32 buff_offset;
2832
2833 spin_lock_irqsave(&instance->crashdump_lock, flags);
2834 buff_offset = instance->fw_crash_buffer_offset;
2835 if (!instance->crash_dump_buf &&
2836 !((instance->fw_crash_state == AVAILABLE) ||
2837 (instance->fw_crash_state == COPYING))) {
2838 dev_err(&instance->pdev->dev,
2839 "Firmware crash dump is not available\n");
2840 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2841 return -EINVAL;
2842 }
2843
2844 buff_addr = (unsigned long) buf;
2845
2846 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2847 dev_err(&instance->pdev->dev,
2848 "Firmware crash dump offset is out of range\n");
2849 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2850 return 0;
2851 }
2852
2853 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2854 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2855
2856 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2857 (buff_offset % dmachunk);
2858 memcpy(buf, (void *)src_addr, size);
2859 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2860
2861 return size;
2862 }
2863
2864 static ssize_t
2865 megasas_fw_crash_buffer_size_show(struct device *cdev,
2866 struct device_attribute *attr, char *buf)
2867 {
2868 struct Scsi_Host *shost = class_to_shost(cdev);
2869 struct megasas_instance *instance =
2870 (struct megasas_instance *) shost->hostdata;
2871
2872 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
2873 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
2874 }
2875
2876 static ssize_t
2877 megasas_fw_crash_state_store(struct device *cdev,
2878 struct device_attribute *attr, const char *buf, size_t count)
2879 {
2880 struct Scsi_Host *shost = class_to_shost(cdev);
2881 struct megasas_instance *instance =
2882 (struct megasas_instance *) shost->hostdata;
2883 int val = 0;
2884 unsigned long flags;
2885
2886 if (kstrtoint(buf, 0, &val) != 0)
2887 return -EINVAL;
2888
2889 if ((val <= AVAILABLE || val > COPY_ERROR)) {
2890 dev_err(&instance->pdev->dev, "application updates invalid "
2891 "firmware crash state\n");
2892 return -EINVAL;
2893 }
2894
2895 instance->fw_crash_state = val;
2896
2897 if ((val == COPIED) || (val == COPY_ERROR)) {
2898 spin_lock_irqsave(&instance->crashdump_lock, flags);
2899 megasas_free_host_crash_buffer(instance);
2900 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2901 if (val == COPY_ERROR)
2902 dev_info(&instance->pdev->dev, "application failed to "
2903 "copy Firmware crash dump\n");
2904 else
2905 dev_info(&instance->pdev->dev, "Firmware crash dump "
2906 "copied successfully\n");
2907 }
2908 return strlen(buf);
2909 }
2910
2911 static ssize_t
2912 megasas_fw_crash_state_show(struct device *cdev,
2913 struct device_attribute *attr, char *buf)
2914 {
2915 struct Scsi_Host *shost = class_to_shost(cdev);
2916 struct megasas_instance *instance =
2917 (struct megasas_instance *) shost->hostdata;
2918
2919 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
2920 }
2921
2922 static ssize_t
2923 megasas_page_size_show(struct device *cdev,
2924 struct device_attribute *attr, char *buf)
2925 {
2926 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
2927 }
2928
2929 static ssize_t
2930 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
2931 char *buf)
2932 {
2933 struct Scsi_Host *shost = class_to_shost(cdev);
2934 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
2935
2936 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
2937 }
2938
2939 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
2940 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
2941 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
2942 megasas_fw_crash_buffer_size_show, NULL);
2943 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
2944 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
2945 static DEVICE_ATTR(page_size, S_IRUGO,
2946 megasas_page_size_show, NULL);
2947 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
2948 megasas_ldio_outstanding_show, NULL);
2949
2950 struct device_attribute *megaraid_host_attrs[] = {
2951 &dev_attr_fw_crash_buffer_size,
2952 &dev_attr_fw_crash_buffer,
2953 &dev_attr_fw_crash_state,
2954 &dev_attr_page_size,
2955 &dev_attr_ldio_outstanding,
2956 NULL,
2957 };
2958
2959 /*
2960 * Scsi host template for megaraid_sas driver
2961 */
2962 static struct scsi_host_template megasas_template = {
2963
2964 .module = THIS_MODULE,
2965 .name = "Avago SAS based MegaRAID driver",
2966 .proc_name = "megaraid_sas",
2967 .slave_configure = megasas_slave_configure,
2968 .slave_alloc = megasas_slave_alloc,
2969 .slave_destroy = megasas_slave_destroy,
2970 .queuecommand = megasas_queue_command,
2971 .eh_device_reset_handler = megasas_reset_device,
2972 .eh_bus_reset_handler = megasas_reset_bus_host,
2973 .eh_host_reset_handler = megasas_reset_bus_host,
2974 .eh_timed_out = megasas_reset_timer,
2975 .shost_attrs = megaraid_host_attrs,
2976 .bios_param = megasas_bios_param,
2977 .use_clustering = ENABLE_CLUSTERING,
2978 .change_queue_depth = scsi_change_queue_depth,
2979 .no_write_same = 1,
2980 };
2981
2982 /**
2983 * megasas_complete_int_cmd - Completes an internal command
2984 * @instance: Adapter soft state
2985 * @cmd: Command to be completed
2986 *
2987 * The megasas_issue_blocked_cmd() function waits for a command to complete
2988 * after it issues a command. This function wakes up that waiting routine by
2989 * calling wake_up() on the wait queue.
2990 */
2991 static void
2992 megasas_complete_int_cmd(struct megasas_instance *instance,
2993 struct megasas_cmd *cmd)
2994 {
2995 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
2996 wake_up(&instance->int_cmd_wait_q);
2997 }
2998
2999 /**
3000 * megasas_complete_abort - Completes aborting a command
3001 * @instance: Adapter soft state
3002 * @cmd: Cmd that was issued to abort another cmd
3003 *
3004 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3005 * after it issues an abort on a previously issued command. This function
3006 * wakes up all functions waiting on the same wait queue.
3007 */
3008 static void
3009 megasas_complete_abort(struct megasas_instance *instance,
3010 struct megasas_cmd *cmd)
3011 {
3012 if (cmd->sync_cmd) {
3013 cmd->sync_cmd = 0;
3014 cmd->cmd_status_drv = 0;
3015 wake_up(&instance->abort_cmd_wait_q);
3016 }
3017 }
3018
3019 /**
3020 * megasas_complete_cmd - Completes a command
3021 * @instance: Adapter soft state
3022 * @cmd: Command to be completed
3023 * @alt_status: If non-zero, use this value as status to
3024 * SCSI mid-layer instead of the value returned
3025 * by the FW. This should be used if caller wants
3026 * an alternate status (as in the case of aborted
3027 * commands)
3028 */
3029 void
3030 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3031 u8 alt_status)
3032 {
3033 int exception = 0;
3034 struct megasas_header *hdr = &cmd->frame->hdr;
3035 unsigned long flags;
3036 struct fusion_context *fusion = instance->ctrl_context;
3037 u32 opcode, status;
3038
3039 /* flag for the retry reset */
3040 cmd->retry_for_fw_reset = 0;
3041
3042 if (cmd->scmd)
3043 cmd->scmd->SCp.ptr = NULL;
3044
3045 switch (hdr->cmd) {
3046 case MFI_CMD_INVALID:
3047 /* Some older 1068 controller FW may keep a pending
3048 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3049 when booting the kdump kernel. Ignore this command to
3050 prevent a kernel panic on shutdown of the kdump kernel. */
3051 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3052 "completed\n");
3053 dev_warn(&instance->pdev->dev, "If you have a controller "
3054 "other than PERC5, please upgrade your firmware\n");
3055 break;
3056 case MFI_CMD_PD_SCSI_IO:
3057 case MFI_CMD_LD_SCSI_IO:
3058
3059 /*
3060 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3061 * issued either through an IO path or an IOCTL path. If it
3062 * was via IOCTL, we will send it to internal completion.
3063 */
3064 if (cmd->sync_cmd) {
3065 cmd->sync_cmd = 0;
3066 megasas_complete_int_cmd(instance, cmd);
3067 break;
3068 }
3069
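/* Fall through: non-ioctl PD/LD SCSI IO frames are completed exactly
 * like LD READ/WRITE frames below. */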
3070 case MFI_CMD_LD_READ:
3071 case MFI_CMD_LD_WRITE:
3072
3073 if (alt_status) {
3074 cmd->scmd->result = alt_status << 16;
3075 exception = 1;
3076 }
3077
3078 if (exception) {
3079
3080 atomic_dec(&instance->fw_outstanding);
3081
3082 scsi_dma_unmap(cmd->scmd);
3083 cmd->scmd->scsi_done(cmd->scmd);
3084 megasas_return_cmd(instance, cmd);
3085
3086 break;
3087 }
3088
3089 switch (hdr->cmd_status) {
3090
3091 case MFI_STAT_OK:
3092 cmd->scmd->result = DID_OK << 16;
3093 break;
3094
3095 case MFI_STAT_SCSI_IO_FAILED:
3096 case MFI_STAT_LD_INIT_IN_PROGRESS:
3097 cmd->scmd->result =
3098 (DID_ERROR << 16) | hdr->scsi_status;
3099 break;
3100
3101 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3102
3103 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3104
3105 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3106 memset(cmd->scmd->sense_buffer, 0,
3107 SCSI_SENSE_BUFFERSIZE);
3108 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3109 hdr->sense_len);
3110
3111 cmd->scmd->result |= DRIVER_SENSE << 24;
3112 }
3113
3114 break;
3115
3116 case MFI_STAT_LD_OFFLINE:
3117 case MFI_STAT_DEVICE_NOT_FOUND:
3118 cmd->scmd->result = DID_BAD_TARGET << 16;
3119 break;
3120
3121 default:
3122 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3123 hdr->cmd_status);
3124 cmd->scmd->result = DID_ERROR << 16;
3125 break;
3126 }
3127
3128 atomic_dec(&instance->fw_outstanding);
3129
3130 scsi_dma_unmap(cmd->scmd);
3131 cmd->scmd->scsi_done(cmd->scmd);
3132 megasas_return_cmd(instance, cmd);
3133
3134 break;
3135
3136 case MFI_CMD_SMP:
3137 case MFI_CMD_STP:
3138 case MFI_CMD_DCMD:
3139 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3140 /* Check for LD map update */
3141 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3142 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3143 fusion->fast_path_io = 0;
3144 spin_lock_irqsave(instance->host->host_lock, flags);
3145 instance->map_update_cmd = NULL;
3146 if (cmd->frame->hdr.cmd_status != 0) {
3147 if (cmd->frame->hdr.cmd_status !=
3148 MFI_STAT_NOT_FOUND)
3149 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3150 cmd->frame->hdr.cmd_status);
3151 else {
3152 megasas_return_cmd(instance, cmd);
3153 spin_unlock_irqrestore(
3154 instance->host->host_lock,
3155 flags);
3156 break;
3157 }
3158 } else
3159 instance->map_id++;
3160 megasas_return_cmd(instance, cmd);
3161
3162 /*
3163 * Set fast path IO to zero; MR_ValidateMapInfo() will set the
3164 * proper value. Until then, all IOs go through the LD IO path.
3166 */
3167 if (MR_ValidateMapInfo(instance))
3168 fusion->fast_path_io = 1;
3169 else
3170 fusion->fast_path_io = 0;
3171 megasas_sync_map_info(instance);
3172 spin_unlock_irqrestore(instance->host->host_lock,
3173 flags);
3174 break;
3175 }
3176 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3177 opcode == MR_DCMD_CTRL_EVENT_GET) {
3178 spin_lock_irqsave(&poll_aen_lock, flags);
3179 megasas_poll_wait_aen = 0;
3180 spin_unlock_irqrestore(&poll_aen_lock, flags);
3181 }
3182
3183 /* FW has an updated PD sequence */
3184 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3185 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3186
3187 spin_lock_irqsave(instance->host->host_lock, flags);
3188 status = cmd->frame->hdr.cmd_status;
3189 instance->jbod_seq_cmd = NULL;
3190 megasas_return_cmd(instance, cmd);
3191
3192 if (status == MFI_STAT_OK) {
3193 instance->pd_seq_map_id++;
3194 /* Re-register a pd sync seq num cmd */
3195 if (megasas_sync_pd_seq_num(instance, true))
3196 instance->use_seqnum_jbod_fp = false;
3197 } else
3198 instance->use_seqnum_jbod_fp = false;
3199
3200 spin_unlock_irqrestore(instance->host->host_lock, flags);
3201 break;
3202 }
3203
3204 /*
3205 * See if got an event notification
3206 */
3207 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3208 megasas_service_aen(instance, cmd);
3209 else
3210 megasas_complete_int_cmd(instance, cmd);
3211
3212 break;
3213
3214 case MFI_CMD_ABORT:
3215 /*
3216 * Cmd issued to abort another cmd returned
3217 */
3218 megasas_complete_abort(instance, cmd);
3219 break;
3220
3221 default:
3222 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3223 hdr->cmd);
3224 break;
3225 }
3226 }
3227
3228 /**
3229 * megasas_issue_pending_cmds_again - issue all pending cmds
3230 * in FW again because of the fw reset
3231 * @instance: Adapter soft state
3232 */
3233 static inline void
3234 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3235 {
3236 struct megasas_cmd *cmd;
3237 struct list_head clist_local;
3238 union megasas_evt_class_locale class_locale;
3239 unsigned long flags;
3240 u32 seq_num;
3241
3242 INIT_LIST_HEAD(&clist_local);
3243 spin_lock_irqsave(&instance->hba_lock, flags);
3244 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3245 spin_unlock_irqrestore(&instance->hba_lock, flags);
3246
3247 while (!list_empty(&clist_local)) {
3248 cmd = list_entry((&clist_local)->next,
3249 struct megasas_cmd, list);
3250 list_del_init(&cmd->list);
3251
3252 if (cmd->sync_cmd || cmd->scmd) {
3253 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3254 "detected to be pending during HBA reset\n",
3255 cmd, cmd->scmd, cmd->sync_cmd);
3256
3257 cmd->retry_for_fw_reset++;
3258
3259 if (cmd->retry_for_fw_reset == 3) {
3260 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3261 "was tried multiple times during reset. "
3262 "Shutting down the HBA\n",
3263 cmd, cmd->scmd, cmd->sync_cmd);
3264 instance->instancet->disable_intr(instance);
3265 atomic_set(&instance->fw_reset_no_pci_access, 1);
3266 megaraid_sas_kill_hba(instance);
3267 return;
3268 }
3269 }
3270
3271 if (cmd->sync_cmd == 1) {
3272 if (cmd->scmd) {
3273 dev_notice(&instance->pdev->dev, "unexpected "
3274 "cmd attached to internal command!\n");
3275 }
3276 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3277 "on the internal reset queue, "
3278 "issue it again.\n", cmd);
3279 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3280 instance->instancet->fire_cmd(instance,
3281 cmd->frame_phys_addr,
3282 0, instance->reg_set);
3283 } else if (cmd->scmd) {
3284 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3285 "detected on the internal queue, issue it again.\n",
3286 cmd, cmd->scmd->cmnd[0]);
3287
3288 atomic_inc(&instance->fw_outstanding);
3289 instance->instancet->fire_cmd(instance,
3290 cmd->frame_phys_addr,
3291 cmd->frame_count-1, instance->reg_set);
3292 } else {
3293 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3294 "internal reset defer list during re-issue\n",
3295 cmd);
3296 }
3297 }
3298
3299 if (instance->aen_cmd) {
3300 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3301 megasas_return_cmd(instance, instance->aen_cmd);
3302
3303 instance->aen_cmd = NULL;
3304 }
3305
3306 /*
3307 * Initiate AEN (Asynchronous Event Notification)
3308 */
3309 seq_num = instance->last_seq_num;
3310 class_locale.members.reserved = 0;
3311 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3312 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3313
3314 megasas_register_aen(instance, seq_num, class_locale.word);
3315 }
3316
3317 /**
3318 * Move the internal reset pending commands to a deferred queue.
3319 *
3320 * We move the commands pending at internal reset time to a
3321 * pending queue. This queue is flushed after successful
3322 * completion of the internal reset sequence. If the internal reset
3323 * does not complete in time, the kernel reset handler flushes
3324 * these commands.
3325 **/
3326 static void
3327 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3328 {
3329 struct megasas_cmd *cmd;
3330 int i;
3331 u32 max_cmd = instance->max_fw_cmds;
3332 u32 defer_index;
3333 unsigned long flags;
3334
3335 defer_index = 0;
3336 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3337 for (i = 0; i < max_cmd; i++) {
3338 cmd = instance->cmd_list[i];
3339 if (cmd->sync_cmd == 1 || cmd->scmd) {
3340 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3341 "onto the internal reset defer queue\n",
3342 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3343
3344 if (!list_empty(&cmd->list)) {
3345 dev_notice(&instance->pdev->dev, "ERROR while"
3346 " moving this cmd:%p, %d %p, it was"
3347 "discovered on some list?\n",
3348 cmd, cmd->sync_cmd, cmd->scmd);
3349
3350 list_del_init(&cmd->list);
3351 }
3352 defer_index++;
3353 list_add_tail(&cmd->list,
3354 &instance->internal_reset_pending_q);
3355 }
3356 }
3357 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3358 }
3359
3360
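/*
 * Work handler for the MFI adapter-reset state machine: once the FW has
 * been put through adp_reset(), wait for it to settle, bring it back to
 * READY, re-issue MFI INIT, re-enable interrupts and then re-fire the
 * commands deferred at reset time.
 */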
3361 static void
3362 process_fw_state_change_wq(struct work_struct *work)
3363 {
3364 struct megasas_instance *instance =
3365 container_of(work, struct megasas_instance, work_init);
3366 u32 wait;
3367 unsigned long flags;
3368
3369 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3370 dev_notice(&instance->pdev->dev, "error, recovery state %x\n",
3371 atomic_read(&instance->adprecovery));
3372 return ;
3373 }
3374
3375 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3376 dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3377 "state, restarting it...\n");
3378
3379 instance->instancet->disable_intr(instance);
3380 atomic_set(&instance->fw_outstanding, 0);
3381
3382 atomic_set(&instance->fw_reset_no_pci_access, 1);
3383 instance->instancet->adp_reset(instance, instance->reg_set);
3384 atomic_set(&instance->fw_reset_no_pci_access, 0);
3385
3386 dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3387 "initiating next stage...\n");
3388
3389 dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3390 "state 2 starting...\n");
3391
3392 /* wait about 30 seconds before starting the second init */
3393 for (wait = 0; wait < 30; wait++) {
3394 msleep(1000);
3395 }
3396
3397 if (megasas_transition_to_ready(instance, 1)) {
3398 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3399
3400 atomic_set(&instance->fw_reset_no_pci_access, 1);
3401 megaraid_sas_kill_hba(instance);
3402 return ;
3403 }
3404
3405 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3406 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3407 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3408 ) {
3409 *instance->consumer = *instance->producer;
3410 } else {
3411 *instance->consumer = 0;
3412 *instance->producer = 0;
3413 }
3414
3415 megasas_issue_init_mfi(instance);
3416
3417 spin_lock_irqsave(&instance->hba_lock, flags);
3418 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3419 spin_unlock_irqrestore(&instance->hba_lock, flags);
3420 instance->instancet->enable_intr(instance);
3421
3422 megasas_issue_pending_cmds_again(instance);
3423 instance->issuepend_done = 1;
3424 }
3425 }
3426
3427 /**
3428 * megasas_deplete_reply_queue - Processes all completed commands
3429 * @instance: Adapter soft state
3430 * @alt_status: Alternate status to be returned to
3431 * SCSI mid-layer instead of the status
3432 * returned by the FW
3433 * Note: this must be called with hba lock held
3434 */
3435 static int
3436 megasas_deplete_reply_queue(struct megasas_instance *instance,
3437 u8 alt_status)
3438 {
3439 u32 mfiStatus;
3440 u32 fw_state;
3441
3442 if ((mfiStatus = instance->instancet->check_reset(instance,
3443 instance->reg_set)) == 1) {
3444 return IRQ_HANDLED;
3445 }
3446
3447 if ((mfiStatus = instance->instancet->clear_intr(
3448 instance->reg_set)
3449 ) == 0) {
3450 /* Hardware may not set outbound_intr_status in MSI-X mode */
3451 if (!instance->msix_vectors)
3452 return IRQ_NONE;
3453 }
3454
3455 instance->mfiStatus = mfiStatus;
3456
3457 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3458 fw_state = instance->instancet->read_fw_status_reg(
3459 instance->reg_set) & MFI_STATE_MASK;
3460
3461 if (fw_state != MFI_STATE_FAULT) {
3462 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3463 fw_state);
3464 }
3465
3466 if ((fw_state == MFI_STATE_FAULT) &&
3467 (instance->disableOnlineCtrlReset == 0)) {
3468 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3469
3470 if ((instance->pdev->device ==
3471 PCI_DEVICE_ID_LSI_SAS1064R) ||
3472 (instance->pdev->device ==
3473 PCI_DEVICE_ID_DELL_PERC5) ||
3474 (instance->pdev->device ==
3475 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3476
3477 *instance->consumer =
3478 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3479 }
3480
3481
3482 instance->instancet->disable_intr(instance);
3483 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3484 instance->issuepend_done = 0;
3485
3486 atomic_set(&instance->fw_outstanding, 0);
3487 megasas_internal_reset_defer_cmds(instance);
3488
3489 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3490 fw_state, atomic_read(&instance->adprecovery));
3491
3492 schedule_work(&instance->work_init);
3493 return IRQ_HANDLED;
3494
3495 } else {
3496 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3497 fw_state, instance->disableOnlineCtrlReset);
3498 }
3499 }
3500
3501 tasklet_schedule(&instance->isr_tasklet);
3502 return IRQ_HANDLED;
3503 }
3504 /**
3505 * megasas_isr - isr entry point
3506 */
3507 static irqreturn_t megasas_isr(int irq, void *devp)
3508 {
3509 struct megasas_irq_context *irq_context = devp;
3510 struct megasas_instance *instance = irq_context->instance;
3511 unsigned long flags;
3512 irqreturn_t rc;
3513
3514 if (atomic_read(&instance->fw_reset_no_pci_access))
3515 return IRQ_HANDLED;
3516
3517 spin_lock_irqsave(&instance->hba_lock, flags);
3518 rc = megasas_deplete_reply_queue(instance, DID_OK);
3519 spin_unlock_irqrestore(&instance->hba_lock, flags);
3520
3521 return rc;
3522 }
3523
3524 /**
3525 * megasas_transition_to_ready - Move the FW to READY state
3526 * @instance: Adapter soft state
3527 *
3528 * During initialization, the FW can potentially be in any one of several
3529 * possible states. If the FW is in the operational or waiting-for-handshake
3530 * state, the driver must take steps to bring it to the ready state. Otherwise, it
3531 * has to wait for the ready state.
3532 */
3533 int
3534 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3535 {
3536 int i;
3537 u8 max_wait;
3538 u32 fw_state;
3539 u32 cur_state;
3540 u32 abs_state, curr_abs_state;
3541
3542 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3543 fw_state = abs_state & MFI_STATE_MASK;
3544
3545 if (fw_state != MFI_STATE_READY)
3546 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3547 " state\n");
3548
3549 while (fw_state != MFI_STATE_READY) {
3550
3551 switch (fw_state) {
3552
3553 case MFI_STATE_FAULT:
3554 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3555 if (ocr) {
3556 max_wait = MEGASAS_RESET_WAIT_TIME;
3557 cur_state = MFI_STATE_FAULT;
3558 break;
3559 } else
3560 return -ENODEV;
3561
3562 case MFI_STATE_WAIT_HANDSHAKE:
3563 /*
3564 * Set the CLR bit in inbound doorbell
3565 */
3566 if ((instance->pdev->device ==
3567 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3568 (instance->pdev->device ==
3569 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3570 (instance->ctrl_context))
3571 writel(
3572 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3573 &instance->reg_set->doorbell);
3574 else
3575 writel(
3576 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3577 &instance->reg_set->inbound_doorbell);
3578
3579 max_wait = MEGASAS_RESET_WAIT_TIME;
3580 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3581 break;
3582
3583 case MFI_STATE_BOOT_MESSAGE_PENDING:
3584 if ((instance->pdev->device ==
3585 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3586 (instance->pdev->device ==
3587 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3588 (instance->ctrl_context))
3589 writel(MFI_INIT_HOTPLUG,
3590 &instance->reg_set->doorbell);
3591 else
3592 writel(MFI_INIT_HOTPLUG,
3593 &instance->reg_set->inbound_doorbell);
3594
3595 max_wait = MEGASAS_RESET_WAIT_TIME;
3596 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3597 break;
3598
3599 case MFI_STATE_OPERATIONAL:
3600 /*
3601 * Bring it to READY state; assuming max wait 10 secs
3602 */
3603 instance->instancet->disable_intr(instance);
3604 if ((instance->pdev->device ==
3605 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3606 (instance->pdev->device ==
3607 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3608 (instance->ctrl_context)) {
3609 writel(MFI_RESET_FLAGS,
3610 &instance->reg_set->doorbell);
3611
3612 if (instance->ctrl_context) {
3613 for (i = 0; i < (10 * 1000); i += 20) {
3614 if (readl(
3615 &instance->
3616 reg_set->
3617 doorbell) & 1)
3618 msleep(20);
3619 else
3620 break;
3621 }
3622 }
3623 } else
3624 writel(MFI_RESET_FLAGS,
3625 &instance->reg_set->inbound_doorbell);
3626
3627 max_wait = MEGASAS_RESET_WAIT_TIME;
3628 cur_state = MFI_STATE_OPERATIONAL;
3629 break;
3630
3631 case MFI_STATE_UNDEFINED:
3632 /*
3633 * This state should not last for more than 2 seconds
3634 */
3635 max_wait = MEGASAS_RESET_WAIT_TIME;
3636 cur_state = MFI_STATE_UNDEFINED;
3637 break;
3638
3639 case MFI_STATE_BB_INIT:
3640 max_wait = MEGASAS_RESET_WAIT_TIME;
3641 cur_state = MFI_STATE_BB_INIT;
3642 break;
3643
3644 case MFI_STATE_FW_INIT:
3645 max_wait = MEGASAS_RESET_WAIT_TIME;
3646 cur_state = MFI_STATE_FW_INIT;
3647 break;
3648
3649 case MFI_STATE_FW_INIT_2:
3650 max_wait = MEGASAS_RESET_WAIT_TIME;
3651 cur_state = MFI_STATE_FW_INIT_2;
3652 break;
3653
3654 case MFI_STATE_DEVICE_SCAN:
3655 max_wait = MEGASAS_RESET_WAIT_TIME;
3656 cur_state = MFI_STATE_DEVICE_SCAN;
3657 break;
3658
3659 case MFI_STATE_FLUSH_CACHE:
3660 max_wait = MEGASAS_RESET_WAIT_TIME;
3661 cur_state = MFI_STATE_FLUSH_CACHE;
3662 break;
3663
3664 default:
3665 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3666 fw_state);
3667 return -ENODEV;
3668 }
3669
3670 /*
3671 * The cur_state should not last for more than max_wait secs
3672 */
3673 for (i = 0; i < (max_wait * 1000); i++) {
3674 curr_abs_state = instance->instancet->
3675 read_fw_status_reg(instance->reg_set);
3676
3677 if (abs_state == curr_abs_state) {
3678 msleep(1);
3679 } else
3680 break;
3681 }
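/* The loop above polls the FW state roughly once per millisecond for up
 * to max_wait seconds per state (msleep(1) may oversleep, so the real
 * bound can be somewhat longer). */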
3682
3683 /*
3684 * Return error if fw_state hasn't changed after max_wait
3685 */
3686 if (curr_abs_state == abs_state) {
3687 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3688 "in %d secs\n", fw_state, max_wait);
3689 return -ENODEV;
3690 }
3691
3692 abs_state = curr_abs_state;
3693 fw_state = curr_abs_state & MFI_STATE_MASK;
3694 }
3695 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3696
3697 return 0;
3698 }
3699
3700 /**
3701 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3702 * @instance: Adapter soft state
3703 */
3704 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3705 {
3706 int i;
3707 u32 max_cmd = instance->max_mfi_cmds;
3708 struct megasas_cmd *cmd;
3709
3710 if (!instance->frame_dma_pool)
3711 return;
3712
3713 /*
3714 * Return all frames to pool
3715 */
3716 for (i = 0; i < max_cmd; i++) {
3717
3718 cmd = instance->cmd_list[i];
3719
3720 if (cmd->frame)
3721 pci_pool_free(instance->frame_dma_pool, cmd->frame,
3722 cmd->frame_phys_addr);
3723
3724 if (cmd->sense)
3725 pci_pool_free(instance->sense_dma_pool, cmd->sense,
3726 cmd->sense_phys_addr);
3727 }
3728
3729 /*
3730 * Now destroy the pool itself
3731 */
3732 pci_pool_destroy(instance->frame_dma_pool);
3733 pci_pool_destroy(instance->sense_dma_pool);
3734
3735 instance->frame_dma_pool = NULL;
3736 instance->sense_dma_pool = NULL;
3737 }
3738
3739 /**
3740 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3741 * @instance: Adapter soft state
3742 *
3743 * Each command packet has an embedded DMA memory buffer that is used for
3744 * filling the MFI frame and the SG list that immediately follows the frame.
3745 * This function creates those DMA memory buffers for each command packet
3746 * using the PCI pool facility.
3747 */
3748 static int megasas_create_frame_pool(struct megasas_instance *instance)
3749 {
3750 int i;
3751 u32 max_cmd;
3752 u32 sge_sz;
3753 u32 total_sz;
3754 u32 frame_count;
3755 struct megasas_cmd *cmd;
3756
3757 max_cmd = instance->max_mfi_cmds;
3758
3759 /*
3760 * Size of our frame is 64 bytes for the MFI frame, followed by max SG
3761 * elements; the sense buffers come from a separate sense DMA pool.
3762 */
3763 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3764 sizeof(struct megasas_sge32);
3765
3766 if (instance->flag_ieee)
3767 sge_sz = sizeof(struct megasas_sge_skinny);
3768
3769 /*
3770 * For MFI controllers:
3771 * max_num_sge = 60
3772 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
3773 * Total 960 bytes (15 MFI frames of 64 bytes each)
3774 *
3775 * Fusion adapters require only 3 extra frames:
3776 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3777 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
3778 * Total 192 bytes (3 MFI frames of 64 bytes each)
3779 */
3780 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3781 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
3782 /*
3783 * Use DMA pool facility provided by PCI layer
3784 */
3785 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
3786 instance->pdev, total_sz, 256, 0);
3787
3788 if (!instance->frame_dma_pool) {
3789 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3790 return -ENOMEM;
3791 }
3792
3793 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
3794 instance->pdev, 128, 4, 0);
3795
3796 if (!instance->sense_dma_pool) {
3797 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3798
3799 pci_pool_destroy(instance->frame_dma_pool);
3800 instance->frame_dma_pool = NULL;
3801
3802 return -ENOMEM;
3803 }
3804
3805 /*
3806 * Allocate and attach a frame to each of the commands in cmd_list.
3807 * By using cmd->index as the context instead of &cmd, we can
3808 * always use a 32-bit context regardless of the architecture.
3809 */
3810 for (i = 0; i < max_cmd; i++) {
3811
3812 cmd = instance->cmd_list[i];
3813
3814 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
3815 GFP_KERNEL, &cmd->frame_phys_addr);
3816
3817 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
3818 GFP_KERNEL, &cmd->sense_phys_addr);
3819
3820 /*
3821 * megasas_teardown_frame_pool() takes care of freeing
3822 * whatever has been allocated
3823 */
3824 if (!cmd->frame || !cmd->sense) {
3825 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
3826 megasas_teardown_frame_pool(instance);
3827 return -ENOMEM;
3828 }
3829
3830 memset(cmd->frame, 0, total_sz);
3831 cmd->frame->io.context = cpu_to_le32(cmd->index);
3832 cmd->frame->io.pad_0 = 0;
3833 if (!instance->ctrl_context && reset_devices)
3834 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3835 }
3836
3837 return 0;
3838 }
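
/*
 * Illustrative sketch only (not part of the driver): the frame_count chosen
 * above reserves one 64-byte frame for the MFI header plus enough additional
 * 64-byte frames to hold the worst-case SG list, e.g. 60 SGEs * 16 bytes =
 * 960 bytes = 15 frames for MFI controllers, or 16 SGEs * 12 bytes = 192
 * bytes = 3 frames for Fusion. A hypothetical helper expressing the same
 * arithmetic:
 */
static inline u32 megasas_example_frame_count(u32 max_num_sge, u32 sge_sz)
{
	/* SG list rounded up to whole MFI frames, plus one frame for the header */
	return DIV_ROUND_UP(max_num_sge * sge_sz, MEGAMFI_FRAME_SIZE) + 1;
}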
3839
3840 /**
3841 * megasas_free_cmds - Free all the cmds in the free cmd pool
3842 * @instance: Adapter soft state
3843 */
3844 void megasas_free_cmds(struct megasas_instance *instance)
3845 {
3846 int i;
3847
3848 /* First free the MFI frame pool */
3849 megasas_teardown_frame_pool(instance);
3850
3851 /* Free all the commands in the cmd_list */
3852 for (i = 0; i < instance->max_mfi_cmds; i++)
3853
3854 kfree(instance->cmd_list[i]);
3855
3856 /* Free the cmd_list buffer itself */
3857 kfree(instance->cmd_list);
3858 instance->cmd_list = NULL;
3859
3860 INIT_LIST_HEAD(&instance->cmd_pool);
3861 }
3862
3863 /**
3864 * megasas_alloc_cmds - Allocates the command packets
3865 * @instance: Adapter soft state
3866 *
3867 * Each command that is issued to the FW, whether an IO command from the OS
3868 * or an internal command like an IOCTL, is wrapped in a local data structure
3869 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
3870 * actually issued to the FW.
3871 *
3872 * Each frame has a 32-bit field called context (tag). This context is used
3873 * to get back the megasas_cmd from the frame when a frame gets completed in
3874 * the ISR. Typically the address of the megasas_cmd itself would be used as
3875 * the context. But we wanted to keep the differences between 32 and 64 bit
3876 * systems to the minimum. We always use 32-bit integers for the context. In
3877 * this driver, the 32-bit values are the indices into the array cmd_list.
3878 * This array is used only to look up the megasas_cmd given the context. The
3879 * free commands themselves are maintained in a linked list called cmd_pool.
3880 */
3881 int megasas_alloc_cmds(struct megasas_instance *instance)
3882 {
3883 int i;
3884 int j;
3885 u32 max_cmd;
3886 struct megasas_cmd *cmd;
3887 struct fusion_context *fusion;
3888
3889 fusion = instance->ctrl_context;
3890 max_cmd = instance->max_mfi_cmds;
3891
3892 /*
3893 * instance->cmd_list is an array of struct megasas_cmd pointers.
3894 * Allocate the dynamic array first and then allocate individual
3895 * commands.
3896 */
3897 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
3898
3899 if (!instance->cmd_list) {
3900 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
3901 return -ENOMEM;
3902 }
3903
3904 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
3905
3906 for (i = 0; i < max_cmd; i++) {
3907 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
3908 GFP_KERNEL);
3909
3910 if (!instance->cmd_list[i]) {
3911
3912 for (j = 0; j < i; j++)
3913 kfree(instance->cmd_list[j]);
3914
3915 kfree(instance->cmd_list);
3916 instance->cmd_list = NULL;
3917
3918 return -ENOMEM;
3919 }
3920 }
3921
3922 for (i = 0; i < max_cmd; i++) {
3923 cmd = instance->cmd_list[i];
3924 memset(cmd, 0, sizeof(struct megasas_cmd));
3925 cmd->index = i;
3926 cmd->scmd = NULL;
3927 cmd->instance = instance;
3928
3929 list_add_tail(&cmd->list, &instance->cmd_pool);
3930 }
3931
3932 /*
3933 * Create a frame pool and assign one frame to each cmd
3934 */
3935 if (megasas_create_frame_pool(instance)) {
3936 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
3937 megasas_free_cmds(instance);
return -ENOMEM;
3938 }
3939
3940 return 0;
3941 }
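
/*
 * Illustrative sketch only (not used by the driver): because the frame
 * context written in megasas_create_frame_pool() is cmd->index, completion
 * code can recover the megasas_cmd from a completed frame's 32-bit context
 * with a plain array lookup. A hypothetical helper:
 */
static inline struct megasas_cmd *
megasas_example_cmd_from_context(struct megasas_instance *instance, u32 context)
{
	if (context >= instance->max_mfi_cmds)
		return NULL;		/* malformed context from FW */
	return instance->cmd_list[context];
}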
3942
3943 /*
3944 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
3945 * @instance: Adapter soft state
3946 *
3947 * Returns INITIATE_OCR only for Fusion adapters, and only if driver
3948 * load/unload is not in progress and the FW is not already under OCR.
3949 */
3950 inline int
3951 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
3952
3953 if (!instance->ctrl_context)
3954 return KILL_ADAPTER;
3955 else if (instance->unload ||
3956 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
3957 return IGNORE_TIMEOUT;
3958 else
3959 return INITIATE_OCR;
3960 }
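
/*
 * Illustrative sketch only (not used by the driver): the DCMD_TIMEOUT
 * handling repeated in the helpers below acts on the verdict returned by
 * dcmd_timeout_ocr_possible(). Collected into one hypothetical routine it
 * reads roughly as follows (the real call sites also drop/retake reset_mutex
 * when invoked from the AEN path):
 */
static inline void
megasas_example_handle_dcmd_timeout(struct megasas_instance *instance,
				    struct megasas_cmd *cmd)
{
	switch (dcmd_timeout_ocr_possible(instance)) {
	case INITIATE_OCR:
		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
		break;
	case KILL_ADAPTER:
		megaraid_sas_kill_hba(instance);
		break;
	case IGNORE_TIMEOUT:
		dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
			 __func__, __LINE__);
		break;
	}
}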
3961
3962 static int
3963 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
3964 {
3965 int ret;
3966 struct megasas_cmd *cmd;
3967 struct megasas_dcmd_frame *dcmd;
3968
3969 cmd = megasas_get_cmd(instance);
3970
3971 if (!cmd) {
3972 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
3973 return -ENOMEM;
3974 }
3975
3976 dcmd = &cmd->frame->dcmd;
3977
3978 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
3979 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3980
3981 dcmd->mbox.s[0] = cpu_to_le16(device_id);
3982 dcmd->cmd = MFI_CMD_DCMD;
3983 dcmd->cmd_status = 0xFF;
3984 dcmd->sge_count = 1;
3985 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3986 dcmd->timeout = 0;
3987 dcmd->pad_0 = 0;
3988 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
3989 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
3990 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
3991 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
3992
3993 if (instance->ctrl_context && !instance->mask_interrupts)
3994 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
3995 else
3996 ret = megasas_issue_polled(instance, cmd);
3997
3998 switch (ret) {
3999 case DCMD_SUCCESS:
4000 instance->pd_list[device_id].interface =
4001 instance->pd_info->state.ddf.pdType.intf;
4002 break;
4003
4004 case DCMD_TIMEOUT:
4005
4006 switch (dcmd_timeout_ocr_possible(instance)) {
4007 case INITIATE_OCR:
4008 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4009 megasas_reset_fusion(instance->host,
4010 MFI_IO_TIMEOUT_OCR);
4011 break;
4012 case KILL_ADAPTER:
4013 megaraid_sas_kill_hba(instance);
4014 break;
4015 case IGNORE_TIMEOUT:
4016 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4017 __func__, __LINE__);
4018 break;
4019 }
4020
4021 break;
4022 }
4023
4024 if (ret != DCMD_TIMEOUT)
4025 megasas_return_cmd(instance, cmd);
4026
4027 return ret;
4028 }
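
/*
 * Illustrative sketch only (not used by the driver): the DCMD helpers in
 * this file all follow the same pattern - zero the mailbox, fill a
 * megasas_dcmd_frame with an opcode and a single READ SGE pointing at a DMA
 * buffer, then issue it blocked on Fusion (interrupts unmasked) or polled
 * otherwise. A condensed, hypothetical form of that pattern:
 */
static inline int
megasas_example_issue_read_dcmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd, u32 opcode,
				dma_addr_t buf_h, u32 len)
{
	struct megasas_dcmd_frame *dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->data_xfer_len = cpu_to_le32(len);
	dcmd->opcode = cpu_to_le32(opcode);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(buf_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(len);

	if (instance->ctrl_context && !instance->mask_interrupts)
		return megasas_issue_blocked_cmd(instance, cmd,
						 MFI_IO_TIMEOUT_SECS);
	return megasas_issue_polled(instance, cmd);
}
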
4029 /*
4030 * megasas_get_pd_list - Returns FW's pd_list structure
4031 * @instance: Adapter soft state
4032 *
4033 * Issues an internal command (DCMD) to get the FW's controller PD
4034 * list structure. This information is mainly used to find out the
4035 * SYSTEM physical drives exposed by the FW and to populate the
4036 * driver's local PD list.
4037 */
4038 static int
4039 megasas_get_pd_list(struct megasas_instance *instance)
4040 {
4041 int ret = 0, pd_index = 0;
4042 struct megasas_cmd *cmd;
4043 struct megasas_dcmd_frame *dcmd;
4044 struct MR_PD_LIST *ci;
4045 struct MR_PD_ADDRESS *pd_addr;
4046 dma_addr_t ci_h = 0;
4047
4048 cmd = megasas_get_cmd(instance);
4049
4050 if (!cmd) {
4051 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4052 return -ENOMEM;
4053 }
4054
4055 dcmd = &cmd->frame->dcmd;
4056
4057 ci = pci_alloc_consistent(instance->pdev,
4058 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4059
4060 if (!ci) {
4061 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4062 megasas_return_cmd(instance, cmd);
4063 return -ENOMEM;
4064 }
4065
4066 memset(ci, 0, sizeof(*ci));
4067 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4068
4069 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4070 dcmd->mbox.b[1] = 0;
4071 dcmd->cmd = MFI_CMD_DCMD;
4072 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4073 dcmd->sge_count = 1;
4074 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4075 dcmd->timeout = 0;
4076 dcmd->pad_0 = 0;
4077 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4078 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4079 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4080 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4081
4082 if (instance->ctrl_context && !instance->mask_interrupts)
4083 ret = megasas_issue_blocked_cmd(instance, cmd,
4084 MFI_IO_TIMEOUT_SECS);
4085 else
4086 ret = megasas_issue_polled(instance, cmd);
4087
4088 switch (ret) {
4089 case DCMD_FAILED:
4090 megaraid_sas_kill_hba(instance);
4091 break;
4092 case DCMD_TIMEOUT:
4093
4094 switch (dcmd_timeout_ocr_possible(instance)) {
4095 case INITIATE_OCR:
4096 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4097 /*
4098 * DCMD failed from the AEN path.
4099 * The AEN path already holds reset_mutex to avoid PCI access
4100 * while OCR is in progress.
4101 */
4102 mutex_unlock(&instance->reset_mutex);
4103 megasas_reset_fusion(instance->host,
4104 MFI_IO_TIMEOUT_OCR);
4105 mutex_lock(&instance->reset_mutex);
4106 break;
4107 case KILL_ADAPTER:
4108 megaraid_sas_kill_hba(instance);
4109 break;
4110 case IGNORE_TIMEOUT:
4111 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4112 __func__, __LINE__);
4113 break;
4114 }
4115
4116 break;
4117
4118 case DCMD_SUCCESS:
4119 pd_addr = ci->addr;
4120
4121 if ((le32_to_cpu(ci->count) >
4122 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4123 break;
4124
4125 memset(instance->local_pd_list, 0,
4126 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4127
4128 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4129 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4130 le16_to_cpu(pd_addr->deviceId);
4131 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4132 pd_addr->scsiDevType;
4133 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4134 MR_PD_STATE_SYSTEM;
4135 pd_addr++;
4136 }
4137
4138 memcpy(instance->pd_list, instance->local_pd_list,
4139 sizeof(instance->pd_list));
4140 break;
4141
4142 }
4143
4144 pci_free_consistent(instance->pdev,
4145 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4146 ci, ci_h);
4147
4148 if (ret != DCMD_TIMEOUT)
4149 megasas_return_cmd(instance, cmd);
4150
4151 return ret;
4152 }
4153
4154 /*
4155 * megasas_get_ld_list - Returns FW's ld_list structure
4156 * @instance: Adapter soft state
4157 *
4158 * Issues an internal command (DCMD) to get the FW's controller LD
4159 * list structure. This information is used to populate the driver's
4160 * list of logical drive target IDs (ld_ids) exported by the FW, and
4161 * serves as the fallback when the LD list query DCMD is unsupported.
4162 */
4163 static int
4164 megasas_get_ld_list(struct megasas_instance *instance)
4165 {
4166 int ret = 0, ld_index = 0, ids = 0;
4167 struct megasas_cmd *cmd;
4168 struct megasas_dcmd_frame *dcmd;
4169 struct MR_LD_LIST *ci;
4170 dma_addr_t ci_h = 0;
4171 u32 ld_count;
4172
4173 cmd = megasas_get_cmd(instance);
4174
4175 if (!cmd) {
4176 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4177 return -ENOMEM;
4178 }
4179
4180 dcmd = &cmd->frame->dcmd;
4181
4182 ci = pci_alloc_consistent(instance->pdev,
4183 sizeof(struct MR_LD_LIST),
4184 &ci_h);
4185
4186 if (!ci) {
4187 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4188 megasas_return_cmd(instance, cmd);
4189 return -ENOMEM;
4190 }
4191
4192 memset(ci, 0, sizeof(*ci));
4193 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4194
4195 if (instance->supportmax256vd)
4196 dcmd->mbox.b[0] = 1;
4197 dcmd->cmd = MFI_CMD_DCMD;
4198 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4199 dcmd->sge_count = 1;
4200 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4201 dcmd->timeout = 0;
4202 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4203 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4204 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4205 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4206 dcmd->pad_0 = 0;
4207
4208 if (instance->ctrl_context && !instance->mask_interrupts)
4209 ret = megasas_issue_blocked_cmd(instance, cmd,
4210 MFI_IO_TIMEOUT_SECS);
4211 else
4212 ret = megasas_issue_polled(instance, cmd);
4213
4214 ld_count = le32_to_cpu(ci->ldCount);
4215
4216 switch (ret) {
4217 case DCMD_FAILED:
4218 megaraid_sas_kill_hba(instance);
4219 break;
4220 case DCMD_TIMEOUT:
4221
4222 switch (dcmd_timeout_ocr_possible(instance)) {
4223 case INITIATE_OCR:
4224 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4225 /*
4226 * DCMD failed from the AEN path.
4227 * The AEN path already holds reset_mutex to avoid PCI access
4228 * while OCR is in progress.
4229 */
4230 mutex_unlock(&instance->reset_mutex);
4231 megasas_reset_fusion(instance->host,
4232 MFI_IO_TIMEOUT_OCR);
4233 mutex_lock(&instance->reset_mutex);
4234 break;
4235 case KILL_ADAPTER:
4236 megaraid_sas_kill_hba(instance);
4237 break;
4238 case IGNORE_TIMEOUT:
4239 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4240 __func__, __LINE__);
4241 break;
4242 }
4243
4244 break;
4245
4246 case DCMD_SUCCESS:
4247 if (ld_count > instance->fw_supported_vd_count)
4248 break;
4249
4250 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4251
4252 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4253 if (ci->ldList[ld_index].state != 0) {
4254 ids = ci->ldList[ld_index].ref.targetId;
4255 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4256 }
4257 }
4258
4259 break;
4260 }
4261
4262 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4263
4264 if (ret != DCMD_TIMEOUT)
4265 megasas_return_cmd(instance, cmd);
4266
4267 return ret;
4268 }
4269
4270 /**
4271 * megasas_ld_list_query - Returns FW's LD target ID list
4272 * @instance: Adapter soft state
4273 * @query_type: LD query type, e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST
4274 *
4275 * Issues an internal command (DCMD) to query the FW for the list of
4276 * LD target IDs. Falls back to megasas_get_ld_list() if the query
4277 * DCMD is not supported by the firmware.
4278 */
4279 static int
4280 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4281 {
4282 int ret = 0, ld_index = 0, ids = 0;
4283 struct megasas_cmd *cmd;
4284 struct megasas_dcmd_frame *dcmd;
4285 struct MR_LD_TARGETID_LIST *ci;
4286 dma_addr_t ci_h = 0;
4287 u32 tgtid_count;
4288
4289 cmd = megasas_get_cmd(instance);
4290
4291 if (!cmd) {
4292 dev_warn(&instance->pdev->dev,
4293 "megasas_ld_list_query: Failed to get cmd\n");
4294 return -ENOMEM;
4295 }
4296
4297 dcmd = &cmd->frame->dcmd;
4298
4299 ci = pci_alloc_consistent(instance->pdev,
4300 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4301
4302 if (!ci) {
4303 dev_warn(&instance->pdev->dev,
4304 "Failed to alloc mem for ld_list_query\n");
4305 megasas_return_cmd(instance, cmd);
4306 return -ENOMEM;
4307 }
4308
4309 memset(ci, 0, sizeof(*ci));
4310 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4311
4312 dcmd->mbox.b[0] = query_type;
4313 if (instance->supportmax256vd)
4314 dcmd->mbox.b[2] = 1;
4315
4316 dcmd->cmd = MFI_CMD_DCMD;
4317 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4318 dcmd->sge_count = 1;
4319 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4320 dcmd->timeout = 0;
4321 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4322 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4323 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4324 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4325 dcmd->pad_0 = 0;
4326
4327 if (instance->ctrl_context && !instance->mask_interrupts)
4328 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4329 else
4330 ret = megasas_issue_polled(instance, cmd);
4331
4332 switch (ret) {
4333 case DCMD_FAILED:
4334 dev_info(&instance->pdev->dev,
4335 "DCMD not supported by firmware - %s %d\n",
4336 __func__, __LINE__);
4337 ret = megasas_get_ld_list(instance);
4338 break;
4339 case DCMD_TIMEOUT:
4340 switch (dcmd_timeout_ocr_possible(instance)) {
4341 case INITIATE_OCR:
4342 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4343 /*
4344 * DCMD failed from the AEN path.
4345 * The AEN path already holds reset_mutex to avoid PCI access
4346 * while OCR is in progress.
4347 */
4348 mutex_unlock(&instance->reset_mutex);
4349 megasas_reset_fusion(instance->host,
4350 MFI_IO_TIMEOUT_OCR);
4351 mutex_lock(&instance->reset_mutex);
4352 break;
4353 case KILL_ADAPTER:
4354 megaraid_sas_kill_hba(instance);
4355 break;
4356 case IGNORE_TIMEOUT:
4357 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4358 __func__, __LINE__);
4359 break;
4360 }
4361
4362 break;
4363 case DCMD_SUCCESS:
4364 tgtid_count = le32_to_cpu(ci->count);
4365
4366 if ((tgtid_count > (instance->fw_supported_vd_count)))
4367 break;
4368
4369 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4370 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4371 ids = ci->targetId[ld_index];
4372 instance->ld_ids[ids] = ci->targetId[ld_index];
4373 }
4374
4375 break;
4376 }
4377
4378 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4379 ci, ci_h);
4380
4381 if (ret != DCMD_TIMEOUT)
4382 megasas_return_cmd(instance, cmd);
4383
4384 return ret;
4385 }
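
/*
 * Illustrative sketch only (not used by the driver): callers refresh ld_ids
 * by trying the target ID query first and falling back to the full LD list
 * when the query DCMD is unsupported, mirroring the init-path usage later in
 * this file. Hypothetical helper:
 */
static inline void
megasas_example_refresh_ld_ids(struct megasas_instance *instance)
{
	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
	if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
		megasas_get_ld_list(instance);
}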
4386
4387 /*
4388 * megasas_update_ext_vd_details - Update details w.r.t. extended VD support
4389 * @instance: Controller's soft state
4390 */
4391 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4392 {
4393 struct fusion_context *fusion;
4394 u32 old_map_sz;
4395 u32 new_map_sz;
4396
4397 fusion = instance->ctrl_context;
4398 /* For MFI based controllers, there is nothing to update; just return */
4399 if (!fusion)
4400 return;
4401
4402 instance->supportmax256vd =
4403 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4404 /* Below is an additional check to address future FW enhancements */
4405 if (instance->ctrl_info->max_lds > 64)
4406 instance->supportmax256vd = 1;
4407
4408 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4409 * MEGASAS_MAX_DEV_PER_CHANNEL;
4410 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4411 * MEGASAS_MAX_DEV_PER_CHANNEL;
4412 if (instance->supportmax256vd) {
4413 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4414 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4415 } else {
4416 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4417 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4418 }
4419
4420 dev_info(&instance->pdev->dev,
4421 "firmware type\t: %s\n",
4422 instance->supportmax256vd ? "Extended VD(240 VD) firmware" :
4423 "Legacy(64 VD) firmware");
4424
4425 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4426 (sizeof(struct MR_LD_SPAN_MAP) *
4427 (instance->fw_supported_vd_count - 1));
4428 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4429 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
4430 (sizeof(struct MR_LD_SPAN_MAP) *
4431 (instance->drv_supported_vd_count - 1));
4432
4433 fusion->max_map_sz = max(old_map_sz, new_map_sz);
4434
4435
4436 if (instance->supportmax256vd)
4437 fusion->current_map_sz = new_map_sz;
4438 else
4439 fusion->current_map_sz = old_map_sz;
4440 }
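
/*
 * Illustrative sketch only (not used by the driver): the firmware map sizing
 * above reduces to "legacy map sized by fw_supported_vd_count" versus the
 * fixed-size extended map, depending on what the firmware reports.
 * Hypothetical helper showing the same selection:
 */
static inline u32 megasas_example_fw_map_sz(struct megasas_instance *instance)
{
	if (instance->supportmax256vd)
		return sizeof(struct MR_FW_RAID_MAP_EXT);

	return sizeof(struct MR_FW_RAID_MAP) +
		(sizeof(struct MR_LD_SPAN_MAP) *
		 (instance->fw_supported_vd_count - 1));
}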
4441
4442 /**
4443 * megasas_get_ctrl_info - Returns FW's controller structure
4444 * @instance: Adapter soft state
4445 *
4446 * Issues an internal command (DCMD) to get the FW's controller structure.
4447 * This information is mainly used to find out the maximum IO transfer per
4448 * command supported by the FW.
4449 */
4450 int
4451 megasas_get_ctrl_info(struct megasas_instance *instance)
4452 {
4453 int ret = 0;
4454 struct megasas_cmd *cmd;
4455 struct megasas_dcmd_frame *dcmd;
4456 struct megasas_ctrl_info *ci;
4457 struct megasas_ctrl_info *ctrl_info;
4458 dma_addr_t ci_h = 0;
4459
4460 ctrl_info = instance->ctrl_info;
4461
4462 cmd = megasas_get_cmd(instance);
4463
4464 if (!cmd) {
4465 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4466 return -ENOMEM;
4467 }
4468
4469 dcmd = &cmd->frame->dcmd;
4470
4471 ci = pci_alloc_consistent(instance->pdev,
4472 sizeof(struct megasas_ctrl_info), &ci_h);
4473
4474 if (!ci) {
4475 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4476 megasas_return_cmd(instance, cmd);
4477 return -ENOMEM;
4478 }
4479
4480 memset(ci, 0, sizeof(*ci));
4481 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4482
4483 dcmd->cmd = MFI_CMD_DCMD;
4484 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4485 dcmd->sge_count = 1;
4486 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4487 dcmd->timeout = 0;
4488 dcmd->pad_0 = 0;
4489 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4490 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4491 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4492 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4493 dcmd->mbox.b[0] = 1;
4494
4495 if (instance->ctrl_context && !instance->mask_interrupts)
4496 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4497 else
4498 ret = megasas_issue_polled(instance, cmd);
4499
4500 switch (ret) {
4501 case DCMD_SUCCESS:
4502 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4503 /* Save required controller information in
4504 * CPU endianness format.
4505 */
4506 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4507 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4508 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4509
4510 /* Update the latest Ext VD info.
4511 * From the init path, store the current firmware details.
4512 * From the OCR path, detect any firmware property changes
4513 * (e.g. a firmware upgrade without a system reboot).
4514 */
4515 megasas_update_ext_vd_details(instance);
4516 instance->use_seqnum_jbod_fp =
4517 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4518
4519 /* Check whether controller is iMR or MR */
4520 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4521 dev_info(&instance->pdev->dev,
4522 "controller type\t: %s(%dMB)\n",
4523 instance->is_imr ? "iMR" : "MR",
4524 le16_to_cpu(ctrl_info->memory_size));
4525
4526 instance->disableOnlineCtrlReset =
4527 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4528 instance->secure_jbod_support =
4529 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4530 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4531 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4532 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4533 instance->secure_jbod_support ? "Yes" : "No");
4534 break;
4535
4536 case DCMD_TIMEOUT:
4537 switch (dcmd_timeout_ocr_possible(instance)) {
4538 case INITIATE_OCR:
4539 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4540 megasas_reset_fusion(instance->host,
4541 MFI_IO_TIMEOUT_OCR);
4542 break;
4543 case KILL_ADAPTER:
4544 megaraid_sas_kill_hba(instance);
4545 break;
4546 case IGNORE_TIMEOUT:
4547 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4548 __func__, __LINE__);
4549 break;
4550 }
break;
4551 case DCMD_FAILED:
4552 megaraid_sas_kill_hba(instance);
4553 break;
4554
4555 }
4556
4557 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4558 ci, ci_h);
4559
4560 megasas_return_cmd(instance, cmd);
4561
4562
4563 return ret;
4564 }
4565
4566 /*
4567 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4568 * to firmware
4569 *
4570 * @instance: Adapter soft state
4571 * @crash_buf_state: tell FW to turn ON/OFF crash dump feature
4572 *     MR_CRASH_BUF_TURN_OFF = 0
4573 *     MR_CRASH_BUF_TURN_ON = 1
4574 * @return: 0 on success, non-zero on failure.
4575 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4576 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4577 * that driver supports crash dump feature. This DCMD will be sent only if
4578 * crash dump feature is supported by the FW.
4579 *
4580 */
4581 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4582 u8 crash_buf_state)
4583 {
4584 int ret = 0;
4585 struct megasas_cmd *cmd;
4586 struct megasas_dcmd_frame *dcmd;
4587
4588 cmd = megasas_get_cmd(instance);
4589
4590 if (!cmd) {
4591 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4592 return -ENOMEM;
4593 }
4594
4595
4596 dcmd = &cmd->frame->dcmd;
4597
4598 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4599 dcmd->mbox.b[0] = crash_buf_state;
4600 dcmd->cmd = MFI_CMD_DCMD;
4601 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4602 dcmd->sge_count = 1;
4603 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4604 dcmd->timeout = 0;
4605 dcmd->pad_0 = 0;
4606 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4607 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4608 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4609 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4610
4611 if (instance->ctrl_context && !instance->mask_interrupts)
4612 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4613 else
4614 ret = megasas_issue_polled(instance, cmd);
4615
4616 if (ret == DCMD_TIMEOUT) {
4617 switch (dcmd_timeout_ocr_possible(instance)) {
4618 case INITIATE_OCR:
4619 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4620 megasas_reset_fusion(instance->host,
4621 MFI_IO_TIMEOUT_OCR);
4622 break;
4623 case KILL_ADAPTER:
4624 megaraid_sas_kill_hba(instance);
4625 break;
4626 case IGNORE_TIMEOUT:
4627 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4628 __func__, __LINE__);
4629 break;
4630 }
4631 } else
4632 megasas_return_cmd(instance, cmd);
4633
4634 return ret;
4635 }
4636
4637 /**
4638 * megasas_issue_init_mfi - Initializes the FW
4639 * @instance: Adapter soft state
4640 *
4641 * Issues the INIT MFI cmd
4642 */
4643 static int
4644 megasas_issue_init_mfi(struct megasas_instance *instance)
4645 {
4646 __le32 context;
4647 struct megasas_cmd *cmd;
4648 struct megasas_init_frame *init_frame;
4649 struct megasas_init_queue_info *initq_info;
4650 dma_addr_t init_frame_h;
4651 dma_addr_t initq_info_h;
4652
4653 /*
4654 * Prepare an init frame. Note the init frame points to the queue info
4655 * structure. Each frame has an SGL allocated after the first 64 bytes. For
4656 * this frame - since we don't need any SGL - we use the SGL's space as the
4657 * queue info structure.
4658 *
4659 * We will not get a NULL command below. We just created the pool.
4660 */
4661 cmd = megasas_get_cmd(instance);
4662
4663 init_frame = (struct megasas_init_frame *)cmd->frame;
4664 initq_info = (struct megasas_init_queue_info *)
4665 ((unsigned long)init_frame + 64);
4666
4667 init_frame_h = cmd->frame_phys_addr;
4668 initq_info_h = init_frame_h + 64;
4669
4670 context = init_frame->context;
4671 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4672 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4673 init_frame->context = context;
4674
4675 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4676 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4677
4678 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4679 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4680
4681 init_frame->cmd = MFI_CMD_INIT;
4682 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4683 init_frame->queue_info_new_phys_addr_lo =
4684 cpu_to_le32(lower_32_bits(initq_info_h));
4685 init_frame->queue_info_new_phys_addr_hi =
4686 cpu_to_le32(upper_32_bits(initq_info_h));
4687
4688 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4689
4690 /*
4691 * disable the intr before firing the init frame to FW
4692 */
4693 instance->instancet->disable_intr(instance);
4694
4695 /*
4696 * Issue the init frame in polled mode
4697 */
4698
4699 if (megasas_issue_polled(instance, cmd)) {
4700 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4701 megasas_return_cmd(instance, cmd);
4702 goto fail_fw_init;
4703 }
4704
4705 megasas_return_cmd(instance, cmd);
4706
4707 return 0;
4708
4709 fail_fw_init:
4710 return -EINVAL;
4711 }
4712
4713 static u32
4714 megasas_init_adapter_mfi(struct megasas_instance *instance)
4715 {
4716 struct megasas_register_set __iomem *reg_set;
4717 u32 context_sz;
4718 u32 reply_q_sz;
4719
4720 reg_set = instance->reg_set;
4721
4722 /*
4723 * Get various operational parameters from status register
4724 */
4725 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4726 /*
4727 * Reduce the max supported cmds by 1. This is to ensure that the
4728 * reply_q_sz (1 more than the max cmds that the driver may send)
4729 * does not exceed the max cmds that the FW can support.
4730 */
4731 instance->max_fw_cmds = instance->max_fw_cmds-1;
4732 instance->max_mfi_cmds = instance->max_fw_cmds;
4733 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4734 0x10;
4735 /*
4736 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4737 * are reserved for IOCTL + driver's internal DCMDs.
4738 */
4739 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4740 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4741 instance->max_scsi_cmds = (instance->max_fw_cmds -
4742 MEGASAS_SKINNY_INT_CMDS);
4743 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4744 } else {
4745 instance->max_scsi_cmds = (instance->max_fw_cmds -
4746 MEGASAS_INT_CMDS);
4747 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4748 }
4749
4750 instance->cur_can_queue = instance->max_scsi_cmds;
4751 /*
4752 * Create a pool of commands
4753 */
4754 if (megasas_alloc_cmds(instance))
4755 goto fail_alloc_cmds;
4756
4757 /*
4758 * Allocate memory for reply queue. Length of reply queue should
4759 * be _one_ more than the maximum commands handled by the firmware.
4760 *
4761 * Note: When FW completes commands, it places the corresponding context
4762 * values in this circular reply queue. This circular queue is a fairly
4763 * typical producer-consumer queue. FW is the producer (of completed
4764 * commands) and the driver is the consumer.
4765 */
4766 context_sz = sizeof(u32);
4767 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4768
4769 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4770 reply_q_sz,
4771 &instance->reply_queue_h);
4772
4773 if (!instance->reply_queue) {
4774 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4775 goto fail_reply_queue;
4776 }
4777
4778 if (megasas_issue_init_mfi(instance))
4779 goto fail_fw_init;
4780
4781 if (megasas_get_ctrl_info(instance)) {
4782 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4783 "Fail from %s %d\n", instance->unique_id,
4784 __func__, __LINE__);
4785 goto fail_fw_init;
4786 }
4787
4788 instance->fw_support_ieee = 0;
4789 instance->fw_support_ieee =
4790 (instance->instancet->read_fw_status_reg(reg_set) &
4791 0x04000000);
4792
4793 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4794 instance->fw_support_ieee);
4795
4796 if (instance->fw_support_ieee)
4797 instance->flag_ieee = 1;
4798
4799 return 0;
4800
4801 fail_fw_init:
4802
4803 pci_free_consistent(instance->pdev, reply_q_sz,
4804 instance->reply_queue, instance->reply_queue_h);
4805 fail_reply_queue:
4806 megasas_free_cmds(instance);
4807
4808 fail_alloc_cmds:
4809 return 1;
4810 }
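
/*
 * Illustrative sketch only (not used by the driver): the reply queue set up
 * above is a circular buffer of 32-bit contexts with FW as producer and the
 * driver as consumer. Assuming the driver's producer/consumer index pointers
 * (instance->producer / instance->consumer, allocated elsewhere in this
 * driver), the completion path conceptually drains it like this:
 */
static inline void
megasas_example_drain_reply_queue(struct megasas_instance *instance)
{
	u32 producer = le32_to_cpu(*instance->producer);
	u32 consumer = le32_to_cpu(*instance->consumer);

	while (consumer != producer) {
		u32 context = le32_to_cpu(instance->reply_queue[consumer]);

		/* context is the cmd_list index; the real driver completes
		 * instance->cmd_list[context] here in its tasklet/ISR path.
		 */
		if (++consumer == (instance->max_fw_cmds + 1))
			consumer = 0;
	}
	*instance->consumer = cpu_to_le32(producer);
}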
4811
4812 /*
4813 * megasas_setup_irqs_ioapic - register legacy (IO-APIC) interrupt.
4814 * @instance: Adapter soft state
4815 *
4816 * Does not enable the interrupt, only sets up the ISR.
4817 *
4818 * Return 0 on success.
4819 */
4820 static int
4821 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4822 {
4823 struct pci_dev *pdev;
4824
4825 pdev = instance->pdev;
4826 instance->irq_context[0].instance = instance;
4827 instance->irq_context[0].MSIxIndex = 0;
4828 if (request_irq(pdev->irq, instance->instancet->service_isr,
4829 IRQF_SHARED, "megasas", &instance->irq_context[0])) {
4830 dev_err(&instance->pdev->dev,
4831 "Failed to register IRQ from %s %d\n",
4832 __func__, __LINE__);
4833 return -1;
4834 }
4835 return 0;
4836 }
4837
4838 /**
4839 * megasas_setup_irqs_msix - register MSI-x interrupts.
4840 * @instance: Adapter soft state
4841 * @is_probe: Driver probe check
4842 *
4843 * Does not enable interrupts, only sets up the ISRs.
4844 *
4845 * Return 0 on success.
4846 */
4847 static int
4848 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4849 {
4850 int i, j, cpu;
4851 struct pci_dev *pdev;
4852
4853 pdev = instance->pdev;
4854
4855 /* Try MSI-x */
4856 cpu = cpumask_first(cpu_online_mask);
4857 for (i = 0; i < instance->msix_vectors; i++) {
4858 instance->irq_context[i].instance = instance;
4859 instance->irq_context[i].MSIxIndex = i;
4860 if (request_irq(instance->msixentry[i].vector,
4861 instance->instancet->service_isr, 0, "megasas",
4862 &instance->irq_context[i])) {
4863 dev_err(&instance->pdev->dev,
4864 "Failed to register IRQ for vector %d.\n", i);
4865 for (j = 0; j < i; j++) {
4866 if (smp_affinity_enable)
4867 irq_set_affinity_hint(
4868 instance->msixentry[j].vector, NULL);
4869 free_irq(instance->msixentry[j].vector,
4870 &instance->irq_context[j]);
4871 }
4872 /* Retry irq register for IO_APIC */
4873 instance->msix_vectors = 0;
4874 if (is_probe)
4875 return megasas_setup_irqs_ioapic(instance);
4876 else
4877 return -1;
4878 }
4879 if (smp_affinity_enable) {
4880 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4881 get_cpu_mask(cpu)))
4882 dev_err(&instance->pdev->dev,
4883 "Failed to set affinity hint"
4884 " for cpu %d\n", cpu);
4885 cpu = cpumask_next(cpu, cpu_online_mask);
4886 }
4887 }
4888 return 0;
4889 }
4890
4891 /*
4892 * megasas_destroy_irqs - unregister interrupts.
4893 * @instance: Adapter soft state
4894 * return: void
4895 */
4896 static void
4897 megasas_destroy_irqs(struct megasas_instance *instance) {
4898
4899 int i;
4900
4901 if (instance->msix_vectors)
4902 for (i = 0; i < instance->msix_vectors; i++) {
4903 if (smp_affinity_enable)
4904 irq_set_affinity_hint(
4905 instance->msixentry[i].vector, NULL);
4906 free_irq(instance->msixentry[i].vector,
4907 &instance->irq_context[i]);
4908 }
4909 else
4910 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4911 }
4912
4913 /**
4914 * megasas_setup_jbod_map - setup JBOD map for FastPath seq_number.
4915 * @instance: Adapter soft state
4916 *
4917 * Allocates the PD sequence-number sync buffers (if needed) and syncs
4918 * them with the FW; on any failure, JBOD sequence-number FastPath is disabled.
4919 */
4920 void
4921 megasas_setup_jbod_map(struct megasas_instance *instance)
4922 {
4923 int i;
4924 struct fusion_context *fusion = instance->ctrl_context;
4925 u32 pd_seq_map_sz;
4926
4927 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
4928 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
4929
4930 if (reset_devices || !fusion ||
4931 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
4932 dev_info(&instance->pdev->dev,
4933 "Jbod map is not supported %s %d\n",
4934 __func__, __LINE__);
4935 instance->use_seqnum_jbod_fp = false;
4936 return;
4937 }
4938
4939 if (fusion->pd_seq_sync[0])
4940 goto skip_alloc;
4941
4942 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
4943 fusion->pd_seq_sync[i] = dma_alloc_coherent
4944 (&instance->pdev->dev, pd_seq_map_sz,
4945 &fusion->pd_seq_phys[i], GFP_KERNEL);
4946 if (!fusion->pd_seq_sync[i]) {
4947 dev_err(&instance->pdev->dev,
4948 "Failed to allocate memory from %s %d\n",
4949 __func__, __LINE__);
4950 if (i == 1) {
4951 dma_free_coherent(&instance->pdev->dev,
4952 pd_seq_map_sz, fusion->pd_seq_sync[0],
4953 fusion->pd_seq_phys[0]);
4954 fusion->pd_seq_sync[0] = NULL;
4955 }
4956 instance->use_seqnum_jbod_fp = false;
4957 return;
4958 }
4959 }
4960
4961 skip_alloc:
4962 if (!megasas_sync_pd_seq_num(instance, false) &&
4963 !megasas_sync_pd_seq_num(instance, true))
4964 instance->use_seqnum_jbod_fp = true;
4965 else
4966 instance->use_seqnum_jbod_fp = false;
4967 }
4968
4969 /**
4970 * megasas_init_fw - Initializes the FW
4971 * @instance: Adapter soft state
4972 *
4973 * This is the main function for initializing firmware
4974 */
4975
4976 static int megasas_init_fw(struct megasas_instance *instance)
4977 {
4978 u32 max_sectors_1;
4979 u32 max_sectors_2;
4980 u32 tmp_sectors, msix_enable, scratch_pad_2;
4981 resource_size_t base_addr;
4982 struct megasas_register_set __iomem *reg_set;
4983 struct megasas_ctrl_info *ctrl_info = NULL;
4984 unsigned long bar_list;
4985 int i, loop, fw_msix_count = 0;
4986 struct IOV_111 *iovPtr;
4987 struct fusion_context *fusion;
4988
4989 fusion = instance->ctrl_context;
4990
4991 /* Find first memory bar */
4992 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
4993 instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
4994 if (pci_request_selected_regions(instance->pdev, instance->bar,
4995 "megasas: LSI")) {
4996 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
4997 return -EBUSY;
4998 }
4999
5000 base_addr = pci_resource_start(instance->pdev, instance->bar);
5001 instance->reg_set = ioremap_nocache(base_addr, 8192);
5002
5003 if (!instance->reg_set) {
5004 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5005 goto fail_ioremap;
5006 }
5007
5008 reg_set = instance->reg_set;
5009
5010 switch (instance->pdev->device) {
5011 case PCI_DEVICE_ID_LSI_FUSION:
5012 case PCI_DEVICE_ID_LSI_PLASMA:
5013 case PCI_DEVICE_ID_LSI_INVADER:
5014 case PCI_DEVICE_ID_LSI_FURY:
5015 case PCI_DEVICE_ID_LSI_INTRUDER:
5016 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5017 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5018 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5019 instance->instancet = &megasas_instance_template_fusion;
5020 break;
5021 case PCI_DEVICE_ID_LSI_SAS1078R:
5022 case PCI_DEVICE_ID_LSI_SAS1078DE:
5023 instance->instancet = &megasas_instance_template_ppc;
5024 break;
5025 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5026 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5027 instance->instancet = &megasas_instance_template_gen2;
5028 break;
5029 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5030 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5031 instance->instancet = &megasas_instance_template_skinny;
5032 break;
5033 case PCI_DEVICE_ID_LSI_SAS1064R:
5034 case PCI_DEVICE_ID_DELL_PERC5:
5035 default:
5036 instance->instancet = &megasas_instance_template_xscale;
5037 instance->allow_fw_scan = 1;
5038 break;
5039 }
5040
5041 if (megasas_transition_to_ready(instance, 0)) {
5042 atomic_set(&instance->fw_reset_no_pci_access, 1);
5043 instance->instancet->adp_reset
5044 (instance, instance->reg_set);
5045 atomic_set(&instance->fw_reset_no_pci_access, 0);
5046 dev_info(&instance->pdev->dev,
5047 "FW restarted successfully from %s!\n",
5048 __func__);
5049
5050 /* wait for about 30 seconds before retry */
5051 ssleep(30);
5052
5053 if (megasas_transition_to_ready(instance, 0))
5054 goto fail_ready_state;
5055 }
5056
5057 /*
5058 * MSI-X host index 0 is common for all adapters.
5059 * It is used for all MPT based adapters.
5060 */
5061 instance->reply_post_host_index_addr[0] =
5062 (u32 __iomem *)((u8 __iomem *)instance->reg_set +
5063 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5064
5065 /* Check if MSI-X is supported while in ready state */
5066 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5067 0x4000000) >> 0x1a;
5068 if (msix_enable && !msix_disable) {
5069 scratch_pad_2 = readl
5070 (&instance->reg_set->outbound_scratch_pad_2);
5071 /* Check max MSI-X vectors */
5072 if (fusion) {
5073 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5074 instance->msix_vectors = (scratch_pad_2
5075 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5076 fw_msix_count = instance->msix_vectors;
5077 } else { /* Invader series supports more than 8 MSI-x vectors*/
5078 instance->msix_vectors = ((scratch_pad_2
5079 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5080 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5081 if (rdpq_enable)
5082 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5083 1 : 0;
5084 fw_msix_count = instance->msix_vectors;
5085 /* Save reply post host index addresses 1-15 to local memory.
5086 * Index 0 is already saved from reg offset
5087 * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
5088 */
5089 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5090 instance->reply_post_host_index_addr[loop] =
5091 (u32 __iomem *)
5092 ((u8 __iomem *)instance->reg_set +
5093 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5094 + (loop * 0x10));
5095 }
5096 }
5097 if (msix_vectors)
5098 instance->msix_vectors = min(msix_vectors,
5099 instance->msix_vectors);
5100 } else /* MFI adapters */
5101 instance->msix_vectors = 1;
5102 /* Don't bother allocating more MSI-X vectors than cpus */
5103 instance->msix_vectors = min(instance->msix_vectors,
5104 (unsigned int)num_online_cpus());
5105 for (i = 0; i < instance->msix_vectors; i++)
5106 instance->msixentry[i].entry = i;
5107 i = pci_enable_msix_range(instance->pdev, instance->msixentry,
5108 1, instance->msix_vectors);
5109 if (i > 0)
5110 instance->msix_vectors = i;
5111 else
5112 instance->msix_vectors = 0;
5113 }
5114
5115 dev_info(&instance->pdev->dev,
5116 "firmware supports msix\t: (%d)", fw_msix_count);
5117 dev_info(&instance->pdev->dev,
5118 "current msix/online cpus\t: (%d/%d)\n",
5119 instance->msix_vectors, (unsigned int)num_online_cpus());
5120 dev_info(&instance->pdev->dev,
5121 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5122
5123 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5124 (unsigned long)instance);
5125
5126 if (instance->msix_vectors ?
5127 megasas_setup_irqs_msix(instance, 1) :
5128 megasas_setup_irqs_ioapic(instance))
5129 goto fail_setup_irqs;
5130
5131 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5132 GFP_KERNEL);
5133 if (instance->ctrl_info == NULL)
5134 goto fail_init_adapter;
5135
5136 /*
5137 * Below are the default values for legacy firmware
5138 * (non-Fusion based controllers).
5139 */
5140 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5141 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5142 /* Get operational params, sge flags, send init cmd to controller */
5143 if (instance->instancet->init_adapter(instance))
5144 goto fail_init_adapter;
5145
5146
5147 instance->instancet->enable_intr(instance);
5148
5149 dev_err(&instance->pdev->dev, "INIT adapter done\n");
5150
5151 megasas_setup_jbod_map(instance);
5152
5153 /*
5154 * For passthrough, the following call retrieves the PD list.
5155 */
5156 memset(instance->pd_list, 0,
5157 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5158 if (megasas_get_pd_list(instance) < 0) {
5159 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5160 goto fail_get_pd_list;
5161 }
5162
5163 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5164 if (megasas_ld_list_query(instance,
5165 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5166 megasas_get_ld_list(instance);
5167
5168 /*
5169 * Compute the max allowed sectors per IO: The controller info has two
5170 * limits on max sectors. Driver should use the minimum of these two.
5171 *
5172 * 1 << stripe_sz_ops.min = max sectors per strip
5173 *
5174 * Note that older firmware (< FW ver 30) didn't report the information
5175 * needed to calculate max_sectors_1, so the number always ended up as zero.
5176 */
5177 tmp_sectors = 0;
5178 ctrl_info = instance->ctrl_info;
5179
5180 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5181 le16_to_cpu(ctrl_info->max_strips_per_io);
5182 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5183
5184 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5185
5186 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5187 instance->passive = ctrl_info->cluster.passive;
5188 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5189 instance->UnevenSpanSupport =
5190 ctrl_info->adapterOperations2.supportUnevenSpans;
5191 if (instance->UnevenSpanSupport) {
5192 struct fusion_context *fusion = instance->ctrl_context;
5193 if (MR_ValidateMapInfo(instance))
5194 fusion->fast_path_io = 1;
5195 else
5196 fusion->fast_path_io = 0;
5197
5198 }
5199 if (ctrl_info->host_interface.SRIOV) {
5200 instance->requestorId = ctrl_info->iov.requestorId;
5201 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5202 if (!ctrl_info->adapterOperations2.activePassive)
5203 instance->PlasmaFW111 = 1;
5204
5205 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5206 instance->PlasmaFW111 ? "1.11" : "new");
5207
5208 if (instance->PlasmaFW111) {
5209 iovPtr = (struct IOV_111 *)
5210 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5211 instance->requestorId = iovPtr->requestorId;
5212 }
5213 }
5214 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5215 instance->requestorId);
5216 }
5217
5218 instance->crash_dump_fw_support =
5219 ctrl_info->adapterOperations3.supportCrashDump;
5220 instance->crash_dump_drv_support =
5221 (instance->crash_dump_fw_support &&
5222 instance->crash_dump_buf);
5223 if (instance->crash_dump_drv_support)
5224 megasas_set_crash_dump_params(instance,
5225 MR_CRASH_BUF_TURN_OFF);
5226
5227 else {
5228 if (instance->crash_dump_buf)
5229 pci_free_consistent(instance->pdev,
5230 CRASH_DMA_BUF_SIZE,
5231 instance->crash_dump_buf,
5232 instance->crash_dump_h);
5233 instance->crash_dump_buf = NULL;
5234 }
5235
5236
5237 dev_info(&instance->pdev->dev,
5238 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5239 le16_to_cpu(ctrl_info->pci.vendor_id),
5240 le16_to_cpu(ctrl_info->pci.device_id),
5241 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5242 le16_to_cpu(ctrl_info->pci.sub_device_id));
5243 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5244 instance->UnevenSpanSupport ? "yes" : "no");
5245 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5246 instance->crash_dump_drv_support ? "yes" : "no");
5247 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5248 instance->use_seqnum_jbod_fp ? "yes" : "no");
5249
5250
5251 instance->max_sectors_per_req = instance->max_num_sge *
5252 SGE_BUFFER_SIZE / 512;
5253 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5254 instance->max_sectors_per_req = tmp_sectors;
5255
5256 /* Check for valid throttlequeuedepth module parameter */
5257 if (throttlequeuedepth &&
5258 throttlequeuedepth <= instance->max_scsi_cmds)
5259 instance->throttlequeuedepth = throttlequeuedepth;
5260 else
5261 instance->throttlequeuedepth =
5262 MEGASAS_THROTTLE_QUEUE_DEPTH;
5263
5264 if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
5265 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5266
5267 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5268 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5269
5270 /* Launch SR-IOV heartbeat timer */
5271 if (instance->requestorId) {
5272 if (!megasas_sriov_start_heartbeat(instance, 1))
5273 megasas_start_timer(instance,
5274 &instance->sriov_heartbeat_timer,
5275 megasas_sriov_heartbeat_handler,
5276 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5277 else
5278 instance->skip_heartbeat_timer_del = 1;
5279 }
5280
5281 return 0;
5282
5283 fail_get_pd_list:
5284 instance->instancet->disable_intr(instance);
5285 fail_init_adapter:
5286 megasas_destroy_irqs(instance);
5287 fail_setup_irqs:
5288 if (instance->msix_vectors)
5289 pci_disable_msix(instance->pdev);
5290 instance->msix_vectors = 0;
5291 fail_ready_state:
5292 kfree(instance->ctrl_info);
5293 instance->ctrl_info = NULL;
5294 iounmap(instance->reg_set);
5295
5296 fail_ioremap:
5297 pci_release_selected_regions(instance->pdev, instance->bar);
5298
5299 return -EINVAL;
5300 }
5301
5302 /**
5303 * megasas_release_mfi - Reverses the FW initialization
5304 * @instance: Adapter soft state
5305 */
5306 static void megasas_release_mfi(struct megasas_instance *instance)
5307 {
5308 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5309
5310 if (instance->reply_queue)
5311 pci_free_consistent(instance->pdev, reply_q_sz,
5312 instance->reply_queue, instance->reply_queue_h);
5313
5314 megasas_free_cmds(instance);
5315
5316 iounmap(instance->reg_set);
5317
5318 pci_release_selected_regions(instance->pdev, instance->bar);
5319 }
5320
5321 /**
5322 * megasas_get_seq_num - Gets latest event sequence numbers
5323 * @instance: Adapter soft state
5324 * @eli: FW event log sequence numbers information
5325 *
5326 * FW maintains a log of all events in a non-volatile area. Upper layers would
5327 * usually find out the latest sequence number of the events, the seq number at
5328 * the boot etc. They would "read" all the events below the latest seq number
5329 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5330 * number), they would subscribe to AEN (asynchronous event notification) and
5331 * wait for the events to happen.
5332 */
5333 static int
5334 megasas_get_seq_num(struct megasas_instance *instance,
5335 struct megasas_evt_log_info *eli)
5336 {
5337 struct megasas_cmd *cmd;
5338 struct megasas_dcmd_frame *dcmd;
5339 struct megasas_evt_log_info *el_info;
5340 dma_addr_t el_info_h = 0;
5341
5342 cmd = megasas_get_cmd(instance);
5343
5344 if (!cmd) {
5345 return -ENOMEM;
5346 }
5347
5348 dcmd = &cmd->frame->dcmd;
5349 el_info = pci_alloc_consistent(instance->pdev,
5350 sizeof(struct megasas_evt_log_info),
5351 &el_info_h);
5352
5353 if (!el_info) {
5354 megasas_return_cmd(instance, cmd);
5355 return -ENOMEM;
5356 }
5357
5358 memset(el_info, 0, sizeof(*el_info));
5359 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5360
5361 dcmd->cmd = MFI_CMD_DCMD;
5362 dcmd->cmd_status = 0x0;
5363 dcmd->sge_count = 1;
5364 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5365 dcmd->timeout = 0;
5366 dcmd->pad_0 = 0;
5367 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5368 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5369 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5370 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5371
5372 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5373 DCMD_SUCCESS) {
5374 /*
5375 * Copy the data back into callers buffer
5376 */
5377 eli->newest_seq_num = el_info->newest_seq_num;
5378 eli->oldest_seq_num = el_info->oldest_seq_num;
5379 eli->clear_seq_num = el_info->clear_seq_num;
5380 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5381 eli->boot_seq_num = el_info->boot_seq_num;
5382 } else
5383 dev_err(&instance->pdev->dev, "DCMD failed "
5384 "from %s\n", __func__);
5385
5386 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5387 el_info, el_info_h);
5388
5389 megasas_return_cmd(instance, cmd);
5390
5391 return 0;
5392 }
5393
5394 /**
5395 * megasas_register_aen - Registers for asynchronous event notification
5396 * @instance: Adapter soft state
5397 * @seq_num: The starting sequence number
5398 * @class_locale: Class of the event
5399 *
5400 * This function subscribes for AEN for events beyond the @seq_num. It requests
5401 * to be notified if and only if the event is of type @class_locale
5402 */
5403 static int
5404 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5405 u32 class_locale_word)
5406 {
5407 int ret_val;
5408 struct megasas_cmd *cmd;
5409 struct megasas_dcmd_frame *dcmd;
5410 union megasas_evt_class_locale curr_aen;
5411 union megasas_evt_class_locale prev_aen;
5412
5413 /*
5414 * If there is an AEN pending already (aen_cmd), check if the
5415 * class_locale of that pending AEN is inclusive of the new
5416 * AEN request we currently have. If it is, then we don't have
5417 * to do anything. In other words, whichever events the current
5418 * AEN request is subscribing to, have already been subscribed
5419 * to.
5420 *
5421 * If the old command is _not_ inclusive, then we have to abort
5422 * that command, form a class_locale that is a superset of both
5423 * the old and the current one, and re-issue it to the FW.
5424 */
5425
5426 curr_aen.word = class_locale_word;
5427
5428 if (instance->aen_cmd) {
5429
5430 prev_aen.word =
5431 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5432
5433 /*
5434 * A class whose enum value is smaller is inclusive of all
5435 * higher values. If a PROGRESS (= -1) was previously
5436 * registered, then new registration requests for higher
5437 * classes need not be sent to the FW. They are automatically
5438 * included.
5439 *
5440 * Locale numbers don't have such hierarchy. They are bitmap
5441 * values
5442 */
5443 if ((prev_aen.members.class <= curr_aen.members.class) &&
5444 !((prev_aen.members.locale & curr_aen.members.locale) ^
5445 curr_aen.members.locale)) {
5446 /*
5447 * Previously issued event registration includes
5448 * current request. Nothing to do.
5449 */
5450 return 0;
5451 } else {
5452 curr_aen.members.locale |= prev_aen.members.locale;
5453
5454 if (prev_aen.members.class < curr_aen.members.class)
5455 curr_aen.members.class = prev_aen.members.class;
5456
5457 instance->aen_cmd->abort_aen = 1;
5458 ret_val = megasas_issue_blocked_abort_cmd(instance,
5459 instance->
5460 aen_cmd, 30);
5461
5462 if (ret_val) {
5463 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5464 "previous AEN command\n");
5465 return ret_val;
5466 }
5467 }
5468 }
5469
5470 cmd = megasas_get_cmd(instance);
5471
5472 if (!cmd)
5473 return -ENOMEM;
5474
5475 dcmd = &cmd->frame->dcmd;
5476
5477 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5478
5479 /*
5480 * Prepare DCMD for aen registration
5481 */
5482 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5483
5484 dcmd->cmd = MFI_CMD_DCMD;
5485 dcmd->cmd_status = 0x0;
5486 dcmd->sge_count = 1;
5487 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5488 dcmd->timeout = 0;
5489 dcmd->pad_0 = 0;
5490 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5491 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5492 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5493 instance->last_seq_num = seq_num;
5494 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5495 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5496 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5497
5498 if (instance->aen_cmd != NULL) {
5499 megasas_return_cmd(instance, cmd);
5500 return 0;
5501 }
5502
5503 /*
5504 * Store reference to the cmd used to register for AEN. When an
5505 * application wants us to register for AEN, we have to abort this
5506 * cmd and re-register with a new EVENT LOCALE supplied by that app
5507 */
5508 instance->aen_cmd = cmd;
5509
5510 /*
5511 * Issue the aen registration frame
5512 */
5513 instance->instancet->issue_dcmd(instance, cmd);
5514
5515 return 0;
5516 }
5517
5518 /**
5519 * megasas_start_aen - Subscribes to AEN during driver load time
5520 * @instance: Adapter soft state
5521 */
5522 static int megasas_start_aen(struct megasas_instance *instance)
5523 {
5524 struct megasas_evt_log_info eli;
5525 union megasas_evt_class_locale class_locale;
5526
5527 /*
5528 * Get the latest sequence number from FW
5529 */
5530 memset(&eli, 0, sizeof(eli));
5531
5532 if (megasas_get_seq_num(instance, &eli))
5533 return -1;
5534
5535 /*
5536 * Register AEN with FW for latest sequence number plus 1
5537 */
5538 class_locale.members.reserved = 0;
5539 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5540 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5541
5542 return megasas_register_aen(instance,
5543 le32_to_cpu(eli.newest_seq_num) + 1,
5544 class_locale.word);
5545 }
5546
5547 /**
5548 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5549 * @instance: Adapter soft state
5550 */
5551 static int megasas_io_attach(struct megasas_instance *instance)
5552 {
5553 struct Scsi_Host *host = instance->host;
5554
5555 /*
5556 * Export parameters required by SCSI mid-layer
5557 */
5558 host->irq = instance->pdev->irq;
5559 host->unique_id = instance->unique_id;
5560 host->can_queue = instance->max_scsi_cmds;
5561 host->this_id = instance->init_id;
5562 host->sg_tablesize = instance->max_num_sge;
5563
5564 if (instance->fw_support_ieee)
5565 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5566
5567 /*
5568 * Check if the module parameter value for max_sectors can be used
5569 */
5570 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5571 instance->max_sectors_per_req = max_sectors;
5572 else {
5573 if (max_sectors) {
5574 if (((instance->pdev->device ==
5575 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5576 (instance->pdev->device ==
5577 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5578 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5579 instance->max_sectors_per_req = max_sectors;
5580 } else {
5581 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5582 "and <= %d (or < 1MB for GEN2 controller)\n",
5583 instance->max_sectors_per_req);
5584 }
5585 }
5586 }
5587
5588 host->max_sectors = instance->max_sectors_per_req;
5589 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5590 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5591 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5592 host->max_lun = MEGASAS_MAX_LUN;
5593 host->max_cmd_len = 16;
5594
5595 /* Fusion only supports host reset */
5596 if (instance->ctrl_context) {
5597 host->hostt->eh_device_reset_handler = NULL;
5598 host->hostt->eh_bus_reset_handler = NULL;
5599 host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
5600 host->hostt->eh_abort_handler = megasas_task_abort_fusion;
5601 }
5602
5603 /*
5604 * Notify the mid-layer about the new controller
5605 */
5606 if (scsi_add_host(host, &instance->pdev->dev)) {
5607 dev_err(&instance->pdev->dev,
5608 "Failed to add host from %s %d\n",
5609 __func__, __LINE__);
5610 return -ENODEV;
5611 }
5612
5613 return 0;
5614 }
5615
5616 static int
5617 megasas_set_dma_mask(struct pci_dev *pdev)
5618 {
5619 /*
5620 * All our controllers are capable of performing 64-bit DMA
5621 */
5622 if (IS_DMA64) {
5623 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5624
5625 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5626 goto fail_set_dma_mask;
5627 }
5628 } else {
5629 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5630 goto fail_set_dma_mask;
5631 }
5632 /*
5633 * Ensure that all data structures are allocated in 32-bit
5634 * memory.
5635 */
5636 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5637 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
5638 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5639 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5640 dev_info(&pdev->dev, "set 32bit DMA mask "
5641 "and 32 bit consistent mask\n");
5642 else
5643 goto fail_set_dma_mask;
5644 }
5645
5646 return 0;
5647
5648 fail_set_dma_mask:
5649 return 1;
5650 }
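/*
 * Editorial sketch, not part of this driver: on kernels that expose the
 * generic DMA API, roughly the same policy as megasas_set_dma_mask()
 * above could be written as
 *
 *	if (IS_DMA64 && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 *	else
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * i.e. use a 64-bit streaming mask where the platform allows it, while
 * keeping coherent (consistent) allocations below 4GB.
 */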
5651
5652 /**
5653 * megasas_probe_one - PCI hotplug entry point
5654 * @pdev: PCI device structure
5655 * @id: PCI ids of supported hotplugged adapter
5656 */
5657 static int megasas_probe_one(struct pci_dev *pdev,
5658 const struct pci_device_id *id)
5659 {
5660 int rval, pos;
5661 struct Scsi_Host *host;
5662 struct megasas_instance *instance;
5663 u16 control = 0;
5664 struct fusion_context *fusion = NULL;
5665
5666 /* Reset MSI-X in the kdump kernel */
5667 if (reset_devices) {
5668 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5669 if (pos) {
5670 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5671 &control);
5672 if (control & PCI_MSIX_FLAGS_ENABLE) {
5673 dev_info(&pdev->dev, "resetting MSI-X\n");
5674 pci_write_config_word(pdev,
5675 pos + PCI_MSIX_FLAGS,
5676 control &
5677 ~PCI_MSIX_FLAGS_ENABLE);
5678 }
5679 }
5680 }
5681
5682 /*
5683 * PCI prepping: enable device, set bus mastering and DMA mask
5684 */
5685 rval = pci_enable_device_mem(pdev);
5686
5687 if (rval) {
5688 return rval;
5689 }
5690
5691 pci_set_master(pdev);
5692
5693 if (megasas_set_dma_mask(pdev))
5694 goto fail_set_dma_mask;
5695
5696 host = scsi_host_alloc(&megasas_template,
5697 sizeof(struct megasas_instance));
5698
5699 if (!host) {
5700 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
5701 goto fail_alloc_instance;
5702 }
5703
5704 instance = (struct megasas_instance *)host->hostdata;
5705 memset(instance, 0, sizeof(*instance));
5706 atomic_set(&instance->fw_reset_no_pci_access, 0);
5707 instance->pdev = pdev;
5708
5709 switch (instance->pdev->device) {
5710 case PCI_DEVICE_ID_LSI_FUSION:
5711 case PCI_DEVICE_ID_LSI_PLASMA:
5712 case PCI_DEVICE_ID_LSI_INVADER:
5713 case PCI_DEVICE_ID_LSI_FURY:
5714 case PCI_DEVICE_ID_LSI_INTRUDER:
5715 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5716 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5717 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5718 {
5719 instance->ctrl_context_pages =
5720 get_order(sizeof(struct fusion_context));
5721 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
5722 instance->ctrl_context_pages);
5723 if (!instance->ctrl_context) {
5724 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5725 "memory for Fusion context info\n");
5726 goto fail_alloc_dma_buf;
5727 }
5728 fusion = instance->ctrl_context;
5729 memset(fusion, 0,
5730 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5731 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
5732 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
5733 fusion->adapter_type = THUNDERBOLT_SERIES;
5734 else
5735 fusion->adapter_type = INVADER_SERIES;
5736 }
5737 break;
5738 default: /* For all other supported controllers */
5739
5740 instance->producer =
5741 pci_alloc_consistent(pdev, sizeof(u32),
5742 &instance->producer_h);
5743 instance->consumer =
5744 pci_alloc_consistent(pdev, sizeof(u32),
5745 &instance->consumer_h);
5746
5747 if (!instance->producer || !instance->consumer) {
5748 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5749 "memory for producer, consumer\n");
5750 goto fail_alloc_dma_buf;
5751 }
5752
5753 *instance->producer = 0;
5754 *instance->consumer = 0;
5755 break;
5756 }
5757
5758 instance->system_info_buf = pci_zalloc_consistent(pdev,
5759 sizeof(struct MR_DRV_SYSTEM_INFO),
5760 &instance->system_info_h);
5761
5762 if (!instance->system_info_buf)
5763 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
5764
5765 /* Crash dump feature related initialisation */
5766 instance->drv_buf_index = 0;
5767 instance->drv_buf_alloc = 0;
5768 instance->crash_dump_fw_support = 0;
5769 instance->crash_dump_app_support = 0;
5770 instance->fw_crash_state = UNAVAILABLE;
5771 spin_lock_init(&instance->crashdump_lock);
5772 instance->crash_dump_buf = NULL;
5773
5774 if (!reset_devices)
5775 instance->crash_dump_buf = pci_alloc_consistent(pdev,
5776 CRASH_DMA_BUF_SIZE,
5777 &instance->crash_dump_h);
5778 if (!instance->crash_dump_buf)
5779 dev_err(&pdev->dev, "Can't allocate Firmware "
5780 "crash dump DMA buffer\n");
5781
5782 megasas_poll_wait_aen = 0;
5783 instance->flag_ieee = 0;
5784 instance->ev = NULL;
5785 instance->issuepend_done = 1;
5786 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5787 instance->is_imr = 0;
5788
5789 instance->evt_detail = pci_alloc_consistent(pdev,
5790 sizeof(struct
5791 megasas_evt_detail),
5792 &instance->evt_detail_h);
5793
5794 if (!instance->evt_detail) {
5795 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
5796 "event detail structure\n");
5797 goto fail_alloc_dma_buf;
5798 }
5799
5800 instance->pd_info = pci_alloc_consistent(pdev,
5801 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
5802
5803 if (!instance->pd_info)
5804 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
5805
5806 /*
5807 * Initialize locks and queues
5808 */
5809 INIT_LIST_HEAD(&instance->cmd_pool);
5810 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
5811
5812 atomic_set(&instance->fw_outstanding, 0);
5813
5814 init_waitqueue_head(&instance->int_cmd_wait_q);
5815 init_waitqueue_head(&instance->abort_cmd_wait_q);
5816
5817 spin_lock_init(&instance->mfi_pool_lock);
5818 spin_lock_init(&instance->hba_lock);
5819 spin_lock_init(&instance->completion_lock);
5820
5821 mutex_init(&instance->reset_mutex);
5822 mutex_init(&instance->hba_mutex);
5823
5824 /*
5825 * Initialize PCI related and misc parameters
5826 */
5827 instance->host = host;
5828 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
5829 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
5830 instance->ctrl_info = NULL;
5831
5832
5833 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5834 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
5835 instance->flag_ieee = 1;
5836
5837 megasas_dbg_lvl = 0;
5838 instance->flag = 0;
5839 instance->unload = 1;
5840 instance->last_time = 0;
5841 instance->disableOnlineCtrlReset = 1;
5842 instance->UnevenSpanSupport = 0;
5843
5844 if (instance->ctrl_context) {
5845 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
5846 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
5847 } else
5848 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
5849
5850 /*
5851 * Initialize MFI Firmware
5852 */
5853 if (megasas_init_fw(instance))
5854 goto fail_init_mfi;
5855
5856 if (instance->requestorId) {
5857 if (instance->PlasmaFW111) {
5858 instance->vf_affiliation_111 =
5859 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
5860 &instance->vf_affiliation_111_h);
5861 if (!instance->vf_affiliation_111)
5862 dev_warn(&pdev->dev, "Can't allocate "
5863 "memory for VF affiliation buffer\n");
5864 } else {
5865 instance->vf_affiliation =
5866 pci_alloc_consistent(pdev,
5867 (MAX_LOGICAL_DRIVES + 1) *
5868 sizeof(struct MR_LD_VF_AFFILIATION),
5869 &instance->vf_affiliation_h);
5870 if (!instance->vf_affiliation)
5871 dev_warn(&pdev->dev, "Can't allocate "
5872 "memory for VF affiliation buffer\n");
5873 }
5874 }
5875
5876 /*
5877 * Store instance in PCI softstate
5878 */
5879 pci_set_drvdata(pdev, instance);
5880
5881 /*
5882 * Add this controller to megasas_mgmt_info structure so that it
5883 * can be exported to management applications
5884 */
5885 megasas_mgmt_info.count++;
5886 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
5887 megasas_mgmt_info.max_index++;
5888
5889 /*
5890 * Register with SCSI mid-layer
5891 */
5892 if (megasas_io_attach(instance))
5893 goto fail_io_attach;
5894
5895 instance->unload = 0;
5896 /*
5897 * Trigger SCSI to scan our drives
5898 */
5899 scsi_scan_host(host);
5900
5901 /*
5902 * Initiate AEN (Asynchronous Event Notification)
5903 */
5904 if (megasas_start_aen(instance)) {
5905 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
5906 goto fail_start_aen;
5907 }
5908
5909 /* Get current SR-IOV LD/VF affiliation */
5910 if (instance->requestorId)
5911 megasas_get_ld_vf_affiliation(instance, 1);
5912
5913 return 0;
5914
5915 fail_start_aen:
5916 fail_io_attach:
5917 megasas_mgmt_info.count--;
5918 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
5919 megasas_mgmt_info.max_index--;
5920
5921 instance->instancet->disable_intr(instance);
5922 megasas_destroy_irqs(instance);
5923
5924 if (instance->ctrl_context)
5925 megasas_release_fusion(instance);
5926 else
5927 megasas_release_mfi(instance);
5928 if (instance->msix_vectors)
5929 pci_disable_msix(instance->pdev);
5930 fail_init_mfi:
5931 fail_alloc_dma_buf:
5932 if (instance->evt_detail)
5933 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
5934 instance->evt_detail,
5935 instance->evt_detail_h);
5936
5937 if (instance->pd_info)
5938 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
5939 instance->pd_info,
5940 instance->pd_info_h);
5941 if (instance->producer)
5942 pci_free_consistent(pdev, sizeof(u32), instance->producer,
5943 instance->producer_h);
5944 if (instance->consumer)
5945 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
5946 instance->consumer_h);
5947 scsi_host_put(host);
5948
5949 fail_alloc_instance:
5950 fail_set_dma_mask:
5951 pci_disable_device(pdev);
5952
5953 return -ENODEV;
5954 }
5955
5956 /**
5957 * megasas_flush_cache - Requests FW to flush all its caches
5958 * @instance: Adapter soft state
5959 */
5960 static void megasas_flush_cache(struct megasas_instance *instance)
5961 {
5962 struct megasas_cmd *cmd;
5963 struct megasas_dcmd_frame *dcmd;
5964
5965 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
5966 return;
5967
5968 cmd = megasas_get_cmd(instance);
5969
5970 if (!cmd)
5971 return;
5972
5973 dcmd = &cmd->frame->dcmd;
5974
5975 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5976
5977 dcmd->cmd = MFI_CMD_DCMD;
5978 dcmd->cmd_status = 0x0;
5979 dcmd->sge_count = 0;
5980 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
5981 dcmd->timeout = 0;
5982 dcmd->pad_0 = 0;
5983 dcmd->data_xfer_len = 0;
5984 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
5985 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
5986
5987 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
5988 != DCMD_SUCCESS) {
5989 dev_err(&instance->pdev->dev,
5990 "return from %s %d\n", __func__, __LINE__);
5991 return;
5992 }
5993
5994 megasas_return_cmd(instance, cmd);
5995 }
5996
5997 /**
5998 * megasas_shutdown_controller - Instructs FW to shutdown the controller
5999 * @instance: Adapter soft state
6000 * @opcode: Shutdown/Hibernate
6001 */
6002 static void megasas_shutdown_controller(struct megasas_instance *instance,
6003 u32 opcode)
6004 {
6005 struct megasas_cmd *cmd;
6006 struct megasas_dcmd_frame *dcmd;
6007
6008 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6009 return;
6010
6011 cmd = megasas_get_cmd(instance);
6012
6013 if (!cmd)
6014 return;
6015
6016 if (instance->aen_cmd)
6017 megasas_issue_blocked_abort_cmd(instance,
6018 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6019 if (instance->map_update_cmd)
6020 megasas_issue_blocked_abort_cmd(instance,
6021 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6022 if (instance->jbod_seq_cmd)
6023 megasas_issue_blocked_abort_cmd(instance,
6024 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6025
6026 dcmd = &cmd->frame->dcmd;
6027
6028 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6029
6030 dcmd->cmd = MFI_CMD_DCMD;
6031 dcmd->cmd_status = 0x0;
6032 dcmd->sge_count = 0;
6033 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6034 dcmd->timeout = 0;
6035 dcmd->pad_0 = 0;
6036 dcmd->data_xfer_len = 0;
6037 dcmd->opcode = cpu_to_le32(opcode);
6038
6039 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6040 != DCMD_SUCCESS) {
6041 dev_err(&instance->pdev->dev,
6042 "return from %s %d\n", __func__, __LINE__);
6043 return;
6044 }
6045
6046 megasas_return_cmd(instance, cmd);
6047 }
6048
6049 #ifdef CONFIG_PM
6050 /**
6051 * megasas_suspend - driver suspend entry point
6052 * @pdev: PCI device structure
6053 * @state: PCI power state passed to the suspend routine
6054 */
6055 static int
6056 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6057 {
6058 struct Scsi_Host *host;
6059 struct megasas_instance *instance;
6060
6061 instance = pci_get_drvdata(pdev);
6062 host = instance->host;
6063 instance->unload = 1;
6064
6065 /* Shutdown SR-IOV heartbeat timer */
6066 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6067 del_timer_sync(&instance->sriov_heartbeat_timer);
6068
6069 megasas_flush_cache(instance);
6070 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6071
6072 /* cancel the delayed work if it is still queued */
6073 if (instance->ev != NULL) {
6074 struct megasas_aen_event *ev = instance->ev;
6075 cancel_delayed_work_sync(&ev->hotplug_work);
6076 instance->ev = NULL;
6077 }
6078
6079 tasklet_kill(&instance->isr_tasklet);
6080
6081 pci_set_drvdata(instance->pdev, instance);
6082 instance->instancet->disable_intr(instance);
6083
6084 megasas_destroy_irqs(instance);
6085
6086 if (instance->msix_vectors)
6087 pci_disable_msix(instance->pdev);
6088
6089 pci_save_state(pdev);
6090 pci_disable_device(pdev);
6091
6092 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6093
6094 return 0;
6095 }
6096
6097 /**
6098 * megasas_resume - driver resume entry point
6099 * @pdev: PCI device structure
6100 */
6101 static int
6102 megasas_resume(struct pci_dev *pdev)
6103 {
6104 int rval;
6105 struct Scsi_Host *host;
6106 struct megasas_instance *instance;
6107
6108 instance = pci_get_drvdata(pdev);
6109 host = instance->host;
6110 pci_set_power_state(pdev, PCI_D0);
6111 pci_enable_wake(pdev, PCI_D0, 0);
6112 pci_restore_state(pdev);
6113
6114 /*
6115 * PCI prepping: enable device, set bus mastering and DMA mask
6116 */
6117 rval = pci_enable_device_mem(pdev);
6118
6119 if (rval) {
6120 dev_err(&pdev->dev, "Enable device failed\n");
6121 return rval;
6122 }
6123
6124 pci_set_master(pdev);
6125
6126 if (megasas_set_dma_mask(pdev))
6127 goto fail_set_dma_mask;
6128
6129 /*
6130 * Initialize MFI Firmware
6131 */
6132
6133 atomic_set(&instance->fw_outstanding, 0);
6134
6135 /*
6136 * We expect the FW state to be READY
6137 */
6138 if (megasas_transition_to_ready(instance, 0))
6139 goto fail_ready_state;
6140
6141 /* Now re-enable MSI-X */
6142 if (instance->msix_vectors &&
6143 pci_enable_msix_exact(instance->pdev, instance->msixentry,
6144 instance->msix_vectors))
6145 goto fail_reenable_msix;
6146
6147 if (instance->ctrl_context) {
6148 megasas_reset_reply_desc(instance);
6149 if (megasas_ioc_init_fusion(instance)) {
6150 megasas_free_cmds(instance);
6151 megasas_free_cmds_fusion(instance);
6152 goto fail_init_mfi;
6153 }
6154 if (!megasas_get_map_info(instance))
6155 megasas_sync_map_info(instance);
6156 } else {
6157 *instance->producer = 0;
6158 *instance->consumer = 0;
6159 if (megasas_issue_init_mfi(instance))
6160 goto fail_init_mfi;
6161 }
6162
6163 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6164 (unsigned long)instance);
6165
6166 if (instance->msix_vectors ?
6167 megasas_setup_irqs_msix(instance, 0) :
6168 megasas_setup_irqs_ioapic(instance))
6169 goto fail_init_mfi;
6170
6171 /* Re-launch SR-IOV heartbeat timer */
6172 if (instance->requestorId) {
6173 if (!megasas_sriov_start_heartbeat(instance, 0))
6174 megasas_start_timer(instance,
6175 &instance->sriov_heartbeat_timer,
6176 megasas_sriov_heartbeat_handler,
6177 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6178 else {
6179 instance->skip_heartbeat_timer_del = 1;
6180 goto fail_init_mfi;
6181 }
6182 }
6183
6184 instance->instancet->enable_intr(instance);
6185 megasas_setup_jbod_map(instance);
6186 instance->unload = 0;
6187
6188 /*
6189 * Initiate AEN (Asynchronous Event Notification)
6190 */
6191 if (megasas_start_aen(instance))
6192 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6193
6194 return 0;
6195
6196 fail_init_mfi:
6197 if (instance->evt_detail)
6198 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6199 instance->evt_detail,
6200 instance->evt_detail_h);
6201
6202 if (instance->pd_info)
6203 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6204 instance->pd_info,
6205 instance->pd_info_h);
6206 if (instance->producer)
6207 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6208 instance->producer_h);
6209 if (instance->consumer)
6210 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6211 instance->consumer_h);
6212 scsi_host_put(host);
6213
6214 fail_set_dma_mask:
6215 fail_ready_state:
6216 fail_reenable_msix:
6217
6218 pci_disable_device(pdev);
6219
6220 return -ENODEV;
6221 }
6222 #else
6223 #define megasas_suspend NULL
6224 #define megasas_resume NULL
6225 #endif
6226
6227 /**
6228 * megasas_detach_one - PCI hot"un"plug entry point
6229 * @pdev: PCI device structure
6230 */
6231 static void megasas_detach_one(struct pci_dev *pdev)
6232 {
6233 int i;
6234 struct Scsi_Host *host;
6235 struct megasas_instance *instance;
6236 struct fusion_context *fusion;
6237 u32 pd_seq_map_sz;
6238
6239 instance = pci_get_drvdata(pdev);
6240 instance->unload = 1;
6241 host = instance->host;
6242 fusion = instance->ctrl_context;
6243
6244 /* Shutdown SR-IOV heartbeat timer */
6245 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6246 del_timer_sync(&instance->sriov_heartbeat_timer);
6247
6248 if (instance->fw_crash_state != UNAVAILABLE)
6249 megasas_free_host_crash_buffer(instance);
6250 scsi_remove_host(instance->host);
6251 megasas_flush_cache(instance);
6252 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6253
6254 /* cancel the delayed work if it is still queued */
6255 if (instance->ev != NULL) {
6256 struct megasas_aen_event *ev = instance->ev;
6257 cancel_delayed_work_sync(&ev->hotplug_work);
6258 instance->ev = NULL;
6259 }
6260
6261 /* cancel all wait events */
6262 wake_up_all(&instance->int_cmd_wait_q);
6263
6264 tasklet_kill(&instance->isr_tasklet);
6265
6266 /*
6267 * Take the instance off the instance array. Note that we will not
6268 * decrement the max_index. We let this array be a sparse array.
6269 */
6270 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6271 if (megasas_mgmt_info.instance[i] == instance) {
6272 megasas_mgmt_info.count--;
6273 megasas_mgmt_info.instance[i] = NULL;
6274
6275 break;
6276 }
6277 }
6278
6279 instance->instancet->disable_intr(instance);
6280
6281 megasas_destroy_irqs(instance);
6282
6283 if (instance->msix_vectors)
6284 pci_disable_msix(instance->pdev);
6285
6286 if (instance->ctrl_context) {
6287 megasas_release_fusion(instance);
6288 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6289 (sizeof(struct MR_PD_CFG_SEQ) *
6290 (MAX_PHYSICAL_DEVICES - 1));
6291 for (i = 0; i < 2 ; i++) {
6292 if (fusion->ld_map[i])
6293 dma_free_coherent(&instance->pdev->dev,
6294 fusion->max_map_sz,
6295 fusion->ld_map[i],
6296 fusion->ld_map_phys[i]);
6297 if (fusion->ld_drv_map[i])
6298 free_pages((ulong)fusion->ld_drv_map[i],
6299 fusion->drv_map_pages);
6300 if (fusion->pd_seq_sync[i])
6301 dma_free_coherent(&instance->pdev->dev,
6302 pd_seq_map_sz,
6303 fusion->pd_seq_sync[i],
6304 fusion->pd_seq_phys[i]);
6305 }
6306 free_pages((ulong)instance->ctrl_context,
6307 instance->ctrl_context_pages);
6308 } else {
6309 megasas_release_mfi(instance);
6310 pci_free_consistent(pdev, sizeof(u32),
6311 instance->producer,
6312 instance->producer_h);
6313 pci_free_consistent(pdev, sizeof(u32),
6314 instance->consumer,
6315 instance->consumer_h);
6316 }
6317
6318 kfree(instance->ctrl_info);
6319
6320 if (instance->evt_detail)
6321 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6322 instance->evt_detail, instance->evt_detail_h);
6323
6324 if (instance->pd_info)
6325 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6326 instance->pd_info,
6327 instance->pd_info_h);
6328 if (instance->vf_affiliation)
6329 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6330 sizeof(struct MR_LD_VF_AFFILIATION),
6331 instance->vf_affiliation,
6332 instance->vf_affiliation_h);
6333
6334 if (instance->vf_affiliation_111)
6335 pci_free_consistent(pdev,
6336 sizeof(struct MR_LD_VF_AFFILIATION_111),
6337 instance->vf_affiliation_111,
6338 instance->vf_affiliation_111_h);
6339
6340 if (instance->hb_host_mem)
6341 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6342 instance->hb_host_mem,
6343 instance->hb_host_mem_h);
6344
6345 if (instance->crash_dump_buf)
6346 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6347 instance->crash_dump_buf, instance->crash_dump_h);
6348
6349 if (instance->system_info_buf)
6350 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6351 instance->system_info_buf, instance->system_info_h);
6352
6353 scsi_host_put(host);
6354
6355 pci_disable_device(pdev);
6356 }
6357
6358 /**
6359 * megasas_shutdown - Shutdown entry point
6360 * @pdev: PCI device structure
6361 */
6362 static void megasas_shutdown(struct pci_dev *pdev)
6363 {
6364 struct megasas_instance *instance = pci_get_drvdata(pdev);
6365
6366 instance->unload = 1;
6367 megasas_flush_cache(instance);
6368 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6369 instance->instancet->disable_intr(instance);
6370 megasas_destroy_irqs(instance);
6371
6372 if (instance->msix_vectors)
6373 pci_disable_msix(instance->pdev);
6374 }
6375
6376 /**
6377 * megasas_mgmt_open - char node "open" entry point
6378 */
6379 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6380 {
6381 /*
6382 * Allow only those users with admin rights
6383 */
6384 if (!capable(CAP_SYS_ADMIN))
6385 return -EACCES;
6386
6387 return 0;
6388 }
6389
6390 /**
6391 * megasas_mgmt_fasync - Async notifier registration from applications
6392 *
6393 * This function adds the calling process to a driver global queue. When an
6394 * event occurs, SIGIO will be sent to all processes in this queue.
6395 */
6396 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6397 {
6398 int rc;
6399
6400 mutex_lock(&megasas_async_queue_mutex);
6401
6402 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6403
6404 mutex_unlock(&megasas_async_queue_mutex);
6405
6406 if (rc >= 0) {
6407 /* For sanity check when we get ioctl */
6408 filep->private_data = filep;
6409 return 0;
6410 }
6411
6412 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6413
6414 return rc;
6415 }
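/*
 * Editorial sketch of the user-space side, not from the original source
 * (the device node path is a placeholder; it depends on how the char
 * device registered in megasas_init() is exposed on the system):
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDONLY);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * Setting O_ASYNC is what invokes megasas_mgmt_fasync() above and adds
 * the caller to megasas_async_queue, so it receives SIGIO on new AENs.
 */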
6416
6417 /**
6418 * megasas_mgmt_poll - char node "poll" entry point
6419 */
6420 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6421 {
6422 unsigned int mask;
6423 unsigned long flags;
6424
6425 poll_wait(file, &megasas_poll_wait, wait);
6426 spin_lock_irqsave(&poll_aen_lock, flags);
6427 if (megasas_poll_wait_aen)
6428 mask = (POLLIN | POLLRDNORM);
6429 else
6430 mask = 0;
6431 megasas_poll_wait_aen = 0;
6432 spin_unlock_irqrestore(&poll_aen_lock, flags);
6433 return mask;
6434 }
6435
6436 /*
6437 * megasas_set_crash_dump_params_ioctl:
6438 * Send CRASH_DUMP_MODE DCMD to all controllers
6439 * @cmd: MFI command frame
6440 */
6441
6442 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6443 {
6444 struct megasas_instance *local_instance;
6445 int i, error = 0;
6446 int crash_support;
6447
6448 crash_support = cmd->frame->dcmd.mbox.w[0];
6449
6450 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6451 local_instance = megasas_mgmt_info.instance[i];
6452 if (local_instance && local_instance->crash_dump_drv_support) {
6453 if ((atomic_read(&local_instance->adprecovery) ==
6454 MEGASAS_HBA_OPERATIONAL) &&
6455 !megasas_set_crash_dump_params(local_instance,
6456 crash_support)) {
6457 local_instance->crash_dump_app_support =
6458 crash_support;
6459 dev_info(&local_instance->pdev->dev,
6460 "Application firmware crash "
6461 "dump mode set success\n");
6462 error = 0;
6463 } else {
6464 dev_info(&local_instance->pdev->dev,
6465 "Application firmware crash "
6466 "dump mode set failed\n");
6467 error = -1;
6468 }
6469 }
6470 }
6471 return error;
6472 }
6473
6474 /**
6475 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6476 * @instance: Adapter soft state
6477 * @user_ioc: User's ioctl packet (@ioc is the in-kernel copy of it)
6478 */
6479 static int
6480 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6481 struct megasas_iocpacket __user * user_ioc,
6482 struct megasas_iocpacket *ioc)
6483 {
6484 struct megasas_sge32 *kern_sge32;
6485 struct megasas_cmd *cmd;
6486 void *kbuff_arr[MAX_IOCTL_SGE];
6487 dma_addr_t buf_handle = 0;
6488 int error = 0, i;
6489 void *sense = NULL;
6490 dma_addr_t sense_handle;
6491 unsigned long *sense_ptr;
6492
6493 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6494
6495 if (ioc->sge_count > MAX_IOCTL_SGE) {
6496 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6497 ioc->sge_count, MAX_IOCTL_SGE);
6498 return -EINVAL;
6499 }
6500
6501 cmd = megasas_get_cmd(instance);
6502 if (!cmd) {
6503 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6504 return -ENOMEM;
6505 }
6506
6507 /*
6508 * User's IOCTL packet has 2 frames (maximum). Copy those two
6509 * frames into our cmd's frames. cmd->frame's context will get
6510 * overwritten when we copy from user's frames. So set that value
6511 * alone separately
6512 */
6513 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6514 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6515 cmd->frame->hdr.pad_0 = 0;
6516 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6517 MFI_FRAME_SGL64 |
6518 MFI_FRAME_SENSE64));
6519
6520 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6521 error = megasas_set_crash_dump_params_ioctl(cmd);
6522 megasas_return_cmd(instance, cmd);
6523 return error;
6524 }
6525
6526 /*
6527 * The management interface between applications and the fw uses
6528 * MFI frames. E.g, RAID configuration changes, LD property changes
6529 * etc. are accomplished through different kinds of MFI frames. The
6530 * driver needs to care only about substituting user buffers with
6531 * kernel buffers in SGLs. The location of SGL is embedded in the
6532 * struct iocpacket itself.
6533 */
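/*
 * Editorial illustration (using the field names already referenced in
 * this file, not a verbatim application snippet): user space fills a
 * megasas_iocpacket with host_no, the raw MFI frame, sgl_off (byte
 * offset of the SGL inside the frame), sge_count and an iovec-style
 * sgl[] describing its buffers. The loop below walks sgl[], allocates
 * a DMA-able mirror for each iovec, rewrites the SGE at frame + sgl_off
 * to point at the mirror, and copies user data in before the frame is
 * fired.
 */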
6534 kern_sge32 = (struct megasas_sge32 *)
6535 ((unsigned long)cmd->frame + ioc->sgl_off);
6536
6537 /*
6538 * For each user buffer, create a mirror buffer and copy in
6539 */
6540 for (i = 0; i < ioc->sge_count; i++) {
6541 if (!ioc->sgl[i].iov_len)
6542 continue;
6543
6544 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6545 ioc->sgl[i].iov_len,
6546 &buf_handle, GFP_KERNEL);
6547 if (!kbuff_arr[i]) {
6548 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6549 "kernel SGL buffer for IOCTL\n");
6550 error = -ENOMEM;
6551 goto out;
6552 }
6553
6554 /*
6555 * We don't change the dma_coherent_mask, so
6556 * dma_alloc_coherent only returns 32bit addresses
6557 */
6558 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6559 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6560
6561 /*
6562 * We created a kernel buffer corresponding to the
6563 * user buffer. Now copy in from the user buffer
6564 */
6565 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6566 (u32) (ioc->sgl[i].iov_len))) {
6567 error = -EFAULT;
6568 goto out;
6569 }
6570 }
6571
6572 if (ioc->sense_len) {
6573 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6574 &sense_handle, GFP_KERNEL);
6575 if (!sense) {
6576 error = -ENOMEM;
6577 goto out;
6578 }
6579
6580 sense_ptr =
6581 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6582 *sense_ptr = cpu_to_le32(sense_handle);
6583 }
6584
6585 /*
6586 * Set the sync_cmd flag so that the ISR knows not to complete this
6587 * cmd to the SCSI mid-layer
6588 */
6589 cmd->sync_cmd = 1;
6590 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6591 cmd->sync_cmd = 0;
6592 dev_err(&instance->pdev->dev,
6593 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6594 __func__, __LINE__, cmd->frame->dcmd.opcode,
6595 cmd->cmd_status_drv);
6596 return -EBUSY;
6597 }
6598
6599 cmd->sync_cmd = 0;
6600
6601 if (instance->unload == 1) {
6602 dev_info(&instance->pdev->dev, "Driver unload is in progress; "
6603 "don't submit data to application\n");
6604 goto out;
6605 }
6606 /*
6607 * copy out the kernel buffers to user buffers
6608 */
6609 for (i = 0; i < ioc->sge_count; i++) {
6610 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
6611 ioc->sgl[i].iov_len)) {
6612 error = -EFAULT;
6613 goto out;
6614 }
6615 }
6616
6617 /*
6618 * copy out the sense
6619 */
6620 if (ioc->sense_len) {
6621 /*
6622 * sense_ptr points to the location that has the user
6623 * sense buffer address
6624 */
6625 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
6626 ioc->sense_off);
6627
6628 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
6629 sense, ioc->sense_len)) {
6630 dev_err(&instance->pdev->dev, "Failed to copy out to user "
6631 "sense data\n");
6632 error = -EFAULT;
6633 goto out;
6634 }
6635 }
6636
6637 /*
6638 * copy the status codes returned by the fw
6639 */
6640 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
6641 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
6642 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
6643 error = -EFAULT;
6644 }
6645
6646 out:
6647 if (sense) {
6648 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
6649 sense, sense_handle);
6650 }
6651
6652 for (i = 0; i < ioc->sge_count; i++) {
6653 if (kbuff_arr[i])
6654 dma_free_coherent(&instance->pdev->dev,
6655 le32_to_cpu(kern_sge32[i].length),
6656 kbuff_arr[i],
6657 le32_to_cpu(kern_sge32[i].phys_addr));
6658 kbuff_arr[i] = NULL;
6659 }
6660
6661 megasas_return_cmd(instance, cmd);
6662 return error;
6663 }
6664
6665 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6666 {
6667 struct megasas_iocpacket __user *user_ioc =
6668 (struct megasas_iocpacket __user *)arg;
6669 struct megasas_iocpacket *ioc;
6670 struct megasas_instance *instance;
6671 int error;
6672 int i;
6673 unsigned long flags;
6674 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6675
6676 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6677 if (!ioc)
6678 return -ENOMEM;
6679
6680 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
6681 error = -EFAULT;
6682 goto out_kfree_ioc;
6683 }
6684
6685 instance = megasas_lookup_instance(ioc->host_no);
6686 if (!instance) {
6687 error = -ENODEV;
6688 goto out_kfree_ioc;
6689 }
6690
6691 /* Adjust ioctl wait time for VF mode */
6692 if (instance->requestorId)
6693 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
6694
6695 /* Block ioctls in VF mode */
6696 if (instance->requestorId && !allow_vf_ioctls) {
6697 error = -ENODEV;
6698 goto out_kfree_ioc;
6699 }
6700
6701 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6702 dev_err(&instance->pdev->dev, "Controller in crit error\n");
6703 error = -ENODEV;
6704 goto out_kfree_ioc;
6705 }
6706
6707 if (instance->unload == 1) {
6708 error = -ENODEV;
6709 goto out_kfree_ioc;
6710 }
6711
6712 if (down_interruptible(&instance->ioctl_sem)) {
6713 error = -ERESTARTSYS;
6714 goto out_kfree_ioc;
6715 }
6716
6717 for (i = 0; i < wait_time; i++) {
6718
6719 spin_lock_irqsave(&instance->hba_lock, flags);
6720 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6721 spin_unlock_irqrestore(&instance->hba_lock, flags);
6722 break;
6723 }
6724 spin_unlock_irqrestore(&instance->hba_lock, flags);
6725
6726 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6727 dev_notice(&instance->pdev->dev, "waiting "
6728 "for controller reset to finish\n");
6729 }
6730
6731 msleep(1000);
6732 }
6733
6734 spin_lock_irqsave(&instance->hba_lock, flags);
6735 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6736 spin_unlock_irqrestore(&instance->hba_lock, flags);
6737
6738 dev_err(&instance->pdev->dev, "timed out while "
6739 "waiting for HBA to recover\n");
6740 error = -ENODEV;
6741 goto out_up;
6742 }
6743 spin_unlock_irqrestore(&instance->hba_lock, flags);
6744
6745 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
6746 out_up:
6747 up(&instance->ioctl_sem);
6748
6749 out_kfree_ioc:
6750 kfree(ioc);
6751 return error;
6752 }
6753
6754 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6755 {
6756 struct megasas_instance *instance;
6757 struct megasas_aen aen;
6758 int error;
6759 int i;
6760 unsigned long flags;
6761 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6762
6763 if (file->private_data != file) {
6764 printk(KERN_DEBUG "megasas: fasync_helper was not "
6765 "called first\n");
6766 return -EINVAL;
6767 }
6768
6769 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
6770 return -EFAULT;
6771
6772 instance = megasas_lookup_instance(aen.host_no);
6773
6774 if (!instance)
6775 return -ENODEV;
6776
6777 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6778 return -ENODEV;
6779 }
6780
6781 if (instance->unload == 1) {
6782 return -ENODEV;
6783 }
6784
6785 for (i = 0; i < wait_time; i++) {
6786
6787 spin_lock_irqsave(&instance->hba_lock, flags);
6788 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6789 spin_unlock_irqrestore(&instance->hba_lock,
6790 flags);
6791 break;
6792 }
6793
6794 spin_unlock_irqrestore(&instance->hba_lock, flags);
6795
6796 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6797 dev_notice(&instance->pdev->dev, "waiting for "
6798 "controller reset to finish\n");
6799 }
6800
6801 msleep(1000);
6802 }
6803
6804 spin_lock_irqsave(&instance->hba_lock, flags);
6805 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6806 spin_unlock_irqrestore(&instance->hba_lock, flags);
6807 dev_err(&instance->pdev->dev, "timed out while waiting "
6808 "for HBA to recover\n");
6809 return -ENODEV;
6810 }
6811 spin_unlock_irqrestore(&instance->hba_lock, flags);
6812
6813 mutex_lock(&instance->reset_mutex);
6814 error = megasas_register_aen(instance, aen.seq_num,
6815 aen.class_locale_word);
6816 mutex_unlock(&instance->reset_mutex);
6817 return error;
6818 }
6819
6820 /**
6821 * megasas_mgmt_ioctl - char node ioctl entry point
6822 */
6823 static long
6824 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6825 {
6826 switch (cmd) {
6827 case MEGASAS_IOC_FIRMWARE:
6828 return megasas_mgmt_ioctl_fw(file, arg);
6829
6830 case MEGASAS_IOC_GET_AEN:
6831 return megasas_mgmt_ioctl_aen(file, arg);
6832 }
6833
6834 return -ENOTTY;
6835 }
6836
6837 #ifdef CONFIG_COMPAT
6838 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
6839 {
6840 struct compat_megasas_iocpacket __user *cioc =
6841 (struct compat_megasas_iocpacket __user *)arg;
6842 struct megasas_iocpacket __user *ioc =
6843 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
6844 int i;
6845 int error = 0;
6846 compat_uptr_t ptr;
6847 u32 local_sense_off;
6848 u32 local_sense_len;
6849 u32 user_sense_off;
6850
6851 if (clear_user(ioc, sizeof(*ioc)))
6852 return -EFAULT;
6853
6854 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
6855 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
6856 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
6857 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
6858 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
6859 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
6860 return -EFAULT;
6861
6862 /*
6863 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
6864 * sense_len is not null, so prepare the 64bit value under
6865 * the same condition.
6866 */
6867 if (get_user(local_sense_off, &ioc->sense_off) ||
6868 get_user(local_sense_len, &ioc->sense_len) ||
6869 get_user(user_sense_off, &cioc->sense_off))
6870 return -EFAULT;
6871
6872 if (local_sense_len) {
6873 void __user **sense_ioc_ptr =
6874 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
6875 compat_uptr_t *sense_cioc_ptr =
6876 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
6877 if (get_user(ptr, sense_cioc_ptr) ||
6878 put_user(compat_ptr(ptr), sense_ioc_ptr))
6879 return -EFAULT;
6880 }
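/*
 * Editorial note: the 32-bit application stored a 32-bit user pointer
 * to its sense buffer inside the frame at sense_off; the get_user/
 * put_user pair above reads that compat pointer and rewrites it in the
 * 64-bit copy via compat_ptr(), so megasas_mgmt_fw_ioctl() can treat
 * both ABIs identically when it copies the sense data back out.
 */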
6881
6882 for (i = 0; i < MAX_IOCTL_SGE; i++) {
6883 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
6884 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
6885 copy_in_user(&ioc->sgl[i].iov_len,
6886 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
6887 return -EFAULT;
6888 }
6889
6890 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
6891
6892 if (copy_in_user(&cioc->frame.hdr.cmd_status,
6893 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
6894 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
6895 return -EFAULT;
6896 }
6897 return error;
6898 }
6899
6900 static long
6901 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
6902 unsigned long arg)
6903 {
6904 switch (cmd) {
6905 case MEGASAS_IOC_FIRMWARE32:
6906 return megasas_mgmt_compat_ioctl_fw(file, arg);
6907 case MEGASAS_IOC_GET_AEN:
6908 return megasas_mgmt_ioctl_aen(file, arg);
6909 }
6910
6911 return -ENOTTY;
6912 }
6913 #endif
6914
6915 /*
6916 * File operations structure for management interface
6917 */
6918 static const struct file_operations megasas_mgmt_fops = {
6919 .owner = THIS_MODULE,
6920 .open = megasas_mgmt_open,
6921 .fasync = megasas_mgmt_fasync,
6922 .unlocked_ioctl = megasas_mgmt_ioctl,
6923 .poll = megasas_mgmt_poll,
6924 #ifdef CONFIG_COMPAT
6925 .compat_ioctl = megasas_mgmt_compat_ioctl,
6926 #endif
6927 .llseek = noop_llseek,
6928 };
6929
6930 /*
6931 * PCI hotplug support registration structure
6932 */
6933 static struct pci_driver megasas_pci_driver = {
6934
6935 .name = "megaraid_sas",
6936 .id_table = megasas_pci_table,
6937 .probe = megasas_probe_one,
6938 .remove = megasas_detach_one,
6939 .suspend = megasas_suspend,
6940 .resume = megasas_resume,
6941 .shutdown = megasas_shutdown,
6942 };
6943
6944 /*
6945 * Sysfs driver attributes
6946 */
6947 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6948 {
6949 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
6950 MEGASAS_VERSION);
6951 }
6952
6953 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
6954
6955 static ssize_t
6956 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
6957 {
6958 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
6959 MEGASAS_RELDATE);
6960 }
6961
6962 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
6963
6964 static ssize_t
6965 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
6966 {
6967 return sprintf(buf, "%u\n", support_poll_for_event);
6968 }
6969
6970 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
6971 megasas_sysfs_show_support_poll_for_event, NULL);
6972
6973 static ssize_t
6974 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
6975 {
6976 return sprintf(buf, "%u\n", support_device_change);
6977 }
6978
6979 static DRIVER_ATTR(support_device_change, S_IRUGO,
6980 megasas_sysfs_show_support_device_change, NULL);
6981
6982 static ssize_t
6983 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
6984 {
6985 return sprintf(buf, "%u\n", megasas_dbg_lvl);
6986 }
6987
6988 static ssize_t
6989 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
6990 {
6991 int retval = count;
6992
6993 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
6994 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
6995 retval = -EINVAL;
6996 }
6997 return retval;
6998 }
6999
7000 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
7001 megasas_sysfs_set_dbg_lvl);
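/*
 * Editorial note (behaviour assumed from the generic driver-attribute
 * code, not stated in this file): once megasas_init() registers these
 * attributes they appear under /sys/bus/pci/drivers/megaraid_sas/,
 * e.g. reading "version" or "release_date", or writing a number to
 * "dbg_lvl" to change megasas_dbg_lvl at run time.
 */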
7002
7003 static void
7004 megasas_aen_polling(struct work_struct *work)
7005 {
7006 struct megasas_aen_event *ev =
7007 container_of(work, struct megasas_aen_event, hotplug_work.work);
7008 struct megasas_instance *instance = ev->instance;
7009 union megasas_evt_class_locale class_locale;
7010 struct Scsi_Host *host;
7011 struct scsi_device *sdev1;
7012 u16 pd_index = 0;
7013 u16 ld_index = 0;
7014 int i, j, doscan = 0;
7015 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7016 int error;
7017 u8 dcmd_ret = DCMD_SUCCESS;
7018
7019 if (!instance) {
7020 printk(KERN_ERR "invalid instance!\n");
7021 kfree(ev);
7022 return;
7023 }
7024
7025 /* Adjust event workqueue thread wait time for VF mode */
7026 if (instance->requestorId)
7027 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7028
7029 /* Don't run the event workqueue thread if OCR is running */
7030 mutex_lock(&instance->reset_mutex);
7031
7032 instance->ev = NULL;
7033 host = instance->host;
7034 if (instance->evt_detail) {
7035 megasas_decode_evt(instance);
7036
7037 switch (le32_to_cpu(instance->evt_detail->code)) {
7038
7039 case MR_EVT_PD_INSERTED:
7040 case MR_EVT_PD_REMOVED:
7041 dcmd_ret = megasas_get_pd_list(instance);
7042 if (dcmd_ret == DCMD_SUCCESS)
7043 doscan = SCAN_PD_CHANNEL;
7044 break;
7045
7046 case MR_EVT_LD_OFFLINE:
7047 case MR_EVT_CFG_CLEARED:
7048 case MR_EVT_LD_DELETED:
7049 case MR_EVT_LD_CREATED:
7050 if (!instance->requestorId ||
7051 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7052 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7053
7054 if (dcmd_ret == DCMD_SUCCESS)
7055 doscan = SCAN_VD_CHANNEL;
7056
7057 break;
7058
7059 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7060 case MR_EVT_FOREIGN_CFG_IMPORTED:
7061 case MR_EVT_LD_STATE_CHANGE:
7062 dcmd_ret = megasas_get_pd_list(instance);
7063
7064 if (dcmd_ret != DCMD_SUCCESS)
7065 break;
7066
7067 if (!instance->requestorId ||
7068 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7069 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7070
7071 if (dcmd_ret != DCMD_SUCCESS)
7072 break;
7073
7074 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7075 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7076 instance->host->host_no);
7077 break;
7078
7079 case MR_EVT_CTRL_PROP_CHANGED:
7080 dcmd_ret = megasas_get_ctrl_info(instance);
7081 break;
7082 default:
7083 doscan = 0;
7084 break;
7085 }
7086 } else {
7087 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7088 mutex_unlock(&instance->reset_mutex);
7089 kfree(ev);
7090 return;
7091 }
7092
7093 mutex_unlock(&instance->reset_mutex);
7094
7095 if (doscan & SCAN_PD_CHANNEL) {
7096 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7097 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7098 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7099 sdev1 = scsi_device_lookup(host, i, j, 0);
7100 if (instance->pd_list[pd_index].driveState ==
7101 MR_PD_STATE_SYSTEM) {
7102 if (!sdev1)
7103 scsi_add_device(host, i, j, 0);
7104 else
7105 scsi_device_put(sdev1);
7106 } else {
7107 if (sdev1) {
7108 scsi_remove_device(sdev1);
7109 scsi_device_put(sdev1);
7110 }
7111 }
7112 }
7113 }
7114 }
7115
7116 if (doscan & SCAN_VD_CHANNEL) {
7117 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7118 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7119 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7120 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7121 if (instance->ld_ids[ld_index] != 0xff) {
7122 if (!sdev1)
7123 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7124 else
7125 scsi_device_put(sdev1);
7126 } else {
7127 if (sdev1) {
7128 scsi_remove_device(sdev1);
7129 scsi_device_put(sdev1);
7130 }
7131 }
7132 }
7133 }
7134 }
7135
7136 if (dcmd_ret == DCMD_SUCCESS)
7137 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7138 else
7139 seq_num = instance->last_seq_num;
7140
7141 /* Register AEN with FW for latest sequence number plus 1 */
7142 class_locale.members.reserved = 0;
7143 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7144 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7145
7146 if (instance->aen_cmd != NULL) {
7147 kfree(ev);
7148 return;
7149 }
7150
7151 mutex_lock(&instance->reset_mutex);
7152 error = megasas_register_aen(instance, seq_num,
7153 class_locale.word);
7154 if (error)
7155 dev_err(&instance->pdev->dev,
7156 "register aen failed error %x\n", error);
7157
7158 mutex_unlock(&instance->reset_mutex);
7159 kfree(ev);
7160 }
7161
7162 /**
7163 * megasas_init - Driver load entry point
7164 */
7165 static int __init megasas_init(void)
7166 {
7167 int rval;
7168
7169 /*
7170 * Announce driver version and other information
7171 */
7172 pr_info("megasas: %s\n", MEGASAS_VERSION);
7173
7174 spin_lock_init(&poll_aen_lock);
7175
7176 support_poll_for_event = 2;
7177 support_device_change = 1;
7178
7179 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7180
7181 /*
7182 * Register character device node
7183 */
7184 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7185
7186 if (rval < 0) {
7187 printk(KERN_DEBUG "megasas: failed to register management char device\n");
7188 return rval;
7189 }
7190
7191 megasas_mgmt_majorno = rval;
7192
7193 /*
7194 * Register ourselves as PCI hotplug module
7195 */
7196 rval = pci_register_driver(&megasas_pci_driver);
7197
7198 if (rval) {
7199 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7200 goto err_pcidrv;
7201 }
7202
7203 rval = driver_create_file(&megasas_pci_driver.driver,
7204 &driver_attr_version);
7205 if (rval)
7206 goto err_dcf_attr_ver;
7207
7208 rval = driver_create_file(&megasas_pci_driver.driver,
7209 &driver_attr_release_date);
7210 if (rval)
7211 goto err_dcf_rel_date;
7212
7213 rval = driver_create_file(&megasas_pci_driver.driver,
7214 &driver_attr_support_poll_for_event);
7215 if (rval)
7216 goto err_dcf_support_poll_for_event;
7217
7218 rval = driver_create_file(&megasas_pci_driver.driver,
7219 &driver_attr_dbg_lvl);
7220 if (rval)
7221 goto err_dcf_dbg_lvl;
7222 rval = driver_create_file(&megasas_pci_driver.driver,
7223 &driver_attr_support_device_change);
7224 if (rval)
7225 goto err_dcf_support_device_change;
7226
7227 return rval;
7228
7229 err_dcf_support_device_change:
7230 driver_remove_file(&megasas_pci_driver.driver,
7231 &driver_attr_dbg_lvl);
7232 err_dcf_dbg_lvl:
7233 driver_remove_file(&megasas_pci_driver.driver,
7234 &driver_attr_support_poll_for_event);
7235 err_dcf_support_poll_for_event:
7236 driver_remove_file(&megasas_pci_driver.driver,
7237 &driver_attr_release_date);
7238 err_dcf_rel_date:
7239 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7240 err_dcf_attr_ver:
7241 pci_unregister_driver(&megasas_pci_driver);
7242 err_pcidrv:
7243 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7244 return rval;
7245 }
7246
7247 /**
7248 * megasas_exit - Driver unload entry point
7249 */
7250 static void __exit megasas_exit(void)
7251 {
7252 driver_remove_file(&megasas_pci_driver.driver,
7253 &driver_attr_dbg_lvl);
7254 driver_remove_file(&megasas_pci_driver.driver,
7255 &driver_attr_support_poll_for_event);
7256 driver_remove_file(&megasas_pci_driver.driver,
7257 &driver_attr_support_device_change);
7258 driver_remove_file(&megasas_pci_driver.driver,
7259 &driver_attr_release_date);
7260 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7261
7262 pci_unregister_driver(&megasas_pci_driver);
7263 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7264 }
7265
7266 module_init(megasas_init);
7267 module_exit(megasas_exit);