/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long-running loops
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
   high pages. Keep the macro around because of the broken unmerged ia64 tree */

#define ADDR32 (0)

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

78/*============================================================================
79 * Create a binary signature - this is read by dptsig
80 * Needed for our management apps
81 *============================================================================
82 */
83static dpt_sig_S DPTI_sig = {
84 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
85#ifdef __i386__
86 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
87#elif defined(__ia64__)
88 PROC_INTEL, PROC_IA64,
89#elif defined(__sparc__)
90 PROC_ULTRASPARC, PROC_ULTRASPARC,
91#elif defined(__alpha__)
92 PROC_ALPHA, PROC_ALPHA,
93#else
94 (-1),(-1),
95#endif
96 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
97 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
98 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
99};
100
101
102
103
104/*============================================================================
105 * Globals
106 *============================================================================
107 */
108
static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close
};
123
124#ifdef REBOOT_NOTIFIER
125static struct notifier_block adpt_reboot_notifier =
126{
127 adpt_reboot_event,
128 NULL,
129 0
130};
131#endif
132
133/* Structures and definitions for synchronous message posting.
134 * See adpt_i2o_post_wait() for description
135 * */
136struct adpt_i2o_post_wait_data
137{
138 int status;
139 u32 id;
140 adpt_wait_queue_head_t *wq;
141 struct adpt_i2o_post_wait_data *next;
142};
143
144static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
145static u32 adpt_post_wait_id = 0;
146static DEFINE_SPINLOCK(adpt_post_wait_lock);
147
148
149/*============================================================================
150 * Functions
151 *============================================================================
152 */
153
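/* Read the adapter's blink-LED code.  Returns the value in the firmware
 * debug BLED register when the flag register reads 0xbc (value valid),
 * otherwise 0. */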
154static u8 adpt_read_blink_led(adpt_hba* host)
155{
156 if(host->FwDebugBLEDflag_P != 0) {
157 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158 return readb(host->FwDebugBLEDvalue_P);
159 }
160 }
161 return 0;
162}
163
164/*============================================================================
165 * Scsi host template interface functions
166 *============================================================================
167 */
168
169static struct pci_device_id dptids[] = {
170 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
171 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
172 { 0, }
173};
174MODULE_DEVICE_TABLE(pci,dptids);
175
static void adpt_exit(void);
177
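/* One-time driver initialization: find all Adaptec/DPT I2O controllers on
 * the PCI bus, install and activate each IOP, build the system table, bring
 * the IOPs online, read and parse their LCTs, register a SCSI host per HBA,
 * and finally register the /dev control node.  Returns the number of HBAs. */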
static int adpt_detect(void)
{
180 struct pci_dev *pDev = NULL;
181 adpt_hba* pHba;
182
	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}
197
198 /* In INIT state, Activate IOPs */
199 for (pHba = hba_chain; pHba; pHba = pHba->next) {
		// Activate does get_status, init_outbound, and get_hrt
201 if (adpt_i2o_activate_hba(pHba) < 0) {
202 adpt_i2o_delete_hba(pHba);
203 }
204 }
205
206
207 /* Active IOPs in HOLD state */
208
209rebuild_sys_tab:
210 if (hba_chain == NULL)
211 return 0;
212
213 /*
214 * If build_sys_table fails, we kill everything and bail
215 * as we can't init the IOPs w/o a system table
216 */
217 if (adpt_i2o_build_sys_table() < 0) {
218 adpt_i2o_sys_shutdown();
219 return 0;
220 }
221
222 PDEBUG("HBA's in HOLD state\n");
223
	/* If an IOP doesn't come online, we need to rebuild the system table */
225 for (pHba = hba_chain; pHba; pHba = pHba->next) {
226 if (adpt_i2o_online_hba(pHba) < 0) {
227 adpt_i2o_delete_hba(pHba);
228 goto rebuild_sys_tab;
229 }
230 }
231
232 /* Active IOPs now in OPERATIONAL state */
233 PDEBUG("HBA's in OPERATIONAL state\n");
234
235 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
236 for (pHba = hba_chain; pHba; pHba = pHba->next) {
237 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
238 if (adpt_i2o_lct_get(pHba) < 0){
239 adpt_i2o_delete_hba(pHba);
240 continue;
241 }
242
243 if (adpt_i2o_parse_lct(pHba) < 0){
244 adpt_i2o_delete_hba(pHba);
245 continue;
246 }
247 adpt_inquiry(pHba);
248 }
249
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_scsi_register(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		scsi_scan_host(pHba->host);
	}
259
	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_exit();
		return 0;
	}
	return hba_count;
}
269
270
static int adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;
	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
	return 0;
}
280
281
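/* Issue a standard INQUIRY to the adapter itself (via a DPT private
 * I2O_CMD_SCSI_EXEC message to ADAPTER_TID) and use the returned model and
 * firmware strings to fill in pHba->detail. */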
282static void adpt_inquiry(adpt_hba* pHba)
283{
284 u32 msg[14];
285 u32 *mptr;
286 u32 *lenptr;
287 int direction;
288 int scsidir;
289 u32 len;
290 u32 reqlen;
291 u8* buf;
292 u8 scb[16];
293 s32 rcode;
294
295 memset(msg, 0, sizeof(msg));
	buf = kmalloc(80,GFP_KERNEL|ADDR32);
	if(!buf){
298 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
299 return;
300 }
301 memset((void*)buf, 0, 36);
302
303 len = 36;
304 direction = 0x00000000;
305 scsidir =0x40000000; // DATA IN (iop<--dev)
306
307 reqlen = 14; // SINGLE SGE
308 /* Stick the headers on */
309 msg[0] = reqlen<<16 | SGL_OFFSET_12;
310 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
311 msg[2] = 0;
312 msg[3] = 0;
313 // Adaptec/DPT Private stuff
314 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
315 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
316 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
317 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
318 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
319 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
320 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
321
322 mptr=msg+7;
323
324 memset(scb, 0, sizeof(scb));
325 // Write SCSI command into the message - always 16 byte block
326 scb[0] = INQUIRY;
327 scb[1] = 0;
328 scb[2] = 0;
329 scb[3] = 0;
330 scb[4] = 36;
331 scb[5] = 0;
332 // Don't care about the rest of scb
333
334 memcpy(mptr, scb, sizeof(scb));
335 mptr+=4;
336 lenptr=mptr++; /* Remember me - fill in when we know */
337
338 /* Now fill in the SGList and command */
339 *lenptr = len;
340 *mptr++ = 0xD0000000|direction|len;
341 *mptr++ = virt_to_bus(buf);
342
	// Send it on its way
344 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
345 if (rcode != 0) {
346 sprintf(pHba->detail, "Adaptec I2O RAID");
347 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
348 if (rcode != -ETIME && rcode != -EINTR)
349 kfree(buf);
350 } else {
351 memset(pHba->detail, 0, sizeof(pHba->detail));
352 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
353 memcpy(&(pHba->detail[16]), " Model: ", 8);
354 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
355 memcpy(&(pHba->detail[40]), " FW: ", 4);
356 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
357 pHba->detail[48] = '\0'; /* precautionary */
358 kfree(buf);
359 }
360 adpt_i2o_status_get(pHba);
361 return ;
362}
363
364
365static int adpt_slave_configure(struct scsi_device * device)
366{
367 struct Scsi_Host *host = device->host;
368 adpt_hba* pHba;
369
370 pHba = (adpt_hba *) host->hostdata[0];
371
372 if (host->can_queue && device->tagged_supported) {
373 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
374 host->can_queue - 1);
375 } else {
376 scsi_adjust_queue_depth(device, 0, 1);
377 }
378 return 0;
379}
380
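/* queuecommand entry point: translate the midlayer scsi_cmnd into an I2O
 * message and post it to the IOP via adpt_scsi_to_i2o(). */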
381static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
382{
383 adpt_hba* pHba = NULL;
384 struct adpt_device* pDev = NULL; /* dpt per device information */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385
386 cmd->scsi_done = done;
387 /*
388 * SCSI REQUEST_SENSE commands will be executed automatically by the
389 * Host Adapter for any errors, so they should not be executed
390 * explicitly unless the Sense Data is zero indicating that no error
391 * occurred.
392 */
393
394 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
395 cmd->result = (DID_OK << 16);
396 cmd->scsi_done(cmd);
397 return 0;
398 }
399
400 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
401 if (!pHba) {
402 return FAILED;
403 }
404
405 rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues, or I should block
	 * calling scsi_done on the outstanding cmds instead.
	 * For now we don't set the IOCTL state.
	 */
414 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
415 pHba->host->last_reset = jiffies;
416 pHba->host->resetting = 1;
417 return 1;
418 }
419
	// TODO: if cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
422 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
423 /*
424 * First command request for this device. Set up a pointer
425 * to the device structure. This should be a TEST_UNIT_READY
426 * command from scan_scsis_single.
427 */
428 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
429 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
430 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
431 cmd->result = (DID_NO_CONNECT << 16);
432 cmd->scsi_done(cmd);
433 return 0;
434 }
435 cmd->device->hostdata = pDev;
436 }
437 pDev->pScsi_dev = cmd->device;
438
439 /*
440 * If we are being called from when the device is being reset,
441 * delay processing of the command until later.
442 */
443 if (pDev->state & DPTI_DEV_RESET ) {
444 return FAILED;
445 }
446 return adpt_scsi_to_i2o(pHba, cmd, pDev);
447}
448
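/* Fabricate a BIOS-compatible heads/sectors/cylinders geometry from the
 * capacity alone; the controller does not report a real geometry. */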
449static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
450 sector_t capacity, int geom[])
451{
452 int heads=-1;
453 int sectors=-1;
454 int cylinders=-1;
455
456 // *** First lets set the default geometry ****
457
	// If the capacity is less than 0x2000
459 if (capacity < 0x2000 ) { // floppy
460 heads = 18;
461 sectors = 2;
462 }
463 // else if between 0x2000 and 0x20000
464 else if (capacity < 0x20000) {
465 heads = 64;
466 sectors = 32;
467 }
468 // else if between 0x20000 and 0x40000
469 else if (capacity < 0x40000) {
470 heads = 65;
471 sectors = 63;
472 }
	// else if between 0x40000 and 0x80000
474 else if (capacity < 0x80000) {
475 heads = 128;
476 sectors = 63;
477 }
478 // else if greater than 0x80000
479 else {
480 heads = 255;
481 sectors = 63;
482 }
483 cylinders = sector_div(capacity, heads * sectors);
484
485 // Special case if CDROM
486 if(sdev->type == 5) { // CDROM
487 heads = 252;
488 sectors = 63;
489 cylinders = 1111;
490 }
491
492 geom[0] = heads;
493 geom[1] = sectors;
494 geom[2] = cylinders;
495
496 PDEBUG("adpt_bios_param: exit\n");
497 return 0;
498}
499
500
501static const char *adpt_info(struct Scsi_Host *host)
502{
503 adpt_hba* pHba;
504
505 pHba = (adpt_hba *) host->hostdata[0];
506 return (char *) (pHba->detail);
507}
508
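/* /proc/scsi read handler: report driver version, controller details and the
 * device list for the adapter that owns this Scsi_Host.  Writes are not
 * supported. */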
509static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
510 int length, int inout)
511{
512 struct adpt_device* d;
513 int id;
514 int chan;
515 int len = 0;
516 int begin = 0;
517 int pos = 0;
518 adpt_hba* pHba;
519 int unit;
520
521 *start = buffer;
522 if (inout == TRUE) {
523 /*
524 * The user has done a write and wants us to take the
525 * data in the buffer and do something with it.
526 * proc_scsiwrite calls us with inout = 1
527 *
528 * Read data from buffer (writing to us) - NOT SUPPORTED
529 */
530 return -EINVAL;
531 }
532
533 /*
534 * inout = 0 means the user has done a read and wants information
535 * returned, so we write information about the cards into the buffer
536 * proc_scsiread() calls us with inout = 0
537 */
538
539 // Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
548 return 0;
549 }
550 host = pHba->host;
551
552 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
553 len += sprintf(buffer+len, "%s\n", pHba->detail);
554 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
555 pHba->host->host_no, pHba->name, host->irq);
556 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
557 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
558
559 pos = begin + len;
560
561 /* CHECKPOINT */
562 if(pos > offset + length) {
563 goto stop_output;
564 }
565 if(pos <= offset) {
566 /*
567 * If we haven't even written to where we last left
568 * off (the last time we were called), reset the
569 * beginning pointer.
570 */
571 len = 0;
572 begin = pos;
573 }
574 len += sprintf(buffer+len, "Devices:\n");
575 for(chan = 0; chan < MAX_CHANNEL; chan++) {
576 for(id = 0; id < MAX_ID; id++) {
577 d = pHba->channel[chan].device[id];
578 while(d){
579 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
580 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
581 pos = begin + len;
582
583
584 /* CHECKPOINT */
585 if(pos > offset + length) {
586 goto stop_output;
587 }
588 if(pos <= offset) {
589 len = 0;
590 begin = pos;
591 }
592
593 unit = d->pI2o_dev->lct_data.tid;
594 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
595 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
596 scsi_device_online(d->pScsi_dev)? "online":"offline");
597 pos = begin + len;
598
599 /* CHECKPOINT */
600 if(pos > offset + length) {
601 goto stop_output;
602 }
603 if(pos <= offset) {
604 len = 0;
605 begin = pos;
606 }
607
608 d = d->next_lun;
609 }
610 }
611 }
612
613 /*
614 * begin is where we last checked our position with regards to offset
615 * begin is always less than offset. len is relative to begin. It
616 * is the number of bytes written past begin
617 *
618 */
619stop_output:
620 /* stop the output and calculate the correct length */
621 *(buffer + len) = '\0';
622
623 *start = buffer + (offset - begin); /* Start of wanted data */
624 len -= (offset - begin);
625 if(len > length) {
626 len = length;
627 } else if(len < 0){
628 len = 0;
629 **start = '\0';
630 }
631 return len;
632}
633
634
635/*===========================================================================
636 * Error Handling routines
637 *===========================================================================
638 */
639
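/* eh_abort_handler: send an I2O_CMD_SCSI_ABORT for the command's TID and
 * wait synchronously for the IOP to acknowledge it. */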
640static int adpt_abort(struct scsi_cmnd * cmd)
641{
642 adpt_hba* pHba = NULL; /* host bus adapter structure */
643 struct adpt_device* dptdevice; /* dpt per device information */
644 u32 msg[5];
645 int rcode;
646
647 if(cmd->serial_number == 0){
648 return FAILED;
649 }
650 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
651 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
652 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
653 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
654 return FAILED;
655 }
656
657 memset(msg, 0, sizeof(msg));
658 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
659 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
660 msg[2] = 0;
661 msg[3]= 0;
662 msg[4] = (u32)cmd;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
670 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
671 return FAILED;
672 }
673 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
674 return FAILED;
675 }
676 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
677 return SUCCESS;
678}
679
680
681#define I2O_DEVICE_RESET 0x27
682// This is the same for BLK and SCSI devices
683// NOTE this is wrong in the i2o.h definitions
684// This is not currently supported by our adapter but we issue it anyway
685static int adpt_device_reset(struct scsi_cmnd* cmd)
686{
687 adpt_hba* pHba;
688 u32 msg[4];
689 u32 rcode;
690 int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
694 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
695 if (!d) {
696 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
697 return FAILED;
698 }
699 memset(msg, 0, sizeof(msg));
700 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
701 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
702 msg[2] = 0;
703 msg[3] = 0;
704
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
715 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
716 return FAILED;
717 }
718 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
719 return FAILED;
720 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
722 return SUCCESS;
723 }
724}
725
726
727#define I2O_HBA_BUS_RESET 0x87
728// This version of bus reset is called by the eh_error handler
729static int adpt_bus_reset(struct scsi_cmnd* cmd)
730{
731 adpt_hba* pHba;
732 u32 msg[4];
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800733 u32 rcode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734
735 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
736 memset(msg, 0, sizeof(msg));
737 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
738 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
739 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
740 msg[2] = 0;
741 msg[3] = 0;
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800742 if (pHba->host)
743 spin_lock_irq(pHba->host->host_lock);
744 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
745 if (pHba->host)
746 spin_unlock_irq(pHba->host->host_lock);
747 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
749 return FAILED;
750 } else {
751 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
752 return SUCCESS;
753 }
754}
755
756// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
759 adpt_hba* pHba;
760 int rcode;
761 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
762 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
763 rcode = adpt_hba_reset(pHba);
764 if(rcode == 0){
765 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
766 return SUCCESS;
767 } else {
768 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
769 return FAILED;
770 }
771}
772
static int adpt_reset(struct scsi_cmnd* cmd)
774{
775 int rc;
776
777 spin_lock_irq(cmd->device->host->host_lock);
778 rc = __adpt_reset(cmd);
779 spin_unlock_irq(cmd->device->host->host_lock);
780
781 return rc;
782}
783
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
785static int adpt_hba_reset(adpt_hba* pHba)
786{
787 int rcode;
788
789 pHba->state |= DPTI_STATE_RESET;
790
791 // Activate does get status , init outbound, and get hrt
792 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
793 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
794 adpt_i2o_delete_hba(pHba);
795 return rcode;
796 }
797
798 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
799 adpt_i2o_delete_hba(pHba);
800 return rcode;
801 }
802 PDEBUG("%s: in HOLD state\n",pHba->name);
803
804 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
805 adpt_i2o_delete_hba(pHba);
806 return rcode;
807 }
808 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
809
810 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
811 adpt_i2o_delete_hba(pHba);
812 return rcode;
813 }
814
815 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
816 adpt_i2o_delete_hba(pHba);
817 return rcode;
818 }
819 pHba->state &= ~DPTI_STATE_RESET;
820
821 adpt_fail_posted_scbs(pHba);
822 return 0; /* return success */
823}
824
825/*===========================================================================
826 *
827 *===========================================================================
828 */
829
830
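/* Final cleanup on unload/shutdown: delete every HBA on the chain and free
 * any entries still sitting on the post_wait queue. */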
831static void adpt_i2o_sys_shutdown(void)
832{
833 adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

836 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
837 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
838 /* Delete all IOPs from the controller chain */
839 /* They should have already been released by the
840 * scsi-core
841 */
842 for (pHba = hba_chain; pHba; pHba = pNext) {
843 pNext = pHba->next;
844 adpt_i2o_delete_hba(pHba);
845 }
846
847 /* Remove any timedout entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
857// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
858 adpt_post_wait_queue = NULL;
859
860 printk(KERN_INFO "Adaptec I2O controllers down.\n");
861}
862
863/*
864 * reboot/shutdown notification.
865 *
866 * - Quiesce each IOP in the system
867 *
868 */
869
870#ifdef REBOOT_NOTIFIER
871static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
872{
873
874 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
875 return NOTIFY_DONE;
876
877 adpt_i2o_sys_shutdown();
878
879 return NOTIFY_DONE;
880}
881#endif
882
883
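/* Set up one controller: enable the PCI device, map its BAR(s) (Raptor
 * boards use a split-BAR layout), allocate and chain the adpt_hba structure,
 * and register the shared interrupt handler. */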
static int adpt_install_hba(struct pci_dev* pDev)
{
886
887 adpt_hba* pHba = NULL;
888 adpt_hba* p = NULL;
889 ulong base_addr0_phys = 0;
890 ulong base_addr1_phys = 0;
891 u32 hba_map0_area_size = 0;
892 u32 hba_map1_area_size = 0;
893 void __iomem *base_addr_virt = NULL;
894 void __iomem *msg_addr_virt = NULL;
895
896 int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);
	if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pDev, DMA_32BIT_MASK))
		return -EINVAL;
911
912 base_addr0_phys = pci_resource_start(pDev,0);
913 hba_map0_area_size = pci_resource_len(pDev,0);
914
915 // Check if standard PCI card or single BAR Raptor
916 if(pDev->device == PCI_DPT_DEVICE_ID){
917 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
918 // Raptor card with this device id needs 4M
919 hba_map0_area_size = 0x400000;
920 } else { // Not Raptor - it is a PCI card
921 if(hba_map0_area_size > 0x100000 ){
922 hba_map0_area_size = 0x100000;
923 }
924 }
925 } else {// Raptor split BAR config
926 // Use BAR1 in this configuration
927 base_addr1_phys = pci_resource_start(pDev,1);
928 hba_map1_area_size = pci_resource_len(pDev,1);
929 raptorFlag = TRUE;
930 }
931
	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}
950
	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

976 pHba->pDev = pDev;
977 pHba->base_addr_phys = base_addr0_phys;
978
979 // Set up the Virtual Base Address of the I2O Device
980 pHba->base_addr_virt = base_addr_virt;
981 pHba->msg_addr_virt = msg_addr_virt;
982 pHba->irq_mask = base_addr_virt+0x30;
983 pHba->post_port = base_addr_virt+0x40;
984 pHba->reply_port = base_addr_virt+0x44;
985
986 pHba->hrt = NULL;
987 pHba->lct = NULL;
988 pHba->lct_size = 0;
989 pHba->status_block = NULL;
990 pHba->post_count = 0;
991 pHba->state = DPTI_STATE_RESET;
992 pHba->pDev = pDev;
993 pHba->devices = NULL;
994
995 // Initializing the spinlocks
996 spin_lock_init(&pHba->state_lock);
997 spin_lock_init(&adpt_post_wait_lock);
998
999 if(raptorFlag == 0){
1000 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1001 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1002 } else {
1003 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1004 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1005 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1006 }
1007
	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1010 adpt_i2o_delete_hba(pHba);
1011 return -EINVAL;
1012 }
1013
1014 return 0;
1015}
1016
1017
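/* Tear down one controller: unlink it from the HBA chain, release its IRQ,
 * mappings and PCI regions, and free all per-device structures. */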
1018static void adpt_i2o_delete_hba(adpt_hba* pHba)
1019{
1020 adpt_hba* p1;
1021 adpt_hba* p2;
1022 struct i2o_device* d;
1023 struct i2o_device* next;
1024 int i;
1025 int j;
1026 struct adpt_device* pDev;
1027 struct adpt_device* pNext;
1028
1029
	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
1035 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1036 if(p1 == pHba) {
1037 if(p2) {
1038 p2->next = p1->next;
1039 } else {
1040 hba_chain = p1->next;
1041 }
1042 break;
1043 }
1044 }
1045
1046 hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	kfree(pHba->hrt);
	kfree(pHba->lct);
	kfree(pHba->status_block);
	kfree(pHba->reply_pool);

1059 for(d = pHba->devices; d ; d = next){
1060 next = d->next;
1061 kfree(d);
1062 }
1063 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1064 for(j = 0; j < MAX_ID; j++){
1065 if(pHba->channel[i].device[j] != NULL){
1066 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1067 pNext = pDev->next_lun;
1068 kfree(pDev);
1069 }
1070 }
1071 }
1072 }
	pci_dev_put(pHba->pDev);
	kfree(pHba);
1075
1076 if(hba_count <= 0){
1077 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1078 }
1079}
1080
1081
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1083{
1084 struct adpt_device* d;
1085
1086 if(chan < 0 || chan >= MAX_CHANNEL)
1087 return NULL;
1088
1089 if( pHba->channel[chan].device == NULL){
1090 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1091 return NULL;
1092 }
1093
1094 d = pHba->channel[chan].device[id];
1095 if(!d || d->tid == 0) {
1096 return NULL;
1097 }
1098
1099 /* If it is the only lun at that address then this should match*/
1100 if(d->scsi_lun == lun){
1101 return d;
1102 }
1103
1104 /* else we need to look through all the luns */
1105 for(d=d->next_lun ; d ; d = d->next_lun){
1106 if(d->scsi_lun == lun){
1107 return d;
1108 }
1109 }
1110 return NULL;
1111}
1112
1113
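/* Post an I2O message and sleep until the reply arrives or the timeout
 * expires.  A 15-bit id is stored in msg[2] so the interrupt path
 * (adpt_i2o_post_wait_complete) can find this waiter and wake it. */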
1114static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1115{
1116 // I used my own version of the WAIT_QUEUE_HEAD
1117 // to handle some version differences
1118 // When embedded in the kernel this could go back to the vanilla one
1119 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1120 int status = 0;
1121 ulong flags = 0;
1122 struct adpt_i2o_post_wait_data *p1, *p2;
1123 struct adpt_i2o_post_wait_data *wait_data =
1124 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
1131 * The spin locking is needed to keep anyone from playing
1132 * with the queue pointers and id while we do the same
1133 */
1134 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1135 // TODO we need a MORE unique way of getting ids
1136 // to support async LCT get
1137 wait_data->next = adpt_post_wait_queue;
1138 adpt_post_wait_queue = wait_data;
1139 adpt_post_wait_id++;
1140 adpt_post_wait_id &= 0x7fff;
1141 wait_data->id = adpt_post_wait_id;
1142 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1143
1144 wait_data->wq = &adpt_wq_i2o_post;
1145 wait_data->status = -ETIMEDOUT;
1146
	add_wait_queue(&adpt_wq_i2o_post, &wait);

1149 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1150 timeout *= HZ;
1151 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if(pHba->host)
1154 spin_unlock_irq(pHba->host->host_lock);
1155 if (!timeout)
1156 schedule();
1157 else{
1158 timeout = schedule_timeout(timeout);
1159 if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
1163 status = -ETIME;
1164 }
1165 }
1166 if(pHba->host)
1167 spin_lock_irq(pHba->host->host_lock);
1168 }
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

1171 if(status == -ETIMEDOUT){
1172 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1173 // We will have to free the wait_data memory during shutdown
1174 return status;
1175 }
1176
1177 /* Remove the entry from the queue. */
1178 p2 = NULL;
1179 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1180 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1181 if(p1 == wait_data) {
1182 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1183 status = -EOPNOTSUPP;
1184 }
1185 if(p2) {
1186 p2->next = p1->next;
1187 } else {
1188 adpt_post_wait_queue = p1->next;
1189 }
1190 break;
1191 }
1192 }
1193 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1194
1195 kfree(wait_data);
1196
1197 return status;
1198}
1199
1200
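/* Grab a free inbound message frame from the post port, copy the message
 * into it and post it.  Times out after 30 seconds if no frame is free. */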
1201static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1202{
1203
1204 u32 m = EMPTY_QUEUE;
1205 u32 __iomem *msg;
1206 ulong timeout = jiffies + 30*HZ;
1207 do {
1208 rmb();
1209 m = readl(pHba->post_port);
1210 if (m != EMPTY_QUEUE) {
1211 break;
1212 }
1213 if(time_after(jiffies,timeout)){
1214 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1215 return -ETIMEDOUT;
1216 }
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);
1219
1220 msg = pHba->msg_addr_virt + m;
1221 memcpy_toio(msg, data, len);
1222 wmb();
1223
1224 //post message
1225 writel(m, pHba->post_port);
1226 wmb();
1227
1228 return 0;
1229}
1230
1231
1232static void adpt_i2o_post_wait_complete(u32 context, int status)
1233{
1234 struct adpt_i2o_post_wait_data *p1 = NULL;
1235 /*
1236 * We need to search through the adpt_post_wait
1237 * queue to see if the given message is still
1238 * outstanding. If not, it means that the IOP
1239 * took longer to respond to the message than we
1240 * had allowed and timer has already expired.
1241 * Not much we can do about that except log
1242 * it for debug purposes, increase timeout, and recompile
1243 *
1244 * Lock needed to keep anyone from moving queue pointers
1245 * around while we're looking through them.
1246 */
1247
1248 context &= 0x7fff;
1249
1250 spin_lock(&adpt_post_wait_lock);
1251 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1252 if(p1->id == context) {
1253 p1->status = status;
1254 spin_unlock(&adpt_post_wait_lock);
1255 wake_up_interruptible(p1->wq);
1256 return;
1257 }
1258 }
1259 spin_unlock(&adpt_post_wait_lock);
1260 // If this happens we lose commands that probably really completed
1261 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1262 printk(KERN_DEBUG" Tasks in wait queue:\n");
1263 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1264 printk(KERN_DEBUG" %d\n",p1->id);
1265 }
1266 return;
1267}
1268
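/* Send an IOP reset (I2O_CMD_ADAPTER_RESET) and poll the status byte until
 * the firmware reports the reset complete or the timeout expires. */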
1269static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1270{
1271 u32 msg[8];
1272 u8* status;
1273 u32 m = EMPTY_QUEUE ;
1274 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1275
1276 if(pHba->initialized == FALSE) { // First time reset should be quick
1277 timeout = jiffies + (25*HZ);
1278 } else {
1279 adpt_i2o_quiesce_hba(pHba);
1280 }
1281
1282 do {
1283 rmb();
1284 m = readl(pHba->post_port);
1285 if (m != EMPTY_QUEUE) {
1286 break;
1287 }
1288 if(time_after(jiffies,timeout)){
1289 printk(KERN_WARNING"Timeout waiting for message!\n");
1290 return -ETIMEDOUT;
1291 }
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = kzalloc(4, GFP_KERNEL|ADDR32);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

1302 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1303 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1304 msg[2]=0;
1305 msg[3]=0;
1306 msg[4]=0;
1307 msg[5]=0;
1308 msg[6]=virt_to_bus(status);
1309 msg[7]=0;
1310
1311 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1312 wmb();
1313 writel(m, pHba->post_port);
1314 wmb();
1315
1316 while(*status == 0){
1317 if(time_after(jiffies,timeout)){
1318 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1319 kfree(status);
1320 return -ETIMEDOUT;
1321 }
1322 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001323 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 }
1325
1326 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1327 PDEBUG("%s: Reset in progress...\n", pHba->name);
1328 // Here we wait for message frame to become available
1329 // indicated that reset has finished
1330 do {
1331 rmb();
1332 m = readl(pHba->post_port);
1333 if (m != EMPTY_QUEUE) {
1334 break;
1335 }
1336 if(time_after(jiffies,timeout)){
1337 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1338 return -ETIMEDOUT;
1339 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001340 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 } while (m == EMPTY_QUEUE);
1342 // Flush the offset
1343 adpt_send_nop(pHba, m);
1344 }
1345 adpt_i2o_status_get(pHba);
1346 if(*status == 0x02 ||
1347 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1348 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1349 pHba->name);
1350 } else {
1351 PDEBUG("%s: Reset completed.\n", pHba->name);
1352 }
1353
1354 kfree(status);
1355#ifdef UARTDELAY
1356 // This delay is to allow someone attached to the card through the debug UART to
1357 // set up the dump levels that they want before the rest of the initialization sequence
1358 adpt_delay(20000);
1359#endif
1360 return 0;
1361}
1362
1363
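/* Walk the logical configuration table: record bus/target/lun limits for
 * hidden devices, register bus adapter ports, and build the per-channel
 * adpt_device tables for the block, SCSI and FC peripherals we expose. */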
1364static int adpt_i2o_parse_lct(adpt_hba* pHba)
1365{
1366 int i;
1367 int max;
1368 int tid;
1369 struct i2o_device *d;
1370 i2o_lct *lct = pHba->lct;
1371 u8 bus_no = 0;
1372 s16 scsi_id;
1373 s16 scsi_lun;
1374 u32 buf[10]; // larger than 7, or 8 ...
1375 struct adpt_device* pDev;
1376
1377 if (lct == NULL) {
1378 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1379 return -1;
1380 }
1381
1382 max = lct->table_size;
1383 max -= 3;
1384 max /= 9;
1385
1386 for(i=0;i<max;i++) {
1387 if( lct->lct_entry[i].user_tid != 0xfff){
1388 /*
1389 * If we have hidden devices, we need to inform the upper layers about
1390 * the possible maximum id reference to handle device access when
1391 * an array is disassembled. This code has no other purpose but to
1392 * allow us future access to devices that are currently hidden
1393 * behind arrays, hotspares or have not been configured (JBOD mode).
1394 */
1395 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1396 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1397 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1398 continue;
1399 }
1400 tid = lct->lct_entry[i].tid;
1401 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1402 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1403 continue;
1404 }
1405 bus_no = buf[0]>>16;
1406 scsi_id = buf[1];
1407 scsi_lun = (buf[2]>>8 )&0xff;
1408 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1409 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1410 continue;
1411 }
1412 if (scsi_id >= MAX_ID){
1413 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1414 continue;
1415 }
1416 if(bus_no > pHba->top_scsi_channel){
1417 pHba->top_scsi_channel = bus_no;
1418 }
1419 if(scsi_id > pHba->top_scsi_id){
1420 pHba->top_scsi_id = scsi_id;
1421 }
1422 if(scsi_lun > pHba->top_scsi_lun){
1423 pHba->top_scsi_lun = scsi_lun;
1424 }
1425 continue;
1426 }
Robert P. J. Day5cbded52006-12-13 00:35:56 -08001427 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 if(d==NULL)
1429 {
1430 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1431 return -ENOMEM;
1432 }
1433
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001434 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 d->next = NULL;
1436
1437 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1438
1439 d->flags = 0;
1440 tid = d->lct_data.tid;
1441 adpt_i2o_report_hba_unit(pHba, d);
1442 adpt_i2o_install_device(pHba, d);
1443 }
1444 bus_no = 0;
1445 for(d = pHba->devices; d ; d = d->next) {
1446 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1447 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1448 tid = d->lct_data.tid;
1449 // TODO get the bus_no from hrt-but for now they are in order
1450 //bus_no =
1451 if(bus_no > pHba->top_scsi_channel){
1452 pHba->top_scsi_channel = bus_no;
1453 }
1454 pHba->channel[bus_no].type = d->lct_data.class_id;
1455 pHba->channel[bus_no].tid = tid;
1456 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1457 {
1458 pHba->channel[bus_no].scsi_id = buf[1];
1459 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1460 }
1461 // TODO remove - this is just until we get from hrt
1462 bus_no++;
1463 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1464 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1465 break;
1466 }
1467 }
1468 }
1469
1470 // Setup adpt_device table
1471 for(d = pHba->devices; d ; d = d->next) {
1472 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1473 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1474 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1475
1476 tid = d->lct_data.tid;
1477 scsi_id = -1;
1478 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1479 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1480 bus_no = buf[0]>>16;
1481 scsi_id = buf[1];
1482 scsi_lun = (buf[2]>>8 )&0xff;
1483 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1484 continue;
1485 }
1486 if (scsi_id >= MAX_ID) {
1487 continue;
1488 }
1489 if( pHba->channel[bus_no].device[scsi_id] == NULL){
				pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
				if(pDev == NULL) {
					return -ENOMEM;
				}
				pHba->channel[bus_no].device[scsi_id] = pDev;
			} else {
				for( pDev = pHba->channel[bus_no].device[scsi_id];
						pDev->next_lun; pDev = pDev->next_lun){
				}
				pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
				if(pDev->next_lun == NULL) {
					return -ENOMEM;
				}
				pDev = pDev->next_lun;
1504 }
1505 pDev->tid = tid;
1506 pDev->scsi_channel = bus_no;
1507 pDev->scsi_id = scsi_id;
1508 pDev->scsi_lun = scsi_lun;
1509 pDev->pI2o_dev = d;
1510 d->owner = pDev;
1511 pDev->type = (buf[0])&0xff;
1512 pDev->flags = (buf[0]>>8)&0xff;
1513 if(scsi_id > pHba->top_scsi_id){
1514 pHba->top_scsi_id = scsi_id;
1515 }
1516 if(scsi_lun > pHba->top_scsi_lun){
1517 pHba->top_scsi_lun = scsi_lun;
1518 }
1519 }
1520 if(scsi_id == -1){
1521 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1522 d->lct_data.identity_tag);
1523 }
1524 }
1525 }
1526 return 0;
1527}
1528
1529
1530/*
1531 * Each I2O controller has a chain of devices on it - these match
1532 * the useful parts of the LCT of the board.
1533 */
1534
1535static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1536{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
1539 d->owner=NULL;
1540 d->next=pHba->devices;
1541 d->prev=NULL;
1542 if (pHba->devices != NULL){
1543 pHba->devices->prev=d;
1544 }
1545 pHba->devices=d;
1546 *d->dev_name = 0;
1547
	mutex_unlock(&adpt_configuration_lock);
	return 0;
1550}
1551
1552static int adpt_open(struct inode *inode, struct file *file)
1553{
1554 int minor;
1555 adpt_hba* pHba;
1556
1557 //TODO check for root access
1558 //
1559 minor = iminor(inode);
1560 if (minor >= hba_count) {
1561 return -ENXIO;
1562 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001563 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1565 if (pHba->unit == minor) {
1566 break; /* found adapter */
1567 }
1568 }
1569 if (pHba == NULL) {
Arjan van de Ven0b950672006-01-11 13:16:10 +01001570 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 return -ENXIO;
1572 }
1573
1574// if(pHba->in_use){
Arjan van de Ven0b950672006-01-11 13:16:10 +01001575 // mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576// return -EBUSY;
1577// }
1578
1579 pHba->in_use = 1;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001580 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581
1582 return 0;
1583}
1584
1585static int adpt_close(struct inode *inode, struct file *file)
1586{
1587 int minor;
1588 adpt_hba* pHba;
1589
1590 minor = iminor(inode);
1591 if (minor >= hba_count) {
1592 return -ENXIO;
1593 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001594 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1596 if (pHba->unit == minor) {
1597 break; /* found adapter */
1598 }
1599 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001600 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 if (pHba == NULL) {
1602 return -ENXIO;
1603 }
1604
1605 pHba->in_use = 0;
1606
1607 return 0;
1608}
1609
1610
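/* Handle the DPT SCSI pass-through ioctl: copy the user's I2O message and
 * any scatter/gather buffers into kernel memory, post it with
 * adpt_i2o_post_wait(), then copy the SG data and reply frame back out. */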
1611static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1612{
1613 u32 msg[MAX_MESSAGE_SIZE];
1614 u32* reply = NULL;
1615 u32 size = 0;
1616 u32 reply_size = 0;
1617 u32 __user *user_msg = arg;
1618 u32 __user * user_reply = NULL;
1619 void *sg_list[pHba->sg_tablesize];
1620 u32 sg_offset = 0;
1621 u32 sg_count = 0;
1622 int sg_index = 0;
1623 u32 i = 0;
1624 u32 rcode = 0;
1625 void *p = NULL;
1626 ulong flags = 0;
1627
1628 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1629 // get user msg size in u32s
1630 if(get_user(size, &user_msg[0])){
1631 return -EFAULT;
1632 }
1633 size = size>>16;
1634
1635 user_reply = &user_msg[size];
1636 if(size > MAX_MESSAGE_SIZE){
1637 return -EFAULT;
1638 }
1639 size *= 4; // Convert to bytes
1640
1641 /* Copy in the user's I2O command */
1642 if(copy_from_user(msg, user_msg, size)) {
1643 return -EFAULT;
1644 }
1645 get_user(reply_size, &user_reply[0]);
1646 reply_size = reply_size>>16;
1647 if(reply_size > REPLY_FRAME_SIZE){
1648 reply_size = REPLY_FRAME_SIZE;
1649 }
1650 reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
1657 msg[2] = 0x40000000; // IOCTL context
1658 msg[3] = (u32)reply;
1659 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1660 if(sg_offset) {
1661 // TODO 64bit fix
1662 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1663 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1664 if (sg_count > pHba->sg_tablesize){
1665 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1666 kfree (reply);
1667 return -EINVAL;
1668 }
1669
1670 for(i = 0; i < sg_count; i++) {
1671 int sg_size;
1672
1673 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1674 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1675 rcode = -EINVAL;
1676 goto cleanup;
1677 }
1678 sg_size = sg[i].flag_count & 0xffffff;
1679 /* Allocate memory for the transfer */
1680 p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1681 if(!p) {
1682 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1683 pHba->name,sg_size,i,sg_count);
1684 rcode = -ENOMEM;
1685 goto cleanup;
1686 }
1687 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1688 /* Copy in the user's SG buffer if necessary */
1689 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1690 // TODO 64bit fix
1691 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1692 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1693 rcode = -EFAULT;
1694 goto cleanup;
1695 }
1696 }
1697 //TODO 64bit fix
1698 sg[i].addr_bus = (u32)virt_to_bus(p);
1699 }
1700 }
1701
1702 do {
1703 if(pHba->host)
1704 spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from entering the
1706 // controller while processing the ioctl
1707// pHba->state |= DPTI_STATE_IOCTL;
1708// We can't set this now - The scsi subsystem sets host_blocked and
1709// the queue empties and stops. We need a way to restart the queue
1710 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1711 if (rcode != 0)
1712 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1713 rcode, reply);
1714// pHba->state &= ~DPTI_STATE_IOCTL;
1715 if(pHba->host)
1716 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1717 } while(rcode == -ETIMEDOUT);
1718
1719 if(rcode){
1720 goto cleanup;
1721 }
1722
1723 if(sg_offset) {
1724 /* Copy back the Scatter Gather buffers back to user space */
1725 u32 j;
1726 // TODO 64bit fix
1727 struct sg_simple_element* sg;
1728 int sg_size;
1729
1730 // re-acquire the original message to handle correctly the sg copy operation
1731 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1732 // get user msg size in u32s
1733 if(get_user(size, &user_msg[0])){
1734 rcode = -EFAULT;
1735 goto cleanup;
1736 }
1737 size = size>>16;
1738 size *= 4;
1739 /* Copy in the user's I2O command */
1740 if (copy_from_user (msg, user_msg, size)) {
1741 rcode = -EFAULT;
1742 goto cleanup;
1743 }
1744 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1745
1746 // TODO 64bit fix
1747 sg = (struct sg_simple_element*)(msg + sg_offset);
1748 for (j = 0; j < sg_count; j++) {
1749 /* Copy out the SG list to user's buffer if necessary */
1750 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1751 sg_size = sg[j].flag_count & 0xffffff;
1752 // TODO 64bit fix
1753 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1754 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1755 rcode = -EFAULT;
1756 goto cleanup;
1757 }
1758 }
1759 }
1760 }
1761
1762 /* Copy back the reply to user space */
1763 if (reply_size) {
1764 // we wrote our own values for context - now restore the user supplied ones
1765 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1766 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1767 rcode = -EFAULT;
1768 }
1769 if(copy_to_user(user_reply, reply, reply_size)) {
1770 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1771 rcode = -EFAULT;
1772 }
1773 }
1774
1775
1776cleanup:
1777 if (rcode != -ETIME && rcode != -EINTR)
1778 kfree (reply);
1779 while(sg_index) {
1780 if(sg_list[--sg_index]) {
1781 if (rcode != -ETIME && rcode != -EINTR)
1782 kfree(sg_list[sg_index]);
1783 }
1784 }
1785 return rcode;
1786}
1787
1788
1789/*
1790 * This routine returns information about the system.  It does not affect
1791 * any driver logic, so it does not matter if the info is wrong.
1792 */
1793
1794/* Get all the info we can not get from kernel services */
1795static int adpt_system_info(void __user *buffer)
1796{
1797 sysInfo_S si;
1798
1799 memset(&si, 0, sizeof(si));
1800
1801 si.osType = OS_LINUX;
1802	si.osMajorVersion = 0;
1803 si.osMinorVersion = 0;
1804 si.osRevision = 0;
1805	si.busType = SI_PCI_BUS;
1806 si.processorFamily = DPTI_sig.dsProcessorFamily;
1807
1808#if defined __i386__
1809 adpt_i386_info(&si);
1810#elif defined (__ia64__)
1811 adpt_ia64_info(&si);
1812#elif defined(__sparc__)
1813 adpt_sparc_info(&si);
1814#elif defined (__alpha__)
1815 adpt_alpha_info(&si);
1816#else
1817 si.processorType = 0xff ;
1818#endif
1819 if(copy_to_user(buffer, &si, sizeof(si))){
1820 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1821 return -EFAULT;
1822 }
1823
1824 return 0;
1825}
1826
1827#if defined __ia64__
1828static void adpt_ia64_info(sysInfo_S* si)
1829{
1830 // This is all the info we need for now
1831 // We will add more info as our new
1832	// management utility requires it
1833 si->processorType = PROC_IA64;
1834}
1835#endif
1836
1837
1838#if defined __sparc__
1839static void adpt_sparc_info(sysInfo_S* si)
1840{
1841 // This is all the info we need for now
1842 // We will add more info as our new
1843	// management utility requires it
1844 si->processorType = PROC_ULTRASPARC;
1845}
1846#endif
1847
1848#if defined __alpha__
1849static void adpt_alpha_info(sysInfo_S* si)
1850{
1851 // This is all the info we need for now
1852 // We will add more info as our new
1853	// management utility requires it
1854 si->processorType = PROC_ALPHA;
1855}
1856#endif
1857
1858#if defined __i386__
1859
1860static void adpt_i386_info(sysInfo_S* si)
1861{
1862 // This is all the info we need for now
1863 // We will add more info as our new
1864	// management utility requires it
1865 switch (boot_cpu_data.x86) {
1866 case CPU_386:
1867 si->processorType = PROC_386;
1868 break;
1869 case CPU_486:
1870 si->processorType = PROC_486;
1871 break;
1872 case CPU_586:
1873 si->processorType = PROC_PENTIUM;
1874 break;
1875 default: // Just in case
1876 si->processorType = PROC_PENTIUM;
1877 break;
1878 }
1879}
1880
1881#endif
1882
1883
1884static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1885 ulong arg)
1886{
1887 int minor;
1888 int error = 0;
1889 adpt_hba* pHba;
1890 ulong flags = 0;
1891 void __user *argp = (void __user *)arg;
1892
1893 minor = iminor(inode);
1894 if (minor >= DPTI_MAX_HBA){
1895 return -ENXIO;
1896 }
1897	mutex_lock(&adpt_configuration_lock);
1898	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1899 if (pHba->unit == minor) {
1900 break; /* found adapter */
1901 }
1902 }
1903	mutex_unlock(&adpt_configuration_lock);
1904	if(pHba == NULL){
1905 return -ENXIO;
1906 }
1907
1908	while((volatile u32) pHba->state & DPTI_STATE_RESET )
1909		schedule_timeout_uninterruptible(2);
1910
1911 switch (cmd) {
1912 // TODO: handle 3 cases
1913 case DPT_SIGNATURE:
1914 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1915 return -EFAULT;
1916 }
1917 break;
1918 case I2OUSRCMD:
1919 return adpt_i2o_passthru(pHba, argp);
1920
1921 case DPT_CTRLINFO:{
1922 drvrHBAinfo_S HbaInfo;
1923
1924#define FLG_OSD_PCI_VALID 0x0001
1925#define FLG_OSD_DMA 0x0002
1926#define FLG_OSD_I2O 0x0004
1927 memset(&HbaInfo, 0, sizeof(HbaInfo));
1928 HbaInfo.drvrHBAnum = pHba->unit;
1929 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1930 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1931 HbaInfo.pciBusNum = pHba->pDev->bus->number;
1932 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1933 HbaInfo.Interrupt = pHba->pDev->irq;
1934 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1935 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1936 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1937 return -EFAULT;
1938 }
1939 break;
1940 }
1941 case DPT_SYSINFO:
1942 return adpt_system_info(argp);
1943 case DPT_BLINKLED:{
1944 u32 value;
1945 value = (u32)adpt_read_blink_led(pHba);
1946 if (copy_to_user(argp, &value, sizeof(value))) {
1947 return -EFAULT;
1948 }
1949 break;
1950 }
1951 case I2ORESETCMD:
1952 if(pHba->host)
1953 spin_lock_irqsave(pHba->host->host_lock, flags);
1954 adpt_hba_reset(pHba);
1955 if(pHba->host)
1956 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1957 break;
1958 case I2ORESCANCMD:
1959 adpt_rescan(pHba);
1960 break;
1961 default:
1962 return -EINVAL;
1963 }
1964
1965 return error;
1966}
1967
1968
1969static irqreturn_t adpt_isr(int irq, void *dev_id)
1970{
1971 struct scsi_cmnd* cmd;
1972 adpt_hba* pHba = dev_id;
1973 u32 m;
1974	void __iomem *reply;
1975	u32 status=0;
1976 u32 context;
1977 ulong flags = 0;
1978 int handled = 0;
1979
1980 if (pHba == NULL){
1981 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1982 return IRQ_NONE;
1983 }
1984 if(pHba->host)
1985 spin_lock_irqsave(pHba->host->host_lock, flags);
1986
1987 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
1988 m = readl(pHba->reply_port);
1989 if(m == EMPTY_QUEUE){
1990 // Try twice then give up
1991 rmb();
1992 m = readl(pHba->reply_port);
1993 if(m == EMPTY_QUEUE){
1994 // This really should not happen
1995 printk(KERN_ERR"dpti: Could not get reply frame\n");
1996 goto out;
1997 }
1998 }
1999		reply = bus_to_virt(m);
2000
2001 if (readl(reply) & MSG_FAIL) {
2002 u32 old_m = readl(reply+28);
2003			void __iomem *msg;
2004			u32 old_context;
2005 PDEBUG("%s: Failed message\n",pHba->name);
2006 if(old_m >= 0x100000){
2007 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2008 writel(m,pHba->reply_port);
2009 continue;
2010 }
2011 // Transaction context is 0 in failed reply frame
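			// Pull the original transaction context out of the preserved
			// message frame, patch it into the reply, and hand the old
			// frame back to the IOP with a NOP.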
2012			msg = pHba->msg_addr_virt + old_m;
2013			old_context = readl(msg+12);
2014 writel(old_context, reply+12);
2015 adpt_send_nop(pHba, old_m);
2016 }
2017 context = readl(reply+8);
2018 if(context & 0x40000000){ // IOCTL
2019			void *p = (void *)readl(reply+12);
2020			if( p != NULL) {
2021				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2022			}
2023 // All IOCTLs will also be post wait
2024 }
2025 if(context & 0x80000000){ // Post wait message
2026 status = readl(reply+16);
2027 if(status >> 24){
2028 status &= 0xffff; /* Get detail status */
2029 } else {
2030 status = I2O_POST_WAIT_OK;
2031 }
2032 if(!(context & 0x40000000)) {
2033 cmd = (struct scsi_cmnd*) readl(reply+12);
2034 if(cmd != NULL) {
2035 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2036 }
2037 }
2038 adpt_i2o_post_wait_complete(context, status);
2039 } else { // SCSI message
2040 cmd = (struct scsi_cmnd*) readl(reply+12);
2041 if(cmd != NULL){
2042 if(cmd->serial_number != 0) { // If not timedout
2043 adpt_i2o_to_scsi(reply, cmd);
2044 }
2045 }
2046 }
2047 writel(m, pHba->reply_port);
2048 wmb();
2049 rmb();
2050 }
2051 handled = 1;
2052out: if(pHba->host)
2053 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2054 return IRQ_RETVAL(handled);
2055}
2056
2057static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2058{
2059 int i;
2060 u32 msg[MAX_MESSAGE_SIZE];
2061 u32* mptr;
2062 u32 *lenptr;
2063 int direction;
2064 int scsidir;
2065	int nseg;
2066	u32 len;
2067 u32 reqlen;
2068 s32 rcode;
2069
2070 memset(msg, 0 , sizeof(msg));
2071	len = scsi_bufflen(cmd);
2072	direction = 0x00000000;
2073
2074 scsidir = 0x00000000; // DATA NO XFER
2075 if(len) {
2076 /*
2077 * Set SCBFlags to indicate if data is being transferred
2078 * in or out, or no data transfer
2079 * Note: Do not have to verify index is less than 0 since
2080 * cmd->cmnd[0] is an unsigned char
2081 */
2082 switch(cmd->sc_data_direction){
2083 case DMA_FROM_DEVICE:
2084 scsidir =0x40000000; // DATA IN (iop<--dev)
2085 break;
2086 case DMA_TO_DEVICE:
2087 direction=0x04000000; // SGL OUT
2088 scsidir =0x80000000; // DATA OUT (iop-->dev)
2089 break;
2090 case DMA_NONE:
2091 break;
2092 case DMA_BIDIRECTIONAL:
2093 scsidir =0x40000000; // DATA IN (iop<--dev)
2094 // Assume In - and continue;
2095 break;
2096 default:
2097 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2098 pHba->name, cmd->cmnd[0]);
2099 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2100 cmd->scsi_done(cmd);
2101 return 0;
2102 }
2103 }
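	// scsidir is folded into the SCB flags in msg[6] below; direction
	// supplies the SGL "out" bit (0x04000000) OR'd into each SG element.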
2104 // msg[0] is set later
2105 // I2O_CMD_SCSI_EXEC
2106 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2107 msg[2] = 0;
2108 msg[3] = (u32)cmd; /* We want the SCSI control block back */
2109 // Our cards use the transaction context as the tag for queueing
2110 // Adaptec/DPT Private stuff
2111 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2112 msg[5] = d->tid;
2113 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2114 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2115 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2116 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2117 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2118
2119 mptr=msg+7;
2120
2121 // Write SCSI command into the message - always 16 byte block
2122 memset(mptr, 0, 16);
2123 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2124 mptr+=4;
2125 lenptr=mptr++; /* Remember me - fill in when we know */
2126 reqlen = 14; // SINGLE SGE
2127 /* Now fill in the SGList and command */
2128
2129	nseg = scsi_dma_map(cmd);
2130 BUG_ON(nseg < 0);
2131 if (nseg) {
2132 struct scatterlist *sg;
2133
2134		len = 0;
2135		scsi_for_each_sg(cmd, sg, nseg, i) {
2136			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2137 len+=sg_dma_len(sg);
2138 *mptr++ = sg_dma_address(sg);
2139			/* Make this an end of list */
2140 if (i == nseg - 1)
2141 mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
2142		}
2143		reqlen = mptr - msg;
2144 *lenptr = len;
2145
2146 if(cmd->underflow && len != cmd->underflow){
2147 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2148 len, cmd->underflow);
2149 }
2150 } else {
2151		*lenptr = len = 0;
2152		reqlen = 12;
2153	}
2154
2155 /* Stick the headers on */
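	// Request size (in 32-bit words) goes in the upper half of msg[0],
	// the SG list offset code in the lower half.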
2156 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2157
2158	// Send it on its way
2159 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2160 if (rcode == 0) {
2161 return 0;
2162 }
2163 return rcode;
2164}
2165
2166
2167static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2168{
2169 adpt_hba* pHba;
2170 u32 hba_status;
2171 u32 dev_status;
2172 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2173 // I know this would look cleaner if I just read bytes
2174 // but the model I have been using for all the rest of the
2175 // io is in 4 byte words - so I keep that model
2176 u16 detailed_status = readl(reply+16) &0xffff;
2177 dev_status = (detailed_status & 0xff);
2178 hba_status = detailed_status >> 8;
2179
2180 // calculate resid for sg
2181	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
2182
2183 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2184
2185 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2186
2187 if(!(reply_flags & MSG_FAIL)) {
2188 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2189 case I2O_SCSI_DSC_SUCCESS:
2190 cmd->result = (DID_OK << 16);
2191 // handle underflow
2192 if(readl(reply+5) < cmd->underflow ) {
2193 cmd->result = (DID_ERROR <<16);
2194 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2195 }
2196 break;
2197 case I2O_SCSI_DSC_REQUEST_ABORTED:
2198 cmd->result = (DID_ABORT << 16);
2199 break;
2200 case I2O_SCSI_DSC_PATH_INVALID:
2201 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2202 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2203 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2204 case I2O_SCSI_DSC_NO_ADAPTER:
2205 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2206 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2207 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2208 cmd->result = (DID_TIME_OUT << 16);
2209 break;
2210 case I2O_SCSI_DSC_ADAPTER_BUSY:
2211 case I2O_SCSI_DSC_BUS_BUSY:
2212 cmd->result = (DID_BUS_BUSY << 16);
2213 break;
2214 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2215 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2216 cmd->result = (DID_RESET << 16);
2217 break;
2218 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2219 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2220 cmd->result = (DID_PARITY << 16);
2221 break;
2222 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2223 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2224 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2225 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2226 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2227 case I2O_SCSI_DSC_DATA_OVERRUN:
2228 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2229 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2230 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2231 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2232 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2233 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2234 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2235 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2236 case I2O_SCSI_DSC_INVALID_CDB:
2237 case I2O_SCSI_DSC_LUN_INVALID:
2238 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2239 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2240 case I2O_SCSI_DSC_NO_NEXUS:
2241 case I2O_SCSI_DSC_CDB_RECEIVED:
2242 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2243 case I2O_SCSI_DSC_QUEUE_FROZEN:
2244 case I2O_SCSI_DSC_REQUEST_INVALID:
2245 default:
2246 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2247 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2248 hba_status, dev_status, cmd->cmnd[0]);
2249 cmd->result = (DID_ERROR << 16);
2250 break;
2251 }
2252
2253 // copy over the request sense data if it was a check
2254 // condition status
2255 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2256 u32 len = sizeof(cmd->sense_buffer);
2257 len = (len > 40) ? 40 : len;
2258 // Copy over the sense data
2259			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2260			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2261 cmd->sense_buffer[2] == DATA_PROTECT ){
2262 /* This is to handle an array failed */
2263 cmd->result = (DID_TIME_OUT << 16);
2264 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2265 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2266 hba_status, dev_status, cmd->cmnd[0]);
2267
2268 }
2269 }
2270 } else {
2271		/* In this condition we could not talk to the tid;
2272		 * the card rejected it.  We should signal a retry
2273		 * for a limited number of retries.
2274 */
2275 cmd->result = (DID_TIME_OUT << 16);
2276 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2277 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2278 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2279 }
2280
2281 cmd->result |= (dev_status);
2282
2283 if(cmd->scsi_done != NULL){
2284 cmd->scsi_done(cmd);
2285 }
2286 return cmd->result;
2287}
2288
2289
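/*
 * Re-read the logical configuration table and fold any changes into the
 * driver's per-channel device lists.
 */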
2290static s32 adpt_rescan(adpt_hba* pHba)
2291{
2292 s32 rcode;
2293 ulong flags = 0;
2294
2295 if(pHba->host)
2296 spin_lock_irqsave(pHba->host->host_lock, flags);
2297 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2298 goto out;
2299 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2300 goto out;
2301 rcode = 0;
2302out: if(pHba->host)
2303 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2304 return rcode;
2305}
2306
2307
2308static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2309{
2310 int i;
2311 int max;
2312 int tid;
2313 struct i2o_device *d;
2314 i2o_lct *lct = pHba->lct;
2315 u8 bus_no = 0;
2316 s16 scsi_id;
2317 s16 scsi_lun;
2318 u32 buf[10]; // at least 8 u32's
2319 struct adpt_device* pDev = NULL;
2320 struct i2o_device* pI2o_dev = NULL;
2321
2322 if (lct == NULL) {
2323 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2324 return -1;
2325 }
2326
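	// Each LCT entry is 9 32-bit words and the header is 3 words, so
	// this works out the number of entries in the table.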
2327 max = lct->table_size;
2328 max -= 3;
2329 max /= 9;
2330
2331 // Mark each drive as unscanned
2332 for (d = pHba->devices; d; d = d->next) {
2333 pDev =(struct adpt_device*) d->owner;
2334 if(!pDev){
2335 continue;
2336 }
2337 pDev->state |= DPTI_DEV_UNSCANNED;
2338 }
2339
2340 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2341
2342 for(i=0;i<max;i++) {
2343 if( lct->lct_entry[i].user_tid != 0xfff){
2344 continue;
2345 }
2346
2347 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2348 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2349 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2350 tid = lct->lct_entry[i].tid;
2351 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2352 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2353 continue;
2354 }
2355 bus_no = buf[0]>>16;
2356 scsi_id = buf[1];
2357 scsi_lun = (buf[2]>>8 )&0xff;
2358 pDev = pHba->channel[bus_no].device[scsi_id];
2359			/* walk the LUN chain for this target */
2360 while(pDev) {
2361 if(pDev->scsi_lun == scsi_lun) {
2362 break;
2363 }
2364 pDev = pDev->next_lun;
2365 }
2366 if(!pDev ) { // Something new add it
2367				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2368				if(d==NULL)
2369 {
2370 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2371 return -ENOMEM;
2372 }
2373
2374				d->controller = pHba;
2375				d->next = NULL;
2376
2377 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2378
2379 d->flags = 0;
2380 adpt_i2o_report_hba_unit(pHba, d);
2381 adpt_i2o_install_device(pHba, d);
2382
2383 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2384 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2385 continue;
2386 }
2387 pDev = pHba->channel[bus_no].device[scsi_id];
2388 if( pDev == NULL){
2389					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2390					if(pDev == NULL) {
2391 return -ENOMEM;
2392 }
2393 pHba->channel[bus_no].device[scsi_id] = pDev;
2394 } else {
2395 while (pDev->next_lun) {
2396 pDev = pDev->next_lun;
2397 }
2398					pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2399					if(pDev == NULL) {
2400 return -ENOMEM;
2401 }
2402 }
2403				pDev->tid = d->lct_data.tid;
2404 pDev->scsi_channel = bus_no;
2405 pDev->scsi_id = scsi_id;
2406 pDev->scsi_lun = scsi_lun;
2407 pDev->pI2o_dev = d;
2408 d->owner = pDev;
2409 pDev->type = (buf[0])&0xff;
2410 pDev->flags = (buf[0]>>8)&0xff;
2411				// Too late, the SCSI system has made up its mind, but what the hey ...
2412 if(scsi_id > pHba->top_scsi_id){
2413 pHba->top_scsi_id = scsi_id;
2414 }
2415 if(scsi_lun > pHba->top_scsi_lun){
2416 pHba->top_scsi_lun = scsi_lun;
2417 }
2418 continue;
2419 } // end of new i2o device
2420
2421 // We found an old device - check it
2422 while(pDev) {
2423 if(pDev->scsi_lun == scsi_lun) {
2424 if(!scsi_device_online(pDev->pScsi_dev)) {
2425 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2426 pHba->name,bus_no,scsi_id,scsi_lun);
2427 if (pDev->pScsi_dev) {
2428 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2429 }
2430 }
2431 d = pDev->pI2o_dev;
2432 if(d->lct_data.tid != tid) { // something changed
2433 pDev->tid = tid;
2434 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2435 if (pDev->pScsi_dev) {
2436 pDev->pScsi_dev->changed = TRUE;
2437 pDev->pScsi_dev->removable = TRUE;
2438 }
2439 }
2440 // Found it - mark it scanned
2441 pDev->state = DPTI_DEV_ONLINE;
2442 break;
2443 }
2444 pDev = pDev->next_lun;
2445 }
2446 }
2447 }
2448 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2449 pDev =(struct adpt_device*) pI2o_dev->owner;
2450 if(!pDev){
2451 continue;
2452 }
2453 // Drive offline drives that previously existed but could not be found
2454 // in the LCT table
2455 if (pDev->state & DPTI_DEV_UNSCANNED){
2456 pDev->state = DPTI_DEV_OFFLINE;
2457 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2458 if (pDev->pScsi_dev) {
2459 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2460 }
2461 }
2462 }
2463 return 0;
2464}
2465
2466static void adpt_fail_posted_scbs(adpt_hba* pHba)
2467{
2468 struct scsi_cmnd* cmd = NULL;
2469 struct scsi_device* d = NULL;
2470
2471 shost_for_each_device(d, pHba->host) {
2472 unsigned long flags;
2473 spin_lock_irqsave(&d->list_lock, flags);
2474 list_for_each_entry(cmd, &d->cmd_list, list) {
2475 if(cmd->serial_number == 0){
2476 continue;
2477 }
2478 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2479 cmd->scsi_done(cmd);
2480 }
2481 spin_unlock_irqrestore(&d->list_lock, flags);
2482 }
2483}
2484
2485
2486/*============================================================================
2487 * Routines from i2o subsystem
2488 *============================================================================
2489 */
2490
2491
2492
2493/*
2494 * Bring an I2O controller into HOLD state. See the spec.
 */
2495 */
2496static int adpt_i2o_activate_hba(adpt_hba* pHba)
2497{
2498 int rcode;
2499
2500 if(pHba->initialized ) {
2501 if (adpt_i2o_status_get(pHba) < 0) {
2502 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2503 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2504 return rcode;
2505 }
2506 if (adpt_i2o_status_get(pHba) < 0) {
2507 printk(KERN_INFO "HBA not responding.\n");
2508 return -1;
2509 }
2510 }
2511
2512 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2513 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2514 return -1;
2515 }
2516
2517 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2518 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2519 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2520 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2521 adpt_i2o_reset_hba(pHba);
2522 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2523 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2524 return -1;
2525 }
2526 }
2527 } else {
2528 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2529 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2530 return rcode;
2531 }
2532
2533 }
2534
2535 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2536 return -1;
2537 }
2538
2539 /* In HOLD state */
2540
2541 if (adpt_i2o_hrt_get(pHba) < 0) {
2542 return -1;
2543 }
2544
2545 return 0;
2546}
2547
2548/*
2549 * Bring a controller online into OPERATIONAL state.
2550 */
2551
2552static int adpt_i2o_online_hba(adpt_hba* pHba)
2553{
2554 if (adpt_i2o_systab_send(pHba) < 0) {
2555 adpt_i2o_delete_hba(pHba);
2556 return -1;
2557 }
2558 /* In READY state */
2559
2560 if (adpt_i2o_enable_hba(pHba) < 0) {
2561 adpt_i2o_delete_hba(pHba);
2562 return -1;
2563 }
2564
2565 /* In OPERATIONAL state */
2566 return 0;
2567}
2568
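/*
 * Post a UTIL_NOP in message frame 'm', handing the frame back to the IOP.
 * If no frame is supplied (EMPTY_QUEUE), wait for one first.
 */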
2569static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2570{
2571 u32 __iomem *msg;
2572 ulong timeout = jiffies + 5*HZ;
2573
2574 while(m == EMPTY_QUEUE){
2575 rmb();
2576 m = readl(pHba->post_port);
2577 if(m != EMPTY_QUEUE){
2578 break;
2579 }
2580 if(time_after(jiffies,timeout)){
2581 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2582 return 2;
2583 }
2584		schedule_timeout_uninterruptible(1);
2585	}
2586 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2587 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2588 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2589 writel( 0,&msg[2]);
2590 wmb();
2591
2592 writel(m, pHba->post_port);
2593 wmb();
2594 return 0;
2595}
2596
2597static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2598{
2599 u8 *status;
2600 u32 __iomem *msg = NULL;
2601 int i;
2602 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2603 u32* ptr;
2604 u32 outbound_frame; // This had to be a 32 bit address
2605 u32 m;
2606
2607 do {
2608 rmb();
2609 m = readl(pHba->post_port);
2610 if (m != EMPTY_QUEUE) {
2611 break;
2612 }
2613
2614 if(time_after(jiffies,timeout)){
2615 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2616 return -ETIMEDOUT;
2617 }
2618		schedule_timeout_uninterruptible(1);
2619	} while(m == EMPTY_QUEUE);
2620
2621 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2622
2623	status = kzalloc(4, GFP_KERNEL|ADDR32);
2624	if (!status) {
2625		adpt_send_nop(pHba, m);
2626 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2627 pHba->name);
2628 return -ENOMEM;
2629 }
2630
2631 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2632 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2633 writel(0, &msg[2]);
2634 writel(0x0106, &msg[3]); /* Transaction context */
2635 writel(4096, &msg[4]); /* Host page frame size */
2636 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2637 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2638 writel(virt_to_bus(status), &msg[7]);
2639
2640 writel(m, pHba->post_port);
2641 wmb();
2642
2643 // Wait for the reply status to come back
2644 do {
2645 if (*status) {
2646 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2647 break;
2648 }
2649 }
2650 rmb();
2651 if(time_after(jiffies,timeout)){
2652 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2653 return -ETIMEDOUT;
2654 }
2655		schedule_timeout_uninterruptible(1);
2656	} while (1);
2657
2658 // If the command was successful, fill the fifo with our reply
2659 // message packets
2660 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2661		kfree(status);
2662		return -2;
2663 }
2664	kfree(status);
2665
2666	kfree(pHba->reply_pool);
2667
2668	pHba->reply_pool = kzalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
2669 if (!pHba->reply_pool) {
2670 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2671 return -ENOMEM;
2672	}
2673
2674 ptr = pHba->reply_pool;
2675 for(i = 0; i < pHba->reply_fifo_size; i++) {
2676 outbound_frame = (u32)virt_to_bus(ptr);
2677 writel(outbound_frame, pHba->reply_port);
2678 wmb();
2679 ptr += REPLY_FRAME_SIZE;
2680 }
2681 adpt_i2o_status_get(pHba);
2682 return 0;
2683}
2684
2685
2686/*
2687 * I2O System Table. Contains information about
2688 * all the IOPs in the system. Used to inform IOPs
2689 * about each other's existence.
2690 *
2691 * sys_tbl_ver is the CurrentChangeIndicator that is
2692 * used by IOPs to track changes.
2693 */
2694
2695
2696
2697static s32 adpt_i2o_status_get(adpt_hba* pHba)
2698{
2699 ulong timeout;
2700 u32 m;
2701 u32 __iomem *msg;
2702 u8 *status_block=NULL;
2703 ulong status_block_bus;
2704
2705 if(pHba->status_block == NULL) {
2706 pHba->status_block = (i2o_status_block*)
2707 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2708 if(pHba->status_block == NULL) {
2709 printk(KERN_ERR
2710 "dpti%d: Get Status Block failed; Out of memory. \n",
2711 pHba->unit);
2712 return -ENOMEM;
2713 }
2714 }
2715 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2716 status_block = (u8*)(pHba->status_block);
2717 status_block_bus = virt_to_bus(pHba->status_block);
2718 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2719 do {
2720 rmb();
2721 m = readl(pHba->post_port);
2722 if (m != EMPTY_QUEUE) {
2723 break;
2724 }
2725 if(time_after(jiffies,timeout)){
2726 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2727 pHba->name);
2728 return -ETIMEDOUT;
2729 }
2730		schedule_timeout_uninterruptible(1);
2731	} while(m==EMPTY_QUEUE);
2732
2733
2734 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2735
2736 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2737 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2738 writel(1, &msg[2]);
2739 writel(0, &msg[3]);
2740 writel(0, &msg[4]);
2741 writel(0, &msg[5]);
2742 writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2743 writel(0, &msg[7]);
2744 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2745
2746 //post message
2747 writel(m, pHba->post_port);
2748 wmb();
2749
2750 while(status_block[87]!=0xff){
2751 if(time_after(jiffies,timeout)){
2752 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2753 pHba->unit);
2754 return -ETIMEDOUT;
2755 }
2756 rmb();
2757		schedule_timeout_uninterruptible(1);
2758	}
2759
2760 // Set up our number of outbound and inbound messages
2761 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2762 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2763 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2764 }
2765
2766 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2767 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2768 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2769 }
2770
2771 // Calculate the Scatter Gather list size
2772 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2773 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2774 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2775 }
2776
2777
2778#ifdef DEBUG
2779 printk("dpti%d: State = ",pHba->unit);
2780 switch(pHba->status_block->iop_state) {
2781 case 0x01:
2782 printk("INIT\n");
2783 break;
2784 case 0x02:
2785 printk("RESET\n");
2786 break;
2787 case 0x04:
2788 printk("HOLD\n");
2789 break;
2790 case 0x05:
2791 printk("READY\n");
2792 break;
2793 case 0x08:
2794 printk("OPERATIONAL\n");
2795 break;
2796 case 0x10:
2797 printk("FAILED\n");
2798 break;
2799 case 0x11:
2800 printk("FAULTED\n");
2801 break;
2802 default:
2803 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2804 }
2805#endif
2806 return 0;
2807}
2808
2809/*
2810 * Get the IOP's Logical Configuration Table
2811 */
2812static int adpt_i2o_lct_get(adpt_hba* pHba)
2813{
2814 u32 msg[8];
2815 int ret;
2816 u32 buf[16];
2817
2818 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2819 pHba->lct_size = pHba->status_block->expected_lct_size;
2820 }
2821 do {
2822 if (pHba->lct == NULL) {
2823 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2824 if(pHba->lct == NULL) {
2825 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2826 pHba->name);
2827 return -ENOMEM;
2828 }
2829 }
2830 memset(pHba->lct, 0, pHba->lct_size);
2831
2832 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2833 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2834 msg[2] = 0;
2835 msg[3] = 0;
2836 msg[4] = 0xFFFFFFFF; /* All devices */
2837 msg[5] = 0x00000000; /* Report now */
2838 msg[6] = 0xD0000000|pHba->lct_size;
2839 msg[7] = virt_to_bus(pHba->lct);
2840
2841 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2842			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2843 pHba->name, ret);
2844 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2845 return ret;
2846 }
2847
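		// The IOP reports its actual table size; if our buffer was too
		// small, free it and retry with the larger size.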
2848 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2849 pHba->lct_size = pHba->lct->table_size << 2;
2850 kfree(pHba->lct);
2851 pHba->lct = NULL;
2852 }
2853 } while (pHba->lct == NULL);
2854
2855 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2856
2857
2858 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2859 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2860 pHba->FwDebugBufferSize = buf[1];
2861 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
2862 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2863 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2864 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
2865 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2866 pHba->FwDebugBuffer_P += buf[2];
2867 pHba->FwDebugFlags = 0;
2868 }
2869
2870 return 0;
2871}
2872
2873static int adpt_i2o_build_sys_table(void)
2874{
2875 adpt_hba* pHba = NULL;
2876 int count = 0;
2877
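	// The system table is a header plus one i2o_sys_tbl_entry per
	// registered controller; each entry is filled in below from that
	// controller's status block.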
2878 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2879 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2880
2881	kfree(sys_tbl);
2882
2883	sys_tbl = kzalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2884	if (!sys_tbl) {
2885		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2886 return -ENOMEM;
2887 }
2888
2889 sys_tbl->num_entries = hba_count;
2890 sys_tbl->version = I2OVERSION;
2891 sys_tbl->change_ind = sys_tbl_ind++;
2892
2893 for(pHba = hba_chain; pHba; pHba = pHba->next) {
2894 // Get updated Status Block so we have the latest information
2895 if (adpt_i2o_status_get(pHba)) {
2896 sys_tbl->num_entries--;
2897 continue; // try next one
2898 }
2899
2900 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2901 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2902 sys_tbl->iops[count].seg_num = 0;
2903 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2904 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2905 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2906 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2907 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2908 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2909		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2910		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
2911
2912 count++;
2913 }
2914
2915#ifdef DEBUG
2916{
2917 u32 *table = (u32*)sys_tbl;
2918 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2919 for(count = 0; count < (sys_tbl_len >>2); count++) {
2920 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2921 count, table[count]);
2922 }
2923}
2924#endif
2925
2926 return 0;
2927}
2928
2929
2930/*
2931 * Dump the information block associated with a given unit (TID)
2932 */
2933
2934static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2935{
2936 char buf[64];
2937 int unit = d->lct_data.tid;
2938
2939 printk(KERN_INFO "TID %3.3d ", unit);
2940
2941 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
2942 {
2943 buf[16]=0;
2944 printk(" Vendor: %-12.12s", buf);
2945 }
2946 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
2947 {
2948 buf[16]=0;
2949 printk(" Device: %-12.12s", buf);
2950 }
2951 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
2952 {
2953 buf[8]=0;
2954 printk(" Rev: %-12.12s\n", buf);
2955 }
2956#ifdef DEBUG
2957 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
2958 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
2959 printk(KERN_INFO "\tFlags: ");
2960
2961 if(d->lct_data.device_flags&(1<<0))
2962 printk("C"); // ConfigDialog requested
2963 if(d->lct_data.device_flags&(1<<1))
2964 printk("U"); // Multi-user capable
2965 if(!(d->lct_data.device_flags&(1<<4)))
2966 printk("P"); // Peer service enabled!
2967 if(!(d->lct_data.device_flags&(1<<5)))
2968 printk("M"); // Mgmt service enabled!
2969 printk("\n");
2970#endif
2971}
2972
2973#ifdef DEBUG
2974/*
2975 * Do i2o class name lookup
2976 */
2977static const char *adpt_i2o_get_class_name(int class)
2978{
2979 int idx = 16;
2980 static char *i2o_class_name[] = {
2981 "Executive",
2982 "Device Driver Module",
2983 "Block Device",
2984 "Tape Device",
2985 "LAN Interface",
2986 "WAN Interface",
2987 "Fibre Channel Port",
2988 "Fibre Channel Device",
2989 "SCSI Device",
2990 "ATE Port",
2991 "ATE Device",
2992 "Floppy Controller",
2993 "Floppy Device",
2994 "Secondary Bus Port",
2995 "Peer Transport Agent",
2996 "Peer Transport",
2997 "Unknown"
2998 };
2999
3000 switch(class&0xFFF) {
3001 case I2O_CLASS_EXECUTIVE:
3002 idx = 0; break;
3003 case I2O_CLASS_DDM:
3004 idx = 1; break;
3005 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3006 idx = 2; break;
3007 case I2O_CLASS_SEQUENTIAL_STORAGE:
3008 idx = 3; break;
3009 case I2O_CLASS_LAN:
3010 idx = 4; break;
3011 case I2O_CLASS_WAN:
3012 idx = 5; break;
3013 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3014 idx = 6; break;
3015 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3016 idx = 7; break;
3017 case I2O_CLASS_SCSI_PERIPHERAL:
3018 idx = 8; break;
3019 case I2O_CLASS_ATE_PORT:
3020 idx = 9; break;
3021 case I2O_CLASS_ATE_PERIPHERAL:
3022 idx = 10; break;
3023 case I2O_CLASS_FLOPPY_CONTROLLER:
3024 idx = 11; break;
3025 case I2O_CLASS_FLOPPY_DEVICE:
3026 idx = 12; break;
3027 case I2O_CLASS_BUS_ADAPTER_PORT:
3028 idx = 13; break;
3029 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3030 idx = 14; break;
3031 case I2O_CLASS_PEER_TRANSPORT:
3032 idx = 15; break;
3033 }
3034 return i2o_class_name[idx];
3035}
3036#endif
3037
3038
3039static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3040{
3041 u32 msg[6];
3042 int ret, size = sizeof(i2o_hrt);
3043
3044 do {
3045 if (pHba->hrt == NULL) {
3046 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3047 if (pHba->hrt == NULL) {
3048 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3049 return -ENOMEM;
3050 }
3051 }
3052
3053 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3054 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3055 msg[2]= 0;
3056 msg[3]= 0;
3057 msg[4]= (0xD0000000 | size); /* Simple transaction */
3058 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3059
3060 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3061 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3062 return ret;
3063 }
3064
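		// If the firmware reports a bigger HRT than we allocated,
		// grow the buffer and issue the request again.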
3065 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3066 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3067 kfree(pHba->hrt);
3068 pHba->hrt = NULL;
3069 }
3070 } while(pHba->hrt == NULL);
3071 return 0;
3072}
3073
3074/*
3075 * Query one scalar group value or a whole scalar group.
3076 *	Query one scalar group value or a whole scalar group.
 */
3077static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3078 int group, int field, void *buf, int buflen)
3079{
3080 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3081 u8 *resblk;
3082
3083 int size;
3084
3085 /* 8 bytes for header */
3086 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3087 if (resblk == NULL) {
3088 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3089 return -ENOMEM;
3090 }
3091
3092 if (field == -1) /* whole group */
3093 opblk[4] = -1;
3094
3095 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3096 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3097 if (size == -ETIME) {
3098 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3099 return -ETIME;
3100 } else if (size == -EINTR) {
3101 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3102 return -EINTR;
3103 }
3104
3105 memcpy(buf, resblk+8, buflen); /* cut off header */
3106
3107 kfree(resblk);
3108 if (size < 0)
3109 return size;
3110
3111 return buflen;
3112}
3113
3114
3115/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3116 *
3117 * This function can be used for all UtilParamsGet/Set operations.
3118 * The OperationBlock is given in opblk-buffer,
3119 * and results are returned in resblk-buffer.
3120 * Note that the minimum sized resblk is 8 bytes and contains
3121 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3122 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
3123static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3124 void *opblk, int oplen, void *resblk, int reslen)
3125{
3126 u32 msg[9];
3127 u32 *res = (u32 *)resblk;
3128 int wait_status;
3129
3130 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3131 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3132 msg[2] = 0;
3133 msg[3] = 0;
3134 msg[4] = 0;
3135 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3136 msg[6] = virt_to_bus(opblk);
3137 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3138 msg[8] = virt_to_bus(resblk);
3139
3140 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3141		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3142 return wait_status; /* -DetailedStatus */
3143 }
3144
3145 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3146 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3147 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3148 pHba->name,
3149 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3150 : "PARAMS_GET",
3151 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3152 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3153 }
3154
3155 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3156}
3157
3158
3159static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3160{
3161 u32 msg[4];
3162 int ret;
3163
3164 adpt_i2o_status_get(pHba);
3165
3166 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3167
3168 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3169 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3170 return 0;
3171 }
3172
3173 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3174 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3175 msg[2] = 0;
3176 msg[3] = 0;
3177
3178 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3179 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3180 pHba->unit, -ret);
3181 } else {
3182 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3183 }
3184
3185 adpt_i2o_status_get(pHba);
3186 return ret;
3187}
3188
3189
3190/*
3191 * Enable IOP. Allows the IOP to resume external operations.
3192 */
3193static int adpt_i2o_enable_hba(adpt_hba* pHba)
3194{
3195 u32 msg[4];
3196 int ret;
3197
3198 adpt_i2o_status_get(pHba);
3199 if(!pHba->status_block){
3200 return -ENOMEM;
3201 }
3202 /* Enable only allowed on READY state */
3203 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3204 return 0;
3205
3206 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3207 return -EINVAL;
3208
3209 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3210 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3211 msg[2]= 0;
3212 msg[3]= 0;
3213
3214 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3215 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3216 pHba->name, ret);
3217 } else {
3218 PDEBUG("%s: Enabled.\n", pHba->name);
3219 }
3220
3221 adpt_i2o_status_get(pHba);
3222 return ret;
3223}
3224
3225
3226static int adpt_i2o_systab_send(adpt_hba* pHba)
3227{
3228 u32 msg[12];
3229 int ret;
3230
3231 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3232 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3233 msg[2] = 0;
3234 msg[3] = 0;
3235 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3236 msg[5] = 0; /* Segment 0 */
3237
3238 /*
3239 * Provide three SGL-elements:
3240 * System table (SysTab), Private memory space declaration and
3241 * Private i/o space declaration
3242 */
3243 msg[6] = 0x54000000 | sys_tbl_len;
3244 msg[7] = virt_to_phys(sys_tbl);
3245 msg[8] = 0x54000000 | 0;
3246 msg[9] = 0;
3247 msg[10] = 0xD4000000 | 0;
3248 msg[11] = 0;
3249
3250 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3251 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3252 pHba->name, ret);
3253 }
3254#ifdef DEBUG
3255 else {
3256 PINFO("%s: SysTab set.\n", pHba->name);
3257 }
3258#endif
3259
3260 return ret;
3261 }
3262
3263
3264/*============================================================================
3265 *
3266 *============================================================================
3267 */
3268
3269
3270#ifdef UARTDELAY
3271
3272static void adpt_delay(int millisec)
3273{
3274 int i;
3275 for (i = 0; i < millisec; i++) {
3276 udelay(1000); /* delay for one millisecond */
3277 }
3278}
3279
3280#endif
3281
3282static struct scsi_host_template adpt_template = {
3283	.name = "dpt_i2o",
3284 .proc_name = "dpt_i2o",
3285 .proc_info = adpt_proc_info,
3286	.info = adpt_info,
3287 .queuecommand = adpt_queue,
3288 .eh_abort_handler = adpt_abort,
3289 .eh_device_reset_handler = adpt_device_reset,
3290 .eh_bus_reset_handler = adpt_bus_reset,
3291 .eh_host_reset_handler = adpt_reset,
3292 .bios_param = adpt_bios_param,
3293 .slave_configure = adpt_slave_configure,
3294 .can_queue = MAX_TO_IOP_MESSAGES,
3295 .this_id = 7,
3296 .cmd_per_lun = 1,
3297 .use_clustering = ENABLE_CLUSTERING,
3298	.use_sg_chaining = ENABLE_SG_CHAINING,
3299};
3300
3301static s32 adpt_scsi_register(adpt_hba* pHba)
3302{
3303 struct Scsi_Host *host;
3304
3305 host = scsi_host_alloc(&adpt_template, sizeof(adpt_hba*));
3306 if (host == NULL) {
3307		printk(KERN_WARNING "%s: scsi_host_alloc returned NULL\n", pHba->name);
3308 return -1;
3309 }
3310 host->hostdata[0] = (unsigned long)pHba;
3311 pHba->host = host;
3312
3313 host->irq = pHba->pDev->irq;
3314 /* no IO ports, so don't have to set host->io_port and
3315 * host->n_io_port
3316 */
3317 host->io_port = 0;
3318 host->n_io_port = 0;
3319 /* see comments in scsi_host.h */
3320 host->max_id = 16;
3321 host->max_lun = 256;
3322 host->max_channel = pHba->top_scsi_channel + 1;
3323 host->cmd_per_lun = 1;
3324 host->unique_id = (uint) pHba;
3325 host->sg_tablesize = pHba->sg_tablesize;
3326 host->can_queue = pHba->post_fifo_size;
3327
3328 if (scsi_add_host(host, &pHba->pDev->dev)) {
3329 scsi_host_put(host);
3330 return -1;
3331 }
3332
3333 return 0;
3334}
3335
3336static int __init adpt_init(void)
3337{
3338 int count;
3339
3340 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3341#ifdef REBOOT_NOTIFIER
3342 register_reboot_notifier(&adpt_reboot_notifier);
3343#endif
3344
3345 count = adpt_detect();
3346
3347 return count > 0 ? 0 : -ENODEV;
3348}
3349
3350static void adpt_exit(void)
3351{
3352 while (hba_chain)
3353 adpt_release(hba_chain);
3354}
3355
3356module_init(adpt_init);
3357module_exit(adpt_exit);
3358MODULE_LICENSE("GPL");