/*
 * Linux 2.4.0-test9pre4 — drivers/scsi/scsi.c
 *
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *      <drew@colorado.edu>
 *
 *      Bug correction thanks go to :
 *          Rik Faith <faith@cs.unc.edu>
 *          Tommy Thorn <tthorn>
 *          Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *      Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *      add scatter-gather, multiple outstanding request, and other
 *      enhancements.
 *
 *      Native multichannel, wide scsi, /proc/scsi and hot plugging
 *      support added by Michael Neuffer <mike@i-connect.net>
 *
 *      Added request_module("scsi_hostadapter") for kerneld:
 *      (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
 *      Bjorn Ekwall  <bj0rn@blox.se>
 *      (changed to kmod)
 *
 *      Major improvements to the timeout, abort, and reset processing,
 *      as well as performance modifications for large queue depths by
 *      Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *      Converted cli() code to spinlocks, Ingo Molnar
 *
 *      Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *      out_of_space hacks, D. Gilbert (dpg) 990608
 */
39 #include <linux/config.h>
40 #include <linux/module.h>
42 #include <linux/sched.h>
43 #include <linux/timer.h>
44 #include <linux/string.h>
45 #include <linux/malloc.h>
46 #include <linux/ioport.h>
47 #include <linux/kernel.h>
48 #include <linux/stat.h>
49 #include <linux/blk.h>
50 #include <linux/interrupt.h>
51 #include <linux/delay.h>
52 #include <linux/init.h>
54 #define __KERNEL_SYSCALLS__
56 #include <linux/unistd.h>
57 #include <linux/spinlock.h>
59 #include <asm/system.h>
60 #include <asm/irq.h>
61 #include <asm/dma.h>
62 #include <asm/uaccess.h>
64 #include"scsi.h"
65 #include"hosts.h"
66 #include"constants.h"
68 #ifdef CONFIG_KMOD
69 #include <linux/kmod.h>
70 #endif
72 #undef USE_STATIC_SCSI_MEMORY
74 struct proc_dir_entry *proc_scsi = NULL;
76 #ifdef CONFIG_PROC_FS
77 static intscsi_proc_info(char*buffer,char**start, off_t offset,int length);
78 static voidscsi_dump_status(int level);
79 #endif
82 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
86 * Definitions and constants.
89 #define MIN_RESET_DELAY (2*HZ)
91 /* Do not call reset on error if we just did a reset within 15 sec. */
92 #define MIN_RESET_PERIOD (15*HZ)
96 * Data declarations.
98 unsigned long scsi_pid =0;
99 Scsi_Cmnd *last_cmnd = NULL;
100 /* Command groups 3 and 4 are reserved and should never be used. */
101 const unsigned char scsi_command_size[8] =
103 6,10,10,12,
104 12,12,10,10
106 static unsigned long serial_number =0;
107 static Scsi_Cmnd *scsi_bh_queue_head = NULL;
108 static Scsi_Cmnd *scsi_bh_queue_tail = NULL;
111 * Note - the initial logging level can be set here to log events at boot time.
112 * After the system is up, you may enable logging via the /proc interface.
114 unsigned int scsi_logging_level =0;
116 const char*const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
118 "Direct-Access ",
119 "Sequential-Access",
120 "Printer ",
121 "Processor ",
122 "WORM ",
123 "CD-ROM ",
124 "Scanner ",
125 "Optical Device ",
126 "Medium Changer ",
127 "Communications ",
128 "Unknown ",
129 "Unknown ",
130 "Unknown ",
131 "Enclosure ",
135 * Function prototypes.
137 externvoidscsi_times_out(Scsi_Cmnd * SCpnt);
138 voidscsi_build_commandblocks(Scsi_Device * SDpnt);
141 * These are the interface to the old error handling code. It should go away
142 * someday soon.
144 externvoidscsi_old_done(Scsi_Cmnd * SCpnt);
145 externvoidscsi_old_times_out(Scsi_Cmnd * SCpnt);
149 * Function: scsi_initialize_queue()
151 * Purpose: Selects queue handler function for a device.
153 * Arguments: SDpnt - device for which we need a handler function.
155 * Returns: Nothing
157 * Lock status: No locking assumed or required.
159 * Notes: Most devices will end up using scsi_request_fn for the
160 * handler function (at least as things are done now).
161 * The "block" feature basically ensures that only one of
162 * the blocked hosts is active at one time, mainly to work around
163 * buggy DMA chipsets where the memory gets starved.
164 * For this case, we have a special handler function, which
165 * does some checks and ultimately calls scsi_request_fn.
167 * The single_lun feature is a similar special case.
169 * We handle these things by stacking the handlers. The
170 * special case handlers simply check a few conditions,
171 * and return if they are not supposed to do anything.
172 * In the event that things are OK, then they call the next
173 * handler in the list - ultimately they call scsi_request_fn
174 * to do the dirty deed.
176 voidscsi_initialize_queue(Scsi_Device * SDpnt,struct Scsi_Host * SHpnt) {
177 blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
178 blk_queue_headactive(&SDpnt->request_queue,0);
179 SDpnt->request_queue.queuedata = (void*) SDpnt;
182 #ifdef MODULE
183 MODULE_PARM(scsi_logging_level,"i");
184 MODULE_PARM_DESC(scsi_logging_level,"SCSI logging level; should be zero or nonzero");
186 #else
188 static int __init scsi_logging_setup(char*str)
190 int tmp;
192 if(get_option(&str, &tmp) ==1) {
193 scsi_logging_level = (tmp ? ~0:0);
194 return1;
195 }else{
196 printk("scsi_logging_setup : usage scsi_logging_level=n "
197 "(n should be 0 or non-zero)\n");
198 return0;
202 __setup("scsi_logging=", scsi_logging_setup);
204 #endif
207 * Issue a command and wait for it to complete
210 static voidscsi_wait_done(Scsi_Cmnd * SCpnt)
212 struct request *req;
214 req = &SCpnt->request;
215 req->rq_status = RQ_SCSI_DONE;/* Busy, but indicate request done */
217 if(req->sem != NULL) {
218 up(req->sem);
223 * This lock protects the freelist for all devices on the system.
224 * We could make this finer grained by having a single lock per
225 * device if it is ever found that there is excessive contention
226 * on this lock.
228 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
231 * Used to protect insertion into and removal from the queue of
232 * commands to be processed by the bottom half handler.
234 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
237 * Function: scsi_allocate_request
239 * Purpose: Allocate a request descriptor.
241 * Arguments: device - device for which we want a request
243 * Lock status: No locks assumed to be held. This function is SMP-safe.
245 * Returns: Pointer to request block.
247 * Notes: With the new queueing code, it becomes important
248 * to track the difference between a command and a
249 * request. A request is a pending item in the queue that
250 * has not yet reached the top of the queue.
253 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
255 Scsi_Request *SRpnt = NULL;
257 if(!device)
258 panic("No device passed to scsi_allocate_request().\n");
260 SRpnt = (Scsi_Request *)kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
261 if( SRpnt == NULL )
263 return NULL;
266 memset(SRpnt,0,sizeof(Scsi_Request));
267 SRpnt->sr_device = device;
268 SRpnt->sr_host = device->host;
269 SRpnt->sr_magic = SCSI_REQ_MAGIC;
270 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
272 return SRpnt;
276 * Function: scsi_release_request
278 * Purpose: Release a request descriptor.
280 * Arguments: device - device for which we want a request
282 * Lock status: No locks assumed to be held. This function is SMP-safe.
284 * Returns: Pointer to request block.
286 * Notes: With the new queueing code, it becomes important
287 * to track the difference between a command and a
288 * request. A request is a pending item in the queue that
289 * has not yet reached the top of the queue. We still need
290 * to free a request when we are done with it, of course.
292 voidscsi_release_request(Scsi_Request * req)
294 if( req->sr_command != NULL )
296 scsi_release_command(req->sr_command);
297 req->sr_command = NULL;
300 kfree(req);
304 * Function: scsi_allocate_device
306 * Purpose: Allocate a command descriptor.
308 * Arguments: device - device for which we want a command descriptor
309 * wait - 1 if we should wait in the event that none
310 * are available.
311 * interruptible - 1 if we should unblock and return NULL
312 * in the event that we must wait, and a signal
313 * arrives.
315 * Lock status: No locks assumed to be held. This function is SMP-safe.
317 * Returns: Pointer to command descriptor.
319 * Notes: Prior to the new queue code, this function was not SMP-safe.
321 * If the wait flag is true, and we are waiting for a free
322 * command block, this function will interrupt and return
323 * NULL in the event that a signal arrives that needs to
324 * be handled.
326 * This function is deprecated, and drivers should be
327 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
330 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device,int wait,
331 int interruptable)
333 struct Scsi_Host *host;
334 Scsi_Cmnd *SCpnt = NULL;
335 Scsi_Device *SDpnt;
336 unsigned long flags;
338 if(!device)
339 panic("No device passed to scsi_allocate_device().\n");
341 host = device->host;
343 spin_lock_irqsave(&device_request_lock, flags);
345 while(1==1) {
346 SCpnt = NULL;
347 if(!device->device_blocked) {
348 if(device->single_lun) {
350 * FIXME(eric) - this is not at all optimal. Given that
351 * single lun devices are rare and usually slow
352 * (i.e. CD changers), this is good enough for now, but
353 * we may want to come back and optimize this later.
355 * Scan through all of the devices attached to this
356 * host, and see if any are active or not. If so,
357 * we need to defer this command.
359 * We really need a busy counter per device. This would
360 * allow us to more easily figure out whether we should
361 * do anything here or not.
363 for(SDpnt = host->host_queue;
364 SDpnt;
365 SDpnt = SDpnt->next) {
367 * Only look for other devices on the same bus
368 * with the same target ID.
370 if(SDpnt->channel != device->channel
371 || SDpnt->id != device->id
372 || SDpnt == device) {
373 continue;
375 if(atomic_read(&SDpnt->device_active) !=0)
377 break;
380 if(SDpnt) {
382 * Some other device in this cluster is busy.
383 * If asked to wait, we need to wait, otherwise
384 * return NULL.
386 SCpnt = NULL;
387 goto busy;
391 * Now we can check for a free command block for this device.
393 for(SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
394 if(SCpnt->request.rq_status == RQ_INACTIVE)
395 break;
399 * If we couldn't find a free command block, and we have been
400 * asked to wait, then do so.
402 if(SCpnt) {
403 break;
405 busy:
407 * If we have been asked to wait for a free block, then
408 * wait here.
410 if(wait) {
411 DECLARE_WAITQUEUE(wait, current);
414 * We need to wait for a free commandblock. We need to
415 * insert ourselves into the list before we release the
416 * lock. This way if a block were released the same
417 * microsecond that we released the lock, the call
418 * to schedule() wouldn't block (well, it might switch,
419 * but the current task will still be schedulable.
421 add_wait_queue(&device->scpnt_wait, &wait);
422 if( interruptable ) {
423 set_current_state(TASK_INTERRUPTIBLE);
424 }else{
425 set_current_state(TASK_UNINTERRUPTIBLE);
428 spin_unlock_irqrestore(&device_request_lock, flags);
431 * This should block until a device command block
432 * becomes available.
434 schedule();
436 spin_lock_irqsave(&device_request_lock, flags);
438 remove_wait_queue(&device->scpnt_wait, &wait);
440 * FIXME - Isn't this redundant?? Someone
441 * else will have forced the state back to running.
443 set_current_state(TASK_RUNNING);
445 * In the event that a signal has arrived that we need
446 * to consider, then simply return NULL. Everyone
447 * that calls us should be prepared for this
448 * possibility, and pass the appropriate code back
449 * to the user.
451 if( interruptable ) {
452 if(signal_pending(current)) {
453 spin_unlock_irqrestore(&device_request_lock, flags);
454 return NULL;
457 }else{
458 spin_unlock_irqrestore(&device_request_lock, flags);
459 return NULL;
463 SCpnt->request.rq_status = RQ_SCSI_BUSY;
464 SCpnt->request.sem = NULL;/* And no one is waiting for this
465 * to complete */
466 atomic_inc(&SCpnt->host->host_active);
467 atomic_inc(&SCpnt->device->device_active);
469 SCpnt->buffer = NULL;
470 SCpnt->bufflen =0;
471 SCpnt->request_buffer = NULL;
472 SCpnt->request_bufflen =0;
474 SCpnt->use_sg =0;/* Reset the scatter-gather flag */
475 SCpnt->old_use_sg =0;
476 SCpnt->transfersize =0;/* No default transfer size */
477 SCpnt->cmd_len =0;
479 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
480 SCpnt->sc_request = NULL;
481 SCpnt->sc_magic = SCSI_CMND_MAGIC;
483 SCpnt->result =0;
484 SCpnt->underflow =0;/* Do not flag underflow conditions */
485 SCpnt->old_underflow =0;
486 SCpnt->resid =0;
487 SCpnt->state = SCSI_STATE_INITIALIZING;
488 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
490 spin_unlock_irqrestore(&device_request_lock, flags);
492 SCSI_LOG_MLQUEUE(5,printk("Activating command for device %d (%d)\n",
493 SCpnt->target,
494 atomic_read(&SCpnt->host->host_active)));
496 return SCpnt;
499 inlinevoid__scsi_release_command(Scsi_Cmnd * SCpnt)
501 unsigned long flags;
502 Scsi_Device * SDpnt;
504 spin_lock_irqsave(&device_request_lock, flags);
506 SDpnt = SCpnt->device;
508 SCpnt->request.rq_status = RQ_INACTIVE;
509 SCpnt->state = SCSI_STATE_UNUSED;
510 SCpnt->owner = SCSI_OWNER_NOBODY;
511 atomic_dec(&SCpnt->host->host_active);
512 atomic_dec(&SDpnt->device_active);
514 SCSI_LOG_MLQUEUE(5,printk("Deactivating command for device %d (active=%d, failed=%d)\n",
515 SCpnt->target,
516 atomic_read(&SCpnt->host->host_active),
517 SCpnt->host->host_failed));
518 if(SCpnt->host->host_failed !=0) {
519 SCSI_LOG_ERROR_RECOVERY(5,printk("Error handler thread %d %d\n",
520 SCpnt->host->in_recovery,
521 SCpnt->host->eh_active));
524 * If the host is having troubles, then look to see if this was the last
525 * command that might have failed. If so, wake up the error handler.
527 if(SCpnt->host->in_recovery
528 && !SCpnt->host->eh_active
529 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
530 SCSI_LOG_ERROR_RECOVERY(5,printk("Waking error handler thread (%d)\n",
531 atomic_read(&SCpnt->host->eh_wait->count)));
532 up(SCpnt->host->eh_wait);
535 spin_unlock_irqrestore(&device_request_lock, flags);
538 * Wake up anyone waiting for this device. Do this after we
539 * have released the lock, as they will need it as soon as
540 * they wake up.
542 wake_up(&SDpnt->scpnt_wait);
546 * Function: scsi_release_command
548 * Purpose: Release a command block.
550 * Arguments: SCpnt - command block we are releasing.
552 * Notes: The command block can no longer be used by the caller once
553 * this funciton is called. This is in effect the inverse
554 * of scsi_allocate_device. Note that we also must perform
555 * a couple of additional tasks. We must first wake up any
556 * processes that might have blocked waiting for a command
557 * block, and secondly we must hit the queue handler function
558 * to make sure that the device is busy. Note - there is an
559 * option to not do this - there were instances where we could
560 * recurse too deeply and blow the stack if this happened
561 * when we were indirectly called from the request function
562 * itself.
564 * The idea is that a lot of the mid-level internals gunk
565 * gets hidden in this function. Upper level drivers don't
566 * have any chickens to wave in the air to get things to
567 * work reliably.
569 * This function is deprecated, and drivers should be
570 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
572 voidscsi_release_command(Scsi_Cmnd * SCpnt)
574 request_queue_t *q;
575 Scsi_Device * SDpnt;
577 SDpnt = SCpnt->device;
579 __scsi_release_command(SCpnt);
582 * Finally, hit the queue request function to make sure that
583 * the device is actually busy if there are requests present.
584 * This won't block - if the device cannot take any more, life
585 * will go on.
587 q = &SDpnt->request_queue;
588 scsi_queue_next_request(q, NULL);
592 * Function: scsi_dispatch_command
594 * Purpose: Dispatch a command to the low-level driver.
596 * Arguments: SCpnt - command block we are dispatching.
598 * Notes:
600 intscsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
602 #ifdef DEBUG_DELAY
603 unsigned long clock;
604 #endif
605 struct Scsi_Host *host;
606 int rtn =0;
607 unsigned long flags =0;
608 unsigned long timeout;
610 ASSERT_LOCK(&io_request_lock,0);
612 #if DEBUG
613 unsigned long*ret =0;
614 #ifdef __mips__
615 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
616 #else
617 ret =__builtin_return_address(0);
618 #endif
619 #endif
621 host = SCpnt->host;
623 /* Assign a unique nonzero serial_number. */
624 if(++serial_number ==0)
625 serial_number =1;
626 SCpnt->serial_number = serial_number;
629 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
630 * we can avoid the drive not being ready.
632 timeout = host->last_reset + MIN_RESET_DELAY;
634 if(host->resetting &&time_before(jiffies, timeout)) {
635 int ticks_remaining = timeout - jiffies;
637 * NOTE: This may be executed from within an interrupt
638 * handler! This is bad, but for now, it'll do. The irq
639 * level of the interrupt handler has been masked out by the
640 * platform dependent interrupt handling code already, so the
641 * sti() here will not cause another call to the SCSI host's
642 * interrupt handler (assuming there is one irq-level per
643 * host).
645 while(--ticks_remaining >=0)
646 mdelay(1+999/ HZ);
647 host->resetting =0;
649 if(host->hostt->use_new_eh_code) {
650 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
651 }else{
652 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
653 scsi_old_times_out);
657 * We will use a queued command if possible, otherwise we will emulate the
658 * queuing and calling of completion function ourselves.
660 SCSI_LOG_MLQUEUE(3,printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
661 "command = %p, buffer = %p,\nbufflen = %d, done = %p)\n",
662 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
663 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
665 SCpnt->state = SCSI_STATE_QUEUED;
666 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
667 if(host->can_queue) {
668 SCSI_LOG_MLQUEUE(3,printk("queuecommand : routine at %p\n",
669 host->hostt->queuecommand));
671 * Use the old error handling code if we haven't converted the driver
672 * to use the new one yet. Note - only the new queuecommand variant
673 * passes a meaningful return value.
675 if(host->hostt->use_new_eh_code) {
676 spin_lock_irqsave(&io_request_lock, flags);
677 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
678 spin_unlock_irqrestore(&io_request_lock, flags);
679 if(rtn !=0) {
680 scsi_delete_timer(SCpnt);
681 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
682 SCSI_LOG_MLQUEUE(3,printk("queuecommand : request rejected\n"));
684 }else{
685 spin_lock_irqsave(&io_request_lock, flags);
686 host->hostt->queuecommand(SCpnt, scsi_old_done);
687 spin_unlock_irqrestore(&io_request_lock, flags);
689 }else{
690 int temp;
692 SCSI_LOG_MLQUEUE(3,printk("command() : routine at %p\n", host->hostt->command));
693 spin_lock_irqsave(&io_request_lock, flags);
694 temp = host->hostt->command(SCpnt);
695 SCpnt->result = temp;
696 #ifdef DEBUG_DELAY
697 spin_unlock_irqrestore(&io_request_lock, flags);
698 clock = jiffies +4* HZ;
699 while(time_before(jiffies, clock))
700 barrier();
701 printk("done(host = %d, result = %04x) : routine at %p\n",
702 host->host_no, temp, host->hostt->command);
703 spin_lock_irqsave(&io_request_lock, flags);
704 #endif
705 if(host->hostt->use_new_eh_code) {
706 scsi_done(SCpnt);
707 }else{
708 scsi_old_done(SCpnt);
710 spin_unlock_irqrestore(&io_request_lock, flags);
712 SCSI_LOG_MLQUEUE(3,printk("leaving scsi_dispatch_cmnd()\n"));
713 return rtn;
716 devfs_handle_t scsi_devfs_handle = NULL;
719 * scsi_do_cmd sends all the commands out to the low-level driver. It
720 * handles the specifics required for each low level driver - ie queued
721 * or non queued. It also prevents conflicts when different high level
722 * drivers go for the same host at the same time.
725 voidscsi_wait_req(Scsi_Request * SRpnt,const void*cmnd ,
726 void*buffer,unsigned bufflen,
727 int timeout,int retries)
729 DECLARE_MUTEX_LOCKED(sem);
731 SRpnt->sr_request.sem = &sem;
732 SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
733 scsi_do_req(SRpnt, (void*) cmnd,
734 buffer, bufflen, scsi_wait_done, timeout, retries);
735 down(&sem);
736 SRpnt->sr_request.sem = NULL;
737 if( SRpnt->sr_command != NULL )
739 scsi_release_command(SRpnt->sr_command);
740 SRpnt->sr_command = NULL;
746 * Function: scsi_do_req
748 * Purpose: Queue a SCSI request
750 * Arguments: SRpnt - command descriptor.
751 * cmnd - actual SCSI command to be performed.
752 * buffer - data buffer.
753 * bufflen - size of data buffer.
754 * done - completion function to be run.
755 * timeout - how long to let it run before timeout.
756 * retries - number of retries we allow.
758 * Lock status: With the new queueing code, this is SMP-safe, and no locks
759 * need be held upon entry. The old queueing code the lock was
760 * assumed to be held upon entry.
762 * Returns: Nothing.
764 * Notes: Prior to the new queue code, this function was not SMP-safe.
765 * Also, this function is now only used for queueing requests
766 * for things like ioctls and character device requests - this
767 * is because we essentially just inject a request into the
768 * queue for the device. Normal block device handling manipulates
769 * the queue directly.
771 voidscsi_do_req(Scsi_Request * SRpnt,const void*cmnd,
772 void*buffer,unsigned bufflen,void(*done) (Scsi_Cmnd *),
773 int timeout,int retries)
775 Scsi_Device * SDpnt = SRpnt->sr_device;
776 struct Scsi_Host *host = SDpnt->host;
778 ASSERT_LOCK(&io_request_lock,0);
780 SCSI_LOG_MLQUEUE(4,
782 int i;
783 int target = SDpnt->id;
784 printk("scsi_do_req (host = %d, channel = %d target = %d, "
785 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
786 "retries = %d)\n"
787 "command : ", host->host_no, SDpnt->channel, target, buffer,
788 bufflen, done, timeout, retries);
789 for(i =0; i <10; ++i)
790 printk("%02x ", ((unsigned char*) cmnd)[i]);
791 printk("\n");
794 if(!host) {
795 panic("Invalid or not present host.\n");
799 * If the upper level driver is reusing these things, then
800 * we should release the low-level block now. Another one will
801 * be allocated later when this request is getting queued.
803 if( SRpnt->sr_command != NULL )
805 scsi_release_command(SRpnt->sr_command);
806 SRpnt->sr_command = NULL;
810 * We must prevent reentrancy to the lowlevel host driver. This prevents
811 * it - we enter a loop until the host we want to talk to is not busy.
812 * Race conditions are prevented, as interrupts are disabled in between the
813 * time we check for the host being not busy, and the time we mark it busy
814 * ourselves.
819 * Our own function scsi_done (which marks the host as not busy, disables
820 * the timeout counter, etc) will be called by us or by the
821 * scsi_hosts[host].queuecommand() function needs to also call
822 * the completion function for the high level driver.
825 memcpy((void*) SRpnt->sr_cmnd, (const void*) cmnd,
826 sizeof(SRpnt->sr_cmnd));
827 SRpnt->sr_bufflen = bufflen;
828 SRpnt->sr_buffer = buffer;
829 SRpnt->sr_allowed = retries;
830 SRpnt->sr_done = done;
831 SRpnt->sr_timeout_per_command = timeout;
833 memcpy((void*) SRpnt->sr_cmnd, (const void*) cmnd,
834 sizeof(SRpnt->sr_cmnd));
836 if(SRpnt->sr_cmd_len ==0)
837 SRpnt->sr_cmd_len =COMMAND_SIZE(SRpnt->sr_cmnd[0]);
840 * At this point, we merely set up the command, stick it in the normal
841 * request queue, and return. Eventually that request will come to the
842 * top of the list, and will be dispatched.
844 scsi_insert_special_req(SRpnt,0);
846 SCSI_LOG_MLQUEUE(3,printk("Leaving scsi_do_cmd()\n"));
850 * Function: scsi_init_cmd_from_req
852 * Purpose: Queue a SCSI command
853 * Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
855 * Arguments: SCpnt - command descriptor.
856 * SRpnt - Request from the queue.
858 * Lock status: None needed.
860 * Returns: Nothing.
862 * Notes: Mainly transfer data from the request structure to the
863 * command structure. The request structure is allocated
864 * using the normal memory allocator, and requests can pile
865 * up to more or less any depth. The command structure represents
866 * a consumable resource, as these are allocated into a pool
867 * when the SCSI subsystem initializes. The preallocation is
868 * required so that in low-memory situations a disk I/O request
869 * won't cause the memory manager to try and write out a page.
870 * The request structure is generally used by ioctls and character
871 * devices.
873 voidscsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
875 struct Scsi_Host *host = SCpnt->host;
877 ASSERT_LOCK(&io_request_lock,0);
879 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
880 SRpnt->sr_command = SCpnt;
882 if(!host) {
883 panic("Invalid or not present host.\n");
886 SCpnt->cmd_len = SRpnt->sr_cmd_len;
887 SCpnt->use_sg = SRpnt->sr_use_sg;
889 memcpy((void*) &SCpnt->request, (const void*) &SRpnt->sr_request,
890 sizeof(SRpnt->sr_request));
891 memcpy((void*) SCpnt->data_cmnd, (const void*) SRpnt->sr_cmnd,
892 sizeof(SCpnt->data_cmnd));
893 SCpnt->reset_chain = NULL;
894 SCpnt->serial_number =0;
895 SCpnt->serial_number_at_timeout =0;
896 SCpnt->bufflen = SRpnt->sr_bufflen;
897 SCpnt->buffer = SRpnt->sr_buffer;
898 SCpnt->flags =0;
899 SCpnt->retries =0;
900 SCpnt->allowed = SRpnt->sr_allowed;
901 SCpnt->done = SRpnt->sr_done;
902 SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
904 SCpnt->sc_data_direction = SRpnt->sr_data_direction;
906 SCpnt->sglist_len = SRpnt->sr_sglist_len;
907 SCpnt->underflow = SRpnt->sr_underflow;
909 SCpnt->sc_request = SRpnt;
911 memcpy((void*) SCpnt->cmnd, (const void*) SRpnt->sr_cmnd,
912 sizeof(SCpnt->cmnd));
913 /* Zero the sense buffer. Some host adapters automatically request
914 * sense on error. 0 is not a valid sense code.
916 memset((void*) SCpnt->sense_buffer,0,sizeof SCpnt->sense_buffer);
917 SCpnt->request_buffer = SRpnt->sr_buffer;
918 SCpnt->request_bufflen = SRpnt->sr_bufflen;
919 SCpnt->old_use_sg = SCpnt->use_sg;
920 if(SCpnt->cmd_len ==0)
921 SCpnt->cmd_len =COMMAND_SIZE(SCpnt->cmnd[0]);
922 SCpnt->old_cmd_len = SCpnt->cmd_len;
923 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
924 SCpnt->old_underflow = SCpnt->underflow;
926 /* Start the timer ticking. */
928 SCpnt->internal_timeout = NORMAL_TIMEOUT;
929 SCpnt->abort_reason =0;
930 SCpnt->result =0;
932 SCSI_LOG_MLQUEUE(3,printk("Leaving scsi_do_cmd()\n"));
936 * Function: scsi_do_cmd
938 * Purpose: Queue a SCSI command
940 * Arguments: SCpnt - command descriptor.
941 * cmnd - actual SCSI command to be performed.
942 * buffer - data buffer.
943 * bufflen - size of data buffer.
944 * done - completion function to be run.
945 * timeout - how long to let it run before timeout.
946 * retries - number of retries we allow.
948 * Lock status: With the new queueing code, this is SMP-safe, and no locks
949 * need be held upon entry. The old queueing code the lock was
950 * assumed to be held upon entry.
952 * Returns: Nothing.
954 * Notes: Prior to the new queue code, this function was not SMP-safe.
955 * Also, this function is now only used for queueing requests
956 * for things like ioctls and character device requests - this
957 * is because we essentially just inject a request into the
958 * queue for the device. Normal block device handling manipulates
959 * the queue directly.
961 voidscsi_do_cmd(Scsi_Cmnd * SCpnt,const void*cmnd,
962 void*buffer,unsigned bufflen,void(*done) (Scsi_Cmnd *),
963 int timeout,int retries)
965 struct Scsi_Host *host = SCpnt->host;
967 ASSERT_LOCK(&io_request_lock,0);
969 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
971 SCSI_LOG_MLQUEUE(4,
973 int i;
974 int target = SCpnt->target;
975 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
976 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
977 "retries = %d)\n"
978 "command : ", host->host_no, SCpnt->channel, target, buffer,
979 bufflen, done, timeout, retries);
980 for(i =0; i <10; ++i)
981 printk("%02x ", ((unsigned char*) cmnd)[i]);
982 printk("\n");
985 if(!host) {
986 panic("Invalid or not present host.\n");
989 * We must prevent reentrancy to the lowlevel host driver. This prevents
990 * it - we enter a loop until the host we want to talk to is not busy.
991 * Race conditions are prevented, as interrupts are disabled in between the
992 * time we check for the host being not busy, and the time we mark it busy
993 * ourselves.
998 * Our own function scsi_done (which marks the host as not busy, disables
999 * the timeout counter, etc) will be called by us or by the
1000 * scsi_hosts[host].queuecommand() function needs to also call
1001 * the completion function for the high level driver.
1004 memcpy((void*) SCpnt->data_cmnd, (const void*) cmnd,
1005 sizeof(SCpnt->data_cmnd));
1006 SCpnt->reset_chain = NULL;
1007 SCpnt->serial_number =0;
1008 SCpnt->serial_number_at_timeout =0;
1009 SCpnt->bufflen = bufflen;
1010 SCpnt->buffer = buffer;
1011 SCpnt->flags =0;
1012 SCpnt->retries =0;
1013 SCpnt->allowed = retries;
1014 SCpnt->done = done;
1015 SCpnt->timeout_per_command = timeout;
1017 memcpy((void*) SCpnt->cmnd, (const void*) cmnd,
1018 sizeof(SCpnt->cmnd));
1019 /* Zero the sense buffer. Some host adapters automatically request
1020 * sense on error. 0 is not a valid sense code.
1022 memset((void*) SCpnt->sense_buffer,0,sizeof SCpnt->sense_buffer);
1023 SCpnt->request_buffer = buffer;
1024 SCpnt->request_bufflen = bufflen;
1025 SCpnt->old_use_sg = SCpnt->use_sg;
1026 if(SCpnt->cmd_len ==0)
1027 SCpnt->cmd_len =COMMAND_SIZE(SCpnt->cmnd[0]);
1028 SCpnt->old_cmd_len = SCpnt->cmd_len;
1029 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1030 SCpnt->old_underflow = SCpnt->underflow;
1032 /* Start the timer ticking. */
1034 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1035 SCpnt->abort_reason =0;
1036 SCpnt->result =0;
1039 * At this point, we merely set up the command, stick it in the normal
1040 * request queue, and return. Eventually that request will come to the
1041 * top of the list, and will be dispatched.
1043 scsi_insert_special_cmd(SCpnt,0);
1045 SCSI_LOG_MLQUEUE(3,printk("Leaving scsi_do_cmd()\n"));
1049 * This function is the mid-level interrupt routine, which decides how
1050 * to handle error conditions. Each invocation of this function must
1051 * do one and *only* one of the following:
1053 * 1) Insert command in BH queue.
1054 * 2) Activate error handler for host.
1056 * FIXME(eric) - I am concerned about stack overflow (still). An
1057 * interrupt could come while we are processing the bottom queue,
1058 * which would cause another command to be stuffed onto the bottom
1059 * queue, and it would in turn be processed as that interrupt handler
1060 * is returning. Given a sufficiently steady rate of returning
1061 * commands, this could cause the stack to overflow. I am not sure
1062 * what is the most appropriate solution here - we should probably
1063 * keep a depth count, and not process any commands while we still
1064 * have a bottom handler active higher in the stack.
1066 * There is currently code in the bottom half handler to monitor
1067 * recursion in the bottom handler and report if it ever happens. If
1068 * this becomes a problem, it won't be hard to engineer something to
1069 * deal with it so that only the outer layer ever does any real
1070 * processing.
1072 voidscsi_done(Scsi_Cmnd * SCpnt)
1074 unsigned long flags;
1075 int tstatus;
1078 * We don't have to worry about this one timing out any more.
1080 tstatus =scsi_delete_timer(SCpnt);
1083 * If we are unable to remove the timer, it means that the command
1084 * has already timed out. In this case, we have no choice but to
1085 * let the timeout function run, as we have no idea where in fact
1086 * that function could really be. It might be on another processor,
1087 * etc, etc.
1089 if(!tstatus) {
1090 SCpnt->done_late =1;
1091 return;
1093 /* Set the serial numbers back to zero */
1094 SCpnt->serial_number =0;
1097 * First, see whether this command already timed out. If so, we ignore
1098 * the response. We treat it as if the command never finished.
1100 * Since serial_number is now 0, the error handler cound detect this
1101 * situation and avoid to call the the low level driver abort routine.
1102 * (DB)
1104 * FIXME(eric) - I believe that this test is now redundant, due to
1105 * the test of the return status of del_timer().
1107 if(SCpnt->state == SCSI_STATE_TIMEOUT) {
1108 SCSI_LOG_MLCOMPLETE(1,printk("Ignoring completion of %p due to timeout status", SCpnt));
1109 return;
1111 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1113 SCpnt->serial_number_at_timeout =0;
1114 SCpnt->state = SCSI_STATE_BHQUEUE;
1115 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1116 SCpnt->bh_next = NULL;
1119 * Next, put this command in the BH queue.
1121 * We need a spinlock here, or compare and exchange if we can reorder incoming
1122 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1123 * before bh is serviced. -jj
1125 * We already have the io_request_lock here, since we are called from the
1126 * interrupt handler or the error handler. (DB)
1128 * This may be true at the moment, but I would like to wean all of the low
1129 * level drivers away from using io_request_lock. Technically they should
1130 * all use their own locking. I am adding a small spinlock to protect
1131 * this datastructure to make it safe for that day. (ERY)
1133 if(!scsi_bh_queue_head) {
1134 scsi_bh_queue_head = SCpnt;
1135 scsi_bh_queue_tail = SCpnt;
1136 }else{
1137 scsi_bh_queue_tail->bh_next = SCpnt;
1138 scsi_bh_queue_tail = SCpnt;
1141 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1143 * Mark the bottom half handler to be run.
1145 mark_bh(SCSI_BH);
1149 * Procedure: scsi_bottom_half_handler
1151 * Purpose: Called after we have finished processing interrupts, it
1152 * performs post-interrupt handling for commands that may
1153 * have completed.
1155 * Notes: This is called with all interrupts enabled. This should reduce
1156 * interrupt latency, stack depth, and reentrancy of the low-level
1157 * drivers.
1159 * The io_request_lock is required in all the routine. There was a subtle
1160 * race condition when scsi_done is called after a command has already
1161 * timed out but before the time out is processed by the error handler.
1162 * (DB)
1164 * I believe I have corrected this. We simply monitor the return status of
1165 * del_timer() - if this comes back as 0, it means that the timer has fired
1166 * and that a timeout is in progress. I have modified scsi_done() such
1167 * that in this instance the command is never inserted in the bottom
1168 * half queue. Thus the only time we hold the lock here is when
1169 * we wish to atomically remove the contents of the queue.
1171 voidscsi_bottom_half_handler(void)
1173 Scsi_Cmnd *SCpnt;
1174 Scsi_Cmnd *SCnext;
1175 unsigned long flags;
1178 while(1==1) {
1179 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1180 SCpnt = scsi_bh_queue_head;
1181 scsi_bh_queue_head = NULL;
1182 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1184 if(SCpnt == NULL) {
1185 return;
1187 SCnext = SCpnt->bh_next;
1189 for(; SCpnt; SCpnt = SCnext) {
1190 SCnext = SCpnt->bh_next;
1192 switch(scsi_decide_disposition(SCpnt)) {
1193 case SUCCESS:
1195 * Add to BH queue.
1197 SCSI_LOG_MLCOMPLETE(3,printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1198 SCpnt->host->host_failed,
1199 SCpnt->result));
1201 scsi_finish_command(SCpnt);
1202 break;
1203 case NEEDS_RETRY:
1205 * We only come in here if we want to retry a command. The
1206 * test to see whether the command should be retried should be
1207 * keeping track of the number of tries, so we don't end up looping,
1208 * of course.
1210 SCSI_LOG_MLCOMPLETE(3,printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1211 SCpnt->host->host_failed, SCpnt->result));
1213 scsi_retry_command(SCpnt);
1214 break;
1215 case ADD_TO_MLQUEUE:
1217 * This typically happens for a QUEUE_FULL message -
1218 * typically only when the queue depth is only
1219 * approximate for a given device. Adding a command
1220 * to the queue for the device will prevent further commands
1221 * from being sent to the device, so we shouldn't end up
1222 * with tons of things being sent down that shouldn't be.
1224 SCSI_LOG_MLCOMPLETE(3,printk("Command rejected as device queue full, put on ml queue %p\n",
1225 SCpnt));
1226 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1227 break;
1228 default:
1230 * Here we have a fatal error of some sort. Turn it over to
1231 * the error handler.
1233 SCSI_LOG_MLCOMPLETE(3,printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1234 SCpnt, SCpnt->result,
1235 atomic_read(&SCpnt->host->host_active),
1236 SCpnt->host->host_busy,
1237 SCpnt->host->host_failed));
1240 * Dump the sense information too.
1242 if((status_byte(SCpnt->result) & CHECK_CONDITION) !=0) {
1243 SCSI_LOG_MLCOMPLETE(3,print_sense("bh", SCpnt));
1245 if(SCpnt->host->eh_wait != NULL) {
1246 SCpnt->host->host_failed++;
1247 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1248 SCpnt->state = SCSI_STATE_FAILED;
1249 SCpnt->host->in_recovery =1;
1251 * If the host is having troubles, then look to see if this was the last
1252 * command that might have failed. If so, wake up the error handler.
1254 if(SCpnt->host->host_busy == SCpnt->host->host_failed) {
1255 SCSI_LOG_ERROR_RECOVERY(5,printk("Waking error handler thread (%d)\n",
1256 atomic_read(&SCpnt->host->eh_wait->count)));
1257 up(SCpnt->host->eh_wait);
1259 }else{
1261 * We only get here if the error recovery thread has died.
1263 scsi_finish_command(SCpnt);
1266 }/* for(; SCpnt...) */
1268 }/* while(1==1) */
1273 * Function: scsi_retry_command
1275 * Purpose: Send a command back to the low level to be retried.
1277 * Notes: This command is always executed in the context of the
1278 * bottom half handler, or the error handler thread. Low
1279 * level drivers should not become re-entrant as a result of
1280 * this.
1282 intscsi_retry_command(Scsi_Cmnd * SCpnt)
1284 memcpy((void*) SCpnt->cmnd, (void*) SCpnt->data_cmnd,
1285 sizeof(SCpnt->data_cmnd));
1286 SCpnt->request_buffer = SCpnt->buffer;
1287 SCpnt->request_bufflen = SCpnt->bufflen;
1288 SCpnt->use_sg = SCpnt->old_use_sg;
1289 SCpnt->cmd_len = SCpnt->old_cmd_len;
1290 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1291 SCpnt->underflow = SCpnt->old_underflow;
1294 * Zero the sense information from the last time we tried
1295 * this command.
1297 memset((void*) SCpnt->sense_buffer,0,sizeof SCpnt->sense_buffer);
1299 returnscsi_dispatch_cmd(SCpnt);
1303 * Function: scsi_finish_command
1305 * Purpose: Pass command off to upper layer for finishing of I/O
1306 * request, waking processes that are waiting on results,
1307 * etc.
1309 voidscsi_finish_command(Scsi_Cmnd * SCpnt)
1311 struct Scsi_Host *host;
1312 Scsi_Device *device;
1313 Scsi_Request * SRpnt;
1314 unsigned long flags;
1316 ASSERT_LOCK(&io_request_lock,0);
1318 host = SCpnt->host;
1319 device = SCpnt->device;
1322 * We need to protect the decrement, as otherwise a race condition
1323 * would exist. Fiddling with SCpnt isn't a problem as the
1324 * design only allows a single SCpnt to be active in only
1325 * one execution context, but the device and host structures are
1326 * shared.
1328 spin_lock_irqsave(&io_request_lock, flags);
1329 host->host_busy--;/* Indicate that we are free */
1330 device->device_busy--;/* Decrement device usage counter. */
1331 spin_unlock_irqrestore(&io_request_lock, flags);
1334 * Clear the flags which say that the device/host is no longer
1335 * capable of accepting new commands. These are set in scsi_queue.c
1336 * for both the queue full condition on a device, and for a
1337 * host full condition on the host.
1339 host->host_blocked = FALSE;
1340 device->device_blocked = FALSE;
1343 * If we have valid sense information, then some kind of recovery
1344 * must have taken place. Make a note of this.
1346 if(scsi_sense_valid(SCpnt)) {
1347 SCpnt->result |= (DRIVER_SENSE <<24);
1349 SCSI_LOG_MLCOMPLETE(3,printk("Notifying upper driver of completion for device %d %x\n",
1350 SCpnt->device->id, SCpnt->result));
1352 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1353 SCpnt->state = SCSI_STATE_FINISHED;
1355 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1356 SCpnt->use_sg = SCpnt->old_use_sg;
1359 * If there is an associated request structure, copy the data over before we call the
1360 * completion function.
1362 SRpnt = SCpnt->sc_request;
1363 if( SRpnt != NULL ) {
1364 SRpnt->sr_result = SRpnt->sr_command->result;
1365 if( SRpnt->sr_result !=0) {
1366 memcpy(SRpnt->sr_sense_buffer,
1367 SRpnt->sr_command->sense_buffer,
1368 sizeof(SRpnt->sr_sense_buffer));
1372 SCpnt->done(SCpnt);
1375 static intscsi_register_host(Scsi_Host_Template *);
1376 static voidscsi_unregister_host(Scsi_Host_Template *);
1379 * Function: scsi_release_commandblocks()
1381 * Purpose: Release command blocks associated with a device.
1383 * Arguments: SDpnt - device
1385 * Returns: Nothing
1387 * Lock status: No locking assumed or required.
1389 * Notes:
1391 voidscsi_release_commandblocks(Scsi_Device * SDpnt)
1393 Scsi_Cmnd *SCpnt, *SCnext;
1394 unsigned long flags;
1396 spin_lock_irqsave(&device_request_lock, flags);
1397 for(SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1398 SDpnt->device_queue = SCnext = SCpnt->next;
1399 kfree((char*) SCpnt);
1401 SDpnt->has_cmdblocks =0;
1402 SDpnt->queue_depth =0;
1403 spin_unlock_irqrestore(&device_request_lock, flags);
1407 * Function: scsi_build_commandblocks()
1409 * Purpose: Allocate command blocks associated with a device.
1411 * Arguments: SDpnt - device
1413 * Returns: Nothing
1415 * Lock status: No locking assumed or required.
1417 * Notes:
1419 voidscsi_build_commandblocks(Scsi_Device * SDpnt)
1421 unsigned long flags;
1422 struct Scsi_Host *host = SDpnt->host;
1423 int j;
1424 Scsi_Cmnd *SCpnt;
1426 spin_lock_irqsave(&device_request_lock, flags);
1428 if(SDpnt->queue_depth ==0)
1430 SDpnt->queue_depth = host->cmd_per_lun;
1431 if(SDpnt->queue_depth ==0)
1432 SDpnt->queue_depth =1;/* live to fight another day */
1434 SDpnt->device_queue = NULL;
1436 for(j =0; j < SDpnt->queue_depth; j++) {
1437 SCpnt = (Scsi_Cmnd *)
1438 kmalloc(sizeof(Scsi_Cmnd),
1439 GFP_ATOMIC |
1440 (host->unchecked_isa_dma ? GFP_DMA :0));
1441 if(NULL == SCpnt)
1442 break;/* If not, the next line will oops ... */
1443 memset(SCpnt,0,sizeof(Scsi_Cmnd));
1444 SCpnt->host = host;
1445 SCpnt->device = SDpnt;
1446 SCpnt->target = SDpnt->id;
1447 SCpnt->lun = SDpnt->lun;
1448 SCpnt->channel = SDpnt->channel;
1449 SCpnt->request.rq_status = RQ_INACTIVE;
1450 SCpnt->use_sg =0;
1451 SCpnt->old_use_sg =0;
1452 SCpnt->old_cmd_len =0;
1453 SCpnt->underflow =0;
1454 SCpnt->old_underflow =0;
1455 SCpnt->transfersize =0;
1456 SCpnt->resid =0;
1457 SCpnt->serial_number =0;
1458 SCpnt->serial_number_at_timeout =0;
1459 SCpnt->host_scribble = NULL;
1460 SCpnt->next = SDpnt->device_queue;
1461 SDpnt->device_queue = SCpnt;
1462 SCpnt->state = SCSI_STATE_UNUSED;
1463 SCpnt->owner = SCSI_OWNER_NOBODY;
1465 if(j < SDpnt->queue_depth) {/* low on space (D.Gilbert 990424) */
1466 printk("scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1467 SDpnt->queue_depth, j);
1468 SDpnt->queue_depth = j;
1469 SDpnt->has_cmdblocks = (0!= j);
1470 }else{
1471 SDpnt->has_cmdblocks =1;
1473 spin_unlock_irqrestore(&device_request_lock, flags);
1476 static intproc_scsi_gen_write(struct file * file,const char* buf,
1477 unsigned long length,void*data);
1479 void __init scsi_host_no_insert(char*str,int n)
1481 Scsi_Host_Name *shn, *shn2;
1482 int len;
1484 len =strlen(str);
1485 if(len && (shn = (Scsi_Host_Name *)kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1486 if((shn->name =kmalloc(len+1, GFP_ATOMIC))) {
1487 strncpy(shn->name, str, len);
1488 shn->name[len] =0;
1489 shn->host_no = n;
1490 shn->host_registered =0;
1491 shn->loaded_as_module =1;/* numbers shouldn't be freed in any case */
1492 shn->next = NULL;
1493 if(scsi_host_no_list) {
1494 for(shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1496 shn2->next = shn;
1498 else
1499 scsi_host_no_list = shn;
1500 max_scsi_hosts = n+1;
1502 else
1503 kfree((char*) shn);
1507 #ifdef CONFIG_PROC_FS
1508 static intscsi_proc_info(char*buffer,char**start, off_t offset,int length)
1510 Scsi_Device *scd;
1511 struct Scsi_Host *HBA_ptr;
1512 int size, len =0;
1513 off_t begin =0;
1514 off_t pos =0;
1517 * First, see if there are any attached devices or not.
1519 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1520 if(HBA_ptr->host_queue != NULL) {
1521 break;
1524 size =sprintf(buffer + len,"Attached devices: %s\n", (HBA_ptr) ?"":"none");
1525 len += size;
1526 pos = begin + len;
1527 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1528 #if 0
1529 size +=sprintf(buffer + len,"scsi%2d: %s\n", (int) HBA_ptr->host_no,
1530 HBA_ptr->hostt->procname);
1531 len += size;
1532 pos = begin + len;
1533 #endif
1534 for(scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1535 proc_print_scsidevice(scd, buffer, &size, len);
1536 len += size;
1537 pos = begin + len;
1539 if(pos < offset) {
1540 len =0;
1541 begin = pos;
1543 if(pos > offset + length)
1544 goto stop_output;
1548 stop_output:
1549 *start = buffer + (offset - begin);/* Start of wanted data */
1550 len -= (offset - begin);/* Start slop */
1551 if(len > length)
1552 len = length;/* Ending slop */
1553 return(len);
1556 static intproc_scsi_gen_write(struct file * file,const char* buf,
1557 unsigned long length,void*data)
1559 struct Scsi_Device_Template *SDTpnt;
1560 Scsi_Device *scd;
1561 struct Scsi_Host *HBA_ptr;
1562 char*p;
1563 int host, channel, id, lun;
1564 char* buffer;
1565 int err;
1567 if(!buf || length>PAGE_SIZE)
1568 return-EINVAL;
1570 if(!(buffer = (char*)__get_free_page(GFP_KERNEL)))
1571 return-ENOMEM;
1572 copy_from_user(buffer, buf, length);
1574 err = -EINVAL;
1575 if(length <11||strncmp("scsi", buffer,4))
1576 goto out;
1579 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1580 * to dump status of all scsi commands. The number is used to specify the level
1581 * of detail in the dump.
1583 if(!strncmp("dump", buffer +5,4)) {
1584 unsigned int level;
1586 p = buffer +10;
1588 if(*p =='\0')
1589 goto out;
1591 level =simple_strtoul(p, NULL,0);
1592 scsi_dump_status(level);
1595 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1596 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1597 * llcomplete,hlqueue,hlcomplete]
1599 #ifdef CONFIG_SCSI_LOGGING/* { */
1601 if(!strncmp("log", buffer +5,3)) {
1602 char*token;
1603 unsigned int level;
1605 p = buffer +9;
1606 token = p;
1607 while(*p !=' '&& *p !='\t'&& *p !='\0') {
1608 p++;
1611 if(*p =='\0') {
1612 if(strncmp(token,"all",3) ==0) {
1614 * Turn on absolutely everything.
1616 scsi_logging_level = ~0;
1617 }else if(strncmp(token,"none",4) ==0) {
1619 * Turn off absolutely everything.
1621 scsi_logging_level =0;
1622 }else{
1623 goto out;
1625 }else{
1626 *p++ ='\0';
1628 level =simple_strtoul(p, NULL,0);
1631 * Now figure out what to do with it.
1633 if(strcmp(token,"error") ==0) {
1634 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1635 }else if(strcmp(token,"timeout") ==0) {
1636 SCSI_SET_TIMEOUT_LOGGING(level);
1637 }else if(strcmp(token,"scan") ==0) {
1638 SCSI_SET_SCAN_BUS_LOGGING(level);
1639 }else if(strcmp(token,"mlqueue") ==0) {
1640 SCSI_SET_MLQUEUE_LOGGING(level);
1641 }else if(strcmp(token,"mlcomplete") ==0) {
1642 SCSI_SET_MLCOMPLETE_LOGGING(level);
1643 }else if(strcmp(token,"llqueue") ==0) {
1644 SCSI_SET_LLQUEUE_LOGGING(level);
1645 }else if(strcmp(token,"llcomplete") ==0) {
1646 SCSI_SET_LLCOMPLETE_LOGGING(level);
1647 }else if(strcmp(token,"hlqueue") ==0) {
1648 SCSI_SET_HLQUEUE_LOGGING(level);
1649 }else if(strcmp(token,"hlcomplete") ==0) {
1650 SCSI_SET_HLCOMPLETE_LOGGING(level);
1651 }else if(strcmp(token,"ioctl") ==0) {
1652 SCSI_SET_IOCTL_LOGGING(level);
1653 }else{
1654 goto out;
1658 printk("scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1660 #endif/* CONFIG_SCSI_LOGGING *//* } */
1663 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1664 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1665 * Consider this feature BETA.
1666 * CAUTION: This is not for hotplugging your peripherals. As
1667 * SCSI was not designed for this you could damage your
1668 * hardware !
1669 * However perhaps it is legal to switch on an
1670 * already connected device. It is perhaps not
1671 * guaranteed this device doesn't corrupt an ongoing data transfer.
1673 if(!strncmp("add-single-device", buffer +5,17)) {
1674 p = buffer +23;
1676 host =simple_strtoul(p, &p,0);
1677 channel =simple_strtoul(p +1, &p,0);
1678 id =simple_strtoul(p +1, &p,0);
1679 lun =simple_strtoul(p +1, &p,0);
1681 printk("scsi singledevice %d %d %d %d\n", host, channel,
1682 id, lun);
1684 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1685 if(HBA_ptr->host_no == host) {
1686 break;
1689 err = -ENXIO;
1690 if(!HBA_ptr)
1691 goto out;
1693 for(scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1694 if((scd->channel == channel
1695 && scd->id == id
1696 && scd->lun == lun)) {
1697 break;
1701 err = -ENOSYS;
1702 if(scd)
1703 goto out;/* We do not yet support unplugging */
1705 scan_scsis(HBA_ptr,1, channel, id, lun);
1707 /* FIXME (DB) This assumes that the queue_depth routines can be used
1708 in this context as well, while they were all designed to be
1709 called only once after the detect routine. (DB) */
1710 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1711 it is called before build_commandblocks() */
1713 err = length;
1714 goto out;
1717 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1718 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1720 * Consider this feature pre-BETA.
1722 * CAUTION: This is not for hotplugging your peripherals. As
1723 * SCSI was not designed for this you could damage your
1724 * hardware and thoroughly confuse the SCSI subsystem.
1727 else if(!strncmp("remove-single-device", buffer +5,20)) {
1728 p = buffer +26;
1730 host =simple_strtoul(p, &p,0);
1731 channel =simple_strtoul(p +1, &p,0);
1732 id =simple_strtoul(p +1, &p,0);
1733 lun =simple_strtoul(p +1, &p,0);
1736 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1737 if(HBA_ptr->host_no == host) {
1738 break;
1741 err = -ENODEV;
1742 if(!HBA_ptr)
1743 goto out;
1745 for(scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1746 if((scd->channel == channel
1747 && scd->id == id
1748 && scd->lun == lun)) {
1749 break;
1753 if(scd == NULL)
1754 goto out;/* there is no such device attached */
1756 err = -EBUSY;
1757 if(scd->access_count)
1758 goto out;
1760 SDTpnt = scsi_devicelist;
1761 while(SDTpnt != NULL) {
1762 if(SDTpnt->detach)
1763 (*SDTpnt->detach) (scd);
1764 SDTpnt = SDTpnt->next;
1767 if(scd->attached ==0) {
1769 * Nobody is using this device any more.
1770 * Free all of the command structures.
1772 if(HBA_ptr->hostt->revoke)
1773 HBA_ptr->hostt->revoke(scd);
1774 devfs_unregister(scd->de);
1775 scsi_release_commandblocks(scd);
1777 /* Now we can remove the device structure */
1778 if(scd->next != NULL)
1779 scd->next->prev = scd->prev;
1781 if(scd->prev != NULL)
1782 scd->prev->next = scd->next;
1784 if(HBA_ptr->host_queue == scd) {
1785 HBA_ptr->host_queue = scd->next;
1787 blk_cleanup_queue(&scd->request_queue);
1788 kfree((char*) scd);
1789 }else{
1790 goto out;
1792 err =0;
1794 out:
1796 free_page((unsigned long) buffer);
1797 return err;
1799 #endif
1802 * This entry point should be called by a loadable module if it is trying
1803 * add a low level scsi driver to the system.
1805 static intscsi_register_host(Scsi_Host_Template * tpnt)
1807 int pcount;
1808 struct Scsi_Host *shpnt;
1809 Scsi_Device *SDpnt;
1810 struct Scsi_Device_Template *sdtpnt;
1811 const char*name;
1812 unsigned long flags;
1813 int out_of_space =0;
1815 if(tpnt->next || !tpnt->detect)
1816 return1;/* Must be already loaded, or
1817 * no detect routine available
1819 pcount = next_scsi_host;
1821 /* The detect routine must carefully spinunlock/spinlock if
1822 it enables interrupts, since all interrupt handlers do
1823 spinlock as well.
1824 All lame drivers are going to fail due to the following
1825 spinlock. For the time beeing let's use it only for drivers
1826 using the new scsi code. NOTE: the detect routine could
1827 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1829 if(tpnt->use_new_eh_code) {
1830 spin_lock_irqsave(&io_request_lock, flags);
1831 tpnt->present = tpnt->detect(tpnt);
1832 spin_unlock_irqrestore(&io_request_lock, flags);
1833 }else
1834 tpnt->present = tpnt->detect(tpnt);
1836 if(tpnt->present) {
1837 if(pcount == next_scsi_host) {
1838 if(tpnt->present >1) {
1839 printk("Failure to register low-level scsi driver");
1840 scsi_unregister_host(tpnt);
1841 return1;
1844 * The low-level driver failed to register a driver. We
1845 * can do this now.
1847 scsi_register(tpnt,0);
1849 tpnt->next = scsi_hosts;/* Add to the linked list */
1850 scsi_hosts = tpnt;
1852 /* Add the new driver to /proc/scsi */
1853 #ifdef CONFIG_PROC_FS
1854 build_proc_dir_entries(tpnt);
1855 #endif
1859 * Add the kernel threads for each host adapter that will
1860 * handle error correction.
1862 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1863 if(shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
1864 DECLARE_MUTEX_LOCKED(sem);
1866 shpnt->eh_notify = &sem;
1867 kernel_thread((int(*)(void*)) scsi_error_handler,
1868 (void*) shpnt,0);
1871 * Now wait for the kernel error thread to initialize itself
1872 * as it might be needed when we scan the bus.
1874 down(&sem);
1875 shpnt->eh_notify = NULL;
1879 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1880 if(shpnt->hostt == tpnt) {
1881 if(tpnt->info) {
1882 name = tpnt->info(shpnt);
1883 }else{
1884 name = tpnt->name;
1886 printk("scsi%d : %s\n",/* And print a little message */
1887 shpnt->host_no, name);
1891 printk("scsi : %d host%s.\n", next_scsi_host,
1892 (next_scsi_host ==1) ?"":"s");
1894 /* The next step is to call scan_scsis here. This generates the
1895 * Scsi_Devices entries
1897 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1898 if(shpnt->hostt == tpnt) {
1899 scan_scsis(shpnt,0,0,0,0);
1900 if(shpnt->select_queue_depths != NULL) {
1901 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
1906 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1907 if(sdtpnt->init && sdtpnt->dev_noticed)
1908 (*sdtpnt->init) ();
1912 * Next we create the Scsi_Cmnd structures for this host
1914 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1915 for(SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
1916 if(SDpnt->host->hostt == tpnt) {
1917 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1918 if(sdtpnt->attach)
1919 (*sdtpnt->attach) (SDpnt);
1920 if(SDpnt->attached) {
1921 scsi_build_commandblocks(SDpnt);
1922 if(0== SDpnt->has_cmdblocks)
1923 out_of_space =1;
1929 * Now that we have all of the devices, resize the DMA pool,
1930 * as required. */
1931 if(!out_of_space)
1932 scsi_resize_dma_pool();
1935 /* This does any final handling that is required. */
1936 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1937 if(sdtpnt->finish && sdtpnt->nr_dev) {
1938 (*sdtpnt->finish) ();
1942 #if defined(USE_STATIC_SCSI_MEMORY)
1943 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
1944 (scsi_memory_upper_value - scsi_memory_lower_value) /1024,
1945 (scsi_init_memory_start - scsi_memory_lower_value) /1024,
1946 (scsi_memory_upper_value - scsi_init_memory_start) /1024);
1947 #endif
1949 MOD_INC_USE_COUNT;
1951 if(out_of_space) {
1952 scsi_unregister_host(tpnt);/* easiest way to clean up?? */
1953 return1;
1954 }else
1955 return0;
1959 * Similarly, this entry point should be called by a loadable module if it
1960 * is trying to remove a low level scsi driver from the system.
1962 * Note - there is a fatal flaw in the deregister module function.
1963 * There is no way to return a code that says 'I cannot be unloaded now'.
1964 * The system relies entirely upon usage counts that are maintained,
1965 * and the assumption is that if the usage count is 0, then the module
1966 * can be unloaded.
1968 static voidscsi_unregister_host(Scsi_Host_Template * tpnt)
1970 int online_status;
1971 int pcount;
1972 Scsi_Cmnd *SCpnt;
1973 Scsi_Device *SDpnt;
1974 Scsi_Device *SDpnt1;
1975 struct Scsi_Device_Template *sdtpnt;
1976 struct Scsi_Host *sh1;
1977 struct Scsi_Host *shpnt;
1978 char name[10];/* host_no>=10^9? I don't think so. */
1981 * First verify that this host adapter is completely free with no pending
1982 * commands
1984 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1985 for(SDpnt = shpnt->host_queue; SDpnt;
1986 SDpnt = SDpnt->next) {
1987 if(SDpnt->host->hostt == tpnt
1988 && SDpnt->host->hostt->module
1989 &&GET_USE_COUNT(SDpnt->host->hostt->module))
1990 return;
1992 * FIXME(eric) - We need to find a way to notify the
1993 * low level driver that we are shutting down - via the
1994 * special device entry that still needs to get added.
1996 * Is detach interface below good enough for this?
2002 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2003 * to help prevent race conditions where other hosts/processors could try and
2004 * get in and queue a command.
2006 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2007 for(SDpnt = shpnt->host_queue; SDpnt;
2008 SDpnt = SDpnt->next) {
2009 if(SDpnt->host->hostt == tpnt)
2010 SDpnt->online = FALSE;
2015 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2016 if(shpnt->hostt != tpnt) {
2017 continue;
2019 for(SDpnt = shpnt->host_queue; SDpnt;
2020 SDpnt = SDpnt->next) {
2022 * Loop over all of the commands associated with the device. If any of
2023 * them are busy, then set the state back to inactive and bail.
2025 for(SCpnt = SDpnt->device_queue; SCpnt;
2026 SCpnt = SCpnt->next) {
2027 online_status = SDpnt->online;
2028 SDpnt->online = FALSE;
2029 if(SCpnt->request.rq_status != RQ_INACTIVE) {
2030 printk("SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2031 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2032 SCpnt->state, SCpnt->owner);
2033 for(SDpnt1 = shpnt->host_queue; SDpnt1;
2034 SDpnt1 = SDpnt1->next) {
2035 for(SCpnt = SDpnt1->device_queue; SCpnt;
2036 SCpnt = SCpnt->next)
2037 if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2038 SCpnt->request.rq_status = RQ_INACTIVE;
2040 SDpnt->online = online_status;
2041 printk("Device busy???\n");
2042 return;
2045 * No, this device is really free. Mark it as such, and
2046 * continue on.
2048 SCpnt->state = SCSI_STATE_DISCONNECTING;
2049 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING;/* Mark as busy */
2053 /* Next we detach the high level drivers from the Scsi_Device structures */
2055 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2056 if(shpnt->hostt != tpnt) {
2057 continue;
2059 for(SDpnt = shpnt->host_queue; SDpnt;
2060 SDpnt = SDpnt->next) {
2061 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2062 if(sdtpnt->detach)
2063 (*sdtpnt->detach) (SDpnt);
2065 /* If something still attached, punt */
2066 if(SDpnt->attached) {
2067 printk("Attached usage count = %d\n", SDpnt->attached);
2068 return;
2070 devfs_unregister(SDpnt->de);
2075 * Next, kill the kernel error recovery thread for this host.
2077 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2078 if(shpnt->hostt == tpnt
2079 && shpnt->hostt->use_new_eh_code
2080 && shpnt->ehandler != NULL) {
2081 DECLARE_MUTEX_LOCKED(sem);
2083 shpnt->eh_notify = &sem;
2084 send_sig(SIGHUP, shpnt->ehandler,1);
2085 down(&sem);
2086 shpnt->eh_notify = NULL;
2090 /* Next we free up the Scsi_Cmnd structures for this host */
2092 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2093 if(shpnt->hostt != tpnt) {
2094 continue;
2096 for(SDpnt = shpnt->host_queue; SDpnt;
2097 SDpnt = shpnt->host_queue) {
2098 scsi_release_commandblocks(SDpnt);
2100 blk_cleanup_queue(&SDpnt->request_queue);
2101 /* Next free up the Scsi_Device structures for this host */
2102 shpnt->host_queue = SDpnt->next;
2103 kfree((char*) SDpnt);
2108 /* Next we go through and remove the instances of the individual hosts
2109 * that were detected */
2111 for(shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2112 sh1 = shpnt->next;
2113 if(shpnt->hostt != tpnt)
2114 continue;
2115 pcount = next_scsi_host;
2116 /* Remove the /proc/scsi directory entry */
2117 sprintf(name,"%d",shpnt->host_no);
2118 remove_proc_entry(name, tpnt->proc_dir);
2119 if(tpnt->release)
2120 (*tpnt->release) (shpnt);
2121 else{
2122 /* This is the default case for the release function.
2123 * It should do the right thing for most correctly
2124 * written host adapters.
2126 if(shpnt->irq)
2127 free_irq(shpnt->irq, NULL);
2128 if(shpnt->dma_channel !=0xff)
2129 free_dma(shpnt->dma_channel);
2130 if(shpnt->io_port && shpnt->n_io_port)
2131 release_region(shpnt->io_port, shpnt->n_io_port);
2133 if(pcount == next_scsi_host)
2134 scsi_unregister(shpnt);
2135 tpnt->present--;
2139 * If there are absolutely no more hosts left, it is safe
2140 * to completely nuke the DMA pool. The resize operation will
2141 * do the right thing and free everything.
2143 if(!scsi_hosts)
2144 scsi_resize_dma_pool();
2146 printk("scsi : %d host%s.\n", next_scsi_host,
2147 (next_scsi_host ==1) ?"":"s");
2149 #if defined(USE_STATIC_SCSI_MEMORY)
2150 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2151 (scsi_memory_upper_value - scsi_memory_lower_value) /1024,
2152 (scsi_init_memory_start - scsi_memory_lower_value) /1024,
2153 (scsi_memory_upper_value - scsi_init_memory_start) /1024);
2154 #endif
2156 /* Remove it from the linked list and /proc */
2157 if(tpnt->present) {
2158 Scsi_Host_Template **SHTp = &scsi_hosts;
2159 Scsi_Host_Template *SHT;
2161 while((SHT = *SHTp) != NULL) {
2162 if(SHT == tpnt) {
2163 *SHTp = SHT->next;
2164 break;
2166 SHTp = &SHT->next;
2168 /* Rebuild the /proc/scsi directory entries */
2169 remove_proc_entry(tpnt->proc_name, proc_scsi);
2171 MOD_DEC_USE_COUNT;
2174 static intscsi_unregister_device(struct Scsi_Device_Template *tpnt);
2177 * This entry point should be called by a loadable module if it is trying
2178 * add a high level scsi driver to the system.
2180 static intscsi_register_device_module(struct Scsi_Device_Template *tpnt)
2182 Scsi_Device *SDpnt;
2183 struct Scsi_Host *shpnt;
2184 int out_of_space =0;
2186 if(tpnt->next)
2187 return1;
2189 scsi_register_device(tpnt);
2191 * First scan the devices that we know about, and see if we notice them.
2194 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2195 for(SDpnt = shpnt->host_queue; SDpnt;
2196 SDpnt = SDpnt->next) {
2197 if(tpnt->detect)
2198 SDpnt->attached += (*tpnt->detect) (SDpnt);
2203 * If any of the devices would match this driver, then perform the
2204 * init function.
2206 if(tpnt->init && tpnt->dev_noticed)
2207 if((*tpnt->init) ())
2208 return1;
2211 * Now actually connect the devices to the new driver.
2213 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2214 for(SDpnt = shpnt->host_queue; SDpnt;
2215 SDpnt = SDpnt->next) {
2216 if(tpnt->attach)
2217 (*tpnt->attach) (SDpnt);
2219 * If this driver attached to the device, and don't have any
2220 * command blocks for this device, allocate some.
2222 if(SDpnt->attached && SDpnt->has_cmdblocks ==0) {
2223 SDpnt->online = TRUE;
2224 scsi_build_commandblocks(SDpnt);
2225 if(0== SDpnt->has_cmdblocks)
2226 out_of_space =1;
2232 * This does any final handling that is required.
2234 if(tpnt->finish && tpnt->nr_dev)
2235 (*tpnt->finish) ();
2236 if(!out_of_space)
2237 scsi_resize_dma_pool();
2238 MOD_INC_USE_COUNT;
2240 if(out_of_space) {
2241 scsi_unregister_device(tpnt);/* easiest way to clean up?? */
2242 return1;
2243 }else
2244 return0;
2247 static intscsi_unregister_device(struct Scsi_Device_Template *tpnt)
2249 Scsi_Device *SDpnt;
2250 struct Scsi_Host *shpnt;
2251 struct Scsi_Device_Template *spnt;
2252 struct Scsi_Device_Template *prev_spnt;
2255 * If we are busy, this is not going to fly.
2257 if(GET_USE_COUNT(tpnt->module) !=0)
2258 return0;
2261 * Next, detach the devices from the driver.
2264 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2265 for(SDpnt = shpnt->host_queue; SDpnt;
2266 SDpnt = SDpnt->next) {
2267 if(tpnt->detach)
2268 (*tpnt->detach) (SDpnt);
2269 if(SDpnt->attached ==0) {
2270 SDpnt->online = FALSE;
2273 * Nobody is using this device any more. Free all of the
2274 * command structures.
2276 scsi_release_commandblocks(SDpnt);
2281 * Extract the template from the linked list.
2283 spnt = scsi_devicelist;
2284 prev_spnt = NULL;
2285 while(spnt != tpnt) {
2286 prev_spnt = spnt;
2287 spnt = spnt->next;
2289 if(prev_spnt == NULL)
2290 scsi_devicelist = tpnt->next;
2291 else
2292 prev_spnt->next = spnt->next;
2294 MOD_DEC_USE_COUNT;
2296 * Final cleanup for the driver is done in the driver sources in the
2297 * cleanup function.
2299 return0;
2303 intscsi_register_module(int module_type,void*ptr)
2305 switch(module_type) {
2306 case MODULE_SCSI_HA:
2307 returnscsi_register_host((Scsi_Host_Template *) ptr);
2309 /* Load upper level device handler of some kind */
2310 case MODULE_SCSI_DEV:
2311 #ifdef CONFIG_KMOD
2312 if(scsi_hosts == NULL)
2313 request_module("scsi_hostadapter");
2314 #endif
2315 returnscsi_register_device_module((struct Scsi_Device_Template *) ptr);
2316 /* The rest of these are not yet implemented */
2318 /* Load constants.o */
2319 case MODULE_SCSI_CONST:
2321 /* Load specialized ioctl handler for some device. Intended for
2322 * cdroms that have non-SCSI2 audio command sets. */
2323 case MODULE_SCSI_IOCTL:
2325 default:
2326 return1;
2330 voidscsi_unregister_module(int module_type,void*ptr)
2332 switch(module_type) {
2333 case MODULE_SCSI_HA:
2334 scsi_unregister_host((Scsi_Host_Template *) ptr);
2335 break;
2336 case MODULE_SCSI_DEV:
2337 scsi_unregister_device((struct Scsi_Device_Template *) ptr);
2338 break;
2339 /* The rest of these are not yet implemented. */
2340 case MODULE_SCSI_CONST:
2341 case MODULE_SCSI_IOCTL:
2342 break;
2343 default:
2345 return;
#ifdef CONFIG_PROC_FS
/*
 * Function:    scsi_dump_status
 *
 * Purpose:     Brain dump of the SCSI subsystem, used for problem solving.
 *
 * Arguments:   level - used to indicate level of detail.
 *
 * Notes:       The level isn't used at all yet, but we need to find some
 *              way of sensibly logging varying degrees of information.
 *              A quick one-line display of each command, plus the status,
 *              would be most useful.
 *
 *              This depends on CONFIG_SCSI_LOGGING, so a lean and mean
 *              kernel compiles all of it away.  It would also be useful
 *              to let the user dump one single host; a second argument
 *              would be needed for that.
 *
 * FIXME - some formatting of the output into tables would be very handy.
 */
static void scsi_dump_status(int level)
{
#ifdef CONFIG_SCSI_LOGGING      /* { */
        int i;
        struct Scsi_Host *shpnt;
        Scsi_Cmnd *SCpnt;
        Scsi_Device *SDpnt;

        /* Per-host state counters first. */
        printk("Dump of scsi host parameters:\n");
        i = 0;
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
                printk(" %d %d %d : %d %d\n",
                       shpnt->host_failed,
                       shpnt->host_busy,
                       atomic_read(&shpnt->host_active),
                       shpnt->host_blocked,
                       shpnt->host_self_blocked);
        }

        printk("\n\n");
        printk("Dump of scsi command parameters:\n");
        /* One line per command block on every device of every host. */
        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
                printk("h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
                for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
                        for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
                                /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
                                printk("(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
                                       i++,
                                       SCpnt->host->host_no,
                                       SCpnt->channel,
                                       SCpnt->target,
                                       SCpnt->lun,
                                       kdevname(SCpnt->request.rq_dev),
                                       SCpnt->request.sector,
                                       SCpnt->request.nr_sectors,
                                       SCpnt->request.current_nr_sectors,
                                       SCpnt->request.rq_status,
                                       SCpnt->use_sg,
                                       SCpnt->retries,
                                       SCpnt->allowed,
                                       SCpnt->flags,
                                       SCpnt->timeout_per_command,
                                       SCpnt->timeout,
                                       SCpnt->internal_timeout,
                                       SCpnt->cmnd[0],
                                       SCpnt->sense_buffer[2],
                                       SCpnt->result);
                        }
                }
        }

        for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
                for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
                        /* Now dump the request lists for each block device. */
                        printk("Dump of pending block device requests\n");
                        for (i = 0; i < MAX_BLKDEV; i++) {
                                struct list_head *queue_head;

                                queue_head = &blk_dev[i].request_queue.queue_head;
                                if (!list_empty(queue_head)) {
                                        struct request *req;
                                        struct list_head *entry;

                                        printk("%d: ", i);
                                        entry = queue_head->next;
                                        do {
                                                req = blkdev_entry_to_request(entry);
                                                printk("(%s %d %ld %ld %ld) ",
                                                       kdevname(req->rq_dev),
                                                       req->cmd,
                                                       req->sector,
                                                       req->nr_sectors,
                                                       req->current_nr_sectors);
                                        } while ((entry = entry->next) != queue_head);
                                        printk("\n");
                                }
                        }
                }
        }
#endif  /* CONFIG_SCSI_LOGGING */       /* } */
}
#endif                          /* CONFIG_PROC_FS */
2454 static intscsi_host_no_init(char*str)
2456 static int next_no =0;
2457 char*temp;
2459 while(str) {
2460 temp = str;
2461 while(*temp && (*temp !=':') && (*temp !=','))
2462 temp++;
2463 if(!*temp)
2464 temp = NULL;
2465 else
2466 *temp++ =0;
2467 scsi_host_no_insert(str, next_no);
2468 str = temp;
2469 next_no++;
2471 return1;
2474 #ifndef MODULE
2475 __setup("scsihosts=", scsi_host_no_init);
2476 #endif
2478 static char*scsihosts;
2480 MODULE_PARM(scsihosts,"s");
2482 static int __init init_scsi(void)
2484 struct proc_dir_entry *generic;
2486 if(scsi_init_minimal_dma_pool() !=0)
2488 return1;
2492 * This makes /proc/scsi and /proc/scsi/scsi visible.
2494 #ifdef CONFIG_PROC_FS
2495 proc_scsi =proc_mkdir("scsi",0);
2496 if(!proc_scsi) {
2497 printk(KERN_ERR "cannot init /proc/scsi\n");
2498 return-ENOMEM;
2500 generic =create_proc_info_entry("scsi/scsi",0,0, scsi_proc_info);
2501 if(!generic) {
2502 printk(KERN_ERR "cannot init /proc/scsi/scsi\n");
2503 remove_proc_entry("scsi",0);
2504 return-ENOMEM;
2506 generic->write_proc = proc_scsi_gen_write;
2507 #endif
2509 scsi_devfs_handle =devfs_mk_dir(NULL,"scsi", NULL);
2510 scsi_host_no_init(scsihosts);
2512 * This is where the processing takes place for most everything
2513 * when commands are completed.
2515 init_bh(SCSI_BH, scsi_bottom_half_handler);
2517 return0;
2520 static void __exit exit_scsi(void)
2522 Scsi_Host_Name *shn, *shn2 = NULL;
2524 remove_bh(SCSI_BH);
2526 devfs_unregister(scsi_devfs_handle);
2527 for(shn = scsi_host_no_list;shn;shn = shn->next) {
2528 if(shn->name)
2529 kfree(shn->name);
2530 if(shn2)
2531 kfree(shn2);
2532 shn2 = shn;
2534 if(shn2)
2535 kfree(shn2);
2537 #ifdef CONFIG_PROC_FS
2538 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2539 remove_proc_entry("scsi/scsi",0);
2540 remove_proc_entry("scsi",0);
2541 #endif
2544 * Free up the DMA pool.
2546 scsi_resize_dma_pool();
/* Hook the mid-layer's init/teardown into the module loader. */
module_init(init_scsi);
module_exit(exit_scsi);
2554 * Function: scsi_get_host_dev()
2556 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2558 * Arguments: SHpnt - Host that needs a Scsi_Device
2560 * Lock status: None assumed.
2562 * Returns: Nothing
2564 * Notes:
2566 Scsi_Device *scsi_get_host_dev(struct Scsi_Host * SHpnt)
2568 Scsi_Device * SDpnt;
2571 * Attach a single Scsi_Device to the Scsi_Host - this should
2572 * be made to look like a "pseudo-device" that points to the
2573 * HA itself. For the moment, we include it at the head of
2574 * the host_queue itself - I don't think we want to show this
2575 * to the HA in select_queue_depths(), as this would probably confuse
2576 * matters.
2577 * Note - this device is not accessible from any high-level
2578 * drivers (including generics), which is probably not
2579 * optimal. We can add hooks later to attach
2581 SDpnt = (Scsi_Device *)kmalloc(sizeof(Scsi_Device),
2582 GFP_ATOMIC);
2583 memset(SDpnt,0,sizeof(Scsi_Device));
2585 SDpnt->host = SHpnt;
2586 SDpnt->id = SHpnt->this_id;
2587 SDpnt->type = -1;
2588 SDpnt->queue_depth =1;
2590 scsi_build_commandblocks(SDpnt);
2592 scsi_initialize_queue(SDpnt, SHpnt);
2594 SDpnt->online = TRUE;
2597 * Initialize the object that we will use to wait for command blocks.
2599 init_waitqueue_head(&SDpnt->scpnt_wait);
2600 return SDpnt;
2604 * Function: scsi_free_host_dev()
2606 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2608 * Arguments: SHpnt - Host that needs a Scsi_Device
2610 * Lock status: None assumed.
2612 * Returns: Nothing
2614 * Notes:
2616 voidscsi_free_host_dev(Scsi_Device * SDpnt)
2618 if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2620 panic("Attempt to delete wrong device\n");
2623 blk_cleanup_queue(&SDpnt->request_queue);
2626 * We only have a single SCpnt attached to this device. Free
2627 * it now.
2629 scsi_release_commandblocks(SDpnt);
2630 kfree(SDpnt);
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-indent-level: 4
 * c-brace-imaginary-offset: 0
 * c-brace-offset: -4
 * c-argdecl-indent: 4
 * c-label-offset: -4
 * c-continued-statement-offset: 4
 * c-continued-brace-offset: 0
 * indent-tabs-mode: nil
 * tab-width: 8
 * End:
 */