patch-2.3.33 linux/drivers/scsi/scsi_error.c
- Lines: 353
- Date: Tue Dec 14 08:49:00 1999
- Orig file: v2.3.32/linux/drivers/scsi/scsi_error.c
- Orig date: Tue Dec 14 01:27:24 1999
diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi_error.c linux/drivers/scsi/scsi_error.c
@@ -117,6 +117,8 @@
SCset->eh_timeout.expires = jiffies + timeout;
SCset->eh_timeout.function = (void (*)(unsigned long)) complete;
+ SCset->done_late = 0;
+
SCSI_LOG_ERROR_RECOVERY(5, printk("Adding timer for command %p at %d (%p)\n", SCset, timeout, complete));
add_timer(&SCset->eh_timeout);
@@ -159,11 +161,14 @@
*
* Returns: Nothing.
*
- * Notes:
+ * Notes: We do not need to lock this. There is the potential for
+ * a race only in that the normal completion handling might
+ * run, but if the normal completion function determines
+ * that the timer has already fired, then it mustn't do
+ * anything.
*/
-static void do_scsi_times_out(Scsi_Cmnd * SCpnt)
+void scsi_times_out(Scsi_Cmnd * SCpnt)
{
-
/*
* Notify the low-level code that this operation failed and we are
* reposessing the command.
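
The note above describes the handshake this hunk sets up between the timer-driven timeout path and normal command completion: once the timer has fired, the error handler owns the command and the completion path must back off. A minimal sketch of that pattern, using a stand-in command structure rather than the real Scsi_Cmnd (all names here are illustrative only):

#include <linux/timer.h>

/* Illustrative stand-in for the command structure. */
struct eh_cmd {
	struct timer_list eh_timeout;
	int done_late;			/* set when completion loses the race */
};

/* Timer callback: from here on the error handler owns the command. */
static void eh_times_out(unsigned long data)
{
	struct eh_cmd *cmd = (struct eh_cmd *) data;
	/* ... mark the command failed and wake the recovery thread ... */
}

/* Normal completion path. */
static void eh_complete(struct eh_cmd *cmd)
{
	/*
	 * del_timer() returns 0 if the timer was no longer pending,
	 * i.e. eh_times_out() already ran (or is running).  In that
	 * case just record that completion arrived late and return;
	 * there is no way to stop the timeout handler, so it wins.
	 */
	if (!del_timer(&cmd->eh_timeout)) {
		cmd->done_late = 1;
		return;
	}
	/* ... normal completion processing ... */
}
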
@@ -219,20 +224,15 @@
* If the host is having troubles, then look to see if this was the last
* command that might have failed. If so, wake up the error handler.
*/
+ if( SCpnt->host->eh_wait == NULL ) {
+ panic("Error handler thread not present at %p %p %s %d",
+ SCpnt, SCpnt->host, __FILE__, __LINE__);
+ }
if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
up(SCpnt->host->eh_wait);
}
}
-void scsi_times_out(Scsi_Cmnd * SCpnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
- do_scsi_times_out(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
-}
-
/*
* Function scsi_block_when_processing_errors
*
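
The hunk above tightens the timeout path's hand-off to the recovery thread: it now panics if no error-handler thread is registered for the host, and it wakes the thread only once every command the host still owns has failed (host_busy == host_failed), so recovery always starts on a quiesced host. A small sketch of that wake-up condition, with illustrative names:

#include <linux/kernel.h>
#include <asm/semaphore.h>

/* Illustrative stand-in for the per-host bookkeeping. */
struct eh_host {
	unsigned short host_busy;	/* commands the driver still owns   */
	unsigned short host_failed;	/* commands that have failed so far */
	struct semaphore *eh_wait;	/* recovery thread sleeps on this   */
};

static void eh_command_failed(struct eh_host *host)
{
	host->host_failed++;

	if (host->eh_wait == NULL)
		panic("Error handler thread not present");

	/* Start recovery only once the host is fully quiesced. */
	if (host->host_busy == host->host_failed)
		up(host->eh_wait);
}
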
@@ -277,8 +277,6 @@
unsigned long flags;
int rtn = FAILED;
- spin_lock_irqsave(&io_request_lock, flags);
-
SCpnt->eh_state = SCSI_STATE_TIMEOUT;
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
@@ -286,8 +284,10 @@
* As far as the low level driver is concerned, this command is still
* active, so we must give the low level driver a chance to abort it. (DB)
*/
+ spin_lock_irqsave(&io_request_lock, flags);
if (SCpnt->host->hostt->eh_abort_handler)
rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
SCpnt->request.rq_status = RQ_SCSI_DONE;
SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
@@ -298,7 +298,6 @@
up(SCpnt->host->eh_action);
else
printk("Missing scsi error handler thread\n");
- spin_unlock_irqrestore(&io_request_lock, flags);
}
@@ -319,6 +318,20 @@
STATIC
void scsi_eh_done(Scsi_Cmnd * SCpnt)
{
+ int rtn;
+
+ /*
+ * If the timeout handler is already running, then just set the
+ * flag which says we finished late, and return. We have no
+ * way of stopping the timeout handler from running, so we must
+ * always defer to it.
+ */
+ rtn = del_timer(&SCpnt->eh_timeout);
+ if (!rtn) {
+ SCpnt->done_late = 1;
+ return;
+ }
+
SCpnt->request.rq_status = RQ_SCSI_DONE;
SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
@@ -418,7 +431,7 @@
{REQUEST_SENSE, 0, 0, 0, 255, 0};
unsigned char scsi_result0[256], *scsi_result = NULL;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
memcpy((void *) SCpnt->cmnd, (void *) generic_sense,
sizeof(generic_sense));
@@ -426,7 +439,7 @@
SCpnt->cmnd[1] = SCpnt->lun << 5;
scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma)
- ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA);
if (scsi_result == NULL) {
printk("cannot allocate scsi_result in scsi_request_sense.\n");
@@ -455,7 +468,7 @@
sizeof(SCpnt->sense_buffer));
if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
- scsi_init_free(scsi_result, 512);
+ kfree(scsi_result);
/*
* When we eventually call scsi_finish, we really wish to complete
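
The allocation change in the two hunks above (and the matching TEST UNIT READY hunks below) drops the old scsi_init_malloc()/scsi_init_free() pool in favour of plain kmalloc()/kfree(): hosts flagged unchecked_isa_dma get a GFP_DMA buffer the adapter can actually reach, everyone else uses the on-stack buffer, and only the heap copy is freed. A simplified sketch of that pairing (not the patched functions themselves):

#include <linux/slab.h>
#include <linux/errno.h>

#define EH_RESULT_LEN 512

static int eh_with_result_buffer(int unchecked_isa_dma)
{
	unsigned char stack_result[EH_RESULT_LEN];
	unsigned char *result;

	/* ISA-only adapters need memory below the 16MB DMA limit. */
	result = unchecked_isa_dma
		? kmalloc(EH_RESULT_LEN, GFP_ATOMIC | GFP_DMA)
		: stack_result;
	if (result == NULL)
		return -ENOMEM;

	/* ... issue the command and copy out the sense data ... */

	/* Free only what actually came from the allocator. */
	if (result != stack_result)
		kfree(result);
	return 0;
}
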
@@ -492,7 +505,7 @@
SCpnt->cmnd[1] = SCpnt->lun << 5;
scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma)
- ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA);
if (scsi_result == NULL) {
printk("cannot allocate scsi_result in scsi_test_unit_ready.\n");
@@ -520,7 +533,7 @@
sizeof(SCpnt->sense_buffer));
if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
- scsi_init_free(scsi_result, 512);
+ kfree(scsi_result);
/*
* When we eventually call scsi_finish, we really wish to complete
@@ -552,7 +565,6 @@
}
}
-
void scsi_sleep(int timeout)
{
DECLARE_MUTEX_LOCKED(sem);
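
Many of the hunks below are only safe because scsi_sleep() can be called with io_request_lock dropped: it simply parks the caller on a private semaphore until a kernel timer releases it, as the DECLARE_MUTEX_LOCKED() above suggests. A sketch of that idiom (assumed helper names, not the exact body of scsi_sleep):

#include <linux/timer.h>
#include <linux/sched.h>
#include <asm/semaphore.h>

static void eh_sleep_done(unsigned long data)
{
	up((struct semaphore *) data);
}

/* Block the caller for "timeout" jiffies, holding no spinlock. */
static void eh_sleep(int timeout)
{
	DECLARE_MUTEX_LOCKED(sem);
	struct timer_list timer;

	init_timer(&timer);
	timer.data = (unsigned long) &sem;
	timer.expires = jiffies + timeout;
	timer.function = eh_sleep_done;
	add_timer(&timer);

	down(&sem);		/* sleeps until eh_sleep_done() runs */
	del_timer(&timer);	/* harmless if it has already expired */
}
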
@@ -582,9 +594,10 @@
*/
STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout)
{
+ unsigned long flags;
struct Scsi_Host *host;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
host = SCpnt->host;
@@ -608,15 +621,14 @@
SCpnt->host->eh_action = &sem;
SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ spin_lock_irqsave(&io_request_lock, flags);
host->hostt->queuecommand(SCpnt, scsi_eh_done);
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
down(&sem);
- spin_lock_irq(&io_request_lock);
SCpnt->host->eh_action = NULL;
- del_timer(&SCpnt->eh_timeout);
-
/*
* See if timeout. If so, tell the host to forget about it.
* In other words, we don't want a callback any more.
@@ -634,7 +646,10 @@
* protection here, since we would end up waiting in the actual low
* level driver, we don't know how to wake it up.
*/
+ spin_lock_irqsave(&io_request_lock, flags);
temp = host->hostt->command(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+
SCpnt->result = temp;
if (scsi_eh_completed_normally(SCpnt)) {
SCpnt->eh_state = SUCCESS;
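
The scsi_send_eh_cmnd changes above capture the locking discipline the rest of the patch follows: io_request_lock is taken only around the host driver entry points (queuecommand() here, command() just below, and the eh_*_handler calls later on), and is never held while sleeping in down() or scsi_sleep(). A condensed sketch of that shape, with illustrative types and names:

#include <linux/spinlock.h>
#include <asm/semaphore.h>

extern spinlock_t io_request_lock;	/* the lock host drivers expect held */

/* Illustrative low-level driver hook. */
struct eh_lld {
	void (*queuecommand)(void *cmd, void (*done)(void *cmd));
};

static void eh_cmd_done(void *cmd)
{
	/* ... up() the semaphore the sender is sleeping on ... */
}

static void eh_send_cmnd(struct eh_lld *lld, void *cmd, struct semaphore *sem)
{
	unsigned long flags;

	/* Hold io_request_lock only across the driver entry point... */
	spin_lock_irqsave(&io_request_lock, flags);
	lld->queuecommand(cmd, eh_cmd_done);
	spin_unlock_irqrestore(&io_request_lock, flags);

	/* ...and sleep for the completion with no spinlock held. */
	down(sem);
}
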
@@ -734,6 +749,9 @@
*/
STATIC int scsi_try_to_abort_command(Scsi_Cmnd * SCpnt, int timeout)
{
+ int rtn;
+ unsigned long flags;
+
SCpnt->eh_state = FAILED; /* Until we come up with something better */
if (SCpnt->host->hostt->eh_abort_handler == NULL) {
@@ -748,7 +766,10 @@
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
- return SCpnt->host->hostt->eh_abort_handler(SCpnt);
+ spin_lock_irqsave(&io_request_lock, flags);
+ rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return rtn;
}
/*
@@ -767,6 +788,7 @@
*/
STATIC int scsi_try_bus_device_reset(Scsi_Cmnd * SCpnt, int timeout)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -776,7 +798,9 @@
}
SCpnt->owner = SCSI_OWNER_LOWLEVEL;
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_device_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -796,6 +820,7 @@
*/
STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -805,7 +830,10 @@
if (SCpnt->host->hostt->eh_bus_reset_handler == NULL) {
return FAILED;
}
+
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_bus_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -814,9 +842,7 @@
* If we had a successful bus reset, mark the command blocks to expect
* a condition code of unit attention.
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(BUS_RESET_SETTLE_TIME);
- spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -841,6 +867,7 @@
*/
STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt)
{
+ unsigned long flags;
int rtn;
SCpnt->eh_state = FAILED; /* Until we come up with something better */
@@ -850,7 +877,9 @@
if (SCpnt->host->hostt->eh_host_reset_handler == NULL) {
return FAILED;
}
+ spin_lock_irqsave(&io_request_lock, flags);
rtn = SCpnt->host->hostt->eh_host_reset_handler(SCpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
if (rtn == SUCCESS)
SCpnt->eh_state = SUCCESS;
@@ -859,9 +888,7 @@
* If we had a successful host reset, mark the command blocks to expect
* a condition code of unit attention.
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(HOST_RESET_SETTLE_TIME);
- spin_lock_irq(&io_request_lock);
if (SCpnt->eh_state == SUCCESS) {
Scsi_Device *SDloop;
for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) {
@@ -1258,7 +1285,7 @@
Scsi_Cmnd *SCdone;
int timed_out;
- ASSERT_LOCK(&io_request_lock, 1);
+ ASSERT_LOCK(&io_request_lock, 0);
SCdone = NULL;
@@ -1539,14 +1566,7 @@
* is the case, we are worrying about nothing here.
*/
- /*
- * Due to the spinlock, we will never get out of this
- * loop without a proper wait (DB)
- */
- spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
- spin_lock_irq(&io_request_lock);
-
goto next_device;
}
}
@@ -1638,9 +1658,7 @@
* Due to the spinlock, we will never get out of this
* loop without a proper wait. (DB)
*/
- spin_unlock_irq(&io_request_lock);
scsi_sleep(1 * HZ);
- spin_lock_irq(&io_request_lock);
goto next_device2;
}
@@ -1784,7 +1802,6 @@
struct Scsi_Host *host = (struct Scsi_Host *) data;
int rtn;
DECLARE_MUTEX_LOCKED(sem);
- unsigned long flags;
siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
@@ -1828,7 +1845,6 @@
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler waking up\n"));
- spin_lock_irqsave(&io_request_lock, flags);
host->eh_active = 1;
/*
@@ -1843,9 +1859,6 @@
}
host->eh_active = 0;
-
- /* The spinlock is really needed up to this point. (DB) */
- spin_unlock_irqrestore(&io_request_lock, flags);
/*
* Note - if the above fails completely, the action is to take