Lines Matching refs:cqr
641 struct dasd_ccw_req *cqr, in dasd_profile_start() argument
676 device = cqr->startdev; in dasd_profile_start()
751 struct dasd_ccw_req *cqr, in dasd_profile_end() argument
761 device = cqr->startdev; in dasd_profile_end()
768 if (!cqr->buildclk || !cqr->startclk || in dasd_profile_end()
769 !cqr->stopclk || !cqr->endclk || in dasd_profile_end()
773 strtime = ((cqr->startclk - cqr->buildclk) >> 12); in dasd_profile_end()
774 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); in dasd_profile_end()
775 endtime = ((cqr->endclk - cqr->stopclk) >> 12); in dasd_profile_end()
776 tottime = ((cqr->endclk - cqr->buildclk) >> 12); in dasd_profile_end()
795 cqr->startdev != block->base, in dasd_profile_end()
796 cqr->cpmode == 1, in dasd_profile_end()
813 cqr->startdev != block->base, in dasd_profile_end()
814 cqr->cpmode == 1, in dasd_profile_end()
831 cqr->startdev != block->base, in dasd_profile_end()
832 cqr->cpmode == 1, in dasd_profile_end()
1092 #define dasd_profile_start(block, cqr, req) do {} while (0) argument
1093 #define dasd_profile_end(block, cqr, req) do {} while (0) argument
1170 struct dasd_ccw_req *cqr) in dasd_smalloc_request() argument
1180 if (!cqr) in dasd_smalloc_request()
1181 size += (sizeof(*cqr) + 7L) & -8L; in dasd_smalloc_request()
1188 if (!cqr) { in dasd_smalloc_request()
1189 cqr = (void *) data; in dasd_smalloc_request()
1190 data += (sizeof(*cqr) + 7L) & -8L; in dasd_smalloc_request()
1192 memset(cqr, 0, sizeof(*cqr)); in dasd_smalloc_request()
1193 cqr->mem_chunk = chunk; in dasd_smalloc_request()
1195 cqr->cpaddr = data; in dasd_smalloc_request()
1197 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); in dasd_smalloc_request()
1200 cqr->data = data; in dasd_smalloc_request()
1201 memset(cqr->data, 0, datasize); in dasd_smalloc_request()
1203 cqr->magic = magic; in dasd_smalloc_request()
1204 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); in dasd_smalloc_request()
1206 return cqr; in dasd_smalloc_request()
1214 struct dasd_ccw_req *cqr; in dasd_fmalloc_request() local
1219 cqr_size = (sizeof(*cqr) + 7L) & -8L; in dasd_fmalloc_request()
1227 cqr = dasd_alloc_chunk(&device->ese_chunks, size); in dasd_fmalloc_request()
1229 if (!cqr) in dasd_fmalloc_request()
1231 memset(cqr, 0, sizeof(*cqr)); in dasd_fmalloc_request()
1232 data = (char *)cqr + cqr_size; in dasd_fmalloc_request()
1233 cqr->cpaddr = NULL; in dasd_fmalloc_request()
1235 cqr->cpaddr = data; in dasd_fmalloc_request()
1237 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); in dasd_fmalloc_request()
1239 cqr->data = NULL; in dasd_fmalloc_request()
1241 cqr->data = data; in dasd_fmalloc_request()
1242 memset(cqr->data, 0, datasize); in dasd_fmalloc_request()
1245 cqr->magic = magic; in dasd_fmalloc_request()
1246 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); in dasd_fmalloc_request()
1249 return cqr; in dasd_fmalloc_request()
1253 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_sfree_request() argument
1258 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); in dasd_sfree_request()
1264 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) in dasd_ffree_request() argument
1269 dasd_free_chunk(&device->ese_chunks, cqr); in dasd_ffree_request()
1278 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) in dasd_check_cqr() argument
1282 if (cqr == NULL) in dasd_check_cqr()
1284 device = cqr->startdev; in dasd_check_cqr()
1285 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { in dasd_check_cqr()
1289 cqr->magic, in dasd_check_cqr()
1302 int dasd_term_IO(struct dasd_ccw_req *cqr) in dasd_term_IO() argument
1309 rc = dasd_check_cqr(cqr); in dasd_term_IO()
1313 device = (struct dasd_device *) cqr->startdev; in dasd_term_IO()
1314 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { in dasd_term_IO()
1315 rc = ccw_device_clear(device->cdev, (long) cqr); in dasd_term_IO()
1318 cqr->status = DASD_CQR_CLEAR_PENDING; in dasd_term_IO()
1319 cqr->stopclk = get_tod_clock(); in dasd_term_IO()
1320 cqr->starttime = 0; in dasd_term_IO()
1323 cqr); in dasd_term_IO()
1334 cqr->status = DASD_CQR_CLEARED; in dasd_term_IO()
1335 cqr->stopclk = get_tod_clock(); in dasd_term_IO()
1336 cqr->starttime = 0; in dasd_term_IO()
1338 cqr->retries = -1; in dasd_term_IO()
1363 int dasd_start_IO(struct dasd_ccw_req *cqr) in dasd_start_IO() argument
1370 rc = dasd_check_cqr(cqr); in dasd_start_IO()
1372 cqr->intrc = rc; in dasd_start_IO()
1375 device = (struct dasd_device *) cqr->startdev; in dasd_start_IO()
1376 if (((cqr->block && in dasd_start_IO()
1377 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || in dasd_start_IO()
1379 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { in dasd_start_IO()
1381 "because of stolen lock", cqr); in dasd_start_IO()
1382 cqr->status = DASD_CQR_ERROR; in dasd_start_IO()
1383 cqr->intrc = -EPERM; in dasd_start_IO()
1386 if (cqr->retries < 0) { in dasd_start_IO()
1388 sprintf(errorstring, "14 %p", cqr); in dasd_start_IO()
1391 cqr->status = DASD_CQR_ERROR; in dasd_start_IO()
1394 cqr->startclk = get_tod_clock(); in dasd_start_IO()
1395 cqr->starttime = jiffies; in dasd_start_IO()
1396 cqr->retries--; in dasd_start_IO()
1397 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { in dasd_start_IO()
1398 cqr->lpm &= dasd_path_get_opm(device); in dasd_start_IO()
1399 if (!cqr->lpm) in dasd_start_IO()
1400 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1406 if (cqr->block) in dasd_start_IO()
1407 cqr->trkcount = atomic_read(&cqr->block->trkcount); in dasd_start_IO()
1409 if (cqr->cpmode == 1) { in dasd_start_IO()
1410 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1411 (long) cqr, cqr->lpm); in dasd_start_IO()
1413 rc = ccw_device_start(device->cdev, cqr->cpaddr, in dasd_start_IO()
1414 (long) cqr, cqr->lpm, 0); in dasd_start_IO()
1418 cqr->status = DASD_CQR_IN_IO; in dasd_start_IO()
1432 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { in dasd_start_IO()
1435 cqr->lpm); in dasd_start_IO()
1436 } else if (cqr->lpm != dasd_path_get_opm(device)) { in dasd_start_IO()
1437 cqr->lpm = dasd_path_get_opm(device); in dasd_start_IO()
1456 dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO); in dasd_start_IO()
1476 cqr->intrc = rc; in dasd_start_IO()
1526 struct dasd_ccw_req *cqr; in dasd_handle_killed_request() local
1531 cqr = (struct dasd_ccw_req *) intparm; in dasd_handle_killed_request()
1532 if (cqr->status != DASD_CQR_IN_IO) { in dasd_handle_killed_request()
1535 "%02x", cqr->status); in dasd_handle_killed_request()
1546 if (!cqr->startdev || in dasd_handle_killed_request()
1547 device != cqr->startdev || in dasd_handle_killed_request()
1548 strncmp(cqr->startdev->discipline->ebcname, in dasd_handle_killed_request()
1549 (char *) &cqr->magic, 4)) { in dasd_handle_killed_request()
1557 cqr->status = DASD_CQR_QUEUED; in dasd_handle_killed_request()
1622 struct dasd_ccw_req *cqr, *next, *fcqr; in dasd_int_handler() local
1631 cqr = (struct dasd_ccw_req *) intparm; in dasd_int_handler()
1635 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { in dasd_int_handler()
1636 device = cqr->startdev; in dasd_int_handler()
1637 cqr->status = DASD_CQR_CLEARED; in dasd_int_handler()
1659 if (!cqr || in dasd_int_handler()
1662 if (cqr) in dasd_int_handler()
1663 memcpy(&cqr->irb, irb, sizeof(*irb)); in dasd_int_handler()
1682 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); in dasd_int_handler()
1684 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); in dasd_int_handler()
1691 dasd_generic_space_exhaust(device, cqr); in dasd_int_handler()
1692 device->discipline->ext_pool_exhaust(device, cqr); in dasd_int_handler()
1701 device->discipline->dump_sense(device, cqr, irb); in dasd_int_handler()
1702 device->discipline->check_for_device_change(device, cqr, irb); in dasd_int_handler()
1716 if (!cqr) in dasd_int_handler()
1719 device = (struct dasd_device *) cqr->startdev; in dasd_int_handler()
1721 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { in dasd_int_handler()
1727 if (dasd_ese_needs_format(cqr->block, irb)) { in dasd_int_handler()
1728 req = dasd_get_callback_data(cqr); in dasd_int_handler()
1730 cqr->status = DASD_CQR_ERROR; in dasd_int_handler()
1734 device->discipline->ese_read(cqr, irb); in dasd_int_handler()
1735 cqr->status = DASD_CQR_SUCCESS; in dasd_int_handler()
1736 cqr->stopclk = now; in dasd_int_handler()
1741 fcqr = device->discipline->ese_format(device, cqr, irb); in dasd_int_handler()
1744 cqr->status = DASD_CQR_ERROR; in dasd_int_handler()
1751 cqr->status = DASD_CQR_QUEUED; in dasd_int_handler()
1756 cqr->status = DASD_CQR_QUEUED; in dasd_int_handler()
1764 if (cqr->status == DASD_CQR_CLEAR_PENDING && in dasd_int_handler()
1766 cqr->status = DASD_CQR_CLEARED; in dasd_int_handler()
1774 if (cqr->status != DASD_CQR_IN_IO) { in dasd_int_handler()
1776 "status %02x", dev_name(&cdev->dev), cqr->status); in dasd_int_handler()
1785 cqr->status = DASD_CQR_SUCCESS; in dasd_int_handler()
1786 cqr->stopclk = now; in dasd_int_handler()
1788 if (cqr->devlist.next != &device->ccw_queue) { in dasd_int_handler()
1789 next = list_entry(cqr->devlist.next, in dasd_int_handler()
1797 if (cqr->cpmode && dasd_check_hpf_error(irb) && in dasd_int_handler()
1804 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && in dasd_int_handler()
1805 cqr->retries > 0) { in dasd_int_handler()
1806 if (cqr->lpm == dasd_path_get_opm(device)) in dasd_int_handler()
1810 cqr->retries); in dasd_int_handler()
1811 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) in dasd_int_handler()
1812 cqr->lpm = dasd_path_get_opm(device); in dasd_int_handler()
1813 cqr->status = DASD_CQR_QUEUED; in dasd_int_handler()
1814 next = cqr; in dasd_int_handler()
1816 cqr->status = DASD_CQR_ERROR; in dasd_int_handler()
1862 struct dasd_ccw_req *cqr; in __dasd_device_recovery() local
1871 cqr = list_entry(l, struct dasd_ccw_req, devlist); in __dasd_device_recovery()
1872 if (cqr->status == DASD_CQR_QUEUED && in __dasd_device_recovery()
1873 ref_cqr->block == cqr->block) { in __dasd_device_recovery()
1874 cqr->status = DASD_CQR_CLEARED; in __dasd_device_recovery()
1887 struct dasd_ccw_req *cqr; in __dasd_device_process_ccw_queue() local
1891 cqr = list_entry(l, struct dasd_ccw_req, devlist); in __dasd_device_process_ccw_queue()
1894 if (cqr->status == DASD_CQR_QUEUED || in __dasd_device_process_ccw_queue()
1895 cqr->status == DASD_CQR_IN_IO || in __dasd_device_process_ccw_queue()
1896 cqr->status == DASD_CQR_CLEAR_PENDING) in __dasd_device_process_ccw_queue()
1898 if (cqr->status == DASD_CQR_ERROR) { in __dasd_device_process_ccw_queue()
1899 __dasd_device_recovery(device, cqr); in __dasd_device_process_ccw_queue()
1902 list_move_tail(&cqr->devlist, final_queue); in __dasd_device_process_ccw_queue()
1907 struct dasd_ccw_req *cqr) in __dasd_process_cqr() argument
1911 switch (cqr->status) { in __dasd_process_cqr()
1913 cqr->status = DASD_CQR_DONE; in __dasd_process_cqr()
1916 cqr->status = DASD_CQR_NEED_ERP; in __dasd_process_cqr()
1919 cqr->status = DASD_CQR_TERMINATED; in __dasd_process_cqr()
1923 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); in __dasd_process_cqr()
1929 if (cqr->callback) in __dasd_process_cqr()
1930 cqr->callback(cqr, cqr->callback_data); in __dasd_process_cqr()
1941 struct dasd_ccw_req *cqr; in __dasd_device_process_final_queue() local
1945 cqr = list_entry(l, struct dasd_ccw_req, devlist); in __dasd_device_process_final_queue()
1946 list_del_init(&cqr->devlist); in __dasd_device_process_final_queue()
1947 block = cqr->block; in __dasd_device_process_final_queue()
1949 __dasd_process_cqr(device, cqr); in __dasd_device_process_final_queue()
1952 __dasd_process_cqr(device, cqr); in __dasd_device_process_final_queue()
1962 struct dasd_ccw_req *cqr) in __dasd_device_check_autoquiesce_timeout() argument
1964 if ((device->default_retries - cqr->retries) >= device->aq_timeouts) in __dasd_device_check_autoquiesce_timeout()
1965 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS); in __dasd_device_check_autoquiesce_timeout()
1974 struct dasd_ccw_req *cqr; in __dasd_device_check_expire() local
1978 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_check_expire()
1979 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && in __dasd_device_check_expire()
1980 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { in __dasd_device_check_expire()
1986 cqr->retries++; in __dasd_device_check_expire()
1988 if (device->discipline->term_IO(cqr) != 0) { in __dasd_device_check_expire()
1993 cqr, (cqr->expires/HZ)); in __dasd_device_check_expire()
1994 cqr->expires += 5*HZ; in __dasd_device_check_expire()
1999 "remaining\n", cqr, (cqr->expires/HZ), in __dasd_device_check_expire()
2000 cqr->retries); in __dasd_device_check_expire()
2002 __dasd_device_check_autoquiesce_timeout(device, cqr); in __dasd_device_check_expire()
2010 struct dasd_ccw_req *cqr) in __dasd_device_is_unusable() argument
2027 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { in __dasd_device_is_unusable()
2043 struct dasd_ccw_req *cqr; in __dasd_device_start_head() local
2048 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in __dasd_device_start_head()
2049 if (cqr->status != DASD_CQR_QUEUED) in __dasd_device_start_head()
2052 if (__dasd_device_is_unusable(device, cqr)) { in __dasd_device_start_head()
2053 cqr->intrc = -EAGAIN; in __dasd_device_start_head()
2054 cqr->status = DASD_CQR_CLEARED; in __dasd_device_start_head()
2059 rc = device->discipline->start_IO(cqr); in __dasd_device_start_head()
2061 dasd_device_set_timer(device, cqr->expires); in __dasd_device_start_head()
2106 struct dasd_ccw_req *cqr, *n; in dasd_flush_device_queue() local
2113 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { in dasd_flush_device_queue()
2115 switch (cqr->status) { in dasd_flush_device_queue()
2117 rc = device->discipline->term_IO(cqr); in dasd_flush_device_queue()
2122 "failed for request %p\n", cqr); in dasd_flush_device_queue()
2128 cqr->stopclk = get_tod_clock(); in dasd_flush_device_queue()
2129 cqr->status = DASD_CQR_CLEARED; in dasd_flush_device_queue()
2134 list_move_tail(&cqr->devlist, &flush_queue); in dasd_flush_device_queue()
2143 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) in dasd_flush_device_queue()
2145 (cqr->status != DASD_CQR_CLEAR_PENDING)); in dasd_flush_device_queue()
2214 void dasd_add_request_head(struct dasd_ccw_req *cqr) in dasd_add_request_head() argument
2219 device = cqr->startdev; in dasd_add_request_head()
2221 cqr->status = DASD_CQR_QUEUED; in dasd_add_request_head()
2222 list_add(&cqr->devlist, &device->ccw_queue); in dasd_add_request_head()
2233 void dasd_add_request_tail(struct dasd_ccw_req *cqr) in dasd_add_request_tail() argument
2238 device = cqr->startdev; in dasd_add_request_tail()
2240 cqr->status = DASD_CQR_QUEUED; in dasd_add_request_tail()
2241 list_add_tail(&cqr->devlist, &device->ccw_queue); in dasd_add_request_tail()
2251 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) in dasd_wakeup_cb() argument
2253 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); in dasd_wakeup_cb()
2254 cqr->callback_data = DASD_SLEEPON_END_TAG; in dasd_wakeup_cb()
2255 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); in dasd_wakeup_cb()
2260 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) in _wait_for_wakeup() argument
2265 device = cqr->startdev; in _wait_for_wakeup()
2267 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); in _wait_for_wakeup()
2275 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) in __dasd_sleep_on_erp() argument
2280 if (cqr->status == DASD_CQR_FILLED) in __dasd_sleep_on_erp()
2282 device = cqr->startdev; in __dasd_sleep_on_erp()
2283 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { in __dasd_sleep_on_erp()
2284 if (cqr->status == DASD_CQR_TERMINATED) { in __dasd_sleep_on_erp()
2285 device->discipline->handle_terminated_request(cqr); in __dasd_sleep_on_erp()
2288 if (cqr->status == DASD_CQR_NEED_ERP) { in __dasd_sleep_on_erp()
2289 erp_fn = device->discipline->erp_action(cqr); in __dasd_sleep_on_erp()
2290 erp_fn(cqr); in __dasd_sleep_on_erp()
2293 if (cqr->status == DASD_CQR_FAILED) in __dasd_sleep_on_erp()
2294 dasd_log_sense(cqr, &cqr->irb); in __dasd_sleep_on_erp()
2295 if (cqr->refers) { in __dasd_sleep_on_erp()
2296 __dasd_process_erp(device, cqr); in __dasd_sleep_on_erp()
2303 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) in __dasd_sleep_on_loop_condition() argument
2305 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { in __dasd_sleep_on_loop_condition()
2306 if (cqr->refers) /* erp is not done yet */ in __dasd_sleep_on_loop_condition()
2308 return ((cqr->status != DASD_CQR_DONE) && in __dasd_sleep_on_loop_condition()
2309 (cqr->status != DASD_CQR_FAILED)); in __dasd_sleep_on_loop_condition()
2311 return (cqr->status == DASD_CQR_FILLED); in __dasd_sleep_on_loop_condition()
2319 struct dasd_ccw_req *cqr; in _dasd_sleep_on() local
2325 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); in _dasd_sleep_on()
2326 cqr = list_first_entry(&ccw_queue, in _dasd_sleep_on()
2329 if (__dasd_sleep_on_erp(cqr)) in _dasd_sleep_on()
2331 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ in _dasd_sleep_on()
2334 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { in _dasd_sleep_on()
2335 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on()
2336 cqr->intrc = -EPERM; in _dasd_sleep_on()
2341 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && in _dasd_sleep_on()
2343 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on()
2344 cqr->intrc = -ENOLINK; in _dasd_sleep_on()
2352 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on()
2353 cqr->intrc = -ENODEV; in _dasd_sleep_on()
2360 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { in _dasd_sleep_on()
2365 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on()
2372 if (!cqr->callback) in _dasd_sleep_on()
2373 cqr->callback = dasd_wakeup_cb; in _dasd_sleep_on()
2375 cqr->callback_data = DASD_SLEEPON_START_TAG; in _dasd_sleep_on()
2376 dasd_add_request_tail(cqr); in _dasd_sleep_on()
2379 generic_waitq, _wait_for_wakeup(cqr)); in _dasd_sleep_on()
2381 dasd_cancel_req(cqr); in _dasd_sleep_on()
2384 _wait_for_wakeup(cqr)); in _dasd_sleep_on()
2385 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on()
2390 wait_event(generic_waitq, _wait_for_wakeup(cqr)); in _dasd_sleep_on()
2408 struct dasd_ccw_req *cqr; in _wait_for_wakeup_queue() local
2410 list_for_each_entry(cqr, ccw_queue, blocklist) { in _wait_for_wakeup_queue()
2411 if (cqr->callback_data != DASD_SLEEPON_END_TAG) in _wait_for_wakeup_queue()
2421 struct dasd_ccw_req *cqr, *n; in _dasd_sleep_on_queue() local
2426 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { in _dasd_sleep_on_queue()
2427 device = cqr->startdev; in _dasd_sleep_on_queue()
2428 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ in _dasd_sleep_on_queue()
2432 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { in _dasd_sleep_on_queue()
2433 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on_queue()
2434 cqr->intrc = -EPERM; in _dasd_sleep_on_queue()
2439 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && in _dasd_sleep_on_queue()
2441 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on_queue()
2442 cqr->intrc = -EAGAIN; in _dasd_sleep_on_queue()
2451 cqr->status = DASD_CQR_FAILED; in _dasd_sleep_on_queue()
2452 cqr->intrc = rc; in _dasd_sleep_on_queue()
2458 if (!cqr->callback) in _dasd_sleep_on_queue()
2459 cqr->callback = dasd_wakeup_cb; in _dasd_sleep_on_queue()
2460 cqr->callback_data = DASD_SLEEPON_START_TAG; in _dasd_sleep_on_queue()
2461 dasd_add_request_tail(cqr); in _dasd_sleep_on_queue()
2467 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { in _dasd_sleep_on_queue()
2474 sense = dasd_get_sense(&cqr->irb); in _dasd_sleep_on_queue()
2476 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) in _dasd_sleep_on_queue()
2478 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && in _dasd_sleep_on_queue()
2479 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) in _dasd_sleep_on_queue()
2487 if (cqr->startdev != cqr->basedev && !cqr->refers && in _dasd_sleep_on_queue()
2488 (cqr->status == DASD_CQR_TERMINATED || in _dasd_sleep_on_queue()
2489 cqr->status == DASD_CQR_NEED_ERP)) in _dasd_sleep_on_queue()
2493 if (__dasd_sleep_on_erp(cqr)) in _dasd_sleep_on_queue()
2505 int dasd_sleep_on(struct dasd_ccw_req *cqr) in dasd_sleep_on() argument
2507 return _dasd_sleep_on(cqr, 0); in dasd_sleep_on()
2533 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) in dasd_sleep_on_interruptible() argument
2535 return _dasd_sleep_on(cqr, 1); in dasd_sleep_on_interruptible()
2547 struct dasd_ccw_req *cqr; in _dasd_term_running_cqr() local
2552 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); in _dasd_term_running_cqr()
2553 rc = device->discipline->term_IO(cqr); in _dasd_term_running_cqr()
2560 cqr->retries++; in _dasd_term_running_cqr()
2564 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) in dasd_sleep_on_immediatly() argument
2569 device = cqr->startdev; in dasd_sleep_on_immediatly()
2571 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { in dasd_sleep_on_immediatly()
2572 cqr->status = DASD_CQR_FAILED; in dasd_sleep_on_immediatly()
2573 cqr->intrc = -EPERM; in dasd_sleep_on_immediatly()
2582 cqr->callback = dasd_wakeup_cb; in dasd_sleep_on_immediatly()
2583 cqr->callback_data = DASD_SLEEPON_START_TAG; in dasd_sleep_on_immediatly()
2584 cqr->status = DASD_CQR_QUEUED; in dasd_sleep_on_immediatly()
2589 list_add(&cqr->devlist, device->ccw_queue.next); in dasd_sleep_on_immediatly()
2596 wait_event(generic_waitq, _wait_for_wakeup(cqr)); in dasd_sleep_on_immediatly()
2598 if (cqr->status == DASD_CQR_DONE) in dasd_sleep_on_immediatly()
2600 else if (cqr->intrc) in dasd_sleep_on_immediatly()
2601 rc = cqr->intrc; in dasd_sleep_on_immediatly()
2623 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) in __dasd_cancel_req() argument
2625 struct dasd_device *device = cqr->startdev; in __dasd_cancel_req()
2628 switch (cqr->status) { in __dasd_cancel_req()
2631 cqr->status = DASD_CQR_CLEARED; in __dasd_cancel_req()
2635 rc = device->discipline->term_IO(cqr); in __dasd_cancel_req()
2639 cqr, rc); in __dasd_cancel_req()
2641 cqr->stopclk = get_tod_clock(); in __dasd_cancel_req()
2651 int dasd_cancel_req(struct dasd_ccw_req *cqr) in dasd_cancel_req() argument
2653 struct dasd_device *device = cqr->startdev; in dasd_cancel_req()
2658 rc = __dasd_cancel_req(cqr); in dasd_cancel_req()
2711 struct dasd_ccw_req *cqr) in __dasd_process_erp() argument
2715 if (cqr->status == DASD_CQR_DONE) in __dasd_process_erp()
2719 erp_fn = device->discipline->erp_postaction(cqr); in __dasd_process_erp()
2720 erp_fn(cqr); in __dasd_process_erp()
2723 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) in __dasd_cleanup_cqr() argument
2730 req = (struct request *) cqr->callback_data; in __dasd_cleanup_cqr()
2731 dasd_profile_end(cqr->block, cqr, req); in __dasd_cleanup_cqr()
2733 proc_bytes = cqr->proc_bytes; in __dasd_cleanup_cqr()
2734 status = cqr->block->base->discipline->free_cp(cqr, req); in __dasd_cleanup_cqr()
2738 switch (cqr->intrc) { in __dasd_cleanup_cqr()
2790 struct dasd_ccw_req *cqr; in __dasd_process_block_ccw_queue() local
2798 cqr = list_entry(l, struct dasd_ccw_req, blocklist); in __dasd_process_block_ccw_queue()
2799 if (cqr->status != DASD_CQR_DONE && in __dasd_process_block_ccw_queue()
2800 cqr->status != DASD_CQR_FAILED && in __dasd_process_block_ccw_queue()
2801 cqr->status != DASD_CQR_NEED_ERP && in __dasd_process_block_ccw_queue()
2802 cqr->status != DASD_CQR_TERMINATED) in __dasd_process_block_ccw_queue()
2805 if (cqr->status == DASD_CQR_TERMINATED) { in __dasd_process_block_ccw_queue()
2806 base->discipline->handle_terminated_request(cqr); in __dasd_process_block_ccw_queue()
2811 if (cqr->status == DASD_CQR_NEED_ERP) { in __dasd_process_block_ccw_queue()
2812 erp_fn = base->discipline->erp_action(cqr); in __dasd_process_block_ccw_queue()
2813 if (IS_ERR(erp_fn(cqr))) in __dasd_process_block_ccw_queue()
2819 if (cqr->status == DASD_CQR_FAILED) { in __dasd_process_block_ccw_queue()
2820 dasd_log_sense(cqr, &cqr->irb); in __dasd_process_block_ccw_queue()
2827 if (cqr->status == DASD_CQR_FAILED && in __dasd_process_block_ccw_queue()
2828 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { in __dasd_process_block_ccw_queue()
2829 cqr->status = DASD_CQR_FILLED; in __dasd_process_block_ccw_queue()
2830 cqr->retries = 255; in __dasd_process_block_ccw_queue()
2837 if (cqr->refers) { in __dasd_process_block_ccw_queue()
2838 __dasd_process_erp(base, cqr); in __dasd_process_block_ccw_queue()
2843 cqr->endclk = get_tod_clock(); in __dasd_process_block_ccw_queue()
2844 list_move_tail(&cqr->blocklist, final_queue); in __dasd_process_block_ccw_queue()
2848 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) in dasd_return_cqr_cb() argument
2850 dasd_schedule_block_bh(cqr->block); in dasd_return_cqr_cb()
2855 struct dasd_ccw_req *cqr; in __dasd_block_start_head() local
2863 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { in __dasd_block_start_head()
2864 if (cqr->status != DASD_CQR_FILLED) in __dasd_block_start_head()
2867 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { in __dasd_block_start_head()
2868 cqr->status = DASD_CQR_FAILED; in __dasd_block_start_head()
2869 cqr->intrc = -EPERM; in __dasd_block_start_head()
2875 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && in __dasd_block_start_head()
2877 cqr->status = DASD_CQR_FAILED; in __dasd_block_start_head()
2878 cqr->intrc = -ENOLINK; in __dasd_block_start_head()
2887 if (!cqr->startdev) in __dasd_block_start_head()
2888 cqr->startdev = block->base; in __dasd_block_start_head()
2891 cqr->callback = dasd_return_cqr_cb; in __dasd_block_start_head()
2893 dasd_add_request_tail(cqr); in __dasd_block_start_head()
2907 struct dasd_ccw_req *cqr; in dasd_block_tasklet() local
2919 cqr = list_entry(l, struct dasd_ccw_req, blocklist); in dasd_block_tasklet()
2920 dq = cqr->dq; in dasd_block_tasklet()
2922 list_del_init(&cqr->blocklist); in dasd_block_tasklet()
2923 __dasd_cleanup_cqr(cqr); in dasd_block_tasklet()
2937 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) in _dasd_wake_block_flush_cb() argument
2946 static void _dasd_requeue_request(struct dasd_ccw_req *cqr) in _dasd_requeue_request() argument
2954 if (cqr->refers) in _dasd_requeue_request()
2956 spin_lock_irq(&cqr->dq->lock); in _dasd_requeue_request()
2957 req = (struct request *) cqr->callback_data; in _dasd_requeue_request()
2959 spin_unlock_irq(&cqr->dq->lock); in _dasd_requeue_request()
2967 struct dasd_ccw_req *cqr, *n; in _dasd_requests_to_flushqueue() local
2974 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { in _dasd_requests_to_flushqueue()
2976 if (cqr->status >= DASD_CQR_QUEUED) in _dasd_requests_to_flushqueue()
2977 rc = dasd_cancel_req(cqr); in _dasd_requests_to_flushqueue()
2985 cqr->callback = _dasd_wake_block_flush_cb; in _dasd_requests_to_flushqueue()
2986 for (i = 0; cqr; cqr = cqr->refers, i++) in _dasd_requests_to_flushqueue()
2987 list_move_tail(&cqr->blocklist, flush_queue); in _dasd_requests_to_flushqueue()
3004 struct dasd_ccw_req *cqr, *n; in dasd_flush_block_queue() local
3014 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { in dasd_flush_block_queue()
3015 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); in dasd_flush_block_queue()
3017 if (cqr->refers) { in dasd_flush_block_queue()
3019 __dasd_process_erp(block->base, cqr); in dasd_flush_block_queue()
3026 spin_lock_irqsave(&cqr->dq->lock, flags); in dasd_flush_block_queue()
3027 cqr->endclk = get_tod_clock(); in dasd_flush_block_queue()
3028 list_del_init(&cqr->blocklist); in dasd_flush_block_queue()
3029 __dasd_cleanup_cqr(cqr); in dasd_flush_block_queue()
3030 spin_unlock_irqrestore(&cqr->dq->lock, flags); in dasd_flush_block_queue()
3065 struct dasd_ccw_req *cqr; in do_dasd_request() local
3107 cqr = basedev->discipline->build_cp(basedev, block, req); in do_dasd_request()
3108 if (IS_ERR(cqr)) { in do_dasd_request()
3109 if (PTR_ERR(cqr) == -EBUSY || in do_dasd_request()
3110 PTR_ERR(cqr) == -ENOMEM || in do_dasd_request()
3111 PTR_ERR(cqr) == -EAGAIN) { in do_dasd_request()
3117 PTR_ERR(cqr), req); in do_dasd_request()
3125 cqr->callback_data = req; in do_dasd_request()
3126 cqr->status = DASD_CQR_FILLED; in do_dasd_request()
3127 cqr->dq = dq; in do_dasd_request()
3131 list_add_tail(&cqr->blocklist, &block->ccw_queue); in do_dasd_request()
3132 INIT_LIST_HEAD(&cqr->devlist); in do_dasd_request()
3133 dasd_profile_start(block, cqr, req); in do_dasd_request()
3154 struct dasd_ccw_req *cqr; in dasd_times_out() local
3158 cqr = blk_mq_rq_to_pdu(req); in dasd_times_out()
3159 if (!cqr) in dasd_times_out()
3162 spin_lock_irqsave(&cqr->dq->lock, flags); in dasd_times_out()
3163 device = cqr->startdev ? cqr->startdev : block->base; in dasd_times_out()
3165 spin_unlock_irqrestore(&cqr->dq->lock, flags); in dasd_times_out()
3170 cqr, cqr->status); in dasd_times_out()
3174 cqr->retries = -1; in dasd_times_out()
3175 cqr->intrc = -ETIMEDOUT; in dasd_times_out()
3176 if (cqr->status >= DASD_CQR_QUEUED) { in dasd_times_out()
3177 rc = __dasd_cancel_req(cqr); in dasd_times_out()
3178 } else if (cqr->status == DASD_CQR_FILLED || in dasd_times_out()
3179 cqr->status == DASD_CQR_NEED_ERP) { in dasd_times_out()
3180 cqr->status = DASD_CQR_TERMINATED; in dasd_times_out()
3181 } else if (cqr->status == DASD_CQR_IN_ERP) { in dasd_times_out()
3189 if (tmpcqr != cqr) in dasd_times_out()
3213 spin_unlock_irqrestore(&cqr->dq->lock, flags); in dasd_times_out()
3690 struct dasd_ccw_req *cqr; in dasd_generic_last_path_gone() local
3701 list_for_each_entry(cqr, &device->ccw_queue, devlist) in dasd_generic_last_path_gone()
3702 if ((cqr->status == DASD_CQR_IN_IO) || in dasd_generic_last_path_gone()
3703 (cqr->status == DASD_CQR_CLEAR_PENDING)) { in dasd_generic_last_path_gone()
3704 cqr->status = DASD_CQR_QUEUED; in dasd_generic_last_path_gone()
3705 cqr->retries++; in dasd_generic_last_path_gone()
3849 struct dasd_ccw_req *cqr) in dasd_generic_space_exhaust() argument
3857 if (cqr->status == DASD_CQR_IN_IO || in dasd_generic_space_exhaust()
3858 cqr->status == DASD_CQR_CLEAR_PENDING) { in dasd_generic_space_exhaust()
3859 cqr->status = DASD_CQR_QUEUED; in dasd_generic_space_exhaust()
3860 cqr->retries++; in dasd_generic_space_exhaust()
3893 struct dasd_ccw_req *cqr, *n; in dasd_generic_requeue_all_requests() local
3904 list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { in dasd_generic_requeue_all_requests()
3905 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); in dasd_generic_requeue_all_requests()
3907 if (cqr->refers) { in dasd_generic_requeue_all_requests()
3909 __dasd_process_erp(block->base, cqr); in dasd_generic_requeue_all_requests()
3916 _dasd_requeue_request(cqr); in dasd_generic_requeue_all_requests()
3917 list_del_init(&cqr->blocklist); in dasd_generic_requeue_all_requests()
3918 cqr->block->base->discipline->free_cp( in dasd_generic_requeue_all_requests()
3919 cqr, (struct request *) cqr->callback_data); in dasd_generic_requeue_all_requests()
3947 struct dasd_ccw_req *cqr, in dasd_handle_autoquiesce() argument
3952 dasd_eer_write(device, cqr, reason); in dasd_handle_autoquiesce()
3975 struct dasd_ccw_req *cqr; in dasd_generic_build_rdc() local
3978 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, in dasd_generic_build_rdc()
3981 if (IS_ERR(cqr)) { in dasd_generic_build_rdc()
3986 return cqr; in dasd_generic_build_rdc()
3989 ccw = cqr->cpaddr; in dasd_generic_build_rdc()
3991 ccw->cda = (__u32)virt_to_phys(cqr->data); in dasd_generic_build_rdc()
3994 cqr->startdev = device; in dasd_generic_build_rdc()
3995 cqr->memdev = device; in dasd_generic_build_rdc()
3996 cqr->expires = 10*HZ; in dasd_generic_build_rdc()
3997 cqr->retries = 256; in dasd_generic_build_rdc()
3998 cqr->buildclk = get_tod_clock(); in dasd_generic_build_rdc()
3999 cqr->status = DASD_CQR_FILLED; in dasd_generic_build_rdc()
4000 return cqr; in dasd_generic_build_rdc()
4008 struct dasd_ccw_req *cqr; in dasd_generic_read_dev_chars() local
4010 cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic); in dasd_generic_read_dev_chars()
4011 if (IS_ERR(cqr)) in dasd_generic_read_dev_chars()
4012 return PTR_ERR(cqr); in dasd_generic_read_dev_chars()
4014 ret = dasd_sleep_on(cqr); in dasd_generic_read_dev_chars()
4016 memcpy(rdc_buffer, cqr->data, rdc_buffer_size); in dasd_generic_read_dev_chars()
4017 dasd_sfree_request(cqr, cqr->memdev); in dasd_generic_read_dev_chars()