@@ -232,7 +232,9 @@
 
 	/* 1/64k is granular enough and can easily be handled w/ u32 */
 	WEIGHT_ONE = 1 << 16,
+};
 
+enum {
 	/*
 	 * As vtime is used to calculate the cost of each IO, it needs to
 	 * be fairly high precision. For example, it should be able to
@@ -256,6 +258,11 @@
 	VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
 	VRATE_CLAMP_ADJ_PCT = 4,
 
+	/* switch iff the conditions are met for longer than this */
+	AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
+};
+
+enum {
 	/* if IOs end up waiting for requests, issue less */
 	RQ_WAIT_BUSY_PCT = 5,
 
@@ -293,9 +300,6 @@
 
 	/* don't let cmds which take a very long time pin lagging for too long */
 	MAX_LAGGING_PERIODS = 10,
-
-	/* switch iff the conditions are met for longer than this */
-	AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
 
 	/*
 	 * Count IO size in 4k pages. The 12bit shift helps keeping
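The three hunks above regroup the constants: AUTOP_CYCLE_NSEC moves into the same enum as the vtime and vrate values, fenced off from the small percentage constants on either side. The likely motivation (inferred from the values, not stated in the diff) is that 10LLU * NSEC_PER_SEC is 10^10 and cannot be represented in a 32-bit int, so keeping it next to int-sized constants forces the whole enum wide and draws warnings from checkers such as sparse. A minimal standalone sketch of that width effect follows; the NSEC_PER_SEC value and the compiler-extension behavior are assumptions, and nothing in it is kernel code.

/*
 * Standalone sketch (not kernel code) of the width problem the
 * regrouping appears to address. NSEC_PER_SEC's value is assumed; the
 * enumerator names just mirror the hunks above. gcc and clang accept an
 * enumerator wider than int as an extension and give it a 64-bit type,
 * so mixing it with int-sized enumerators in one enum widens all of
 * them.
 */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL	/* assumed: 10^9, as in the kernel */

enum {
	RQ_WAIT_BUSY_PCT = 5,			/* fits comfortably in int */
};

enum {
	AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, /* 10^10, needs 64 bits */
};

int main(void)
{
	printf("sizeof(RQ_WAIT_BUSY_PCT) = %zu\n", sizeof(RQ_WAIT_BUSY_PCT));
	printf("sizeof(AUTOP_CYCLE_NSEC) = %zu\n", sizeof(AUTOP_CYCLE_NSEC));
	return 0;
}

On a typical LP64 build this prints 4 and 8; keeping the wide constant in its own enum confines the 64-bit underlying type to the values that actually need it.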
@@ -872,9 +876,14 @@
 
 	*page = *seqio = *randio = 0;
 
-	if (bps)
-		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
-					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
+	if (bps) {
+		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
+
+		if (bps_pages)
+			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
+		else
+			*page = 1;
+	}
 
 	if (seqiops) {
 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
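The hunk above splits a one-expression computation, where the result of DIV_ROUND_UP_ULL() fed straight into DIV64_U64_ROUND_UP() as the divisor, into a guarded two-step form. The point of the guard: round-up division of the usual (n + d - 1) / d shape wraps around for n near U64_MAX and returns 0, and the old code then divided by that 0. A standalone sketch of the wraparound, with the macro body and the IOC_PAGE_SIZE value assumed to mirror the kernel's definitions:

/*
 * Standalone sketch of the wraparound the new bps_pages check guards
 * against. IOC_PAGE_SIZE's value and the macro body are assumptions
 * mirroring the usual kernel definitions; this is not the kernel's code.
 */
#include <stdint.h>
#include <stdio.h>

#define IOC_PAGE_SIZE		4096ULL	/* assumed: 4k, per the "4k pages" comment */
#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* a user-configurable bps value close to U64_MAX */
	uint64_t bps = UINT64_MAX;
	uint64_t bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);

	/* n + d - 1 wrapped, so the "rounded up" quotient is 0 */
	printf("bps_pages = %llu\n", (unsigned long long)bps_pages);
	return 0;
}

With the guard, such a degenerate configuration falls back to *page = 1, the smallest per-page cost, instead of crashing.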
@@ -2417,6 +2426,7 @@
 	u32 hwi, adj_step;
 	s64 margin;
 	u64 cost, new_inuse;
+	unsigned long flags;
 
 	current_hweight(iocg, NULL, &hwi);
 	old_hwi = hwi;
@@ -2435,11 +2445,11 @@
 		 iocg->inuse == iocg->active)
 		return cost;
 
-	spin_lock_irq(&ioc->lock);
+	spin_lock_irqsave(&ioc->lock, flags);
 
 	/* we own inuse only when @iocg is in the normal active state */
 	if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		return cost;
 	}
 
@@ -2460,7 +2470,7 @@
 	} while (time_after64(vtime + cost, now->vnow) &&
 		 iocg->inuse != iocg->active);
 
-	spin_unlock_irq(&ioc->lock);
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	TRACE_IOCG_PATH(inuse_adjust, iocg, now,
 			old_inuse, iocg->inuse, old_hwi, hwi);
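The last three hunks convert the locking from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore pair, with the new flags local from the declaration hunk carrying the saved state. The distinction is standard kernel locking semantics, though the diff itself doesn't spell out the offending call path: spin_unlock_irq() re-enables interrupts unconditionally, which is wrong if this function can be entered with interrupts already disabled, while spin_unlock_irqrestore() reinstates exactly what spin_lock_irqsave() captured. A toy userspace model of that difference; the mock macros are mine, not the kernel's implementations:

/*
 * Toy model (mock macros, not the kernel's implementations) of the
 * save/restore semantics behind the hunks above. A bool stands in for
 * the CPU interrupt-enable flag; contention is not modeled.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;

#define spin_lock_irq(l)		((void)(l), irqs_enabled = false)
#define spin_unlock_irq(l)		((void)(l), irqs_enabled = true)
#define spin_lock_irqsave(l, f)		((void)(l), (f) = irqs_enabled, irqs_enabled = false)
#define spin_unlock_irqrestore(l, f)	((void)(l), irqs_enabled = (f))

int main(void)
{
	int lock;	/* dummy lock */
	bool flags;

	/* caller enters with interrupts already disabled */
	irqs_enabled = false;
	spin_lock_irq(&lock);
	spin_unlock_irq(&lock);
	printf("_irq pair:    irqs_enabled=%d (wrongly re-enabled)\n", irqs_enabled);

	irqs_enabled = false;
	spin_lock_irqsave(&lock, flags);
	spin_unlock_irqrestore(&lock, flags);
	printf("irqsave pair: irqs_enabled=%d (caller's state kept)\n", irqs_enabled);
	return 0;
}

In the real API flags is an unsigned long holding the architecture-specific saved interrupt state, and the usual rule of thumb applies: use the irqsave variants whenever the function may be reached with interrupts off.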
---|