842         rq = get_request(q, rw_flags, bio, GFP_NOIO);
843         while (!rq) {
844                 DEFINE_WAIT(wait);
845                 struct io_context *ioc;
846                 struct request_list *rl = &q->rq;
847
848                 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
849                                 TASK_UNINTERRUPTIBLE);
850
851                 blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
852
853                 __generic_unplug_device(q);
854                 spin_unlock_irq(q->queue_lock);
855                 io_schedule();
856
857                 /*
858                  * After sleeping, we become a "batching" process and
859                  * will be able to allocate at least one request, and
860                  * up to a big batch of them for a small period time.
861                  * See ioc_batching, ioc_set_batching
862                  */
863                 ioc = current_io_context(GFP_NOIO, q->node);
864                 ioc_set_batching(q, ioc);
865
866                 spin_lock_irq(q->queue_lock);
867                 finish_wait(&rl->wait[rw], &wait);
868
869                 rq = get_request(q, rw_flags, bio, GFP_NOIO);
870         };
871
872         return rq;
873 }
874
875 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
876 {
877         struct request *rq;
878
879         BUG_ON(rw != READ && rw != WRITE);
880
881         spin_lock_irq(q->queue_lock);
882         if (gfp_mask & __GFP_WAIT) {