dm: disable interrupt when taking map_lock

This patch disables interrupts when taking map_lock to avoid
lockdep warnings in request-based dm.

request-based dm takes map_lock after taking queue_lock with
interrupts disabled:
spin_lock_irqsave(queue_lock)
q->request_fn() == dm_request_fn()
=> dm_get_table()
=> read_lock(map_lock)
while queue_lock could be (but isn't) taken in interrupt context.

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Acked-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Acked-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Kiyoshi Ueda and committed by Alasdair G Kergon (commit 523d9297, parent 5d67aa23)

+9 -6
drivers/md/dm.c
··· 512 struct dm_table *dm_get_table(struct mapped_device *md) 513 { 514 struct dm_table *t; 515 516 - read_lock(&md->map_lock); 517 t = md->map; 518 if (t) 519 dm_table_get(t); 520 - read_unlock(&md->map_lock); 521 522 return t; 523 } ··· 1911 { 1912 struct request_queue *q = md->queue; 1913 sector_t size; 1914 1915 size = dm_table_get_size(t); 1916 ··· 1942 1943 __bind_mempools(md, t); 1944 1945 - write_lock(&md->map_lock); 1946 md->map = t; 1947 dm_table_set_restrictions(t, q, limits); 1948 - write_unlock(&md->map_lock); 1949 1950 return 0; 1951 } ··· 1953 static void __unbind(struct mapped_device *md) 1954 { 1955 struct dm_table *map = md->map; 1956 1957 if (!map) 1958 return; 1959 1960 dm_table_event_callback(map, NULL, NULL); 1961 - write_lock(&md->map_lock); 1962 md->map = NULL; 1963 - write_unlock(&md->map_lock); 1964 dm_table_destroy(map); 1965 } 1966
··· 512 struct dm_table *dm_get_table(struct mapped_device *md) 513 { 514 struct dm_table *t; 515 + unsigned long flags; 516 517 + read_lock_irqsave(&md->map_lock, flags); 518 t = md->map; 519 if (t) 520 dm_table_get(t); 521 + read_unlock_irqrestore(&md->map_lock, flags); 522 523 return t; 524 } ··· 1910 { 1911 struct request_queue *q = md->queue; 1912 sector_t size; 1913 + unsigned long flags; 1914 1915 size = dm_table_get_size(t); 1916 ··· 1940 1941 __bind_mempools(md, t); 1942 1943 + write_lock_irqsave(&md->map_lock, flags); 1944 md->map = t; 1945 dm_table_set_restrictions(t, q, limits); 1946 + write_unlock_irqrestore(&md->map_lock, flags); 1947 1948 return 0; 1949 } ··· 1951 static void __unbind(struct mapped_device *md) 1952 { 1953 struct dm_table *map = md->map; 1954 + unsigned long flags; 1955 1956 if (!map) 1957 return; 1958 1959 dm_table_event_callback(map, NULL, NULL); 1960 + write_lock_irqsave(&md->map_lock, flags); 1961 md->map = NULL; 1962 + write_unlock_irqrestore(&md->map_lock, flags); 1963 dm_table_destroy(map); 1964 } 1965