Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: fix channel index enumeration

When the channel register code was changed to allow hotplug operations,
dynamic indexing wasn't taken into account. When channels are randomly
plugged and unplugged out of order, the serial indexing breaks. Convert
channel indexing to use IDA tracking in order to allow dynamic
assignment. The previous code does not cause any regression for existing
channel allocation, except in the idxd driver, since the hotplug usage
case is only exercised by idxd at this point.

With this change, chan->idr_ref is no longer needed. A device can be
left with no channels registered due to hot plug, so the channel device
release code should no longer attempt to free the dma device id on the
last channel release.

Fixes: e81274cd6b52 ("dmaengine: add support to dynamic register/unregister of channels")

Reported-by: Yixin Zhang <yixin.zhang@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Yixin Zhang <yixin.zhang@intel.com>
Link: https://lore.kernel.org/r/158679961260.7674.8485924270472851852.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Dave Jiang and committed by
Vinod Koul
08210094 0c894463

+28 -36
+26 -34
drivers/dma/dmaengine.c
··· 232 232 struct dma_chan_dev *chan_dev; 233 233 234 234 chan_dev = container_of(dev, typeof(*chan_dev), device); 235 - if (atomic_dec_and_test(chan_dev->idr_ref)) { 236 - ida_free(&dma_ida, chan_dev->dev_id); 237 - kfree(chan_dev->idr_ref); 238 - } 239 235 kfree(chan_dev); 240 236 } 241 237 ··· 1039 1043 } 1040 1044 1041 1045 static int __dma_async_device_channel_register(struct dma_device *device, 1042 - struct dma_chan *chan, 1043 - int chan_id) 1046 + struct dma_chan *chan) 1044 1047 { 1045 1048 int rc = 0; 1046 - int chancnt = device->chancnt; 1047 - atomic_t *idr_ref; 1048 - struct dma_chan *tchan; 1049 - 1050 - tchan = list_first_entry_or_null(&device->channels, 1051 - struct dma_chan, device_node); 1052 - if (!tchan) 1053 - return -ENODEV; 1054 - 1055 - if (tchan->dev) { 1056 - idr_ref = tchan->dev->idr_ref; 1057 - } else { 1058 - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 1059 - if (!idr_ref) 1060 - return -ENOMEM; 1061 - atomic_set(idr_ref, 0); 1062 - } 1063 1049 1064 1050 chan->local = alloc_percpu(typeof(*chan->local)); 1065 1051 if (!chan->local) ··· 1057 1079 * When the chan_id is a negative value, we are dynamically adding 1058 1080 * the channel. Otherwise we are static enumerating. 1059 1081 */ 1060 - chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; 1082 + mutex_lock(&device->chan_mutex); 1083 + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); 1084 + mutex_unlock(&device->chan_mutex); 1085 + if (chan->chan_id < 0) { 1086 + pr_err("%s: unable to alloc ida for chan: %d\n", 1087 + __func__, chan->chan_id); 1088 + goto err_out; 1089 + } 1090 + 1061 1091 chan->dev->device.class = &dma_devclass; 1062 1092 chan->dev->device.parent = device->dev; 1063 1093 chan->dev->chan = chan; 1064 - chan->dev->idr_ref = idr_ref; 1065 1094 chan->dev->dev_id = device->dev_id; 1066 - atomic_inc(idr_ref); 1067 1095 dev_set_name(&chan->dev->device, "dma%dchan%d", 1068 1096 device->dev_id, chan->chan_id); 1069 - 1070 1097 rc = device_register(&chan->dev->device); 1071 1098 if (rc) 1072 - goto err_out; 1099 + goto err_out_ida; 1073 1100 chan->client_count = 0; 1074 - device->chancnt = chan->chan_id + 1; 1101 + device->chancnt++; 1075 1102 1076 1103 return 0; 1077 1104 1105 + err_out_ida: 1106 + mutex_lock(&device->chan_mutex); 1107 + ida_free(&device->chan_ida, chan->chan_id); 1108 + mutex_unlock(&device->chan_mutex); 1078 1109 err_out: 1079 1110 free_percpu(chan->local); 1080 1111 kfree(chan->dev); 1081 - if (atomic_dec_return(idr_ref) == 0) 1082 - kfree(idr_ref); 1083 1112 return rc; 1084 1113 } 1085 1114 ··· 1095 1110 { 1096 1111 int rc; 1097 1112 1098 - rc = __dma_async_device_channel_register(device, chan, -1); 1113 + rc = __dma_async_device_channel_register(device, chan); 1099 1114 if (rc < 0) 1100 1115 return rc; 1101 1116 ··· 1115 1130 device->chancnt--; 1116 1131 chan->dev->chan = NULL; 1117 1132 mutex_unlock(&dma_list_mutex); 1133 + mutex_lock(&device->chan_mutex); 1134 + ida_free(&device->chan_ida, chan->chan_id); 1135 + mutex_unlock(&device->chan_mutex); 1118 1136 device_unregister(&chan->dev->device); 1119 1137 free_percpu(chan->local); 1120 1138 } ··· 1140 1152 */ 1141 1153 int dma_async_device_register(struct dma_device *device) 1142 1154 { 1143 - int rc, i = 0; 1155 + int rc; 1144 1156 struct 
dma_chan* chan; 1145 1157 1146 1158 if (!device) ··· 1245 1257 if (rc != 0) 1246 1258 return rc; 1247 1259 1260 + mutex_init(&device->chan_mutex); 1261 + ida_init(&device->chan_ida); 1262 + 1248 1263 /* represent channels in sysfs. Probably want devs too */ 1249 1264 list_for_each_entry(chan, &device->channels, device_node) { 1250 - rc = __dma_async_device_channel_register(device, chan, i++); 1265 + rc = __dma_async_device_channel_register(device, chan); 1251 1266 if (rc < 0) 1252 1267 goto err_out; 1253 1268 } ··· 1325 1334 */ 1326 1335 dma_cap_set(DMA_PRIVATE, device->cap_mask); 1327 1336 dma_channel_rebalance(); 1337 + ida_free(&dma_ida, device->dev_id); 1328 1338 dma_device_put(device); 1329 1339 mutex_unlock(&dma_list_mutex); 1330 1340 }
+2 -2
include/linux/dmaengine.h
··· 341 341 * @chan: driver channel device 342 342 * @device: sysfs device 343 343 * @dev_id: parent dma_device dev_id 344 - * @idr_ref: reference count to gate release of dma_device dev_id 345 344 */ 346 345 struct dma_chan_dev { 347 346 struct dma_chan *chan; 348 347 struct device device; 349 348 int dev_id; 350 - atomic_t *idr_ref; 351 349 }; 352 350 353 351 /** ··· 833 835 int dev_id; 834 836 struct device *dev; 835 837 struct module *owner; 838 + struct ida chan_ida; 839 + struct mutex chan_mutex; /* to protect chan_ida */ 836 840 837 841 u32 src_addr_widths; 838 842 u32 dst_addr_widths;