Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "Basic inline encryption support for ufs-exynos"

Eric Biggers <ebiggers@kernel.org> says:

Add support for Flash Memory Protector (FMP), which is the inline
encryption hardware on Exynos and Exynos-based SoCs.

Specifically, add support for the "traditional FMP mode" that works on
many Exynos-based SoCs including gs101. This is the mode that uses
"software keys" and is compatible with the upstream kernel's existing
inline encryption framework in the block and filesystem layers. I
plan to add support for wrapped keys on gs101 at a later
time.

Tested on gs101 (specifically Pixel 6) by running the 'encrypt' group
of xfstests on a filesystem mounted with the 'inlinecrypt' mount
option.

This patchset applies to v6.10-rc6, and it has no prerequisites that
aren't already upstream.

Link: https://lore.kernel.org/r/20240708235330.103590-1-ebiggers@kernel.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+320 -21
+20 -14
drivers/ufs/core/ufshcd-crypto.c
··· 95 95 return err; 96 96 } 97 97 98 - static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) 98 + static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, 99 + const struct blk_crypto_key *key, 100 + unsigned int slot) 99 101 { 102 + struct ufs_hba *hba = 103 + container_of(profile, struct ufs_hba, crypto_profile); 100 104 /* 101 105 * Clear the crypto cfg on the device. Clearing CFGE 102 106 * might not be sufficient, so just clear the entire cfg. ··· 110 106 return ufshcd_program_key(hba, &cfg, slot); 111 107 } 112 108 113 - static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, 114 - const struct blk_crypto_key *key, 115 - unsigned int slot) 116 - { 117 - struct ufs_hba *hba = 118 - container_of(profile, struct ufs_hba, crypto_profile); 119 - 120 - return ufshcd_clear_keyslot(hba, slot); 121 - } 122 - 109 + /* 110 + * Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE 111 + * should be used in the host controller initialization sequence. 112 + */ 123 113 bool ufshcd_crypto_enable(struct ufs_hba *hba) 124 114 { 125 115 if (!(hba->caps & UFSHCD_CAP_CRYPTO)) ··· 121 123 122 124 /* Reset might clear all keys, so reprogram all the keys. */ 123 125 blk_crypto_reprogram_all_keys(&hba->crypto_profile); 126 + 127 + if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE) 128 + return false; 129 + 124 130 return true; 125 131 } 126 132 ··· 160 158 int cap_idx; 161 159 int err = 0; 162 160 enum blk_crypto_mode_num blk_mode_num; 161 + 162 + if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE) 163 + return 0; 163 164 164 165 /* 165 166 * Don't use crypto if either the hardware doesn't advertise the ··· 233 228 if (!(hba->caps & UFSHCD_CAP_CRYPTO)) 234 229 return; 235 230 236 - /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ 237 - for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) 238 - ufshcd_clear_keyslot(hba, slot); 231 + /* Clear all keyslots. 
*/ 232 + for (slot = 0; slot < hba->crypto_profile.num_slots; slot++) 233 + hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile, 234 + NULL, slot); 239 235 } 240 236 241 237 void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
+36
drivers/ufs/core/ufshcd-crypto.h
··· 37 37 h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num)); 38 38 } 39 39 40 + static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, 41 + struct ufshcd_lrb *lrbp) 42 + { 43 + struct scsi_cmnd *cmd = lrbp->cmd; 44 + const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx; 45 + 46 + if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt) 47 + return hba->vops->fill_crypto_prdt(hba, crypt_ctx, 48 + lrbp->ucd_prdt_ptr, 49 + scsi_sg_count(cmd)); 50 + return 0; 51 + } 52 + 53 + static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, 54 + struct ufshcd_lrb *lrbp) 55 + { 56 + if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT)) 57 + return; 58 + 59 + if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx)) 60 + return; 61 + 62 + /* Zeroize the PRDT because it can contain cryptographic keys. */ 63 + memzero_explicit(lrbp->ucd_prdt_ptr, 64 + ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd)); 65 + } 66 + 40 67 bool ufshcd_crypto_enable(struct ufs_hba *hba); 41 68 42 69 int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); ··· 80 53 static inline void 81 54 ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, 82 55 struct request_desc_header *h) { } 56 + 57 + static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba, 58 + struct ufshcd_lrb *lrbp) 59 + { 60 + return 0; 61 + } 62 + 63 + static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba, 64 + struct ufshcd_lrb *lrbp) { } 83 65 84 66 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) 85 67 {
+2 -1
drivers/ufs/core/ufshcd.c
··· 2640 2640 2641 2641 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd)); 2642 2642 2643 - return 0; 2643 + return ufshcd_crypto_fill_prdt(hba, lrbp); 2644 2644 } 2645 2645 2646 2646 /** ··· 5479 5479 struct scsi_cmnd *cmd = lrbp->cmd; 5480 5480 5481 5481 scsi_dma_unmap(cmd); 5482 + ufshcd_crypto_clear_prdt(hba, lrbp); 5482 5483 ufshcd_release(hba); 5483 5484 ufshcd_clk_scaling_update_busy(hba); 5484 5485 }
+234 -6
drivers/ufs/host/ufs-exynos.c
··· 8 8 * 9 9 */ 10 10 11 + #include <asm/unaligned.h> 12 + #include <crypto/aes.h> 13 + #include <linux/arm-smccc.h> 11 14 #include <linux/clk.h> 12 15 #include <linux/delay.h> 13 16 #include <linux/module.h> ··· 28 25 29 26 #include "ufs-exynos.h" 30 27 28 + #define DATA_UNIT_SIZE 4096 29 + 31 30 /* 32 31 * Exynos's Vendor specific registers for UFSHCI 33 32 */ 34 33 #define HCI_TXPRDT_ENTRY_SIZE 0x00 35 34 #define PRDT_PREFECT_EN BIT(31) 36 - #define PRDT_SET_SIZE(x) ((x) & 0x1F) 37 35 #define HCI_RXPRDT_ENTRY_SIZE 0x04 38 36 #define HCI_1US_TO_CNT_VAL 0x0C 39 37 #define CNT_VAL_1US_MASK 0x3FF ··· 1047 1043 exynos_ufs_fit_aggr_timeout(ufs); 1048 1044 1049 1045 hci_writel(ufs, 0xa, HCI_DATA_REORDER); 1050 - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); 1051 - hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); 1046 + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE); 1047 + hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE); 1052 1048 hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); 1053 1049 hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); 1054 1050 hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); ··· 1155 1151 hba->quirks = ufs->drv_data->quirks; 1156 1152 } 1157 1153 1154 + #ifdef CONFIG_SCSI_UFS_CRYPTO 1155 + 1156 + /* 1157 + * Support for Flash Memory Protector (FMP), which is the inline encryption 1158 + * hardware on Exynos and Exynos-based SoCs. The interface to this hardware is 1159 + * not compatible with the standard UFS crypto. It requires that encryption be 1160 + * configured in the PRDT using a nonstandard extension. 
1161 + */ 1162 + 1163 + enum fmp_crypto_algo_mode { 1164 + FMP_BYPASS_MODE = 0, 1165 + FMP_ALGO_MODE_AES_CBC = 1, 1166 + FMP_ALGO_MODE_AES_XTS = 2, 1167 + }; 1168 + enum fmp_crypto_key_length { 1169 + FMP_KEYLEN_256BIT = 1, 1170 + }; 1171 + 1172 + /** 1173 + * struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled 1174 + * 1175 + * @base: The standard PRDT entry, but with nonstandard bitfields in the high 1176 + * bits of the 'size' field, i.e. the last 32-bit word. When these 1177 + * nonstandard bitfields are zero, the data segment won't be encrypted or 1178 + * decrypted. Otherwise they specify the algorithm and key length with 1179 + * which the data segment will be encrypted or decrypted. 1180 + * @file_iv: The initialization vector (IV) with all bytes reversed 1181 + * @file_enckey: The first half of the AES-XTS key with all bytes reversed 1182 + * @file_twkey: The second half of the AES-XTS key with all bytes reversed 1183 + * @disk_iv: Unused 1184 + * @reserved: Unused 1185 + */ 1186 + struct fmp_sg_entry { 1187 + struct ufshcd_sg_entry base; 1188 + __be64 file_iv[2]; 1189 + __be64 file_enckey[4]; 1190 + __be64 file_twkey[4]; 1191 + __be64 disk_iv[2]; 1192 + __be64 reserved[2]; 1193 + }; 1194 + 1195 + #define SMC_CMD_FMP_SECURITY \ 1196 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 1197 + ARM_SMCCC_OWNER_SIP, 0x1810) 1198 + #define SMC_CMD_SMU \ 1199 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 1200 + ARM_SMCCC_OWNER_SIP, 0x1850) 1201 + #define SMC_CMD_FMP_SMU_RESUME \ 1202 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ 1203 + ARM_SMCCC_OWNER_SIP, 0x1860) 1204 + #define SMU_EMBEDDED 0 1205 + #define SMU_INIT 0 1206 + #define CFG_DESCTYPE_3 3 1207 + 1208 + static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) 1209 + { 1210 + struct blk_crypto_profile *profile = &hba->crypto_profile; 1211 + struct arm_smccc_res res; 1212 + int err; 1213 + 1214 + /* 1215 + * Check for 
the standard crypto support bit, since it's available even 1216 + * though the rest of the interface to FMP is nonstandard. 1217 + * 1218 + * This check should have the effect of preventing the driver from 1219 + * trying to use FMP on old Exynos SoCs that don't have FMP. 1220 + */ 1221 + if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & 1222 + MASK_CRYPTO_SUPPORT)) 1223 + return; 1224 + 1225 + /* 1226 + * The below sequence of SMC calls to enable FMP can be found in the 1227 + * downstream driver source for gs101 and other Exynos-based SoCs. It 1228 + * is the only way to enable FMP that works on SoCs such as gs101 that 1229 + * don't make the FMP registers accessible to Linux. It probably works 1230 + * on other Exynos-based SoCs too, and might even still be the only way 1231 + * that works. But this hasn't been properly tested, and this code is 1232 + * mutually exclusive with exynos_ufs_config_smu(). So for now only 1233 + * enable FMP support on SoCs with EXYNOS_UFS_OPT_UFSPR_SECURE. 1234 + */ 1235 + if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) 1236 + return; 1237 + 1238 + /* 1239 + * This call (which sets DESCTYPE to 0x3 in the FMPSECURITY0 register) 1240 + * is needed to make the hardware use the larger PRDT entry size. 1241 + */ 1242 + BUILD_BUG_ON(sizeof(struct fmp_sg_entry) != 128); 1243 + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, 1244 + 0, 0, 0, 0, &res); 1245 + if (res.a0) { 1246 + dev_warn(hba->dev, 1247 + "SMC_CMD_FMP_SECURITY failed on init: %ld. Disabling FMP support.\n", 1248 + res.a0); 1249 + return; 1250 + } 1251 + ufshcd_set_sg_entry_size(hba, sizeof(struct fmp_sg_entry)); 1252 + 1253 + /* 1254 + * This is needed to initialize FMP. Without it, errors occur when 1255 + * inline encryption is used. 1256 + */ 1257 + arm_smccc_smc(SMC_CMD_SMU, SMU_INIT, SMU_EMBEDDED, 0, 0, 0, 0, 0, &res); 1258 + if (res.a0) { 1259 + dev_err(hba->dev, 1260 + "SMC_CMD_SMU(SMU_INIT) failed: %ld. 
Disabling FMP support.\n", 1261 + res.a0); 1262 + return; 1263 + } 1264 + 1265 + /* Advertise crypto capabilities to the block layer. */ 1266 + err = devm_blk_crypto_profile_init(hba->dev, profile, 0); 1267 + if (err) { 1268 + /* Only ENOMEM should be possible here. */ 1269 + dev_err(hba->dev, "Failed to initialize crypto profile: %d\n", 1270 + err); 1271 + return; 1272 + } 1273 + profile->max_dun_bytes_supported = AES_BLOCK_SIZE; 1274 + profile->dev = hba->dev; 1275 + profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 1276 + DATA_UNIT_SIZE; 1277 + 1278 + /* Advertise crypto support to ufshcd-core. */ 1279 + hba->caps |= UFSHCD_CAP_CRYPTO; 1280 + 1281 + /* Advertise crypto quirks to ufshcd-core. */ 1282 + hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE | 1283 + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE | 1284 + UFSHCD_QUIRK_KEYS_IN_PRDT; 1285 + 1286 + } 1287 + 1288 + static void exynos_ufs_fmp_resume(struct ufs_hba *hba) 1289 + { 1290 + struct arm_smccc_res res; 1291 + 1292 + arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3, 1293 + 0, 0, 0, 0, &res); 1294 + if (res.a0) 1295 + dev_err(hba->dev, 1296 + "SMC_CMD_FMP_SECURITY failed on resume: %ld\n", res.a0); 1297 + 1298 + arm_smccc_smc(SMC_CMD_FMP_SMU_RESUME, 0, SMU_EMBEDDED, 0, 0, 0, 0, 0, 1299 + &res); 1300 + if (res.a0) 1301 + dev_err(hba->dev, 1302 + "SMC_CMD_FMP_SMU_RESUME failed: %ld\n", res.a0); 1303 + } 1304 + 1305 + static inline __be64 fmp_key_word(const u8 *key, int j) 1306 + { 1307 + return cpu_to_be64(get_unaligned_le64( 1308 + key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64))); 1309 + } 1310 + 1311 + /* Fill the PRDT for a request according to the given encryption context. 
*/ 1312 + static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba, 1313 + const struct bio_crypt_ctx *crypt_ctx, 1314 + void *prdt, unsigned int num_segments) 1315 + { 1316 + struct fmp_sg_entry *fmp_prdt = prdt; 1317 + const u8 *enckey = crypt_ctx->bc_key->raw; 1318 + const u8 *twkey = enckey + AES_KEYSIZE_256; 1319 + u64 dun_lo = crypt_ctx->bc_dun[0]; 1320 + u64 dun_hi = crypt_ctx->bc_dun[1]; 1321 + unsigned int i; 1322 + 1323 + /* If FMP wasn't enabled, we shouldn't get any encrypted requests. */ 1324 + if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO))) 1325 + return -EIO; 1326 + 1327 + /* Configure FMP on each segment of the request. */ 1328 + for (i = 0; i < num_segments; i++) { 1329 + struct fmp_sg_entry *prd = &fmp_prdt[i]; 1330 + int j; 1331 + 1332 + /* Each segment must be exactly one data unit. */ 1333 + if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) { 1334 + dev_err(hba->dev, 1335 + "data segment is misaligned for FMP\n"); 1336 + return -EIO; 1337 + } 1338 + 1339 + /* Set the algorithm and key length. */ 1340 + prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) | 1341 + (FMP_KEYLEN_256BIT << 26)); 1342 + 1343 + /* Set the IV. */ 1344 + prd->file_iv[0] = cpu_to_be64(dun_hi); 1345 + prd->file_iv[1] = cpu_to_be64(dun_lo); 1346 + 1347 + /* Set the key. */ 1348 + for (j = 0; j < AES_KEYSIZE_256 / sizeof(u64); j++) { 1349 + prd->file_enckey[j] = fmp_key_word(enckey, j); 1350 + prd->file_twkey[j] = fmp_key_word(twkey, j); 1351 + } 1352 + 1353 + /* Increment the data unit number. 
*/ 1354 + dun_lo++; 1355 + if (dun_lo == 0) 1356 + dun_hi++; 1357 + } 1358 + return 0; 1359 + } 1360 + 1361 + #else /* CONFIG_SCSI_UFS_CRYPTO */ 1362 + 1363 + static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs) 1364 + { 1365 + } 1366 + 1367 + static void exynos_ufs_fmp_resume(struct ufs_hba *hba) 1368 + { 1369 + } 1370 + 1371 + #define exynos_ufs_fmp_fill_prdt NULL 1372 + 1373 + #endif /* !CONFIG_SCSI_UFS_CRYPTO */ 1374 + 1158 1375 static int exynos_ufs_init(struct ufs_hba *hba) 1159 1376 { 1160 1377 struct device *dev = hba->dev; ··· 1423 1198 1424 1199 exynos_ufs_priv_init(hba, ufs); 1425 1200 1201 + exynos_ufs_fmp_init(hba, ufs); 1202 + 1426 1203 if (ufs->drv_data->drv_init) { 1427 1204 ret = ufs->drv_data->drv_init(dev, ufs); 1428 1205 if (ret) { ··· 1440 1213 if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) 1441 1214 exynos_ufs_config_smu(ufs); 1442 1215 1443 - hba->host->dma_alignment = SZ_4K - 1; 1216 + hba->host->dma_alignment = DATA_UNIT_SIZE - 1; 1444 1217 return 0; 1445 1218 1446 1219 out: ··· 1559 1332 * (ufshcd_async_scan()). Note: this callback may also be called 1560 1333 * from other functions than ufshcd_init(). 1561 1334 */ 1562 - hba->host->max_segment_size = SZ_4K; 1335 + hba->host->max_segment_size = DATA_UNIT_SIZE; 1563 1336 1564 1337 if (ufs->drv_data->pre_hce_enable) { 1565 1338 ret = ufs->drv_data->pre_hce_enable(ufs); ··· 1659 1432 phy_power_on(ufs->phy); 1660 1433 1661 1434 exynos_ufs_config_smu(ufs); 1662 - 1435 + exynos_ufs_fmp_resume(hba); 1663 1436 return 0; 1664 1437 } 1665 1438 ··· 1925 1698 .hibern8_notify = exynos_ufs_hibern8_notify, 1926 1699 .suspend = exynos_ufs_suspend, 1927 1700 .resume = exynos_ufs_resume, 1701 + .fill_crypto_prdt = exynos_ufs_fmp_fill_prdt, 1928 1702 }; 1929 1703 1930 1704 static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
+28
include/ufs/ufshcd.h
··· 322 322 * @device_reset: called to issue a reset pulse on the UFS device 323 323 * @config_scaling_param: called to configure clock scaling parameters 324 324 * @program_key: program or evict an inline encryption key 325 + * @fill_crypto_prdt: initialize crypto-related fields in the PRDT 325 326 * @event_notify: called to notify important events 326 327 * @reinit_notify: called to notify reinit of UFSHCD during max gear switch 327 328 * @mcq_config_resource: called to configure MCQ platform resources ··· 370 369 struct devfreq_simple_ondemand_data *data); 371 370 int (*program_key)(struct ufs_hba *hba, 372 371 const union ufs_crypto_cfg_entry *cfg, int slot); 372 + int (*fill_crypto_prdt)(struct ufs_hba *hba, 373 + const struct bio_crypt_ctx *crypt_ctx, 374 + void *prdt, unsigned int num_segments); 373 375 void (*event_notify)(struct ufs_hba *hba, 374 376 enum ufs_event_type evt, void *data); 375 377 void (*reinit_notify)(struct ufs_hba *); ··· 652 648 * thus need this quirk to skip related flow. 653 649 */ 654 650 UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, 651 + 652 + /* 653 + * This quirk needs to be enabled if the host controller supports inline 654 + * encryption but it needs to initialize the crypto capabilities in a 655 + * nonstandard way and/or needs to override blk_crypto_ll_ops. If 656 + * enabled, the standard code won't initialize the blk_crypto_profile; 657 + * ufs_hba_variant_ops::init() must do it instead. 658 + */ 659 + UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22, 660 + 661 + /* 662 + * This quirk needs to be enabled if the host controller supports inline 663 + * encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e. 664 + * host controller initialization fails if that bit is set. 
665 + */ 666 + UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23, 667 + 668 + /* 669 + * This quirk needs to be enabled if the host controller driver copies 670 + * cryptographic keys into the PRDT in order to send them to hardware, 671 + * and therefore the PRDT should be zeroized after each request (as per 672 + * the standard best practice for managing keys). 673 + */ 674 + UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24, 655 675 }; 656 676 657 677 enum ufshcd_caps {