Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: talitos - add support for 36 bit addressing

Enabling extended addressing in the h/w requires we always assign the
extended address component (eptr) of the talitos h/w pointer. This is
for e500 based platforms with large memories.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Kim Phillips and committed by Herbert Xu.
81eb024c 4b992628

+41 -29
+40 -29
drivers/crypto/talitos.c
··· 146 146 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 147 147 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 148 148 149 + static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) 150 + { 151 + talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); 152 + talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr)); 153 + } 154 + 149 155 /* 150 156 * map virtual single (contiguous) pointer to h/w descriptor pointer 151 157 */ ··· 161 155 unsigned char extent, 162 156 enum dma_data_direction dir) 163 157 { 158 + dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); 159 + 164 160 talitos_ptr->len = cpu_to_be16(len); 165 - talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); 161 + to_talitos_ptr(talitos_ptr, dma_addr); 166 162 talitos_ptr->j_extent = extent; 167 163 } 168 164 ··· 195 187 return -EIO; 196 188 } 197 189 198 - /* set done writeback and IRQ */ 199 - setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | 200 - TALITOS_CCCR_LO_CDIE); 190 + /* set 36-bit addressing, done writeback enable and done IRQ enable */ 191 + setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE | 192 + TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); 201 193 202 194 /* and ICCR writeback, if available */ 203 195 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) ··· 320 312 321 313 /* GO! 
*/ 322 314 wmb(); 323 - out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); 315 + out_be32(priv->reg + TALITOS_FF(ch), 316 + cpu_to_be32(upper_32_bits(request->dma_desc))); 317 + out_be32(priv->reg + TALITOS_FF_LO(ch), 318 + cpu_to_be32(lower_32_bits(request->dma_desc))); 324 319 325 320 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); 326 321 ··· 945 934 int n_sg = sg_count; 946 935 947 936 while (n_sg--) { 948 - link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); 937 + to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); 949 938 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); 950 939 link_tbl_ptr->j_extent = 0; 951 940 link_tbl_ptr++; ··· 1020 1009 edesc->src_is_chained); 1021 1010 1022 1011 if (sg_count == 1) { 1023 - desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1012 + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); 1024 1013 } else { 1025 1014 sg_link_tbl_len = cryptlen; 1026 1015 ··· 1031 1020 &edesc->link_tbl[0]); 1032 1021 if (sg_count > 1) { 1033 1022 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1034 - desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1023 + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); 1035 1024 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1036 1025 edesc->dma_len, 1037 1026 DMA_BIDIRECTIONAL); 1038 1027 } else { 1039 1028 /* Only one segment now, so no link tbl needed */ 1040 - desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> 1041 - src)); 1029 + to_talitos_ptr(&desc->ptr[4], 1030 + sg_dma_address(areq->src)); 1042 1031 } 1043 1032 } 1044 1033 ··· 1053 1042 edesc->dst_is_chained); 1054 1043 1055 1044 if (sg_count == 1) { 1056 - desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1045 + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); 1057 1046 } else { 1058 1047 struct talitos_ptr *link_tbl_ptr = 1059 1048 &edesc->link_tbl[edesc->src_nents + 1]; 1060 1049 1061 - desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 1062 - edesc->dma_link_tbl + 1063 - 
edesc->src_nents + 1); 1050 + to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + 1051 + (edesc->src_nents + 1) * 1052 + sizeof(struct talitos_ptr)); 1064 1053 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1065 1054 link_tbl_ptr); 1066 1055 ··· 1073 1062 link_tbl_ptr->len = cpu_to_be16(authsize); 1074 1063 1075 1064 /* icv data follows link tables */ 1076 - link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 1077 - edesc->dma_link_tbl + 1078 - edesc->src_nents + 1079 - edesc->dst_nents + 2); 1080 - 1065 + to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl + 1066 + (edesc->src_nents + edesc->dst_nents + 2) * 1067 + sizeof(struct talitos_ptr)); 1081 1068 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 1082 1069 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1083 1070 edesc->dma_len, DMA_BIDIRECTIONAL); ··· 1350 1341 1351 1342 /* first DWORD empty */ 1352 1343 desc->ptr[0].len = 0; 1353 - desc->ptr[0].ptr = 0; 1344 + to_talitos_ptr(&desc->ptr[0], 0); 1354 1345 desc->ptr[0].j_extent = 0; 1355 1346 1356 1347 /* cipher iv */ ··· 1374 1365 edesc->src_is_chained); 1375 1366 1376 1367 if (sg_count == 1) { 1377 - desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1368 + to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); 1378 1369 } else { 1379 1370 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 1380 1371 &edesc->link_tbl[0]); 1381 1372 if (sg_count > 1) { 1373 + to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); 1382 1374 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; 1383 - desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl); 1384 1375 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1385 1376 edesc->dma_len, 1386 1377 DMA_BIDIRECTIONAL); 1387 1378 } else { 1388 1379 /* Only one segment now, so no link tbl needed */ 1389 - desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> 1390 - src)); 1380 + to_talitos_ptr(&desc->ptr[3], 1381 + sg_dma_address(areq->src)); 1391 1382 } 1392 1383 } 1393 1384 ··· 1402 1393 
edesc->dst_is_chained); 1403 1394 1404 1395 if (sg_count == 1) { 1405 - desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1396 + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); 1406 1397 } else { 1407 1398 struct talitos_ptr *link_tbl_ptr = 1408 1399 &edesc->link_tbl[edesc->src_nents + 1]; 1409 1400 1401 + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + 1402 + (edesc->src_nents + 1) * 1403 + sizeof(struct talitos_ptr)); 1410 1404 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1411 - desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *) 1412 - edesc->dma_link_tbl + 1413 - edesc->src_nents + 1); 1414 1405 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1415 1406 link_tbl_ptr); 1416 1407 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, ··· 1423 1414 1424 1415 /* last DWORD empty */ 1425 1416 desc->ptr[6].len = 0; 1426 - desc->ptr[6].ptr = 0; 1417 + to_talitos_ptr(&desc->ptr[6], 0); 1427 1418 desc->ptr[6].j_extent = 0; 1428 1419 1429 1420 ret = talitos_submit(dev, desc, callback, areq); ··· 1906 1897 for (i = 0; i < priv->num_channels; i++) 1907 1898 atomic_set(&priv->chan[i].submit_count, 1908 1899 -(priv->chfifo_len - 1)); 1900 + 1901 + dma_set_mask(dev, DMA_BIT_MASK(36)); 1909 1902 1910 1903 /* reset and initialize the h/w */ 1911 1904 err = init_device(dev);
+1 -0
drivers/crypto/talitos.h
··· 57 57 #define TALITOS_CCCR_RESET 0x1 /* channel reset */ 58 58 #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) 59 59 #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ 60 + #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ 60 61 #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ 61 62 #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ 62 63 #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */