// Coves frontend - a photon fork (markdown plugins: spoiler blocks, handle linkification, link localization)
// @ts-nocheck
import { marked } from 'marked'
import markedLinkifyIt from 'marked-linkify-it'

/**
 * Marked block extension that renders
 *   ::: spoiler Title
 *   body
 *   :::
 * as a collapsible <details>/<summary> element.
 */
export const spoilerPlugin = {
  name: 'spoiler',
  level: 'block',
  start(src) {
    return src.match(/:::/)?.index
  },
  // eslint-disable-next-line
  tokenizer(src, tokens) {
    const rule = /::: ?spoiler(?: ?(.*))\n([\s\S]*?)\n:::/
    const match = rule.exec(src)
    if (match) {
      return {
        type: 'spoiler',
        raw: match[0],
        title: match[1].trim(),
        text: match[2].trim(),
      }
    }
  },
  renderer(token) {
    // Title supports inline markdown; body supports full block markdown.
    return `
  <details>
  <summary>${marked.parseInline(token.title)}</summary>
  ${marked.parse(token.text)}
  </details>
  `
  },
}

// `name@instance.tld` handle shape shared by community (!) and user (@) links.
const handleRegex = /^([a-z0-9_.-]+)@([\da-z.-]+)\.([a-z]{2,63})/i

/**
 * Build a linkify-it `validate` callback for a handle prefix ('!' or '@').
 *
 * BUGFIX: the doubled-prefix guard previously read `tail[pos - 2]`, but
 * `tail = text.slice(pos)` starts AT `pos`, so that indexed an unrelated
 * character far ahead — the check for "@@mention"/"!!community" never fired
 * as intended. The character before the prefix lives in `text` at `pos - 2`
 * (the prefix itself is at `pos - 1`). The '@' rule also wrongly compared
 * against '!'; per its own comment it must reject a doubled '@'.
 */
const handleValidator = (prefixChar) =>
  function (text, pos, self) {
    const tail = text.slice(pos)

    if (!self.re.handle) {
      // Cache the compiled pattern on the linkifier instance.
      self.re.handle = handleRegex
    }
    if (self.re.handle.test(tail)) {
      // Linkifier allows punctuation chars before the prefix, but we
      // additionally disable a doubled prefix ("@@mention" is invalid).
      if (pos >= 2 && text[pos - 2] === prefixChar) {
        return false
      }
      return tail.match(self.re.handle)[0].length
    }
    return 0
  }

/**
 * linkify-it schemas: `!name@instance` -> /c/…, `@name@instance` -> /profile/….
 */
export const linkify = markedLinkifyIt(
  {
    '!': {
      validate: handleValidator('!'),
      normalize: function (match) {
        let prefix = match.url
        prefix = prefix.startsWith('c/') ? prefix.slice(2) : prefix.slice(1)

        match.url = `/c/${prefix}`
      },
    },
    '@': {
      validate: handleValidator('@'),
      normalize: function (match) {
        let prefix = match.url
        prefix = prefix.startsWith('u/') ? prefix.slice(2) : prefix.slice(1)

        match.url = `/profile/${prefix}`
      },
    },
  },
  {
    fuzzyEmail: false,
  },
)

// Recognizers for remote lemmy-style URLs.
// BUGFIX: `[a-zA-Z0-9.-_]` contained the character RANGE '.'(0x2E)–'_'(0x5F),
// which accidentally matched '/', ':', ';', '<', '=', '>', '?', '@', '[',
// '\\', ']', '^'. A trailing '-' makes it a literal hyphen instead.
const regexes = {
  post: /^https:\/\/([a-zA-Z0-9.-]+)\/post\/(\d+)$/i,
  comment: /^https:\/\/([a-zA-Z0-9.-]+)\/comment\/(\d+)$/i,
  user: /^https:\/\/([a-zA-Z0-9.-]+)(\/u\/)([a-zA-Z0-9._-]+)$/i,
  community: /^https:\/\/([a-zA-Z0-9.-]+)(\/c\/)([a-zA-Z0-9._-]+)$/i,
  implicitUser: /^mailto:([a-z0-9_.-]+)@(([\da-z.-]+)\.([a-z]{2,63}))/i,
}

export { regexes as CONTENT_REGEXES }

/**
 * Convert remote links to local app links.
 *
 * @param {string} link - An absolute URL (or mailto: handle).
 * @returns {string|undefined} A local route, or undefined when unrecognized.
 */
export const localizeLink = (link) => {
  const community = link.match(regexes.community)
  if (community) {
    // A name containing '@' already carries its home instance; otherwise
    // append the instance the URL pointed at.
    return community[3].includes('@')
      ? `/c/${community[3]}`
      : `/c/${community[3]}@${community[1]}`
  }

  const post = link.match(regexes.post)
  if (post) return `/post/${post[1]}/${post[2]}`

  const comment = link.match(regexes.comment)
  if (comment) return `/comment/${comment[1]}/${comment[2]}`

  const user = link.match(regexes.user)
  if (user) {
    // Same instance handling as for communities above.
    return user[3].includes('@')
      ? `/profile/${user[3]}`
      : `/profile/${user[3]}@${user[1]}`
  }

  // Support implicit user syntax (no preceding @): markdown autolinks
  // `user@instance.tld` as a mailto: link, which we repurpose.
  const implicit = regexes.implicitUser.exec(link)
  if (implicit?.[1] && implicit?.[2]) {
    return `/profile/${implicit[1]}@${implicit[2]}`
  }
}

/**
 * Marked inline extension for ~subscript~ and ^superscript^.
 *
 * @param {(token: object) => object} tokensExtractor - Callback that receives
 *   the raw token (with `lexer` attached) and returns the final token.
 */
export function subSupscriptExtension(tokensExtractor) {
  return {
    name: 'subscriptSuperscript',
    level: 'inline',
    start(src) {
      return src.match(/[~^]/)?.index
    },
    // eslint-disable-next-line
    tokenizer(src, tokens) {
      // Delimited content may not start or end with whitespace.
      const subscriptRule = /^~([^~\s](?:[^~]*[^~\s])?)~/
      const superscriptRule = /^\^([^^\s](?:[^^]*[^^\s])?)\^/

      let match

      if ((match = subscriptRule.exec(src))) {
        return tokensExtractor({
          type: 'subscript',
          content: match[1],
          raw: match[0],
          lexer: this.lexer,
        })
      }

      if ((match = superscriptRule.exec(src))) {
        return tokensExtractor({
          type: 'superscript',
          content: match[1],
          raw: match[0],
          lexer: this.lexer,
        })
      }
    },
  }
}