lib/pleroma/html.ex
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.HTML do
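  @moduledoc """
  Scrubs HTML content through the configured `HtmlSanitizeEx` scrubbers and caches
  the per-activity results.
  """
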
  alias HtmlSanitizeEx.Scrubber

  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]

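  # Sketch of how the scrub policy might be configured (hypothetical values, not
  # from this file; the keys mirror the `:markup` options read further below):
  #
  #   config :pleroma, :markup,
  #     scrub_policy: [Pleroma.HTML.Transform.MediaProxy, Pleroma.HTML.Scrubber.Default],
  #     allow_inline_images: true,
  #     allow_tables: false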
  def get_scrubbers do
    Pleroma.Config.get([:markup, :scrub_policy])
    |> get_scrubbers()
  end

  def filter_tags(html, nil) do
    filter_tags(html, get_scrubbers())
  end

  def filter_tags(html, scrubbers) when is_list(scrubbers) do
    Enum.reduce(scrubbers, html, fn scrubber, html ->
      filter_tags(html, scrubber)
    end)
  end

  def filter_tags(html, scrubber), do: Scrubber.scrub(html, scrubber)
  def filter_tags(html), do: filter_tags(html, nil)
  def strip_tags(html), do: Scrubber.scrub(html, Scrubber.StripTags)
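
  # Illustrative call shapes (inputs are hypothetical):
  #
  #   Pleroma.HTML.filter_tags(html)                                     # configured scrub policy
  #   Pleroma.HTML.filter_tags(html, Pleroma.HTML.Scrubber.TwitterText)  # a single explicit scrubber
  #   Pleroma.HTML.strip_tags(html)                                      # remove all markup, keep text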

  def get_cached_scrubbed_html_for_activity(content, scrubbers, activity, key \\ "") do
    key = "#{key}#{generate_scrubber_signature(scrubbers)}|#{activity.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      ensure_scrubbed_html(content, scrubbers, activity.data["object"]["fake"] || false)
    end)
  end
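
  # The resulting cache key has the shape "<caller key><scrubber signature>|<activity id>";
  # the exact caller prefix and id format depend on where this is invoked.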

  def get_cached_stripped_html_for_activity(content, activity, key) do
    get_cached_scrubbed_html_for_activity(
      content,
      HtmlSanitizeEx.Scrubber.StripTags,
      activity,
      key
    )
  end

  def ensure_scrubbed_html(
        content,
        scrubbers,
        false = _fake
      ) do
    {:commit, filter_tags(content, scrubbers)}
  end

  def ensure_scrubbed_html(
        content,
        scrubbers,
        true = _fake
      ) do
    {:ignore, filter_tags(content, scrubbers)}
  end
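
  # Note: these return shapes follow Cachex's fallback contract. `{:commit, value}`
  # is written to :scrubber_cache, while `{:ignore, value}` is returned to the caller
  # without being stored, so scrubbed HTML for "fake" activities never enters the cache.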

  defp generate_scrubber_signature(scrubber) when is_atom(scrubber) do
    generate_scrubber_signature([scrubber])
  end

  defp generate_scrubber_signature(scrubbers) do
    Enum.reduce(scrubbers, "", fn scrubber, signature ->
      "#{signature}#{to_string(scrubber)}"
    end)
  end
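
  # For example (illustrative), a scrubber list reduces to one concatenated module
  # name string that feeds into the cache key above:
  #
  #   generate_scrubber_signature([Pleroma.HTML.Scrubber.Default, Pleroma.HTML.Transform.MediaProxy])
  #   #=> "Elixir.Pleroma.HTML.Scrubber.DefaultElixir.Pleroma.HTML.Transform.MediaProxy"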

  def extract_first_external_url(_, nil), do: {:error, "No content"}

  def extract_first_external_url(object, content) do
    key = "URL|#{object.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      result =
        content
        |> Floki.filter_out("a.mention")
        |> Floki.attribute("a", "href")
        |> Enum.at(0)

      {:commit, {:ok, result}}
    end)
  end
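
  # Illustrative behaviour (content is hypothetical): the href of the first non-mention
  # <a> tag is cached under "URL|<object id>" and returned as {:ok, url}, or {:ok, nil}
  # when the content has no such link.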
end

defmodule Pleroma.HTML.Scrubber.TwitterText do
  @moduledoc """
  An HTML scrubbing policy which limits content to twitter-style text. Only
  paragraphs, breaks and links are allowed through the filter.
  """
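
  # A rough usage sketch (hypothetical input): applied via
  #
  #   Pleroma.HTML.filter_tags(html, Pleroma.HTML.Scrubber.TwitterText)
  #
  # tags other than those whitelisted below (a, p, br, span and, optionally,
  # img for custom emoji) are stripped from the markup.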

  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_this_attribute_values("a", "rel", [
    "tag",
    "nofollow",
    "noopener",
    "noreferrer"
  ])

  # paragraphs and linebreaks
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("p", [])

  # microformats
  Meta.allow_tag_with_these_attributes("span", ["class"])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  Meta.strip_everything_not_covered()
end

defmodule Pleroma.HTML.Scrubber.Default do
  @moduledoc "The default HTML scrubbing policy."

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta
  # credo:disable-for-previous-line
  # No idea how to fix this one…

  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_this_attribute_values("a", "rel", [
    "tag",
    "nofollow",
    "noopener",
    "noreferrer"
  ])

  Meta.allow_tag_with_these_attributes("abbr", ["title"])

  Meta.allow_tag_with_these_attributes("b", [])
  Meta.allow_tag_with_these_attributes("blockquote", [])
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("code", [])
  Meta.allow_tag_with_these_attributes("del", [])
  Meta.allow_tag_with_these_attributes("em", [])
  Meta.allow_tag_with_these_attributes("i", [])
  Meta.allow_tag_with_these_attributes("li", [])
  Meta.allow_tag_with_these_attributes("ol", [])
  Meta.allow_tag_with_these_attributes("p", [])
  Meta.allow_tag_with_these_attributes("pre", [])
  Meta.allow_tag_with_these_attributes("span", ["class"])
  Meta.allow_tag_with_these_attributes("strong", [])
  Meta.allow_tag_with_these_attributes("u", [])
  Meta.allow_tag_with_these_attributes("ul", [])

  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  @allow_tables Keyword.get(@markup, :allow_tables)

  if @allow_tables do
    Meta.allow_tag_with_these_attributes("table", [])
    Meta.allow_tag_with_these_attributes("tbody", [])
    Meta.allow_tag_with_these_attributes("td", [])
    Meta.allow_tag_with_these_attributes("th", [])
    Meta.allow_tag_with_these_attributes("thead", [])
    Meta.allow_tag_with_these_attributes("tr", [])
  end

  @allow_headings Keyword.get(@markup, :allow_headings)

  if @allow_headings do
    Meta.allow_tag_with_these_attributes("h1", [])
    Meta.allow_tag_with_these_attributes("h2", [])
    Meta.allow_tag_with_these_attributes("h3", [])
    Meta.allow_tag_with_these_attributes("h4", [])
    Meta.allow_tag_with_these_attributes("h5", [])
  end

  @allow_fonts Keyword.get(@markup, :allow_fonts)

  if @allow_fonts do
    Meta.allow_tag_with_these_attributes("font", ["face"])
  end

  Meta.strip_everything_not_covered()
end

defmodule Pleroma.HTML.Transform.MediaProxy do
  @moduledoc "Transforms inline image URIs to use MediaProxy."
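
  # Rough sketch of the intended effect (URLs are hypothetical; the rewritten form
  # depends entirely on the instance's MediaProxy configuration):
  #
  #   scrub_attribute("img", {"src", "https://remote.example/emoji.png"})
  #   #=> {"src", "https://this.instance.example/proxy/.../emoji.png"}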

  alias Pleroma.Web.MediaProxy

  def before_scrub(html), do: html

  def scrub_attribute("img", {"src", "http" <> target}) do
    media_url =
      ("http" <> target)
      |> MediaProxy.url()

    {"src", media_url}
  end

  def scrub_attribute(_tag, attribute), do: attribute

  def scrub({"img", attributes, children}) do
    attributes =
      attributes
      |> Enum.map(fn attr -> scrub_attribute("img", attr) end)
      |> Enum.reject(&is_nil(&1))

    {"img", attributes, children}
  end

  def scrub({:comment, _children}), do: ""

  def scrub({tag, attributes, children}), do: {tag, attributes, children}
  def scrub({_tag, children}), do: children
  def scrub(text), do: text
end