lib/pleroma/html.ex
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.HTML do
  alias HtmlSanitizeEx.Scrubber

  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]

  def get_scrubbers do
    Pleroma.Config.get([:markup, :scrub_policy])
    |> get_scrubbers()
  end

  def filter_tags(html, nil) do
    filter_tags(html, get_scrubbers())
  end

  def filter_tags(html, scrubbers) when is_list(scrubbers) do
    Enum.reduce(scrubbers, html, fn scrubber, html ->
      filter_tags(html, scrubber)
    end)
  end

  def filter_tags(html, scrubber), do: Scrubber.scrub(html, scrubber)
  def filter_tags(html), do: filter_tags(html, nil)
  def strip_tags(html), do: Scrubber.scrub(html, Scrubber.StripTags)

  # Scrubs `content` with the given scrubbers and caches the result,
  # keyed by the scrubber set and the activity id.
  def get_cached_scrubbed_html_for_activity(content, scrubbers, activity, key \\ "") do
    key = "#{key}#{generate_scrubber_signature(scrubbers)}|#{activity.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      object = Pleroma.Object.normalize(activity)
      ensure_scrubbed_html(content, scrubbers, object.data["fake"] || false)
    end)
  end

  def get_cached_stripped_html_for_activity(content, activity, key) do
    get_cached_scrubbed_html_for_activity(
      content,
      HtmlSanitizeEx.Scrubber.StripTags,
      activity,
      key
    )
  end

  # Real activities are cached (:commit); "fake" preview activities are
  # scrubbed but not written to the cache (:ignore).
  def ensure_scrubbed_html(
        content,
        scrubbers,
        false = _fake
      ) do
    {:commit, filter_tags(content, scrubbers)}
  end

  def ensure_scrubbed_html(
        content,
        scrubbers,
        true = _fake
      ) do
    {:ignore, filter_tags(content, scrubbers)}
  end

  # Builds a cache-key component out of the scrubber module names.
  defp generate_scrubber_signature(scrubber) when is_atom(scrubber) do
    generate_scrubber_signature([scrubber])
  end

  defp generate_scrubber_signature(scrubbers) do
    Enum.reduce(scrubbers, "", fn scrubber, signature ->
      "#{signature}#{to_string(scrubber)}"
    end)
  end

  def extract_first_external_url(_, nil), do: {:error, "No content"}

  # Returns the first link in the content that is not a mention, cached per object.
  def extract_first_external_url(object, content) do
    key = "URL|#{object.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      result =
        content
        |> Floki.filter_out("a.mention")
        |> Floki.attribute("a", "href")
        |> Enum.at(0)

      {:commit, {:ok, result}}
    end)
  end
end
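
# Illustrative usage (comments only, not part of the module). filter_tags/2
# applies a single scrubber or a list of scrubbers in order; strip_tags/1
# removes all markup. The expected results below are a sketch and assume the
# scrubbers defined in this file; exact output depends on the configured
# [:markup, :scrub_policy].
#
#     iex> Pleroma.HTML.filter_tags(~s(<p onclick="alert(1)">hi</p>), Pleroma.HTML.Scrubber.Default)
#     "<p>hi</p>"
#
#     iex> Pleroma.HTML.strip_tags("<p>hi <b>there</b></p>")
#     "hi there"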

defmodule Pleroma.HTML.Scrubber.TwitterText do
  @moduledoc """
  An HTML scrubbing policy which limits markup to Twitter-style text. Only
  paragraphs, line breaks and links are allowed through the filter.
  """

  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_this_attribute_values("a", "rel", [
    "tag",
    "nofollow",
    "noopener",
    "noreferrer"
  ])

  # paragraphs and linebreaks
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("p", [])

  # microformats
  Meta.allow_tag_with_these_attributes("span", ["class"])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  Meta.strip_everything_not_covered()
end
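
# A sketch of this policy's effect (assuming allow_inline_images is not set):
# tags outside the whitelist above are dropped and only their text content is
# kept, so e.g. emphasis markup disappears while paragraphs and links survive.
#
#     iex> Pleroma.HTML.filter_tags("<p>hello <em>world</em></p>", Pleroma.HTML.Scrubber.TwitterText)
#     "<p>hello world</p>"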

defmodule Pleroma.HTML.Scrubber.Default do
  @moduledoc "The default HTML scrubbing policy."

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta
  # credo:disable-for-previous-line
  # No idea how to fix this one…

  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_this_attribute_values("a", "rel", [
    "tag",
    "nofollow",
    "noopener",
    "noreferrer"
  ])

  Meta.allow_tag_with_these_attributes("abbr", ["title"])

  Meta.allow_tag_with_these_attributes("b", [])
  Meta.allow_tag_with_these_attributes("blockquote", [])
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("code", [])
  Meta.allow_tag_with_these_attributes("del", [])
  Meta.allow_tag_with_these_attributes("em", [])
  Meta.allow_tag_with_these_attributes("i", [])
  Meta.allow_tag_with_these_attributes("li", [])
  Meta.allow_tag_with_these_attributes("ol", [])
  Meta.allow_tag_with_these_attributes("p", [])
  Meta.allow_tag_with_these_attributes("pre", [])
  Meta.allow_tag_with_these_attributes("span", ["class"])
  Meta.allow_tag_with_these_attributes("strong", [])
  Meta.allow_tag_with_these_attributes("u", [])
  Meta.allow_tag_with_these_attributes("ul", [])

  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  @allow_tables Keyword.get(@markup, :allow_tables)

  if @allow_tables do
    Meta.allow_tag_with_these_attributes("table", [])
    Meta.allow_tag_with_these_attributes("tbody", [])
    Meta.allow_tag_with_these_attributes("td", [])
    Meta.allow_tag_with_these_attributes("th", [])
    Meta.allow_tag_with_these_attributes("thead", [])
    Meta.allow_tag_with_these_attributes("tr", [])
  end

  @allow_headings Keyword.get(@markup, :allow_headings)

  if @allow_headings do
    Meta.allow_tag_with_these_attributes("h1", [])
    Meta.allow_tag_with_these_attributes("h2", [])
    Meta.allow_tag_with_these_attributes("h3", [])
    Meta.allow_tag_with_these_attributes("h4", [])
    Meta.allow_tag_with_these_attributes("h5", [])
  end

  @allow_fonts Keyword.get(@markup, :allow_fonts)

  if @allow_fonts do
    Meta.allow_tag_with_these_attributes("font", ["face"])
  end

  Meta.strip_everything_not_covered()
end
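
# The optional tag groups above are toggled by the :markup config; the keys
# below mirror the Keyword.get/2 calls in this module, the values are only an
# example. Note that @markup is read at compile time, so changing these
# settings requires recompilation.
#
#     config :pleroma, :markup,
#       allow_inline_images: true,
#       allow_tables: false,
#       allow_headings: false,
#       allow_fonts: false,
#       scrub_policy: [Pleroma.HTML.Transform.MediaProxy, Pleroma.HTML.Scrubber.Default]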

defmodule Pleroma.HTML.Transform.MediaProxy do
  @moduledoc "Transforms inline image URIs to use MediaProxy."

  alias Pleroma.Web.MediaProxy

  def before_scrub(html), do: html

  # Route absolute http(s) image sources through the media proxy.
  def scrub_attribute("img", {"src", "http" <> target}) do
    media_url =
      ("http" <> target)
      |> MediaProxy.url()

    {"src", media_url}
  end

  def scrub_attribute(_tag, attribute), do: attribute

  # Rewrite the src attribute of every img node; other attributes pass through.
  def scrub({"img", attributes, children}) do
    attributes =
      attributes
      |> Enum.map(fn attr -> scrub_attribute("img", attr) end)
      |> Enum.reject(&is_nil/1)

    {"img", attributes, children}
  end

  # Drop HTML comments, keep everything else untouched.
  def scrub({:comment, _children}), do: ""

  def scrub({tag, attributes, children}), do: {tag, attributes, children}
  def scrub({_tag, children}), do: children
  def scrub(text), do: text
end
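
# A sketch of the transform on its own (the proxied URL shown is illustrative;
# MediaProxy.url/1 output depends on the instance's media proxy settings):
#
#     iex> Pleroma.HTML.Transform.MediaProxy.scrub_attribute("img", {"src", "https://remote.example/a.png"})
#     {"src", "https://myinstance.example/proxy/..."}
#
# Listed ahead of a sanitizing scrubber in :scrub_policy, this module rewrites
# remote <img> sources so clients fetch media through the local proxy rather
# than the origin server.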