# lib/pleroma/html.ex (akkoma)
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.HTML do
  @moduledoc """
  HTML scrubbing helpers.

  Wraps `HtmlSanitizeEx.Scrubber` with the scrub policy configured under
  `[:markup, :scrub_policy]`, and caches scrubbed results per object in the
  `:scrubber_cache` Cachex cache.
  """

  alias HtmlSanitizeEx.Scrubber

  # Normalize the configured scrub policy into a list of scrubber modules.
  # Anything unrecognized falls back to the default scrubber.
  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]

  @doc "Returns the list of scrubber modules configured under `[:markup, :scrub_policy]`."
  def get_scrubbers do
    [:markup, :scrub_policy]
    |> Pleroma.Config.get()
    |> get_scrubbers()
  end

  @doc """
  Scrubs `html` with the given scrubber(s).

  Passing `nil` uses the configured scrubbers; a list is applied in order.
  """
  def filter_tags(html, nil), do: filter_tags(html, get_scrubbers())

  def filter_tags(html, scrubbers) when is_list(scrubbers) do
    # Apply each scrubber in turn over the previous scrubber's output.
    Enum.reduce(scrubbers, html, fn scrubber, acc ->
      filter_tags(acc, scrubber)
    end)
  end

  def filter_tags(html, scrubber), do: Scrubber.scrub(html, scrubber)

  @doc "Scrubs `html` with the configured scrubbers."
  def filter_tags(html), do: filter_tags(html, nil)

  @doc "Strips all HTML tags from `html`."
  def strip_tags(html), do: Scrubber.scrub(html, Scrubber.StripTags)

  @doc """
  Returns `content` scrubbed with `scrubbers`, cached in `:scrubber_cache`.

  The cache key combines the calling `module`, the scrubber signature and
  `object.id`, so different scrubber configurations do not collide.
  """
  def get_cached_scrubbed_html_for_object(content, scrubbers, object, module) do
    key = "#{module}#{generate_scrubber_signature(scrubbers)}|#{object.id}"
    Cachex.fetch!(:scrubber_cache, key, fn _key -> ensure_scrubbed_html(content, scrubbers) end)
  end

  @doc "Like `get_cached_scrubbed_html_for_object/4`, but strips all tags."
  def get_cached_stripped_html_for_object(content, object, module) do
    get_cached_scrubbed_html_for_object(
      content,
      HtmlSanitizeEx.Scrubber.StripTags,
      object,
      module
    )
  end

  @doc "Scrubs `content` and wraps the result in a `{:commit, _}` tuple for Cachex."
  def ensure_scrubbed_html(content, scrubbers) do
    {:commit, filter_tags(content, scrubbers)}
  end

  # Builds a stable cache-key fragment by concatenating the scrubber module names.
  defp generate_scrubber_signature(scrubber) when is_atom(scrubber) do
    generate_scrubber_signature([scrubber])
  end

  defp generate_scrubber_signature(scrubbers) do
    Enum.map_join(scrubbers, &to_string/1)
  end

  @doc """
  Returns `{:ok, url}` with the first non-mention link in `content`, cached
  per `object`, or `{:error, "No content"}` when `content` is `nil`.
  """
  def extract_first_external_url(_, nil), do: {:error, "No content"}

  def extract_first_external_url(object, content) do
    key = "URL|#{object.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      result =
        content
        |> Floki.filter_out("a.mention")
        |> Floki.attribute("a", "href")
        |> Enum.at(0)

      {:commit, {:ok, result}}
    end)
  end
end
defmodule Pleroma.HTML.Scrubber.TwitterText do
  @moduledoc """
  An HTML scrubbing policy which limits to twitter-style text. Only
  paragraphs, breaks and links are allowed through the filter.
  """

  # NOTE(review): both values are read at compile time, so changing the
  # :markup or :uri_schemes config requires recompiling this module.
  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  # The Meta macros below generate the scrubber's scrub/1 clauses; their
  # invocation order defines the policy.
  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  # paragraphs and linebreaks
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("p", [])

  # microformats
  Meta.allow_tag_with_these_attributes("span", ["class"])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  # Everything not explicitly allowed above is removed.
  Meta.strip_everything_not_covered()
end
defmodule Pleroma.HTML.Scrubber.Default do
  @moduledoc """
  The default HTML scrubbing policy: allows basic formatting tags and links,
  with inline images, tables, headings and fonts enabled via the `:markup`
  configuration.
  """

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  # NOTE(review): both values are read at compile time, so changing the
  # :markup or :uri_schemes config requires recompiling this module.
  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  # The Meta macros below generate the scrubber's scrub/1 clauses; their
  # invocation order defines the policy.
  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_these_attributes("abbr", ["title"])

  # basic formatting
  Meta.allow_tag_with_these_attributes("b", [])
  Meta.allow_tag_with_these_attributes("blockquote", [])
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("code", [])
  Meta.allow_tag_with_these_attributes("del", [])
  Meta.allow_tag_with_these_attributes("em", [])
  Meta.allow_tag_with_these_attributes("i", [])
  Meta.allow_tag_with_these_attributes("li", [])
  Meta.allow_tag_with_these_attributes("ol", [])
  Meta.allow_tag_with_these_attributes("p", [])
  Meta.allow_tag_with_these_attributes("pre", [])
  Meta.allow_tag_with_these_attributes("span", ["class"])
  Meta.allow_tag_with_these_attributes("strong", [])
  Meta.allow_tag_with_these_attributes("u", [])
  Meta.allow_tag_with_these_attributes("ul", [])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  @allow_tables Keyword.get(@markup, :allow_tables)

  if @allow_tables do
    Meta.allow_tag_with_these_attributes("table", [])
    Meta.allow_tag_with_these_attributes("tbody", [])
    Meta.allow_tag_with_these_attributes("td", [])
    Meta.allow_tag_with_these_attributes("th", [])
    Meta.allow_tag_with_these_attributes("thead", [])
    Meta.allow_tag_with_these_attributes("tr", [])
  end

  @allow_headings Keyword.get(@markup, :allow_headings)

  if @allow_headings do
    Meta.allow_tag_with_these_attributes("h1", [])
    Meta.allow_tag_with_these_attributes("h2", [])
    Meta.allow_tag_with_these_attributes("h3", [])
    Meta.allow_tag_with_these_attributes("h4", [])
    Meta.allow_tag_with_these_attributes("h5", [])
  end

  @allow_fonts Keyword.get(@markup, :allow_fonts)

  if @allow_fonts do
    Meta.allow_tag_with_these_attributes("font", ["face"])
  end

  # Everything not explicitly allowed above is removed.
  Meta.strip_everything_not_covered()
end
defmodule Pleroma.HTML.Transform.MediaProxy do
  @moduledoc "Transforms inline image URIs to use MediaProxy."

  alias Pleroma.Web.MediaProxy

  # No pre-processing needed; hand the document through untouched.
  def before_scrub(html), do: html

  # Route an <img> "src" beginning with "http" through the media proxy.
  # ("https://…" also matches, since it starts with "http".)
  def scrub_attribute("img", {"src", "http" <> target}) do
    media_url = MediaProxy.url("http" <> target)
    {"src", media_url}
  end

  # Every other attribute is left as-is.
  def scrub_attribute(_tag, attribute), do: attribute

  # For <img> nodes, rewrite each attribute and drop any that came back nil.
  def scrub({"img", attributes, children}) do
    proxied_attributes =
      Enum.flat_map(attributes, fn attribute ->
        case scrub_attribute("img", attribute) do
          nil -> []
          scrubbed -> [scrubbed]
        end
      end)

    {"img", proxied_attributes, children}
  end

  # HTML comments are removed entirely.
  def scrub({:comment, _children}), do: ""

  # All other nodes pass through unchanged.
  def scrub({tag, attributes, children}), do: {tag, attributes, children}
  def scrub({_tag, children}), do: children
  def scrub(text), do: text
end