html: add utility function to extract first URL from an object and cache the result
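The new helper looks at an object's already-rendered HTML, skips mention links, and returns the
href of the first remaining anchor, caching the lookup per object id. A rough usage sketch (the
caller shown here is illustrative, not part of this change; it assumes object is a Pleroma
object whose data map carries the rendered "content" HTML):

    content = object.data["content"]
    Pleroma.HTML.extract_first_external_url(object, content)
    # => "https://example.com/some-article", or nil when the content has no non-mention links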
lib/pleroma/html.ex
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.HTML do
  alias HtmlSanitizeEx.Scrubber

  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]

  def get_scrubbers do
    Pleroma.Config.get([:markup, :scrub_policy])
    |> get_scrubbers()
  end

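  # Passing nil as the scrubber falls through to the scrubbers configured under
  # [:markup, :scrub_policy].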
  def filter_tags(html, nil) do
    filter_tags(html, get_scrubbers())
  end

  def filter_tags(html, scrubbers) when is_list(scrubbers) do
    Enum.reduce(scrubbers, html, fn scrubber, html ->
      filter_tags(html, scrubber)
    end)
  end

  def filter_tags(html, scrubber), do: Scrubber.scrub(html, scrubber)
  def filter_tags(html), do: filter_tags(html, nil)
  def strip_tags(html), do: Scrubber.scrub(html, Scrubber.StripTags)

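  # Scrubs content with the given scrubbers and caches the result in :scrubber_cache,
  # keyed by calling module, scrubber signature and object id, so a different scrubber
  # configuration produces a fresh cache entry.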
  def get_cached_scrubbed_html_for_object(content, scrubbers, object, module) do
    key = "#{module}#{generate_scrubber_signature(scrubbers)}|#{object.id}"
    Cachex.fetch!(:scrubber_cache, key, fn _key -> ensure_scrubbed_html(content, scrubbers) end)
  end

  def get_cached_stripped_html_for_object(content, object, module) do
    get_cached_scrubbed_html_for_object(
      content,
      HtmlSanitizeEx.Scrubber.StripTags,
      object,
      module
    )
  end

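  # Returns a {:commit, html} tuple so that Cachex.fetch!/3 stores the freshly scrubbed value.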
  def ensure_scrubbed_html(content, scrubbers) do
    {:commit, filter_tags(content, scrubbers)}
  end

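  # Builds the cache-key fragment by concatenating the scrubber module names.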
  defp generate_scrubber_signature(scrubber) when is_atom(scrubber) do
    generate_scrubber_signature([scrubber])
  end

  defp generate_scrubber_signature(scrubbers) do
    Enum.reduce(scrubbers, "", fn scrubber, signature ->
      "#{signature}#{to_string(scrubber)}"
    end)
  end

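  # Returns the href of the first non-mention link (<a> elements with the "mention" class are
  # skipped) found in the object's rendered content, cached per object id in :scrubber_cache.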
  def extract_first_external_url(object, content) do
    key = "URL|#{object.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      result =
        content
        |> Floki.filter_out("a.mention")
        |> Floki.attribute("a", "href")
        |> Enum.at(0)

      {:commit, result}
    end)
  end
end

defmodule Pleroma.HTML.Scrubber.TwitterText do
  @moduledoc """
  An HTML scrubbing policy which limits to twitter-style text. Only
  paragraphs, breaks and links are allowed through the filter.
  """

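  # Illustrative example (not from the original source): in
  #   <p>hi <script>alert(1)</script> <a href="https://example.com">link</a></p>
  # the <p> and the <a href=...> pass through this policy, while unlisted tags such as
  # <script> are stripped.
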
  @markup Application.get_env(:pleroma, :markup)
  @uri_schemes Application.get_env(:pleroma, :uri_schemes, [])
  @valid_schemes Keyword.get(@uri_schemes, :valid_schemes, [])

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  # paragraphs and linebreaks
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("p", [])

  # microformats
  Meta.allow_tag_with_these_attributes("span", ["class"])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  Meta.strip_everything_not_covered()
end

defmodule Pleroma.HTML.Scrubber.Default do
  @moduledoc "The default HTML scrubbing policy."

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  @markup Application.get_env(:pleroma, :markup)
  @uri_schemes Application.get_env(:pleroma, :uri_schemes, [])
  @valid_schemes Keyword.get(@uri_schemes, :valid_schemes, [])

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_these_attributes("abbr", ["title"])

  Meta.allow_tag_with_these_attributes("b", [])
  Meta.allow_tag_with_these_attributes("blockquote", [])
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("code", [])
  Meta.allow_tag_with_these_attributes("del", [])
  Meta.allow_tag_with_these_attributes("em", [])
  Meta.allow_tag_with_these_attributes("i", [])
  Meta.allow_tag_with_these_attributes("li", [])
  Meta.allow_tag_with_these_attributes("ol", [])
  Meta.allow_tag_with_these_attributes("p", [])
  Meta.allow_tag_with_these_attributes("pre", [])
  Meta.allow_tag_with_these_attributes("span", ["class"])
  Meta.allow_tag_with_these_attributes("strong", [])
  Meta.allow_tag_with_these_attributes("u", [])
  Meta.allow_tag_with_these_attributes("ul", [])

  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  @allow_tables Keyword.get(@markup, :allow_tables)

  if @allow_tables do
    Meta.allow_tag_with_these_attributes("table", [])
    Meta.allow_tag_with_these_attributes("tbody", [])
    Meta.allow_tag_with_these_attributes("td", [])
    Meta.allow_tag_with_these_attributes("th", [])
    Meta.allow_tag_with_these_attributes("thead", [])
    Meta.allow_tag_with_these_attributes("tr", [])
  end

  @allow_headings Keyword.get(@markup, :allow_headings)

  if @allow_headings do
    Meta.allow_tag_with_these_attributes("h1", [])
    Meta.allow_tag_with_these_attributes("h2", [])
    Meta.allow_tag_with_these_attributes("h3", [])
    Meta.allow_tag_with_these_attributes("h4", [])
    Meta.allow_tag_with_these_attributes("h5", [])
  end

  @allow_fonts Keyword.get(@markup, :allow_fonts)

  if @allow_fonts do
    Meta.allow_tag_with_these_attributes("font", ["face"])
  end

  Meta.strip_everything_not_covered()
end

defmodule Pleroma.HTML.Transform.MediaProxy do
  @moduledoc "Transforms inline image URIs to use MediaProxy."

  alias Pleroma.Web.MediaProxy

  def before_scrub(html), do: html

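  # "http" <> target also matches "https://..." URLs (target then starts with "s://"),
  # so both schemes are rewritten through MediaProxy.url/1.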
  def scrub_attribute("img", {"src", "http" <> target}) do
    media_url =
      ("http" <> target)
      |> MediaProxy.url()

    {"src", media_url}
  end

  def scrub_attribute(_tag, attribute), do: attribute

  def scrub({"img", attributes, children}) do
    attributes =
      attributes
      |> Enum.map(fn attr -> scrub_attribute("img", attr) end)
      |> Enum.reject(&is_nil(&1))

    {"img", attributes, children}
  end

  def scrub({:comment, _children}), do: ""

  def scrub({tag, attributes, children}), do: {tag, attributes, children}
  def scrub({_tag, children}), do: children
  def scrub(text), do: text
end