# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTML do
  @moduledoc """
  HTML scrubbing helpers: filters user-supplied HTML through configurable
  scrubber policies, caches scrubbed output per object, and extracts the
  first external (non-mention) link from a piece of content.
  """

  alias HtmlSanitizeEx.Scrubber

  # Normalize the configured scrub policy into a list of scrubber modules.
  # A bare module atom becomes a one-element list; anything unrecognized
  # falls back to the default policy.
  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]

  @doc "Returns the list of scrubber modules configured under `[:markup, :scrub_policy]`."
  def get_scrubbers() do
    Pleroma.Config.get([:markup, :scrub_policy])
    |> get_scrubbers
  end

  @doc """
  Scrubs `html` with the given scrubber(s).

  With `nil`, the configured scrub policy is used; with a list, every
  scrubber is applied in order; with a single module, it is applied directly.
  """
  def filter_tags(html, nil) do
    filter_tags(html, get_scrubbers())
  end

  def filter_tags(html, scrubbers) when is_list(scrubbers) do
    # Apply each scrubber in sequence, feeding the output of one into the next.
    Enum.reduce(scrubbers, html, fn scrubber, html ->
      filter_tags(html, scrubber)
    end)
  end

  def filter_tags(html, scrubber), do: Scrubber.scrub(html, scrubber)

  @doc "Scrubs `html` with the configured default scrub policy."
  def filter_tags(html), do: filter_tags(html, nil)

  @doc "Strips all HTML tags from `html`, keeping only text."
  def strip_tags(html), do: Scrubber.scrub(html, Scrubber.StripTags)

  @doc """
  Returns scrubbed HTML for `object`, caching the result in `:scrubber_cache`.

  The cache key combines the calling `module`, the scrubber signature and the
  object id, so the same object scrubbed under different policies is cached
  separately.
  """
  def get_cached_scrubbed_html_for_object(content, scrubbers, object, module) do
    key = "#{module}#{generate_scrubber_signature(scrubbers)}|#{object.id}"
    Cachex.fetch!(:scrubber_cache, key, fn _key -> ensure_scrubbed_html(content, scrubbers) end)
  end

  @doc "Like `get_cached_scrubbed_html_for_object/4`, but strips all tags."
  def get_cached_stripped_html_for_object(content, object, module) do
    get_cached_scrubbed_html_for_object(
      content,
      HtmlSanitizeEx.Scrubber.StripTags,
      object,
      module
    )
  end

  # Returns a `{:commit, scrubbed}` tuple so Cachex persists the result.
  def ensure_scrubbed_html(
        content,
        scrubbers
      ) do
    {:commit, filter_tags(content, scrubbers)}
  end

  # Builds a stable string identifying a scrubber configuration, used as part
  # of the cache key.
  defp generate_scrubber_signature(scrubber) when is_atom(scrubber) do
    generate_scrubber_signature([scrubber])
  end

  defp generate_scrubber_signature(scrubbers) do
    Enum.reduce(scrubbers, "", fn scrubber, signature ->
      "#{signature}#{to_string(scrubber)}"
    end)
  end

  @doc """
  Extracts the first non-mention `<a href>` from `content`, cached per object.

  Returns `{:error, "No content"}` when `content` is `nil`; otherwise
  `{:ok, url_or_nil}` (via the cache).
  """
  def extract_first_external_url(_, nil), do: {:error, "No content"}

  def extract_first_external_url(object, content) do
    key = "URL|#{object.id}"

    Cachex.fetch!(:scrubber_cache, key, fn _key ->
      result =
        content
        # Skip links that are user mentions — we want external URLs only.
        |> Floki.filter_out("a.mention")
        |> Floki.attribute("a", "href")
        |> Enum.at(0)

      {:commit, {:ok, result}}
    end)
  end
end
defmodule Pleroma.HTML.Scrubber.TwitterText do
  @moduledoc """
  An HTML scrubbing policy which limits to twitter-style text. Only
  paragraphs, breaks and links are allowed through the filter.
  """

  # NOTE: config is read at compile time because the Meta macros below expand
  # at compile time; changing these settings requires a recompile.
  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links — hrefs restricted to the configured URI schemes
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  # paragraphs and linebreaks
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("p", [])

  # microformats
  Meta.allow_tag_with_these_attributes("span", ["class"])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  Meta.strip_everything_not_covered()
end
defmodule Pleroma.HTML.Scrubber.Default do
  @moduledoc "The default HTML scrubbing policy: allows basic formatting, links, and optionally images, tables, headings and fonts."

  require HtmlSanitizeEx.Scrubber.Meta
  alias HtmlSanitizeEx.Scrubber.Meta
  # credo:disable-for-previous-line
  # No idea how to fix this one…

  # NOTE: config is read at compile time because the Meta macros below expand
  # at compile time; changing these settings requires a recompile.
  @markup Application.get_env(:pleroma, :markup)
  @valid_schemes Pleroma.Config.get([:uri_schemes, :valid_schemes], [])

  Meta.remove_cdata_sections_before_scrub()
  Meta.strip_comments()

  # links — hrefs restricted to the configured URI schemes
  Meta.allow_tag_with_uri_attributes("a", ["href", "data-user", "data-tag"], @valid_schemes)
  Meta.allow_tag_with_these_attributes("a", ["name", "title", "class"])

  Meta.allow_tag_with_these_attributes("abbr", ["title"])

  # basic text formatting
  Meta.allow_tag_with_these_attributes("b", [])
  Meta.allow_tag_with_these_attributes("blockquote", [])
  Meta.allow_tag_with_these_attributes("br", [])
  Meta.allow_tag_with_these_attributes("code", [])
  Meta.allow_tag_with_these_attributes("del", [])
  Meta.allow_tag_with_these_attributes("em", [])
  Meta.allow_tag_with_these_attributes("i", [])
  Meta.allow_tag_with_these_attributes("li", [])
  Meta.allow_tag_with_these_attributes("ol", [])
  Meta.allow_tag_with_these_attributes("p", [])
  Meta.allow_tag_with_these_attributes("pre", [])
  Meta.allow_tag_with_these_attributes("span", ["class"])
  Meta.allow_tag_with_these_attributes("strong", [])
  Meta.allow_tag_with_these_attributes("u", [])
  Meta.allow_tag_with_these_attributes("ul", [])

  # allow inline images for custom emoji
  @allow_inline_images Keyword.get(@markup, :allow_inline_images)

  if @allow_inline_images do
    # restrict img tags to http/https only, because of MediaProxy.
    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])

    Meta.allow_tag_with_these_attributes("img", [
      "width",
      "height",
      "title",
      "alt"
    ])
  end

  @allow_tables Keyword.get(@markup, :allow_tables)

  if @allow_tables do
    Meta.allow_tag_with_these_attributes("table", [])
    Meta.allow_tag_with_these_attributes("tbody", [])
    Meta.allow_tag_with_these_attributes("td", [])
    Meta.allow_tag_with_these_attributes("th", [])
    Meta.allow_tag_with_these_attributes("thead", [])
    Meta.allow_tag_with_these_attributes("tr", [])
  end

  @allow_headings Keyword.get(@markup, :allow_headings)

  if @allow_headings do
    Meta.allow_tag_with_these_attributes("h1", [])
    Meta.allow_tag_with_these_attributes("h2", [])
    Meta.allow_tag_with_these_attributes("h3", [])
    Meta.allow_tag_with_these_attributes("h4", [])
    Meta.allow_tag_with_these_attributes("h5", [])
  end

  @allow_fonts Keyword.get(@markup, :allow_fonts)

  if @allow_fonts do
    Meta.allow_tag_with_these_attributes("font", ["face"])
  end

  Meta.strip_everything_not_covered()
end
defmodule Pleroma.HTML.Transform.MediaProxy do
  @moduledoc "Transforms inline image URIs to use MediaProxy."

  alias Pleroma.Web.MediaProxy

  # No pre-scrub transformation needed; required by the scrubber behaviour.
  def before_scrub(html), do: html

  # Rewrite http(s) image sources through the MediaProxy; the "http" prefix
  # match catches both http:// and https:// URLs.
  def scrub_attribute("img", {"src", "http" <> target}) do
    media_url =
      ("http" <> target)
      |> MediaProxy.url()

    {"src", media_url}
  end

  # Any other attribute passes through unchanged.
  def scrub_attribute(_tag, attribute), do: attribute

  # For img tags, rewrite each attribute and drop any that became nil.
  def scrub({"img", attributes, children}) do
    attributes =
      attributes
      |> Enum.map(fn attr -> scrub_attribute("img", attr) end)
      |> Enum.reject(&is_nil(&1))

    {"img", attributes, children}
  end

  # Drop HTML comments entirely.
  def scrub({:comment, _children}), do: ""

  # All other nodes pass through unchanged.
  def scrub({tag, attributes, children}), do: {tag, attributes, children}
  def scrub({_tag, children}), do: children
  def scrub(text), do: text
end