X-Git-Url: http://git.squeep.com/?a=blobdiff_plain;f=lib%2Fpleroma%2Fhtml.ex;h=a7338eac3681b6bd4804cf32674d57f5865da1cc;hb=945ce9910dc7b29147ec49af0bdb82202008c7c4;hp=107784e70496219156c616e17561e0285a94fd3d;hpb=c2650f0ffb5938005baf437dfa69bbf05da0cc71;p=akkoma

diff --git a/lib/pleroma/html.ex b/lib/pleroma/html.ex
index 107784e70..a7338eac3 100644
--- a/lib/pleroma/html.ex
+++ b/lib/pleroma/html.ex
@@ -3,11 +3,28 @@ defmodule Pleroma.HTML do
 
   @markup Application.get_env(:pleroma, :markup)
 
-  def filter_tags(html) do
-    scrubber = Keyword.get(@markup, :scrub_policy)
+  defp get_scrubbers(scrubber) when is_atom(scrubber), do: [scrubber]
+  defp get_scrubbers(scrubbers) when is_list(scrubbers), do: scrubbers
+  defp get_scrubbers(_), do: [Pleroma.HTML.Scrubber.Default]
+
+  def get_scrubbers() do
+    Keyword.get(@markup, :scrub_policy)
+    |> get_scrubbers
+  end
+
+  def filter_tags(html, nil) do
+    get_scrubbers()
+    |> Enum.reduce(html, fn scrubber, html ->
+      filter_tags(html, scrubber)
+    end)
+  end
+
+  def filter_tags(html, scrubber) do
     html |> Scrubber.scrub(scrubber)
   end
 
+  def filter_tags(html), do: filter_tags(html, nil)
+
   def strip_tags(html) do
     html |> Scrubber.scrub(Scrubber.StripTags)
   end
@@ -19,11 +36,13 @@ defmodule Pleroma.HTML.Scrubber.TwitterText do
   paragraphs, breaks and links are allowed through the filter.
   """
 
+  @markup Application.get_env(:pleroma, :markup)
+  @uri_schemes Application.get_env(:pleroma, :uri_schemes, [])
+  @valid_schemes Keyword.get(@uri_schemes, :valid_schemes, [])
+
   require HtmlSanitizeEx.Scrubber.Meta
   alias HtmlSanitizeEx.Scrubber.Meta
 
-  @valid_schemes ["http", "https"]
-
   Meta.remove_cdata_sections_before_scrub()
   Meta.strip_comments()
 
@@ -39,11 +58,11 @@ defmodule Pleroma.HTML.Scrubber.TwitterText do
   Meta.allow_tag_with_these_attributes("span", [])
 
   # allow inline images for custom emoji
-  @markup Application.get_env(:pleroma, :markup)
   @allow_inline_images Keyword.get(@markup, :allow_inline_images)
 
   if @allow_inline_images do
-    Meta.allow_tag_with_uri_attributes("img", ["src"], @valid_schemes)
+    # restrict img tags to http/https only, because of MediaProxy.
+    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])
 
     Meta.allow_tag_with_these_attributes("img", [
       "width",
@@ -52,6 +71,8 @@ defmodule Pleroma.HTML.Scrubber.TwitterText do
       "alt"
     ])
   end
+
+  Meta.strip_everything_not_covered()
 end
 
 defmodule Pleroma.HTML.Scrubber.Default do
@@ -60,7 +81,9 @@ defmodule Pleroma.HTML.Scrubber.Default do
   require HtmlSanitizeEx.Scrubber.Meta
   alias HtmlSanitizeEx.Scrubber.Meta
 
-  @valid_schemes ["http", "https"]
+  @markup Application.get_env(:pleroma, :markup)
+  @uri_schemes Application.get_env(:pleroma, :uri_schemes, [])
+  @valid_schemes Keyword.get(@uri_schemes, :valid_schemes, [])
 
   Meta.remove_cdata_sections_before_scrub()
   Meta.strip_comments()
@@ -84,11 +107,11 @@ defmodule Pleroma.HTML.Scrubber.Default do
   Meta.allow_tag_with_these_attributes("u", [])
   Meta.allow_tag_with_these_attributes("ul", [])
 
-  @markup Application.get_env(:pleroma, :markup)
   @allow_inline_images Keyword.get(@markup, :allow_inline_images)
 
   if @allow_inline_images do
-    Meta.allow_tag_with_uri_attributes("img", ["src"], @valid_schemes)
+    # restrict img tags to http/https only, because of MediaProxy.
+    Meta.allow_tag_with_uri_attributes("img", ["src"], ["http", "https"])
 
     Meta.allow_tag_with_these_attributes("img", [
       "width",
@@ -127,3 +150,36 @@ defmodule Pleroma.HTML.Scrubber.Default do
 
   Meta.strip_everything_not_covered()
 end
+
+defmodule Pleroma.HTML.Transform.MediaProxy do
+  @moduledoc "Transforms inline image URIs to use MediaProxy."
+
+  alias Pleroma.Web.MediaProxy
+
+  def before_scrub(html), do: html
+
+  def scrub_attribute("img", {"src", "http" <> target}) do
+    media_url =
+      ("http" <> target)
+      |> MediaProxy.url()
+
+    {"src", media_url}
+  end
+
+  def scrub_attribute(tag, attribute), do: attribute
+
+  def scrub({"img", attributes, children}) do
+    attributes =
+      attributes
+      |> Enum.map(fn attr -> scrub_attribute("img", attr) end)
+      |> Enum.reject(&is_nil(&1))
+
+    {"img", attributes, children}
+  end
+
+  def scrub({:comment, children}), do: ""
+
+  def scrub({tag, attributes, children}), do: {tag, attributes, children}
+  def scrub({tag, children}), do: children
+  def scrub(text), do: text
+end
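Usage note (not part of the commit): a minimal configuration sketch for the chained-scrubber behaviour introduced above. The config keys (:markup with scrub_policy, :uri_schemes with valid_schemes) are the ones the diff reads; the concrete values below are illustrative assumptions, not necessarily the project defaults.

    # config/config.exs (illustrative values; read at compile time by the modules above)
    config :pleroma, :markup,
      scrub_policy: [
        Pleroma.HTML.Transform.MediaProxy,
        Pleroma.HTML.Scrubber.Default
      ],
      allow_inline_images: true

    config :pleroma, :uri_schemes,
      valid_schemes: ["https", "http"]

    # With scrub_policy set to a list, Pleroma.HTML.filter_tags/1 reduces the
    # HTML over each scrubber in order, so the call
    Pleroma.HTML.filter_tags(~s(<p>hi <img src="http://example.com/emoji.png"></p>))
    # behaves like piping through filter_tags/2 once per configured scrubber:
    ~s(<p>hi <img src="http://example.com/emoji.png"></p>)
    |> Pleroma.HTML.filter_tags(Pleroma.HTML.Transform.MediaProxy)
    |> Pleroma.HTML.filter_tags(Pleroma.HTML.Scrubber.Default)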