1 # Pleroma: A lightweight social networking server
2 # Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
3 # SPDX-License-Identifier: AGPL-3.0-only
5 defmodule Pleroma.Workers.AttachmentsCleanupWorker do
  # NOTE(review): this file is a fragmentary paste — the embedded line numbers
  # (5, 11, 16, …) show gaps, so comments throughout describe only what the
  # visible lines establish.
  @moduledoc """
  Background worker (queue: "attachments_cleanup") that deletes uploaded
  attachment files and their `Object` rows once a post is removed, provided
  the file is not still referenced by another object.
  """
11 use Pleroma.Workers.WorkerHelper, queue: "attachments_cleanup"
# Fragment of the main perform/1 clause (the `def perform(` line itself is
# missing from this paste): matches jobs whose object data carries a
# NON-empty attachment list plus its actor.
16 "op" => "cleanup_attachments",
17 "object" => %{"data" => %{"attachment" => [_ | _] = attachments, "actor" => actor}}
# Cleanup is opt-in: nothing is deleted unless the instance enables
# [:instance, :cleanup_attachments] (defaults to false).
20 if Pleroma.Config.get([:instance, :cleanup_attachments], false) do
# Collect every "href" from each attachment's "url" entries into one flat list.
22 |> Enum.flat_map(fn item -> Enum.map(item["url"], & &1["href"]) end)
# Pass along the attachment names so prepare_objects/3 can tell which
# objects belong to this actor — TODO confirm against the missing lines.
24 |> prepare_objects(actor, Enum.map(attachments, & &1["name"]))
# Fallback clause: any other "cleanup_attachments" payload (e.g. an object
# with no attachments) is acknowledged and skipped without doing work.
32 def perform(%Job{args: %{"op" => "cleanup_attachments", "object" => _object}}), do: {:ok, :skip}
# Deletes the attachment files through the configured uploader, then removes
# the corresponding Object rows. The {object_ids, attachment_urls} tuple
# presumably comes from filter_objects/1 — TODO confirm; several interior
# lines are missing from this fragment.
34 defp do_clean({object_ids, attachment_urls}) do
35 uploader = Pleroma.Config.get([Pleroma.Upload, :uploader])
# Part of the base-URL computation (surrounding lines missing).
39 Pleroma.Upload.base_url(),
43 Enum.each(attachment_urls, fn href ->
# Strip the instance's base URL so the uploader receives the path relative
# to its storage root.
45 |> String.trim_leading("#{base_url}")
# NOTE(review): delete_file/1 return value is ignored — a failed file delete
# does not stop the object rows below from being removed.
46 |> uploader.delete_file()
49 delete_objects(object_ids)
# Bulk-deletes the given Object rows in a single query.
52 defp delete_objects([_ | _] = object_ids) do
53 Repo.delete_all(from(o in Object, where: o.id in ^object_ids))
# No-op for an empty id list (avoids issuing a pointless DELETE).
56 defp delete_objects(_), do: :ok
58 # We should delete one object row per attachment, but must not delete the
59 # file itself while more than one object still references it.
# Splits the prepared href => %{id, count} map into {object_ids, hrefs}:
# ids of objects to delete, plus hrefs whose files are safe to remove.
60 defp filter_objects(objects) do
# NOTE(review): `ids ++ [id]` / `hrefs ++ [href]` inside reduce is O(n²);
# prepending and a final Enum.reverse/1 would be linear.
61 Enum.reduce(objects, {[], []}, fn {href, %{id: id, count: count}}, {ids, hrefs} ->
# count == 1 branch (the `case count do` line is missing from this paste):
# sole reference, so both the object row and the file go.
63 {ids ++ [id], hrefs ++ [href]}
# Shared by other objects: delete only the object row, keep the file.
65 _ -> {ids ++ [id], hrefs}
# Builds a map of href => %{id: object_id, count: n} tallying how many
# objects reference each attachment file; filter_objects/1 later keeps files
# with count > 1 — TODO confirm, the call site is outside this fragment.
70 defp prepare_objects(objects, actor, names) do
72 |> Enum.reduce(%{}, fn %{
# Part of the destructuring head; the bindings for `id`, `obj_actor` and
# `name` come from lines missing in this paste.
75 "url" => [%{"href" => href}],
# First sighting of an href seeds count at 1; later sightings bump it.
81 Map.update(acc, href, %{id: id, count: 1}, fn val ->
82 case obj_actor == actor and name in names do
84 # set id of the actor's object that will be deleted
85 %{val | id: id, count: val.count + 1}
88 # another actor's object, just increase count to not delete file
89 %{val | count: val.count + 1}
# Fetches every Object whose data->'url' array contains any of the given
# hrefs, via a raw Postgres fragment using the jsonb `?|` (exists-any)
# operator (escaped as \\?| for Ecto).
95 defp fetch_objects(hrefs) do
99 "to_jsonb(array(select jsonb_array_elements((?)#>'{url}') ->> 'href' where jsonb_typeof((?)#>'{url}') = 'array'))::jsonb \\?| (?)",
105 # The query above can be time-consuming on large instances until we
106 # refactor how uploads are stored
# No timeout: the jsonb scan above cannot use a regular index, so it may
# legitimately run long.
107 |> Repo.all(timeout: :infinity)