# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Migrators.HashtagsTableMigrator do
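  @moduledoc """
  Background data migration that transfers hashtags embedded in `objects.data`
  into the dedicated `hashtags` and `hashtags_objects` tables.

  Runs as a single globally registered GenServer. Progress is tracked via
  `Pleroma.Migrators.HashtagsTableMigrator.State`; failed records are stored
  in the `data_migration_failed_ids` table and can be re-attempted with
  `retry_failed/0` (see also `force_continue/0` and `force_restart/0`).
  """
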
  use GenServer

  require Logger

  import Ecto.Query

  alias __MODULE__.State
  alias Pleroma.Config
  alias Pleroma.Hashtag
  alias Pleroma.Object
  alias Pleroma.Repo

  defdelegate data_migration(), to: State

  defdelegate state(), to: State
  defdelegate persist_state(), to: State, as: :persist_to_db
  defdelegate get_stat(key, value \\ nil), to: State, as: :get_data_key
  defdelegate put_stat(key, value), to: State, as: :put_data_key
  defdelegate increment_stat(key, increment), to: State, as: :increment_data_key

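  # Registered globally so that at most one migrator instance runs across the cluster.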
  @reg_name {:global, __MODULE__}

  def whereis, do: GenServer.whereis(@reg_name)

  def start_link(_) do
    case whereis() do
      nil ->
        GenServer.start_link(__MODULE__, nil, name: @reg_name)

      pid ->
        {:ok, pid}
    end
  end

  @impl true
  def init(_) do
    {:ok, nil, {:continue, :init_state}}
  end

  @impl true
  def handle_continue(:init_state, _state) do
    {:ok, _} = State.start_link(nil)

    update_status(:pending)

    data_migration = data_migration()
    manual_migrations = Config.get([:instance, :manual_data_migrations], [])

    cond do
      Config.get(:env) == :test ->
        update_status(:noop)

      is_nil(data_migration) ->
        update_status(:failed, "Data migration does not exist.")

      data_migration.state == :manual or data_migration.name in manual_migrations ->
        update_status(:manual, "Data migration is in manual execution state.")

      data_migration.state == :complete ->
        on_complete(data_migration)

      true ->
        send(self(), :migrate_hashtags)
    end

    {:noreply, nil}
  end

  @impl true
  def handle_info(:migrate_hashtags, state) do
    State.reinit()

    update_status(:running)
    put_stat(:started_at, NaiveDateTime.utc_now())

    %{id: data_migration_id} = data_migration()
    max_processed_id = get_stat(:max_processed_id, 0)

    Logger.info("Transferring embedded hashtags to `hashtags` (from oid: #{max_processed_id})...")

    query()
    |> where([object], object.id > ^max_processed_id)
    |> Repo.chunk_stream(100, :batches, timeout: :infinity)
    |> Stream.each(fn objects ->
      object_ids = Enum.map(objects, & &1.id)

      failed_ids =
        objects
        |> Enum.map(&transfer_object_hashtags(&1))
        |> Enum.filter(&(elem(&1, 0) == :error))
        |> Enum.map(&elem(&1, 1))

      for failed_id <- failed_ids do
        _ =
          Repo.query(
            "INSERT INTO data_migration_failed_ids(data_migration_id, record_id) " <>
              "VALUES ($1, $2) ON CONFLICT DO NOTHING;",
            [data_migration_id, failed_id]
          )
      end

      _ =
        Repo.query(
          "DELETE FROM data_migration_failed_ids " <>
            "WHERE data_migration_id = $1 AND record_id = ANY($2)",
          [data_migration_id, object_ids -- failed_ids]
        )

      max_object_id = Enum.at(object_ids, -1)

      put_stat(:max_processed_id, max_object_id)
      increment_stat(:processed_count, length(object_ids))
      increment_stat(:failed_count, length(failed_ids))
      put_stat(:records_per_second, records_per_second())
      persist_state()

      # A quick and dirty approach to controlling the load this background migration imposes
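      # (e.g., presumably set via `config :pleroma, :populate_hashtags_table, sleep_interval_ms: 100`)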
      sleep_interval = Config.get([:populate_hashtags_table, :sleep_interval_ms], 0)
      Process.sleep(sleep_interval)
    end)
    |> Stream.run()

    with 0 <- failures_count(data_migration_id) do
      _ = delete_non_create_activities_hashtags()
      set_complete()
    else
      _ ->
        update_status(:failed, "Please check data_migration_failed_ids records.")
    end

    {:noreply, state}
  end

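  # Average throughput since the run started; elapsed time is clamped to at
  # least one second to avoid division by zero.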
  defp records_per_second do
    get_stat(:processed_count, 0) / Enum.max([running_time(), 1])
  end

  defp running_time do
    NaiveDateTime.diff(NaiveDateTime.utc_now(), get_stat(:started_at, NaiveDateTime.utc_now()))
  end

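  # Removes `hashtags_objects` rows whose object has no corresponding Create activity.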
  @hashtags_objects_cleanup_query """
  DELETE FROM hashtags_objects WHERE object_id IN
    (SELECT DISTINCT objects.id FROM objects
      JOIN hashtags_objects ON hashtags_objects.object_id = objects.id LEFT JOIN activities
        ON COALESCE(activities.data->'object'->>'id', activities.data->>'object') =
          (objects.data->>'id')
          AND activities.data->>'type' = 'Create'
      WHERE activities.id IS NULL);
  """

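  # Removes `hashtags` records that are no longer referenced by any object.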
  @hashtags_cleanup_query """
  DELETE FROM hashtags WHERE id IN
    (SELECT hashtags.id FROM hashtags
      LEFT OUTER JOIN hashtags_objects
        ON hashtags_objects.hashtag_id = hashtags.id
      WHERE hashtags_objects.hashtag_id IS NULL);
  """

  @doc """
  Deletes `hashtags_objects` for legacy objects not associated with a Create activity.
  Also deletes unreferenced `hashtags` records (might occur after deletion of `hashtags_objects`).
  """
  def delete_non_create_activities_hashtags do
    {:ok, %{num_rows: hashtags_objects_count}} =
      Repo.query(@hashtags_objects_cleanup_query, [], timeout: :infinity)

    {:ok, %{num_rows: hashtags_count}} =
      Repo.query(@hashtags_cleanup_query, [], timeout: :infinity)

    {:ok, hashtags_objects_count, hashtags_count}
  end

  defp query do
    # Note: most objects have Mention-type AS2 tags and no hashtags (but we can't filter them out)
    # Note: not checking activity type; expecting delete_non_create_activities_hashtags/0 to clean up
    from(
      object in Object,
      where:
        fragment("(?)->'tag' IS NOT NULL AND (?)->'tag' != '[]'::jsonb", object.data, object.data),
      select: %{
        id: object.id,
        tag: fragment("(?)->'tag'", object.data)
      }
    )
    |> join(:left, [o], hashtags_objects in fragment("SELECT object_id FROM hashtags_objects"),
      on: hashtags_objects.object_id == o.id
    )
    |> where([_o, hashtags_objects], is_nil(hashtags_objects.object_id))
  end

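  # Accepts either a lightweight map from query/0 (with a :tag key) or a full
  # %Object{} (as used by retry_failed/0); returns {:ok, id} or {:error, id}.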
  defp transfer_object_hashtags(object) do
    embedded_tags = if Map.has_key?(object, :tag), do: object.tag, else: object.data["tag"]
    hashtags = Object.object_data_hashtags(%{"tag" => embedded_tags})

    if Enum.any?(hashtags) do
      transfer_object_hashtags(object, hashtags)
    else
      {:ok, object.id}
    end
  end

  defp transfer_object_hashtags(object, hashtags) do
    Repo.transaction(fn ->
      with {:ok, hashtag_records} <- Hashtag.get_or_create_by_names(hashtags) do
        maps = Enum.map(hashtag_records, &%{hashtag_id: &1.id, object_id: object.id})
        expected_rows = length(hashtag_records)

        base_error =
          "ERROR when inserting #{expected_rows} hashtags_objects for obj. #{object.id}"

        try do
          with {^expected_rows, _} <- Repo.insert_all("hashtags_objects", maps) do
            object.id
          else
            e ->
              Logger.error("#{base_error}: #{inspect(e)}")
              Repo.rollback(object.id)
          end
        rescue
          e ->
            Logger.error("#{base_error}: #{inspect(e)}")
            Repo.rollback(object.id)
        end
      else
        e ->
          error = "ERROR: could not create hashtags for object #{object.id}: #{inspect(e)}"
          Logger.error(error)
          Repo.rollback(object.id)
      end
    end)
  end

  @doc "Approximate count for current iteration (including processed records count)"
  def count(force \\ false, timeout \\ :infinity) do
    stored_count = get_stat(:count)

    if stored_count && !force do
      stored_count
    else
      processed_count = get_stat(:processed_count, 0)
      max_processed_id = get_stat(:max_processed_id, 0)
      query = where(query(), [object], object.id > ^max_processed_id)

      count = Repo.aggregate(query, :count, :id, timeout: timeout) + processed_count
      put_stat(:count, count)
      persist_state()

      count
    end
  end

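  # Enables the improved hashtag timeline once the migration completes, unless
  # the feature is locked or has already been explicitly configured.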
  defp on_complete(data_migration) do
    cond do
      data_migration.feature_lock ->
        :noop

      not is_nil(Config.get([:database, :improved_hashtag_timeline])) ->
        :noop

      true ->
        Config.put([:database, :improved_hashtag_timeline], true)
        :ok
    end
  end

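  # Objects recorded in data_migration_failed_ids for this migration, in id order.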
  def failed_objects_query do
    from(o in Object)
    |> join(:inner, [o], dmf in fragment("SELECT * FROM data_migration_failed_ids"),
      on: dmf.record_id == o.id
    )
    |> where([_o, dmf], dmf.data_migration_id == ^data_migration().id)
    |> order_by([o], asc: o.id)
  end

  def failures_count(data_migration_id \\ nil) do
    data_migration_id = data_migration_id || data_migration().id

    with {:ok, %{rows: [[count]]}} <-
           Repo.query(
             "SELECT COUNT(record_id) FROM data_migration_failed_ids WHERE data_migration_id = $1;",
             [data_migration_id]
           ) do
      count
    end
  end

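  # Re-attempts the transfer for previously failed objects, clearing their
  # failure records on success.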
  def retry_failed do
    data_migration = data_migration()

    failed_objects_query()
    |> Repo.chunk_stream(100, :one)
    |> Stream.each(fn object ->
      with {:ok, _} <- transfer_object_hashtags(object) do
        _ =
          Repo.query(
            "DELETE FROM data_migration_failed_ids " <>
              "WHERE data_migration_id = $1 AND record_id = $2",
            [data_migration.id, object.id]
          )
      end
    end)
    |> Stream.run()
  end

  def force_continue do
    send(whereis(), :migrate_hashtags)
  end

  def force_restart do
    :ok = State.reset()
    force_continue()
  end

  def set_complete do
    update_status(:complete)
    persist_state()
    on_complete(data_migration())
  end

  defp update_status(status, message \\ nil) do
    put_stat(:state, status)
    put_stat(:message, message)
  end
end