# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Migrators.HashtagsTableMigrator do
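  @moduledoc """
  Background migrator that moves hashtags embedded in `objects.data->'tag'`
  into the normalized `hashtags` / `hashtags_objects` tables, persisting its
  progress so it can resume after restarts.

  The migration can be inspected and steered from the console, e.g.:

      Pleroma.Migrators.HashtagsTableMigrator.state()
      Pleroma.Migrators.HashtagsTableMigrator.force_restart()
  """
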
  use GenServer

  require Logger

  import Ecto.Query

  alias __MODULE__.State
  alias Pleroma.Config
  alias Pleroma.Hashtag
  alias Pleroma.Object
  alias Pleroma.Repo

  defdelegate data_migration(), to: State

  defdelegate state(), to: State
  defdelegate get_stat(key, default), to: State, as: :get_data_key
  defdelegate put_stat(key, value), to: State, as: :put_data_key
  defdelegate increment_stat(key, increment), to: State, as: :increment_data_key

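  # Registered globally so that at most one migrator instance runs across the cluster.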
  @reg_name {:global, __MODULE__}

  def whereis, do: GenServer.whereis(@reg_name)

  def start_link(_) do
    case whereis() do
      nil ->
        GenServer.start_link(__MODULE__, nil, name: @reg_name)

      pid ->
        {:ok, pid}
    end
  end

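  # init/1 defers the heavy lifting to handle_continue/2 so that starting the
  # process doesn't block the supervisor.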
  @impl true
  def init(_) do
    {:ok, nil, {:continue, :init_state}}
  end

  @impl true
  def handle_continue(:init_state, _state) do
    {:ok, _} = State.start_link(nil)

    update_status(:pending)

    data_migration = data_migration()
    manual_migrations = Config.get([:instance, :manual_data_migrations], [])

    cond do
      Config.get(:env) == :test ->
        update_status(:noop)

      is_nil(data_migration) ->
        update_status(:failed, "Data migration does not exist.")

      data_migration.state == :manual or data_migration.name in manual_migrations ->
        update_status(:manual, "Data migration is in manual execution state.")

      data_migration.state == :complete ->
        on_complete(data_migration)

      true ->
        send(self(), :migrate_hashtags)
    end

    {:noreply, nil}
  end

  @impl true
  def handle_info(:migrate_hashtags, state) do
    State.reinit()

    update_status(:running)
    put_stat(:started_at, NaiveDateTime.utc_now())

    %{id: data_migration_id} = data_migration()
    max_processed_id = get_stat(:max_processed_id, 0)

    Logger.info("Transferring embedded hashtags to `hashtags` (from oid: #{max_processed_id})...")

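    # Stream not-yet-migrated objects in batches of 100, recording progress
    # (max_processed_id) after each batch so an interrupted run can resume.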
    query()
    |> where([object], object.id > ^max_processed_id)
    |> Repo.chunk_stream(100, :batches, timeout: :infinity)
    |> Stream.each(fn objects ->
      object_ids = Enum.map(objects, & &1.id)

      failed_ids =
        objects
        |> Enum.map(&transfer_object_hashtags(&1))
        |> Enum.filter(&(elem(&1, 0) == :error))
        |> Enum.map(&elem(&1, 1))

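      # Record ids that failed to transfer so retry_failed/0 can pick them up,
      # and clear any previously recorded failures that succeeded on this pass.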
      for failed_id <- failed_ids do
        _ =
          Repo.query(
            "INSERT INTO data_migration_failed_ids(data_migration_id, record_id) " <>
              "VALUES ($1, $2) ON CONFLICT DO NOTHING;",
            [data_migration_id, failed_id]
          )
      end

      _ =
        Repo.query(
          "DELETE FROM data_migration_failed_ids " <>
            "WHERE data_migration_id = $1 AND record_id = ANY($2)",
          [data_migration_id, object_ids -- failed_ids]
        )

      max_object_id = Enum.at(object_ids, -1)

      put_stat(:max_processed_id, max_object_id)
      increment_stat(:processed_count, length(object_ids))
      increment_stat(:failed_count, length(failed_ids))
      put_stat(:records_per_second, records_per_second())
      _ = State.persist_to_db()

      # A quick and dirty approach to controlling the load this background migration imposes
      sleep_interval = Config.get([:populate_hashtags_table, :sleep_interval_ms], 0)
      Process.sleep(sleep_interval)
    end)
    |> Stream.run()

    with 0 <- failures_count(data_migration_id) do
      _ = delete_non_create_activities_hashtags()
      set_complete()
    else
      _ ->
        update_status(:failed, "Please check data_migration_failed_ids records.")
    end

    {:noreply, state}
  end

  defp records_per_second do
    get_stat(:processed_count, 0) / Enum.max([running_time(), 1])
  end

  defp running_time do
    NaiveDateTime.diff(NaiveDateTime.utc_now(), get_stat(:started_at, NaiveDateTime.utc_now()))
  end

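  # The COALESCE covers both ways an activity may reference its object:
  # embedded as a map (data->'object'->>'id') or as a bare id string
  # (data->>'object').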
  @hashtags_objects_cleanup_query """
  DELETE FROM hashtags_objects WHERE object_id IN
    (SELECT DISTINCT objects.id FROM objects
      JOIN hashtags_objects ON hashtags_objects.object_id = objects.id LEFT JOIN activities
        ON COALESCE(activities.data->'object'->>'id', activities.data->>'object') =
          (objects.data->>'id')
          AND activities.data->>'type' = 'Create'
      WHERE activities.id IS NULL);
  """

  @hashtags_cleanup_query """
  DELETE FROM hashtags WHERE id IN
    (SELECT hashtags.id FROM hashtags
      LEFT OUTER JOIN hashtags_objects
        ON hashtags_objects.hashtag_id = hashtags.id
      WHERE hashtags_objects.hashtag_id IS NULL);
  """

  @doc """
  Deletes `hashtags_objects` for legacy objects not associated with a Create activity.
  Also deletes unreferenced `hashtags` records (might occur after deletion of `hashtags_objects`).
  """
  def delete_non_create_activities_hashtags do
    {:ok, %{num_rows: hashtags_objects_count}} =
      Repo.query(@hashtags_objects_cleanup_query, [], timeout: :infinity)

    {:ok, %{num_rows: hashtags_count}} =
      Repo.query(@hashtags_cleanup_query, [], timeout: :infinity)

    {:ok, hashtags_objects_count, hashtags_count}
  end

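  # Selects objects that have embedded tags but no hashtags_objects rows yet;
  # the left join against the raw table is filtered down to unmatched rows below.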
  defp query do
    # Note: most objects have Mention-type AS2 tags and no hashtags (but we can't filter them out)
    # Note: not checking activity type; delete_non_create_activities_hashtags/0 cleans up afterwards
    from(
      object in Object,
      where:
        fragment("(?)->'tag' IS NOT NULL AND (?)->'tag' != '[]'::jsonb", object.data, object.data),
      select: %{
        id: object.id,
        tag: fragment("(?)->'tag'", object.data)
      }
    )
    |> join(:left, [o], hashtags_objects in fragment("SELECT object_id FROM hashtags_objects"),
      on: hashtags_objects.object_id == o.id
    )
    |> where([_o, hashtags_objects], is_nil(hashtags_objects.object_id))
  end

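  # Accepts both the trimmed maps produced by query/0 (tags under :tag) and
  # full Object structs from retry_failed/0 (tags under object.data["tag"]).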
  defp transfer_object_hashtags(object) do
    embedded_tags = if Map.has_key?(object, :tag), do: object.tag, else: object.data["tag"]
    hashtags = Object.object_data_hashtags(%{"tag" => embedded_tags})

    if Enum.any?(hashtags) do
      transfer_object_hashtags(object, hashtags)
    else
      {:ok, object.id}
    end
  end

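  # Runs in a transaction so a partial insert is rolled back; returns
  # {:ok, object_id} on success or {:error, object_id} via rollback.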
  defp transfer_object_hashtags(object, hashtags) do
    Repo.transaction(fn ->
      with {:ok, hashtag_records} <- Hashtag.get_or_create_by_names(hashtags) do
        maps = Enum.map(hashtag_records, &%{hashtag_id: &1.id, object_id: object.id})
        expected_rows = length(hashtag_records)

        base_error =
          "ERROR when inserting #{expected_rows} hashtags_objects for obj. #{object.id}"

        try do
          with {^expected_rows, _} <- Repo.insert_all("hashtags_objects", maps) do
            object.id
          else
            e ->
              Logger.error("#{base_error}: #{inspect(e)}")
              Repo.rollback(object.id)
          end
        rescue
          e ->
            Logger.error("#{base_error}: #{inspect(e)}")
            Repo.rollback(object.id)
        end
      else
        e ->
          error = "ERROR: could not create hashtags for object #{object.id}: #{inspect(e)}"
          Logger.error(error)
          Repo.rollback(object.id)
      end
    end)
  end

  @doc "Approximate count for current iteration (including processed records count)"
  def count(force \\ false, timeout \\ :infinity) do
    stored_count = state()[:count]

    if stored_count && !force do
      stored_count
    else
      processed_count = state()[:processed_count] || 0
      max_processed_id = data_migration().data["max_processed_id"] || 0
      query = where(query(), [object], object.id > ^max_processed_id)

      count = Repo.aggregate(query, :count, :id, timeout: timeout) + processed_count
      put_stat(:count, count)
      count
    end
  end

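  # After a successful run, enables the improved hashtag timeline unless the
  # feature is locked or an operator has already configured it explicitly.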
  defp on_complete(data_migration) do
    cond do
      data_migration.feature_lock ->
        :noop

      not is_nil(Config.get([:database, :improved_hashtag_timeline])) ->
        :noop

      true ->
        Config.put([:database, :improved_hashtag_timeline], true)
        :ok
    end
  end

  def failed_objects_query do
    from(o in Object)
    |> join(:inner, [o], dmf in fragment("SELECT * FROM data_migration_failed_ids"),
      on: dmf.record_id == o.id
    )
    |> where([_o, dmf], dmf.data_migration_id == ^data_migration().id)
    |> order_by([o], asc: o.id)
  end

  def failures_count(data_migration_id \\ nil) do
    data_migration_id = data_migration_id || data_migration().id

    with {:ok, %{rows: [[count]]}} <-
           Repo.query(
             "SELECT COUNT(record_id) FROM data_migration_failed_ids WHERE data_migration_id = $1;",
             [data_migration_id]
           ) do
      count
    end
  end

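  # Re-attempts the transfer for previously failed objects, dropping each one
  # from data_migration_failed_ids once it succeeds.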
  def retry_failed do
    data_migration = data_migration()

    failed_objects_query()
    |> Repo.chunk_stream(100, :one)
    |> Stream.each(fn object ->
      with {:ok, _} <- transfer_object_hashtags(object) do
        _ =
          Repo.query(
            "DELETE FROM data_migration_failed_ids " <>
              "WHERE data_migration_id = $1 AND record_id = $2",
            [data_migration.id, object.id]
          )
      end
    end)
    |> Stream.run()
  end

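  # Manual escape hatches, intended to be called from an IEx console.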
  def force_continue do
    send(whereis(), :migrate_hashtags)
  end

  def force_restart do
    :ok = State.reset()
    force_continue()
  end

  def set_complete do
    update_status(:complete)
    _ = State.persist_to_db()
    on_complete(data_migration())
  end

  defp update_status(status, message \\ nil) do
    put_stat(:state, status)
    put_stat(:message, message)
  end
end