[#3213] Partially addressed code review points.
[akkoma] lib/pleroma/migrators/hashtags_table_migrator.ex
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Migrators.HashtagsTableMigrator do
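  @moduledoc """
  Background migrator that transfers hashtags embedded in `object.data->'tag'`
  into the normalized `hashtags` / `hashtags_objects` tables, tracking progress
  and failed record ids so the run can be resumed or retried.
  """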
  use GenServer

  require Logger

  import Ecto.Query

  alias __MODULE__.State
  alias Pleroma.Config
  alias Pleroma.DataMigration
  alias Pleroma.Hashtag
  alias Pleroma.Object
  alias Pleroma.Repo

  defdelegate state(), to: State, as: :get
  defdelegate put_stat(key, value), to: State, as: :put
  defdelegate increment_stat(key, increment), to: State, as: :increment

  defdelegate data_migration(), to: DataMigration, as: :populate_hashtags_table

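  # Registering under a global name ensures at most one migrator instance runs
  # across connected nodes.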
  @reg_name {:global, __MODULE__}

  def whereis, do: GenServer.whereis(@reg_name)

  def start_link(_) do
    case whereis() do
      nil ->
        GenServer.start_link(__MODULE__, nil, name: @reg_name)

      pid ->
        {:ok, pid}
    end
  end

  @impl true
  def init(_) do
    {:ok, nil, {:continue, :init_state}}
  end

  @impl true
  def handle_continue(:init_state, _state) do
    {:ok, _} = State.start_link(nil)

    update_status(:init)

    data_migration = data_migration()
    manual_migrations = Config.get([:instance, :manual_data_migrations], [])

    cond do
      Config.get(:env) == :test ->
        update_status(:noop)

      is_nil(data_migration) ->
        update_status(:halt, "Data migration does not exist.")

      data_migration.state == :manual or data_migration.name in manual_migrations ->
        update_status(:noop, "Data migration is in manual execution state.")

      data_migration.state == :complete ->
        handle_success(data_migration)

      true ->
        send(self(), :migrate_hashtags)
    end

    {:noreply, nil}
  end

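  # Streams unprocessed objects in batches, transferring each batch's embedded
  # hashtags and recording failed object ids for later retry.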
  @impl true
  def handle_info(:migrate_hashtags, state) do
    State.clear()

    data_migration = data_migration()

    persistent_data = Map.take(data_migration.data, ["max_processed_id"])

    {:ok, data_migration} =
      DataMigration.update(data_migration, %{state: :running, data: persistent_data})

    update_status(:running)
    put_stat(:started_at, NaiveDateTime.utc_now())

    Logger.info("Starting transfer of objects' embedded hashtags to the `hashtags` table...")

    max_processed_id = data_migration.data["max_processed_id"] || 0

    query()
    |> where([object], object.id > ^max_processed_id)
    |> Repo.chunk_stream(100, :batches, timeout: :infinity)
    |> Stream.each(fn objects ->
      object_ids = Enum.map(objects, & &1.id)

      failed_ids =
        objects
        |> Enum.map(&transfer_object_hashtags/1)
        |> Enum.filter(&(elem(&1, 0) == :error))
        |> Enum.map(&elem(&1, 1))

      for failed_id <- failed_ids do
        _ =
          Repo.query(
            "INSERT INTO data_migration_failed_ids(data_migration_id, record_id) " <>
              "VALUES ($1, $2) ON CONFLICT DO NOTHING;",
            [data_migration.id, failed_id]
          )
      end

      _ =
        Repo.query(
          "DELETE FROM data_migration_failed_ids " <>
            "WHERE data_migration_id = $1 AND record_id = ANY($2)",
          [data_migration.id, object_ids -- failed_ids]
        )

      max_object_id = Enum.at(object_ids, -1)

      put_stat(:max_processed_id, max_object_id)
      increment_stat(:processed_count, length(object_ids))
      increment_stat(:failed_count, length(failed_ids))

      put_stat(
        :records_per_second,
        state()[:processed_count] /
          Enum.max([NaiveDateTime.diff(NaiveDateTime.utc_now(), state()[:started_at]), 1])
      )

      persist_stats(data_migration)

      # A quick and dirty approach to controlling the load this background migration imposes
      sleep_interval = Config.get([:populate_hashtags_table, :sleep_interval_ms], 0)
      Process.sleep(sleep_interval)
    end)
    |> Stream.run()

    with 0 <- failures_count(data_migration.id) do
      {:ok, data_migration} = DataMigration.update_state(data_migration, :complete)

      handle_success(data_migration)
    else
      _ ->
        _ = DataMigration.update_state(data_migration, :failed)

        update_status(:failed, "Please check data_migration_failed_ids records.")
    end

    {:noreply, state}
  end

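  # Selects objects that have embedded tags but no rows in hashtags_objects yet
  # (left join + IS NULL anti-join).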
  defp query do
    # Note: most objects have Mention-type AS2 tags and no hashtags (but we can't filter them out)
    # Note: not checking activity type; HashtagsCleanupWorker should clean up unused records later
    from(
      object in Object,
      where:
        fragment("(?)->'tag' IS NOT NULL AND (?)->'tag' != '[]'::jsonb", object.data, object.data),
      select: %{
        id: object.id,
        tag: fragment("(?)->'tag'", object.data)
      }
    )
    |> join(:left, [o], hashtags_objects in fragment("SELECT object_id FROM hashtags_objects"),
      on: hashtags_objects.object_id == o.id
    )
    |> where([_o, hashtags_objects], is_nil(hashtags_objects.object_id))
  end

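  # Extracts hashtags from the object's embedded tags and links them in
  # hashtags_objects. Returns {:ok, object_id} on success (or when there is
  # nothing to transfer), {:error, object_id} on failure.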
  defp transfer_object_hashtags(object) do
    embedded_tags = if Map.has_key?(object, :tag), do: object.tag, else: object.data["tag"]
    hashtags = Object.object_data_hashtags(%{"tag" => embedded_tags})

    if Enum.any?(hashtags) do
      transfer_object_hashtags(object, hashtags)
    else
      {:ok, object.id}
    end
  end

  defp transfer_object_hashtags(object, hashtags) do
    Repo.transaction(fn ->
      with {:ok, hashtag_records} <- Hashtag.get_or_create_by_names(hashtags) do
        for hashtag_record <- hashtag_records do
          with {:ok, _} <-
                 Repo.query(
                   "INSERT INTO hashtags_objects(hashtag_id, object_id) VALUES ($1, $2);",
                   [hashtag_record.id, object.id]
                 ) do
            nil
          else
            {:error, e} ->
              error =
                "ERROR: could not link object #{object.id} and hashtag " <>
                  "#{hashtag_record.id}: #{inspect(e)}"

              Logger.error(error)
              Repo.rollback(object.id)
          end
        end

        object.id
      else
        e ->
          error = "ERROR: could not create hashtags for object #{object.id}: #{inspect(e)}"
          Logger.error(error)
          Repo.rollback(object.id)
      end
    end)
  end

  @doc "Approximate count for current iteration (including processed records count)"
  def count(force \\ false, timeout \\ :infinity) do
    stored_count = state()[:count]

    if stored_count && !force do
      stored_count
    else
      processed_count = state()[:processed_count] || 0
      max_processed_id = data_migration().data["max_processed_id"] || 0
      query = where(query(), [object], object.id > ^max_processed_id)

      count = Repo.aggregate(query, :count, :id, timeout: timeout) + processed_count
      put_stat(:count, count)
      count
    end
  end

  defp persist_stats(data_migration) do
    runner_state = Map.drop(state(), [:status])
    _ = DataMigration.update(data_migration, %{data: runner_state})
  end

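  # Enables the improved hashtag timeline once the migration completes, unless
  # the feature is locked or the setting was already configured explicitly.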
  defp handle_success(data_migration) do
    update_status(:complete)

    cond do
      data_migration.feature_lock ->
        :noop

      not is_nil(Config.get([:database, :improved_hashtag_timeline])) ->
        :noop

      true ->
        Config.put([:database, :improved_hashtag_timeline], true)
        :ok
    end
  end

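  # Objects whose ids were recorded in data_migration_failed_ids for this migration.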
  def failed_objects_query do
    from(o in Object)
    |> join(:inner, [o], dmf in fragment("SELECT * FROM data_migration_failed_ids"),
      on: dmf.record_id == o.id
    )
    |> where([_o, dmf], dmf.data_migration_id == ^data_migration().id)
    |> order_by([o], asc: o.id)
  end

  def failures_count(data_migration_id \\ nil) do
    data_migration_id = data_migration_id || data_migration().id

    with {:ok, %{rows: [[count]]}} <-
           Repo.query(
             "SELECT COUNT(record_id) FROM data_migration_failed_ids WHERE data_migration_id = $1;",
             [data_migration_id]
           ) do
      count
    end
  end

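  # Re-attempts the transfer for previously failed objects, clearing their
  # failure records on success.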
  def retry_failed do
    data_migration = data_migration()

    failed_objects_query()
    |> Repo.chunk_stream(100, :one)
    |> Stream.each(fn object ->
      with {:ok, _} <- transfer_object_hashtags(object) do
        _ =
          Repo.query(
            "DELETE FROM data_migration_failed_ids " <>
              "WHERE data_migration_id = $1 AND record_id = $2",
            [data_migration.id, object.id]
          )
      end
    end)
    |> Stream.run()
  end

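  # Manual override helpers, presumably for operator use (e.g. from a remote console).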
  def force_continue do
    send(whereis(), :migrate_hashtags)
  end

  def force_restart do
    {:ok, _} = DataMigration.update(data_migration(), %{state: :pending, data: %{}})
    force_continue()
  end

  def force_complete do
    {:ok, data_migration} = DataMigration.update_state(data_migration(), :complete)

    handle_success(data_migration)
  end

  defp update_status(status, message \\ nil) do
    put_stat(:status, status)
    put_stat(:message, message)
  end
end