# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Migrators.HashtagsTableMigrator do
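  @moduledoc """
  Migrates hashtags embedded in `objects.data->'tag'` into the dedicated
  `hashtags` and `hashtags_objects` tables as a resumable background job.
  """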

  use GenServer

  require Logger

  import Ecto.Query

  alias __MODULE__.State
  alias Pleroma.Config
  alias Pleroma.DataMigration
  alias Pleroma.Hashtag
  alias Pleroma.Object
  alias Pleroma.Repo

  defdelegate state(), to: State, as: :get
  defdelegate put_stat(key, value), to: State, as: :put
  defdelegate increment_stat(key, increment), to: State, as: :increment

  defdelegate data_migration(), to: DataMigration, as: :populate_hashtags_table

  @reg_name {:global, __MODULE__}
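
  # The migrator runs as a single process, registered globally across the cluster.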
  def whereis, do: GenServer.whereis(@reg_name)

  def start_link(_) do
    case whereis() do
      nil ->
        GenServer.start_link(__MODULE__, nil, name: @reg_name)

      pid ->
        {:ok, pid}
    end
  end

  @impl true
  def init(_) do
    {:ok, nil, {:continue, :init_state}}
  end
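
  # Startup checks happen in handle_continue/2 so that init/1 returns without blocking.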
  @impl true
  def handle_continue(:init_state, _state) do
    {:ok, _} = State.start_link(nil)

    update_status(:pending)

    data_migration = data_migration()
    manual_migrations = Config.get([:instance, :manual_data_migrations], [])

    cond do
      Config.get(:env) == :test ->
        update_status(:noop)

      is_nil(data_migration) ->
        update_status(:halt, "Data migration does not exist.")

      data_migration.state == :manual or data_migration.name in manual_migrations ->
        update_status(:noop, "Data migration is in manual execution state.")

      data_migration.state == :complete ->
        handle_success(data_migration)

      true ->
        send(self(), :migrate_hashtags)
    end

    {:noreply, nil}
  end
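
  # The migration itself: streams objects that still have embedded hashtags in
  # batches, transfers each batch, and records progress and failed object ids.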
  @impl true
  def handle_info(:migrate_hashtags, state) do
    data_migration = data_migration()

    persistent_data = Map.take(data_migration.data, ["max_processed_id"])

    {:ok, data_migration} =
      DataMigration.update(data_migration, %{state: :running, data: persistent_data})

    update_status(:running)

    Logger.info("Starting transferring object embedded hashtags to `hashtags` table...")

    max_processed_id = data_migration.data["max_processed_id"] || 0

    # Note: most objects have Mention-type AS2 tags and no hashtags (but we can't filter them out)
    from(
      object in Object,
      left_join: hashtag in assoc(object, :hashtags),
      where: object.id > ^max_processed_id,
      where: is_nil(hashtag.id),
      where:
        fragment("(?)->'tag' IS NOT NULL AND (?)->'tag' != '[]'::jsonb", object.data, object.data),
      select: %{
        id: object.id,
        tag: fragment("(?)->'tag'", object.data)
      }
    )
    |> Repo.chunk_stream(100, :batches, timeout: :infinity)
    |> Stream.each(fn objects ->
      object_ids = Enum.map(objects, & &1.id)

      failed_ids =
        objects
        |> Enum.map(&transfer_object_hashtags(&1))
        |> Enum.filter(&(elem(&1, 0) == :error))
        |> Enum.map(&elem(&1, 1))

      for failed_id <- failed_ids do
        _ =
          Repo.query(
            "INSERT INTO data_migration_failed_ids(data_migration_id, record_id) " <>
              "VALUES ($1, $2) ON CONFLICT DO NOTHING;",
            [data_migration.id, failed_id]
          )
      end

      _ =
        Repo.query(
          "DELETE FROM data_migration_failed_ids WHERE id = ANY($1)",
          [object_ids -- failed_ids]
        )

      max_object_id = Enum.at(object_ids, -1)

      put_stat(:max_processed_id, max_object_id)
      increment_stat(:processed_count, length(object_ids))
      increment_stat(:failed_count, length(failed_ids))

      persist_stats(data_migration)

      # A quick and dirty approach to controlling the load this background migration imposes
      sleep_interval = Config.get([:populate_hashtags_table, :sleep_interval_ms], 0)
      Process.sleep(sleep_interval)
    end)
    |> Stream.run()

    with {:ok, %{rows: [[0]]}} <-
           Repo.query(
             "SELECT COUNT(record_id) FROM data_migration_failed_ids WHERE data_migration_id = $1;",
             [data_migration.id]
           ) do
      _ = DataMigration.update_state(data_migration, :complete)

      handle_success(data_migration)
    else
      _ ->
        _ = DataMigration.update_state(data_migration, :failed)

        update_status(:failed, "Please check data_migration_failed_ids records.")
    end

    {:noreply, state}
  end
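
  # Links a single object to its hashtags inside a transaction.
  # Returns {:ok, object_id} on success or {:error, object_id} if rolled back.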
  defp transfer_object_hashtags(object) do
    hashtags = Object.object_data_hashtags(%{"tag" => object.tag})

    Repo.transaction(fn ->
      with {:ok, hashtag_records} <- Hashtag.get_or_create_by_names(hashtags) do
        for hashtag_record <- hashtag_records do
          with {:ok, _} <-
                 Repo.query(
                   "insert into hashtags_objects(hashtag_id, object_id) values ($1, $2);",
                   [hashtag_record.id, object.id]
                 ) do
            nil
          else
            {:error, e} ->
              error =
                "ERROR: could not link object #{object.id} and hashtag " <>
                  "#{hashtag_record.id}: #{inspect(e)}"

              Logger.error(error)
              Repo.rollback(object.id)
          end
        end

        object.id
      else
        e ->
          error = "ERROR: could not create hashtags for object #{object.id}: #{inspect(e)}"
          Logger.error(error)
          Repo.rollback(object.id)
      end
    end)
  end
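
  # Mirrors the in-memory runner stats (minus :status) into the data migration record.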
  defp persist_stats(data_migration) do
    runner_state = Map.drop(state(), [:status])
    _ = DataMigration.update(data_migration, %{data: runner_state})
  end
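
  # On success, enables the improved hashtag timeline config flag unless it is
  # feature-locked or already explicitly configured.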
  defp handle_success(data_migration) do
    update_status(:complete)

    cond do
      data_migration.feature_lock ->
        :noop

      not is_nil(Config.improved_hashtag_timeline()) ->
        :noop

      true ->
        Config.put(Config.improved_hashtag_timeline_path(), true)
        :ok
    end
  end
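
  # Query for objects recorded in data_migration_failed_ids for this migration,
  # useful for inspecting or retrying failures.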
  def failed_objects_query do
    from(o in Object)
    |> join(:inner, [o], dmf in fragment("SELECT * FROM data_migration_failed_ids"),
      on: dmf.record_id == o.id
    )
    |> where([_o, dmf], dmf.data_migration_id == ^data_migration().id)
    |> order_by([o], asc: o.id)
  end
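
  # Manual controls for resuming or restarting the migration.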
  def force_continue do
    send(whereis(), :migrate_hashtags)
  end

  def force_restart do
    {:ok, _} = DataMigration.update(data_migration(), %{state: :pending, data: %{}})
    force_continue()
  end

  defp update_status(status, message \\ nil) do
    put_stat(:status, status)
    put_stat(:message, message)
  end
end