- Admin API: Reports now ordered by newest
</details>
-- Extracted object hashtags into separate table in order to improve hashtag timeline performance (via background migration in `Pleroma.Migrators.HashtagsTableMigrator`).
+- Improved hashtag timeline performance (requires a background migration).
### Added
remote_fetcher: 2,
attachments_cleanup: 1,
new_users_digest: 1,
- hashtags_cleanup: 1,
mute_expire: 5
],
plugins: [Oban.Plugins.Pruner],
}
]
},
+ %{
+ group: :pleroma,
+ key: :populate_hashtags_table,
+ type: :group,
+ description: "`populate_hashtags_table` background migration settings",
+ children: [
+ %{
+ key: :sleep_interval_ms,
+ type: :integer,
+ description:
+ "Sleep interval between each chunk of processed records in order to decrease the load on the system (defaults to 0 and should be keep default on most instances)."
+ }
+ ]
+ },
%{
group: :pleroma,
key: :instance,
* `show_reactions`: Let favourites and emoji reactions be viewed through the API (default: `true`).
* `password_reset_token_validity`: The time after which reset tokens aren't accepted anymore, in seconds (default: one day).
+## :database
+* `improved_hashtag_timeline`: If `true`, hashtag timelines are served from the `hashtags` table; when `false`, object-embedded hashtags are used instead (slower). Automatically set to `true` (unless overridden) once `HashtagsTableMigrator` completes. See the example below.
+
+## Background migrations
+* `populate_hashtags_table/sleep_interval_ms`: Sleep interval between each chunk of processed records, used to decrease the load on the system (defaults to 0 and should be kept at the default on most instances). See the example below.
+
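For reference, here is a minimal sketch of how these two settings could be set in an instance's configuration; the file name and values are illustrative, not recommendations:

```elixir
# config/prod.secret.exs (illustrative)
import Config

# Serve hashtag timelines from the `hashtags` table. This is switched on
# automatically once `HashtagsTableMigrator` completes, unless overridden.
config :pleroma, :database, improved_hashtag_timeline: true

# Optional throttling of the background migration: sleep 100 ms between
# chunks to reduce load. The default of 0 is fine on most instances.
config :pleroma, :populate_hashtags_table, sleep_interval_ms: 100
```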
## Welcome
* `direct_message`: welcome message sent as a direct message (see the example below).
* `enabled`: Enables sending a direct message to a newly registered user. Defaults to `false`.
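A minimal configuration sketch, assuming the config group is `:welcome`; the `sender_nickname` and `message` keys are illustrative assumptions and do not appear in the excerpt above:

```elixir
# Illustrative only; option names other than `enabled` are assumptions.
config :pleroma, :welcome,
  direct_message: [
    enabled: true,
    sender_nickname: "admin",
    message: "Welcome to the instance!"
  ]
```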
|> Map.merge(%{inserted_at: timestamp, updated_at: timestamp})
end)
- with {:ok, %{query_op: hashtags}} <-
- Multi.new()
- |> Multi.insert_all(:insert_all_op, Hashtag, structs, on_conflict: :nothing)
- |> Multi.run(:query_op, fn _repo, _changes ->
- {:ok, Repo.all(from(ht in Hashtag, where: ht.name in ^names))}
- end)
- |> Repo.transaction() do
- {:ok, hashtags}
- else
- {:error, _name, value, _changes_so_far} -> {:error, value}
+ try do
+ with {:ok, %{query_op: hashtags}} <-
+ Multi.new()
+ |> Multi.insert_all(:insert_all_op, Hashtag, structs, on_conflict: :nothing)
+ |> Multi.run(:query_op, fn _repo, _changes ->
+ {:ok, Repo.all(from(ht in Hashtag, where: ht.name in ^names))}
+ end)
+ |> Repo.transaction() do
+ {:ok, hashtags}
+ else
+ {:error, _name, value, _changes_so_far} -> {:error, value}
+ end
+ rescue
+ e -> {:error, e}
end
end
where: hto.object_id == ^object_id,
select: hto.hashtag_id
)
- |> Repo.delete_all() do
- delete_unreferenced(hashtag_ids)
+ |> Repo.delete_all(),
+ {:ok, unreferenced_count} <- delete_unreferenced(hashtag_ids) do
+ {:ok, length(hashtag_ids), unreferenced_count}
end
end
maps = Enum.map(hashtag_records, &%{hashtag_id: &1.id, object_id: object.id})
expected_rows = length(hashtag_records)
- with {^expected_rows, _} <- Repo.insert_all("hashtags_objects", maps) do
- object.id
- else
+ base_error =
+ "ERROR when inserting #{expected_rows} hashtags_objects for obj. #{object.id}"
+
+ try do
+ with {^expected_rows, _} <- Repo.insert_all("hashtags_objects", maps) do
+ object.id
+ else
+ e ->
+ Logger.error("#{base_error}: #{inspect(e)}")
+ Repo.rollback(object.id)
+ end
+ rescue
e ->
- error =
- "ERROR when inserting #{expected_rows} hashtags_objects " <>
- "for object #{object.id}: #{inspect(e)}"
-
- Logger.error(error)
+ Logger.error("#{base_error}: #{inspect(e)}")
Repo.rollback(object.id)
end
else