feat: Add cascade_destroy to builtin changes.

James Harton 2024-05-07 16:59:30 +12:00 committed by James Harton
parent 11f0f9aa03
commit 70c1a688f8
3 changed files with 380 additions and 0 deletions


@@ -286,6 +286,42 @@ defmodule Ash.Resource.Change.Builtins do
{Ash.Resource.Change.Select, target: value, ensure?: true}
end
@doc """
Cascade this resource's destroy action to a related resource's destroy action.
Adds an after-action hook that explicitly calls destroy on any records related
via the named relationship. It will optimise for bulk destroys where
possible.
> #### Beware database constraints {: .warning}
>
> Think carefully before using this change with data layers which enforce
> referential integrity (ie PostgreSQL and SQLite) and you may need to defer
> constraints for the relationship in question.
>
> See also:
> 1. [`postgres.references.reference.deferrable` DSL](https://hexdocs.pm/ash_postgres/dsl-ashpostgres-datalayer.html#postgres-references-reference-deferrable)
> 2. [`sqlite.references.reference.deferrable` DSL](https://hexdocs.pm/ash_sqlite/dsl-ashsqlite-datalayer.html#sqlite-references-reference-deferrable)
> 3. [PostgreSQL's `SET CONSTRAINTS` documentation](https://www.postgresql.org/docs/current/sql-set-constraints.html)
> 4. [SQLite's `PRAGMA defer_foreign_keys` documentation](https://www.sqlite.org/pragma.html#pragma_defer_foreign_keys)
> #### Cascading notifications {: .tip}
>
> By default notifications are disabled for the related destroy. This is to avoid potentially sending a **lot** of notifications for high-cardinality relationships.
## Options
#{Ash.Resource.Change.CascadeDestroy.opt_schema() |> Keyword.delete(:resource) |> Spark.Options.docs()}
## Example
change cascade_destroy(:relationship)
"""
@spec cascade_destroy(relationship :: atom, opts :: Keyword.t()) :: Ash.Resource.Change.ref()
def cascade_destroy(relationship, opts \\ []) do
{Ash.Resource.Change.CascadeDestroy, Keyword.put(opts, :relationship, relationship)}
end
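# Illustrative usage (a sketch: the `:destroy` action and `:comments`
# relationship names are assumptions, not part of this change):
#
#     destroy :destroy do
#       primary? true
#       change cascade_destroy(:comments, action: :destroy, notify?: true)
#     end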
@doc ~S"""
Directly attach an `after_action` function to the current change.


@@ -0,0 +1,215 @@
defmodule Ash.Resource.Change.CascadeDestroy do
@option_schema [
relationship: [
type: :atom,
doc: "The name of the relationship to work on",
required: true
],
action: [
type: :atom,
doc: "The name of the destroy action to call on the related resource",
required: false,
default: :destroy
],
notify?: [
type: :boolean,
doc: "Emit notifications for each destroyed record?",
required: false,
default: false
]
]
@moduledoc """
Cascade a resource's destroy action to a related resource's destroy action.

Adds an after-action hook that explicitly calls destroy on any records related
via the named relationship. It will optimise for bulk destroys where
possible.

> #### Beware database constraints {: .warning}
>
> Think carefully before using this change with data layers which enforce
> referential integrity (i.e. PostgreSQL and SQLite); you may need to defer
> constraints for the relationship in question.
>
> See also:
> 1. [`postgres.references.reference.deferrable` DSL](https://hexdocs.pm/ash_postgres/dsl-ashpostgres-datalayer.html#postgres-references-reference-deferrable)
> 2. [`sqlite.references.reference.deferrable` DSL](https://hexdocs.pm/ash_sqlite/dsl-ashsqlite-datalayer.html#sqlite-references-reference-deferrable)
> 3. [PostgreSQL's `SET CONSTRAINTS` documentation](https://www.postgresql.org/docs/current/sql-set-constraints.html)
> 4. [SQLite's `PRAGMA defer_foreign_keys` documentation](https://www.sqlite.org/pragma.html#pragma_defer_foreign_keys)

> #### Cascading notifications {: .tip}
>
> By default notifications are disabled for the related destroy. This is to
> avoid potentially sending a **lot** of notifications for high-cardinality
> relationships.

## Options

#{Spark.Options.docs(@option_schema)}

## Example

    change {Ash.Resource.Change.CascadeDestroy, relationship: :comments, action: :destroy}

Or, equivalently, using `Ash.Resource.Change.Builtins.cascade_destroy/2`:

    change cascade_destroy(:comments, action: :destroy)
"""
use Ash.Resource.Change
require Ash.Query
# `ref/1` is used in filter_by_keys/3 below; it comes from `Ash.Expr`.
import Ash.Expr
@doc false
@impl true
def change(changeset, opts, context) do
with {:ok, opts} <- Spark.Options.validate(opts, @option_schema),
{:ok, opts} <- validate_relationship_and_action(opts, changeset.resource) do
Ash.Changeset.after_action(changeset, fn _changeset, result ->
destroy_related([result], opts, context)
{:ok, result}
end)
else
{:error, reason} ->
Ash.Changeset.add_error(changeset, reason)
end
end
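# The cascade runs in after-action/after-batch hooks, so there is no extra
# work to do for atomic actions; returning :ok marks the change as
# atomic-compatible.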
@doc false
@impl true
def atomic(_, _, _), do: :ok
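# For bulk destroys, cascade once for the whole batch rather than once per
# record: collect every destroyed record and hand them to destroy_related/3
# in a single pass.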
@doc false
@impl true
def after_batch([{%{resource: resource}, _} | _] = changesets_and_results, opts, context) do
with {:ok, opts} <- Spark.Options.validate(opts, @option_schema),
{:ok, opts} <- validate_relationship_and_action(opts, resource) do
records = Enum.map(changesets_and_results, &elem(&1, 1))
destroy_related(records, opts, context)
Enum.map(records, &{:ok, &1})
else
{:error, reason} -> [{:error, reason}]
end
end
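# Opt in to batch callbacks whenever the batch is non-empty, so bulk destroys
# are funnelled through after_batch/3 above.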
@doc false
@impl true
def batch_callbacks?([], _, _), do: false
def batch_callbacks?(_, _, _), do: true
@doc false
def opt_schema, do: @option_schema
defp validate_relationship_and_action(opts, resource) do
case Ash.Resource.Info.relationship(resource, opts[:relationship]) do
nil ->
{:error,
Ash.Error.Changes.InvalidRelationship.exception(
relationship: opts[:relationship],
message: "Relationship doesn't exist."
)}
relationship ->
case Ash.Resource.Info.action(relationship.destination, opts[:action]) do
action when action.type == :destroy ->
opts =
opts
|> Keyword.put(:action, action)
|> Keyword.put(:relationship, relationship)
|> Keyword.put(
:domain,
relationship.domain || Ash.Resource.Info.domain(relationship.destination)
)
{:ok, opts}
_ ->
{:error,
Ash.Error.Invalid.NoSuchAction.exception(
resource: relationship.destination,
action: opts[:action],
type: :destroy
)}
end
end
end
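# Two strategies: when the relationship can be expressed as a single query
# (see related_query/2), bulk-destroy directly against that query; otherwise
# load the related records through the relationship and bulk-destroy the
# loaded structs, tagging the context with `cascade_destroy: true`.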
defp destroy_related([], _, _), do: :ok
defp destroy_related(data, opts, context) do
action = opts[:action]
relationship = opts[:relationship]
context_opts =
context
|> Ash.Context.to_opts(
domain: opts[:domain],
return_errors?: true,
strategy: [:stream, :atomic, :atomic_batches],
notify?: opts[:notify?]
)
case related_query(data, opts[:relationship]) do
{:ok, query} ->
Ash.bulk_destroy!(query, action.name, %{}, context_opts)
:error ->
data
|> List.wrap()
|> Ash.load!(
[
{relationship.name,
Ash.Query.set_context(relationship.destination, %{cascade_destroy: true})}
],
authorize?: false
)
|> Enum.flat_map(fn record ->
record
|> Map.get(relationship.name)
|> List.wrap()
end)
|> Ash.bulk_destroy!(
action.name,
%{},
Keyword.update(
context_opts,
:context,
%{cascade_destroy: true},
&Map.put(&1, :cascade_destroy, true)
)
)
end
end
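# A plain related query cannot be built for many-to-many relationships or for
# relationships that use parent expressions, so return :error to make
# destroy_related/3 fall back to loading the records.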
defp related_query(_records, relationship) when relationship.type == :many_to_many, do: :error
defp related_query(records, relationship) do
if Ash.Actions.Read.Relationships.has_parent_expr?(relationship) do
:error
else
{:ok,
Ash.Actions.Read.Relationships.related_query(
relationship.name,
records,
Ash.Query.new(relationship.destination),
Ash.Query.new(relationship.source)
)
|> elem(1)
|> filter_by_keys(relationship, records)}
end
end
defp filter_by_keys(query, %{no_attributes?: true}, _records) do
query
end
defp filter_by_keys(
query,
%{source_attribute: source_attribute, destination_attribute: destination_attribute},
records
) do
source_values = Enum.map(records, &Map.get(&1, source_attribute))
Ash.Query.filter(query, ^ref(destination_attribute) in ^source_values)
end
end
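The cascade is observable from plain code. A minimal sketch, assuming resources shaped like the `Author`/`Post` pair in the test file below:

    author = Author.create!(%{})
    _post = Post.create!(%{author_id: author.id})
    Author.destroy!(author)
    [] = Post.read!() # the related post was destroyed by the cascade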


@@ -0,0 +1,129 @@
defmodule Ash.Test.Resource.Change.CascadeDestroy do
@moduledoc false
use ExUnit.Case, async: true
alias Ash.Test.Domain
alias Ash.Test.Resource.Change.CascadeDestroy, as: Test
defmodule Notifier do
@moduledoc false
use Ash.Notifier
def notify(notification) do
if notification.action.type == :destroy do
Agent.update(
Test.Agent,
&%{&1 | notifications: MapSet.put(&1.notifications, notification.data.id)}
)
end
:ok
end
end
defmodule Author do
@moduledoc false
use Ash.Resource, domain: Domain, data_layer: Ash.DataLayer.Ets
attributes do
uuid_primary_key :id
end
actions do
defaults [:read, create: :*]
destroy :destroy do
primary? true
change cascade_destroy(:posts, notify?: true)
end
end
relationships do
has_many :posts, Test.Post, public?: true
end
code_interface do
define :create
define :destroy
define :read
end
end
defmodule Post do
@moduledoc false
use Ash.Resource, domain: Domain, data_layer: Ash.DataLayer.Ets, notifiers: [Test.Notifier]
attributes do
uuid_primary_key :id
end
actions do
defaults [:read, create: :*]
destroy :destroy do
primary? true
require_atomic? false
change before_action(fn changeset, _ ->
Agent.update(
Test.Agent,
&%{&1 | destroys: MapSet.put(&1.destroys, changeset.data.id)}
)
changeset
end)
end
end
relationships do
belongs_to :author, Test.Author, public?: true, attribute_writable?: true
end
code_interface do
define :create
define :read
end
end
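# Each test starts a fresh agent (registered as Test.Agent) that records the
# ids of destroyed posts and of emitted destroy notifications.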
setup do
{:ok, pid} =
start_supervised({Agent, fn -> %{destroys: MapSet.new(), notifications: MapSet.new()} end})
Process.register(pid, Test.Agent)
:ok
end
test "when destroying an author, all their posts area also destroyed" do
author = Author.create!(%{})
post_ids =
1..Enum.random(3..25)
|> Enum.map(fn _ -> Post.create!(%{author_id: author.id}) end)
|> MapSet.new(& &1.id)
Author.destroy!(author)
deleted_ids = Agent.get(Test.Agent, & &1.destroys)
assert MapSet.equal?(post_ids, deleted_ids)
assert [] = Post.read!()
assert [] = Author.read!()
end
test "destroyed records are notified" do
author = Author.create!(%{})
post_ids =
1..Enum.random(3..25)
|> Enum.map(fn _ -> Post.create!(%{author_id: author.id}) end)
|> MapSet.new(& &1.id)
Author.destroy!(author)
notified_ids = Agent.get(Test.Agent, & &1.notifications)
assert MapSet.equal?(post_ids, notified_ids)
end
end