defmodule AshPostgres.MigrationGenerator do
@moduledoc """
Generates migrations based on resource snapshots
See `Mix.Tasks.AshPostgres.GenerateMigrations` for more information.
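
## Example

A minimal sketch of invoking the generator directly (`MyApp.Api` is a
hypothetical API module; the mix task is the usual entry point):

    AshPostgres.MigrationGenerator.generate(MyApp.Api,
      snapshot_path: "priv/resource_snapshots",
      drop_columns: false
    )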
"""
@default_snapshot_path "priv/resource_snapshots"
import Mix.Generator
alias AshPostgres.MigrationGenerator.{Operation, Phase}
defstruct snapshot_path: @default_snapshot_path,
migration_path: nil,
tenant_migration_path: nil,
quiet: false,
current_snapshots: nil,
answers: [],
no_shell?: false,
format: true,
dry_run: false,
check_generated: false,
drop_columns: false
def generate(apis, opts \\ []) do
apis = List.wrap(apis)
opts =
case struct(__MODULE__, opts) do
%{check_generated: true} = opts ->
%{opts | dry_run: true}
opts ->
opts
end
all_resources = Enum.flat_map(apis, &Ash.Api.resources/1)
{tenant_snapshots, snapshots} =
all_resources
|> Enum.filter(&(Ash.DataLayer.data_layer(&1) == AshPostgres.DataLayer))
|> Enum.filter(&AshPostgres.migrate?/1)
|> Enum.flat_map(&get_snapshots(&1, all_resources))
|> Enum.split_with(&(&1.multitenancy.strategy == :context))
tenant_snapshots_to_include_in_global =
tenant_snapshots
|> Enum.filter(& &1.multitenancy.global)
|> Enum.map(&Map.put(&1, :multitenancy, %{strategy: nil, attribute: nil, global: nil}))
snapshots = snapshots ++ tenant_snapshots_to_include_in_global
repos =
snapshots
|> Enum.map(& &1.repo)
|> Enum.uniq()
create_extension_migrations(repos, opts)
create_migrations(tenant_snapshots, opts, true)
create_migrations(snapshots, opts, false)
end
@doc """
A work-in-progress utility for getting snapshots.
Does not support everything supported by the migration generator.
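
Example (`MyApp.Api` and `MyApp.Repo` are hypothetical):

    AshPostgres.MigrationGenerator.take_snapshots(MyApp.Api, MyApp.Repo)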
"""
def take_snapshots(api, repo) do
all_resources = Ash.Api.resources(api)
all_resources
|> Enum.filter(&(Ash.DataLayer.data_layer(&1) == AshPostgres.DataLayer))
|> Enum.filter(&(AshPostgres.repo(&1) == repo))
|> Enum.flat_map(&get_snapshots(&1, all_resources))
end
@doc """
A work-in-progress utility for getting operations between snapshots.
Does not support everything supported by the migration generator.
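
Example, diffing two snapshot lists such as those returned by `take_snapshots/2`:

    get_operations_from_snapshots(old_snapshots, new_snapshots)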
"""
def get_operations_from_snapshots(old_snapshots, new_snapshots, opts \\ []) do
opts = %{opts(opts) | no_shell?: true}
old_snapshots = Enum.map(old_snapshots, &sanitize_snapshot/1)
new_snapshots
|> deduplicate_snapshots(opts, old_snapshots)
|> fetch_operations(opts)
|> Enum.flat_map(&elem(&1, 1))
|> Enum.uniq()
|> organize_operations()
end
defp opts(opts) do
case struct(__MODULE__, opts) do
%{check_generated: true} = opts ->
%{opts | dry_run: true}
opts ->
opts
end
end
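# Compares the repo's `installed_extensions/0` with the `extensions.json`
# snapshot and, for any newly listed extensions, generates a migration whose
# `up` runs `CREATE EXTENSION IF NOT EXISTS` for each one (the corresponding
# `DROP EXTENSION` calls are emitted commented out in `down`).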
defp create_extension_migrations(repos, opts) do
for repo <- repos do
snapshot_file = Path.join(opts.snapshot_path, "extensions.json")
installed_extensions =
if File.exists?(snapshot_file) do
snapshot_file
|> File.read!()
|> Jason.decode!()
else
[]
end
to_install = List.wrap(repo.installed_extensions()) -- List.wrap(installed_extensions)
if Enum.empty?(to_install) do
:ok
else
{module, migration_name} =
case to_install do
[single] ->
{"install_#{single}", "#{timestamp(true)}_install_#{single}.exs"}
multiple ->
{"install_#{Enum.count(multiple)}_extensions",
"#{timestamp(true)}_install_#{Enum.count(multiple)}_extensions.exs"}
end
migration_file =
opts
|> migration_path(repo)
|> Path.join(migration_name <> ".exs")
module_name = Module.concat([repo, Migrations, Macro.camelize(module)])
install =
Enum.map_join(to_install, "\n", fn extension ->
"execute(\"CREATE EXTENSION IF NOT EXISTS \\\"#{extension}\\\"\")"
end)
uninstall =
Enum.map_join(to_install, "\n", fn extension ->
"execute(\"DROP EXTENSION IF EXISTS \\\"#{extension}\\\"\")"
end)
contents = """
defmodule #{inspect(module_name)} do
@moduledoc \"\"\"
Installs any extensions that are mentioned in the repo's `installed_extensions/0` callback
This file was autogenerated with `mix ash_postgres.generate_migrations`
\"\"\"
use Ecto.Migration
def up do
#{install}
end
def down do
# Uncomment this if you actually want to uninstall the extensions
# when this migration is rolled back.
#{uninstall}
end
end
"""
snapshot_contents = Jason.encode!(repo.installed_extensions(), pretty: true)
contents = format(contents, opts)
create_file(snapshot_file, snapshot_contents, force: true)
create_file(migration_file, contents)
end
end
end
defp create_migrations(snapshots, opts, tenant?) do
snapshots
|> Enum.group_by(& &1.repo)
|> Enum.each(fn {repo, snapshots} ->
deduped = deduplicate_snapshots(snapshots, opts)
snapshots_with_operations = fetch_operations(deduped, opts)
snapshots = Enum.map(snapshots_with_operations, &elem(&1, 0))
snapshots_with_operations
|> Enum.flat_map(&elem(&1, 1))
|> Enum.uniq()
|> case do
[] ->
tenant_str =
if tenant? do
"tenant "
else
""
end
Mix.shell().info(
"No #{tenant_str}changes detected, so no migrations or snapshots have been created."
)
:ok
operations ->
if opts.check_generated, do: exit({:shutdown, 1})
operations
|> organize_operations
|> build_up_and_down()
|> write_migration!(snapshots, repo, opts, tenant?)
end
end)
end
defp organize_operations([]), do: []
defp organize_operations(operations) do
operations
|> sort_operations()
|> streamline()
|> group_into_phases()
|> comment_out_phases()
end
defp comment_out_phases(phases) do
Enum.map(phases, fn
%{operations: operations} = phase ->
if Enum.all?(operations, &match?(%{commented?: true}, &1)) do
%{phase | commented?: true}
else
phase
end
phase ->
phase
end)
end
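# Several resources can share one table (e.g. polymorphic resources), so
# snapshots are grouped by table and merged into a single snapshot per table,
# reconciling attributes, identities, and primary keys along the way.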
defp deduplicate_snapshots(snapshots, opts, existing_snapshots \\ []) do
snapshots
|> Enum.group_by(fn snapshot ->
snapshot.table
end)
|> Enum.map(fn {_table, [snapshot | _] = snapshots} ->
existing_snapshot =
if opts.no_shell? do
Enum.find(existing_snapshots, &(&1.table == snapshot.table))
else
get_existing_snapshot(snapshot, opts)
end
{primary_key, identities} = merge_primary_keys(existing_snapshot, snapshots, opts)
attributes = Enum.flat_map(snapshots, & &1.attributes)
count_with_create =
snapshots
|> Enum.filter(& &1.has_create_action)
|> Enum.count()
snapshot_identities =
snapshots
|> Enum.map(& &1.identities)
|> Enum.concat()
new_snapshot = %{
snapshot
| attributes: merge_attributes(attributes, snapshot.table, count_with_create),
identities: snapshot_identities
}
all_identities =
new_snapshot.identities
|> Kernel.++(identities)
|> Enum.sort_by(& &1.name)
# We sort identities that have a matching name in the existing snapshot first,
# so that identities that already exist are preferred over new ones
|> Enum.sort_by(fn identity ->
existing_snapshot
|> Kernel.||(%{})
|> Map.get(:identities, [])
|> Enum.any?(fn existing_identity ->
existing_identity.name == identity.name
end)
|> Kernel.!()
end)
|> Enum.uniq_by(fn identity ->
{Enum.sort(identity.keys), identity.base_filter}
end)
new_snapshot = %{new_snapshot | identities: all_identities}
{
%{
new_snapshot
| attributes:
Enum.map(new_snapshot.attributes, fn attribute ->
if attribute.name in primary_key do
%{attribute | primary_key?: true}
else
%{attribute | primary_key?: false}
end
end)
},
existing_snapshot
}
end)
end
defp merge_attributes(attributes, table, count) do
attributes
|> Enum.group_by(& &1.name)
|> Enum.map(fn {name, attributes} ->
%{
name: name,
type: merge_types(Enum.map(attributes, & &1.type), name, table),
default: merge_defaults(Enum.map(attributes, & &1.default)),
allow_nil?: Enum.any?(attributes, & &1.allow_nil?) || Enum.count(attributes) < count,
generated?: Enum.any?(attributes, & &1.generated?),
references: merge_references(Enum.map(attributes, & &1.references), name, table),
primary_key?: false
}
end)
end
defp merge_references(references, name, table) do
references
|> Enum.reject(&is_nil/1)
|> Enum.uniq()
|> case do
[] ->
nil
references ->
%{
destination_field: merge_uniq!(references, table, :destination_field, name),
multitenancy: merge_uniq!(references, table, :multitenancy, name),
on_delete: merge_uniq!(references, table, :on_delete, name),
on_update: merge_uniq!(references, table, :on_update, name),
name: merge_uniq!(references, table, :name, name),
table: merge_uniq!(references, table, :table, name)
}
end
end
defp merge_uniq!(references, table, field, attribute) do
references
|> Enum.map(&Map.get(&1, field))
|> Enum.filter(& &1)
|> Enum.uniq()
|> case do
[] ->
nil
[value] ->
value
values ->
values = Enum.map_join(values, "\n", &" * #{inspect(&1)}")
raise """
Conflicting configurations for references for #{table}.#{attribute}:
Values:
#{values}
"""
end
end
defp merge_types(types, name, table) do
types
|> Enum.uniq()
|> case do
[type] ->
type
types ->
raise "Conflicting types for table `#{table}.#{name}`: #{inspect(types)}"
end
end
defp merge_defaults(defaults) do
defaults
|> Enum.uniq()
|> case do
[default] -> default
_ -> "nil"
end
end
defp merge_primary_keys(nil, [snapshot | _] = snapshots, opts) do
snapshots
|> Enum.map(&pkey_names(&1.attributes))
|> Enum.uniq()
|> case do
[pkey_names] ->
{pkey_names, []}
unique_primary_keys ->
unique_primary_key_names =
unique_primary_keys
|> Enum.with_index()
|> Enum.map_join("\n", fn {pkey, index} ->
"#{index}: #{inspect(pkey)}"
end)
choice =
if opts.no_shell? do
raise "Unimplemented: cannot resolve primary key ambiguity without shell input"
else
message = """
Which primary key should be used for the table `#{snapshot.table}` (enter the number)?
#{unique_primary_key_names}
"""
message
|> Mix.shell().prompt()
|> String.to_integer()
end
identities =
unique_primary_keys
|> List.delete_at(choice)
|> Enum.map(fn pkey_names ->
pkey_name_string = Enum.join(pkey_names, "_")
name = snapshot.table <> "_" <> pkey_name_string
%{
keys: pkey_names,
name: name
}
end)
primary_key = Enum.sort(Enum.at(unique_primary_keys, choice))
identities =
Enum.reject(identities, fn identity ->
Enum.sort(identity.keys) == primary_key
end)
{primary_key, identities}
end
end
defp merge_primary_keys(existing_snapshot, snapshots, opts) do
pkey_names = pkey_names(existing_snapshot.attributes)
one_pkey_exists? =
Enum.any?(snapshots, fn snapshot ->
pkey_names(snapshot.attributes) == pkey_names
end)
if one_pkey_exists? do
identities =
snapshots
|> Enum.map(&pkey_names(&1.attributes))
|> Enum.uniq()
|> Enum.reject(&(&1 == pkey_names))
|> Enum.map(fn pkey_names ->
pkey_name_string = Enum.join(pkey_names, "_")
name = existing_snapshot.table <> "_" <> pkey_name_string
%{
keys: pkey_names,
name: name
}
end)
{pkey_names, identities}
else
merge_primary_keys(nil, snapshots, opts)
end
end
defp pkey_names(attributes) do
attributes
|> Enum.filter(& &1.primary_key?)
|> Enum.map(& &1.name)
|> Enum.sort()
end
defp migration_path(opts, repo, tenant? \\ false) do
repo_name = repo_name(repo)
if tenant? do
if opts.tenant_migration_path do
opts.tenant_migration_path
else
"priv/"
end
|> Path.join(repo_name)
|> Path.join("tenant_migrations")
else
if opts.migration_path do
opts.migration_path
else
"priv/"
end
|> Path.join(repo_name)
|> Path.join("migrations")
end
end
defp repo_name(repo) do
repo |> Module.split() |> List.last() |> Macro.underscore()
end
defp write_migration!({up, down}, snapshots, repo, opts, tenant?) do
repo_name = repo_name(repo)
migration_path = migration_path(opts, repo, tenant?)
count =
migration_path
|> Path.join("*_migrate_resources*")
|> Path.wildcard()
|> Enum.count()
|> Kernel.+(1)
migration_name = "#{timestamp(true)}_migrate_resources#{count}"
migration_file =
migration_path
|> Path.join(migration_name <> ".exs")
module_name =
if tenant? do
Module.concat([repo, TenantMigrations, Macro.camelize("migrate_resources#{count}")])
else
Module.concat([repo, Migrations, Macro.camelize("migrate_resources#{count}")])
end
contents = """
defmodule #{inspect(module_name)} do
@moduledoc \"\"\"
Updates resources based on their most recent snapshots.
This file was autogenerated with `mix ash_postgres.generate_migrations`
\"\"\"
use Ecto.Migration
def up do
#{up}
end
def down do
#{down}
end
end
"""
try do
contents = format(contents, opts)
create_new_snapshot(snapshots, repo_name, opts, tenant?)
if opts.dry_run do
Mix.shell().info(contents)
else
create_file(migration_file, contents)
end
rescue
exception ->
reraise(
"""
Exception while formatting generated code:
#{Exception.format(:error, exception, __STACKTRACE__)}
Code:
#{add_line_numbers(contents)}
To generate the code unformatted anyway (so you can fix it manually), use the `--no-format` option.
""",
__STACKTRACE__
)
end
end
defp add_line_numbers(contents) do
lines = String.split(contents, "\n")
digits = String.length(to_string(Enum.count(lines)))
lines
|> Enum.with_index()
|> Enum.map_join("\n", fn {line, index} ->
"#{String.pad_trailing(to_string(index), digits, " ")} | #{line}"
end)
end
defp create_new_snapshot(snapshots, repo_name, opts, tenant?) do
unless opts.dry_run do
Enum.each(snapshots, fn snapshot ->
snapshot_binary = snapshot_to_binary(snapshot)
snapshot_folder =
if tenant? do
opts.snapshot_path
|> Path.join(repo_name)
|> Path.join("tenants")
else
opts.snapshot_path
|> Path.join(repo_name)
end
snapshot_file = Path.join(snapshot_folder, "#{snapshot.table}/#{timestamp()}.json")
File.mkdir_p(Path.dirname(snapshot_file))
File.write!(snapshot_file, snapshot_binary, [])
old_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}.json")
if File.exists?(old_snapshot_folder) do
new_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}/initial.json")
File.rename(old_snapshot_folder, new_snapshot_folder)
end
end)
end
end
@doc false
def build_up_and_down(phases) do
up =
Enum.map_join(phases, "\n", fn phase ->
phase
|> phase.__struct__.up()
|> Kernel.<>("\n")
|> maybe_comment(phase)
end)
down =
phases
|> Enum.reverse()
|> Enum.map_join("\n", fn phase ->
phase
|> phase.__struct__.down()
|> Kernel.<>("\n")
|> maybe_comment(phase)
end)
{up, down}
end
defp maybe_comment(text, %{commented?: true}) do
text
|> String.split("\n")
|> Enum.map(fn line ->
if String.starts_with?(line, "#") do
line
else
"# #{line}"
end
end)
|> Enum.join("\n")
end
defp maybe_comment(text, _), do: text
defp format(string, opts) do
if opts.format do
Code.format_string!(string, locals_without_parens: ecto_sql_locals_without_parens())
else
string
end
rescue
exception ->
IO.puts("""
Exception while formatting:
#{inspect(exception)}
#{inspect(string)}
""")
reraise exception, __STACKTRACE__
end
defp ecto_sql_locals_without_parens do
path = File.cwd!() |> Path.join("deps/ecto_sql/.formatter.exs")
if File.exists?(path) do
{opts, _} = Code.eval_file(path)
Keyword.get(opts, :locals_without_parens, [])
else
[]
end
end
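# Collapses an `AddAttribute` that is immediately followed (among the same
# table's operations) by an `AlterAttribute` which only adds references into
# a single `AddAttribute` carrying those references.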
defp streamline(ops, acc \\ [])
defp streamline([], acc), do: Enum.reverse(acc)
defp streamline(
[
%Operation.AddAttribute{
attribute: %{
name: name
},
table: table
} = add
| rest
],
acc
) do
rest
|> Enum.take_while(fn op ->
op.table == table
end)
|> Enum.with_index()
|> Enum.find(fn
{%Operation.AlterAttribute{
new_attribute: %{name: ^name, references: references},
old_attribute: %{name: ^name}
}, _}
when not is_nil(references) ->
true
_ ->
false
end)
|> case do
nil ->
streamline(rest, [add | acc])
{alter, index} ->
new_attribute = Map.put(add.attribute, :references, alter.new_attribute.references)
streamline(List.delete_at(rest, index), [%{add | attribute: new_attribute} | acc])
end
end
defp streamline([first | rest], acc) do
streamline(rest, [first | acc])
end
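# Groups consecutive operations on the same table into phases (`Phase.Create`
# or `Phase.Alter`), each of which renders as a single `create table`/
# `alter table` block in the generated migration.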
defp group_into_phases(ops, current \\ nil, acc \\ [])
defp group_into_phases([], nil, acc), do: Enum.reverse(acc)
defp group_into_phases([], phase, acc) do
phase = %{phase | operations: Enum.reverse(phase.operations)}
Enum.reverse([phase | acc])
end
defp group_into_phases(
[%Operation.CreateTable{table: table, multitenancy: multitenancy} | rest],
nil,
acc
) do
group_into_phases(rest, %Phase.Create{table: table, multitenancy: multitenancy}, acc)
end
defp group_into_phases(
[%Operation.AddAttribute{table: table} = op | rest],
%{table: table} = phase,
acc
) do
group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc)
end
defp group_into_phases(
[%Operation.AlterAttribute{table: table} = op | rest],
%Phase.Alter{table: table} = phase,
acc
) do
group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc)
end
defp group_into_phases(
[%Operation.RenameAttribute{table: table} = op | rest],
%Phase.Alter{table: table} = phase,
acc
) do
group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc)
end
defp group_into_phases(
[%Operation.RemoveAttribute{table: table} = op | rest],
%{table: table} = phase,
acc
) do
group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc)
end
defp group_into_phases([%{no_phase: true} = op | rest], nil, acc) do
group_into_phases(rest, nil, [op | acc])
end
defp group_into_phases([operation | rest], nil, acc) do
phase = %Phase.Alter{
operations: [operation],
multitenancy: operation.multitenancy,
table: operation.table
}
group_into_phases(rest, phase, acc)
end
defp group_into_phases(operations, phase, acc) do
phase = %{phase | operations: Enum.reverse(phase.operations)}
group_into_phases(operations, nil, [phase | acc])
end
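# An insertion sort driven by the pairwise `after?/2` rules below, which
# state when one operation must run after another (e.g. an index is added
# only after the attributes it covers exist).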
defp sort_operations(ops, acc \\ [])
defp sort_operations([], acc), do: acc
defp sort_operations([op | rest], []), do: sort_operations(rest, [op])
defp sort_operations([op | rest], acc) do
acc = Enum.reverse(acc)
after_index = Enum.find_index(acc, &after?(op, &1))
new_acc =
if after_index do
acc
|> List.insert_at(after_index, op)
|> Enum.reverse()
else
[op | Enum.reverse(acc)]
end
sort_operations(rest, new_acc)
end
defp after?(
%Operation.AddUniqueIndex{
identity: %{keys: keys},
table: table,
multitenancy: multitenancy
},
%Operation.AddAttribute{table: table, attribute: %{name: name}}
) do
name in keys || (multitenancy.attribute && name == multitenancy.attribute)
end
defp after?(
%Operation.AddCheckConstraint{
constraint: %{attribute: attribute_or_attributes},
table: table,
multitenancy: multitenancy
},
%Operation.AddAttribute{table: table, attribute: %{name: name}}
) do
name in List.wrap(attribute_or_attributes) ||
(multitenancy.attribute && multitenancy.attribute in List.wrap(attribute_or_attributes))
end
defp after?(%Operation.AddUniqueIndex{table: table}, %Operation.RemoveUniqueIndex{table: table}),
do: true
defp after?(%Operation.AddCheckConstraint{table: table}, %Operation.RemoveCheckConstraint{
table: table
}),
do: true
defp after?(
%Operation.AddUniqueIndex{identity: %{keys: keys}, table: table},
%Operation.AlterAttribute{table: table, new_attribute: %{name: name}}
) do
name in keys
end
defp after?(
%Operation.AddUniqueIndex{identity: %{keys: keys}, table: table},
%Operation.RenameAttribute{table: table, new_attribute: %{name: name}}
) do
name in keys
end
defp after?(
%Operation.AddCheckConstraint{
constraint: %{attribute: attribute_or_attributes},
table: table
},
%Operation.AlterAttribute{table: table, new_attribute: %{name: name}}
) do
name in List.wrap(attribute_or_attributes)
end
defp after?(
%Operation.AddCheckConstraint{
constraint: %{attribute: attribute_or_attributes},
table: table
},
%Operation.RenameAttribute{table: table, new_attribute: %{name: name}}
) do
name in List.wrap(attribute_or_attributes)
end
defp after?(
%Operation.RemoveUniqueIndex{identity: %{keys: keys}, table: table},
%Operation.RemoveAttribute{table: table, attribute: %{name: name}}
) do
name in keys
end
defp after?(
%Operation.RemoveUniqueIndex{identity: %{keys: keys}, table: table},
%Operation.RenameAttribute{table: table, old_attribute: %{name: name}}
) do
name in keys
end
defp after?(
%Operation.RemoveCheckConstraint{constraint: %{attribute: attributes}, table: table},
%Operation.RemoveAttribute{table: table, attribute: %{name: name}}
) do
name in List.wrap(attributes)
end
defp after?(
%Operation.RemoveCheckConstraint{constraint: %{attribute: attributes}, table: table},
%Operation.RenameAttribute{table: table, old_attribute: %{name: name}}
) do
name in List.wrap(attributes)
end
defp after?(%Operation.AlterAttribute{table: table}, %Operation.DropForeignKey{
table: table,
direction: :up
}),
do: true
defp after?(
%Operation.DropForeignKey{
table: table,
direction: :down
},
%Operation.AlterAttribute{table: table}
),
do: true
defp after?(%Operation.AddAttribute{table: table}, %Operation.CreateTable{table: table}) do
true
end
defp after?(
%Operation.AddAttribute{
attribute: %{
references: %{table: table, destination_field: name}
}
},
%Operation.AddAttribute{table: table, attribute: %{name: name}}
),
do: true
defp after?(
%Operation.AddAttribute{
table: table,
attribute: %{
primary_key?: false
}
},
%Operation.AddAttribute{table: table, attribute: %{primary_key?: true}}
),
do: true
defp after?(
%Operation.AddAttribute{
table: table,
attribute: %{
primary_key?: true
}
},
%Operation.RemoveAttribute{table: table, attribute: %{primary_key?: true}}
),
do: true
defp after?(
%Operation.AlterAttribute{
table: table,
new_attribute: %{primary_key?: false},
old_attribute: %{primary_key?: true}
},
%Operation.AddAttribute{
table: table,
attribute: %{
primary_key?: true
}
}
),
do: true
defp after?(
%Operation.RemoveAttribute{attribute: %{name: name}, table: table},
%Operation.AlterAttribute{
old_attribute: %{references: %{table: table, destination_field: name}}
}
),
do: true
defp after?(
%Operation.AlterAttribute{
new_attribute: %{
references: %{table: table, destination_field: name}
}
},
%Operation.AddAttribute{table: table, attribute: %{name: name}}
),
do: true
defp after?(%Operation.AddUniqueIndex{table: table}, %Operation.CreateTable{table: table}) do
true
end
defp after?(%Operation.AddCheckConstraint{table: table}, %Operation.CreateTable{table: table}) do
true
end
defp after?(%Operation.AlterAttribute{new_attribute: %{references: references}}, _)
when not is_nil(references),
do: true
defp after?(_, _), do: false
defp fetch_operations(snapshots, opts) do
snapshots
|> Enum.map(fn {snapshot, existing_snapshot} ->
{snapshot, do_fetch_operations(snapshot, existing_snapshot, opts)}
end)
|> Enum.reject(fn {_, ops} ->
Enum.empty?(ops)
end)
end
defp do_fetch_operations(snapshot, existing_snapshot, opts, acc \\ [])
defp do_fetch_operations(snapshot, nil, opts, acc) do
empty_snapshot = %{
attributes: [],
identities: [],
check_constraints: [],
table: snapshot.table,
repo: snapshot.repo,
multitenancy: %{
attribute: nil,
strategy: nil,
global: nil
}
}
do_fetch_operations(snapshot, empty_snapshot, opts, [
%Operation.CreateTable{
table: snapshot.table,
multitenancy: snapshot.multitenancy,
old_multitenancy: empty_snapshot.multitenancy
}
| acc
])
end
defp do_fetch_operations(snapshot, old_snapshot, opts, acc) do
attribute_operations = attribute_operations(snapshot, old_snapshot, opts)
rewrite_all_identities? = changing_multitenancy_affects_identities?(snapshot, old_snapshot)
unique_indexes_to_remove =
if rewrite_all_identities? do
old_snapshot.identities
else
Enum.reject(old_snapshot.identities, fn old_identity ->
Enum.find(snapshot.identities, fn identity ->
Enum.sort(old_identity.keys) == Enum.sort(identity.keys) &&
old_identity.base_filter == identity.base_filter
end)
end)
end
|> Enum.map(fn identity ->
%Operation.RemoveUniqueIndex{identity: identity, table: snapshot.table}
end)
unique_indexes_to_add =
if rewrite_all_identities? do
snapshot.identities
else
Enum.reject(snapshot.identities, fn identity ->
Enum.find(old_snapshot.identities, fn old_identity ->
Enum.sort(old_identity.keys) == Enum.sort(identity.keys) &&
old_identity.base_filter == identity.base_filter
end)
end)
end
|> Enum.map(fn identity ->
%Operation.AddUniqueIndex{
identity: identity,
table: snapshot.table
}
end)
constraints_to_add =
snapshot.check_constraints
|> Enum.reject(fn constraint ->
Enum.find(old_snapshot.check_constraints, fn old_constraint ->
old_constraint.check == constraint.check && old_constraint.name == constraint.name
end)
end)
|> Enum.map(fn constraint ->
%Operation.AddCheckConstraint{
constraint: constraint,
table: snapshot.table
}
end)
constraints_to_remove =
old_snapshot.check_constraints
|> Enum.reject(fn old_constraint ->
Enum.find(snapshot.check_constraints, fn constraint ->
old_constraint.check == constraint.check && old_constraint.name == constraint.name
end)
end)
|> Enum.map(fn old_constraint ->
%Operation.RemoveCheckConstraint{
constraint: old_constraint,
table: old_snapshot.table
}
end)
[
unique_indexes_to_remove,
attribute_operations,
unique_indexes_to_add,
constraints_to_add,
constraints_to_remove,
acc
]
|> Enum.concat()
|> Enum.map(&Map.put(&1, :multitenancy, snapshot.multitenancy))
|> Enum.map(&Map.put(&1, :old_multitenancy, old_snapshot.multitenancy))
end
defp attribute_operations(snapshot, old_snapshot, opts) do
attributes_to_add =
Enum.reject(snapshot.attributes, fn attribute ->
Enum.find(old_snapshot.attributes, &(&1.name == attribute.name))
end)
attributes_to_remove =
Enum.reject(old_snapshot.attributes, fn attribute ->
Enum.find(snapshot.attributes, &(&1.name == attribute.name))
end)
{attributes_to_add, attributes_to_remove, attributes_to_rename} =
resolve_renames(snapshot.table, attributes_to_add, attributes_to_remove, opts)
attributes_to_alter =
snapshot.attributes
|> Enum.map(fn attribute ->
{attribute,
Enum.find(old_snapshot.attributes, &(&1.name == attribute.name && &1 != attribute))}
end)
|> Enum.filter(&elem(&1, 1))
rename_attribute_events =
Enum.map(attributes_to_rename, fn {new, old} ->
%Operation.RenameAttribute{new_attribute: new, old_attribute: old, table: snapshot.table}
end)
add_attribute_events =
Enum.flat_map(attributes_to_add, fn attribute ->
if attribute.references do
[
%Operation.AddAttribute{
attribute: Map.delete(attribute, :references),
table: snapshot.table
},
%Operation.AlterAttribute{
old_attribute: Map.delete(attribute, :references),
new_attribute: attribute,
table: snapshot.table
}
]
else
[
%Operation.AddAttribute{
attribute: attribute,
table: snapshot.table
}
]
end
end)
alter_attribute_events =
Enum.flat_map(attributes_to_alter, fn {new_attribute, old_attribute} ->
if has_reference?(old_snapshot.multitenancy, old_attribute) and
Map.get(old_attribute, :references) != Map.get(new_attribute, :references) do
[
%Operation.DropForeignKey{
attribute: old_attribute,
table: snapshot.table,
multitenancy: old_snapshot.multitenancy,
direction: :up
},
%Operation.AlterAttribute{
new_attribute: new_attribute,
old_attribute: old_attribute,
table: snapshot.table
},
%Operation.DropForeignKey{
attribute: new_attribute,
table: snapshot.table,
multitenancy: snapshot.multitenancy,
direction: :down
}
]
else
[
%Operation.AlterAttribute{
new_attribute: Map.delete(new_attribute, :references),
old_attribute: Map.delete(old_attribute, :references),
table: snapshot.table
}
]
end
end)
remove_attribute_events =
Enum.map(attributes_to_remove, fn attribute ->
%Operation.RemoveAttribute{
attribute: attribute,
table: snapshot.table,
commented?: !opts.drop_columns
}
end)
add_attribute_events ++
alter_attribute_events ++ remove_attribute_events ++ rename_attribute_events
end
def changing_multitenancy_affects_identities?(snapshot, old_snapshot) do
snapshot.multitenancy != old_snapshot.multitenancy
end
def has_reference?(multitenancy, attribute) do
not is_nil(Map.get(attribute, :references)) and
!(attribute.references.multitenancy &&
attribute.references.multitenancy.strategy == :context &&
(is_nil(multitenancy) || multitenancy.strategy == :attribute))
end
def get_existing_snapshot(snapshot, opts) do
repo_name = snapshot.repo |> Module.split() |> List.last() |> Macro.underscore()
folder =
if snapshot.multitenancy.strategy == :context do
opts.snapshot_path
|> Path.join(repo_name)
|> Path.join("tenants")
else
opts.snapshot_path
|> Path.join(repo_name)
end
snapshot_folder = Path.join(folder, snapshot.table)
if File.exists?(snapshot_folder) do
snapshot_folder
|> File.ls!()
|> Enum.filter(&String.ends_with?(&1, ".json"))
|> Enum.map(&String.trim_trailing(&1, ".json"))
|> Enum.map(&Integer.parse/1)
|> Enum.filter(fn
{_int, remaining} ->
remaining == ""
:error ->
false
end)
|> Enum.map(&elem(&1, 0))
|> case do
[] ->
get_old_snapshot(folder, snapshot)
timestamps ->
timestamp = Enum.max(timestamps)
snapshot_file = Path.join(snapshot_folder, "#{timestamp}.json")
snapshot_file
|> File.read!()
|> load_snapshot()
end
else
get_old_snapshot(folder, snapshot)
end
end
defp get_old_snapshot(folder, snapshot) do
old_snapshot_file = Path.join(folder, "#{snapshot.table}.json")
# This is adapter code for the old version, where migrations were stored in a flat directory
if File.exists?(old_snapshot_file) do
old_snapshot_file
|> File.read!()
|> load_snapshot()
end
end
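# Attempts to turn a removed attribute plus an added attribute into a rename
# by asking the user via the shell; with `no_shell?` set this is unsupported
# and raises instead.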
defp resolve_renames(_table, adding, [], _opts), do: {adding, [], []}
defp resolve_renames(_table, [], removing, _opts), do: {[], removing, []}
defp resolve_renames(table, [adding], [removing], opts) do
if renaming_to?(table, removing.name, adding.name, opts) do
{[], [], [{adding, removing}]}
else
{[adding], [removing], []}
end
end
defp resolve_renames(table, adding, [removing | rest], opts) do
{new_adding, new_removing, new_renames} =
if renaming?(table, removing, opts) do
new_attribute =
if opts.no_shell? do
raise "Unimplemented: Cannot get new_attribute without the shell!"
else
get_new_attribute(adding)
end
{adding -- [new_attribute], [], [{new_attribute, removing}]}
else
{adding, [removing], []}
end
{rest_adding, rest_removing, rest_renames} = resolve_renames(table, new_adding, rest, opts)
{new_adding ++ rest_adding, new_removing ++ rest_removing, rest_renames ++ new_renames}
end
defp renaming_to?(table, removing, adding, opts) do
if opts.no_shell? do
raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing} to #{table}.#{
adding
}? without shell input"
else
Mix.shell().yes?("Are you renaming #{table}.#{removing} to #{table}.#{adding}?")
end
end
defp renaming?(table, removing, opts) do
if opts.no_shell? do
raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing.name}? without shell input"
else
Mix.shell().yes?("Are you renaming #{table}.#{removing.name}?")
end
end
defp get_new_attribute(adding, tries \\ 3)
defp get_new_attribute(_adding, 0) do
raise "Could not get matching name after 3 attempts."
end
defp get_new_attribute(adding, tries) do
name =
Mix.shell().prompt(
"What are you renaming it to?: #{Enum.map_join(adding, ", ", & &1.name)}"
)
name =
if name do
String.trim(name)
else
nil
end
case Enum.find(adding, &(to_string(&1.name) == name)) do
nil -> get_new_attribute(adding, tries - 1)
new_attribute -> new_attribute
end
end
defp timestamp(require_unique? \\ false) do
# Alright, this is silly I know. But migration ids need to be unique
# and "synthesizing" that behavior is significantly more annoying than
# just waiting a bit, ensuring the migration versions are unique.
if require_unique?, do: :timer.sleep(1500)
{{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
"#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}"
end
defp pad(i) when i < 10, do: <<?0, ?0 + i>>
defp pad(i), do: to_string(i)
def get_snapshots(resource, all_resources) do
if AshPostgres.polymorphic?(resource) do
all_resources
|> Enum.flat_map(&Ash.Resource.Info.relationships/1)
|> Enum.filter(&(&1.destination == resource))
|> Enum.reject(&(&1.type == :belongs_to))
|> Enum.filter(& &1.context[:data_layer][:table])
|> Enum.map(fn relationship ->
resource
|> do_snapshot(relationship.context[:data_layer][:table])
|> Map.update!(:attributes, fn attributes ->
Enum.map(attributes, fn attribute ->
if attribute.name == relationship.destination_field do
Map.put(attribute, :references, %{
destination_field: relationship.source_field,
multitenancy: multitenancy(relationship.source),
table: AshPostgres.table(relationship.source),
on_delete: AshPostgres.polymorphic_on_delete(relationship.source),
on_update: AshPostgres.polymorphic_on_update(relationship.source),
name:
AshPostgres.polymorphic_name(relationship.source) ||
"#{relationship.context[:data_layer][:table]}_#{relationship.source_field}_fkey"
})
else
attribute
end
end)
end)
end)
else
[do_snapshot(resource)]
end
end
defp do_snapshot(resource, table \\ nil) do
snapshot = %{
attributes: attributes(resource),
identities: identities(resource),
table: table || AshPostgres.table(resource),
check_constraints: check_constraints(resource),
repo: AshPostgres.repo(resource),
multitenancy: multitenancy(resource),
base_filter: AshPostgres.base_filter_sql(resource),
has_create_action: has_create_action?(resource)
}
hash =
:sha256
|> :crypto.hash(inspect(snapshot))
|> Base.encode16()
Map.put(snapshot, :hash, hash)
end
defp has_create_action?(resource) do
resource
|> Ash.Resource.Info.actions()
|> Enum.any?(&(&1.type == :create))
end
defp check_constraints(resource) do
resource
|> AshPostgres.check_constraints()
|> Enum.filter(& &1.check)
|> case do
[] ->
[]
constraints ->
base_filter = Ash.Resource.Info.base_filter(resource)
if base_filter && !AshPostgres.base_filter_sql(resource) do
raise """
Cannot create a check constraint for a resource with a base filter without also configuring `base_filter_sql`.
You must provide the `base_filter_sql` option, or manually add the check constraint to your migrations.
"""
end
constraints
end
|> Enum.map(fn constraint ->
%{
name: constraint.name,
attribute: List.wrap(constraint.attribute),
check: constraint.check,
base_filter: AshPostgres.base_filter_sql(resource)
}
end)
end
defp multitenancy(resource) do
strategy = Ash.Resource.Info.multitenancy_strategy(resource)
attribute = Ash.Resource.Info.multitenancy_attribute(resource)
global = Ash.Resource.Info.multitenancy_global?(resource)
%{
strategy: strategy,
attribute: attribute,
global: global
}
end
defp attributes(resource) do
repo = AshPostgres.repo(resource)
resource
|> Ash.Resource.Info.attributes()
|> Enum.sort_by(& &1.name)
|> Enum.map(&Map.take(&1, [:name, :type, :default, :allow_nil?, :generated?, :primary_key?]))
|> Enum.map(fn attribute ->
default = default(attribute, repo)
attribute
|> Map.put(:default, default)
|> Map.update!(:type, fn type ->
migration_type(type)
end)
end)
|> Enum.map(fn attribute ->
references = find_reference(resource, attribute)
Map.put(attribute, :references, references)
end)
end
defp find_reference(resource, attribute) do
Enum.find_value(Ash.Resource.Info.relationships(resource), fn relationship ->
if attribute.name == relationship.source_field && relationship.type == :belongs_to &&
foreign_key?(relationship) do
configured_reference = configured_reference(resource, attribute.name, relationship.name)
%{
destination_field: relationship.destination_field,
multitenancy: multitenancy(relationship.destination),
on_delete: configured_reference.on_delete,
on_update: configured_reference.on_update,
name: configured_reference.name,
table:
relationship.context[:data_layer][:table] ||
AshPostgres.table(relationship.destination)
}
end
end)
end
defp configured_reference(resource, attribute, relationship) do
ref =
resource
|> AshPostgres.references()
|> Enum.find(&(&1.relationship == relationship))
|> Kernel.||(%{
on_delete: nil,
on_update: nil,
name: nil
})
Map.put(ref, :name, ref.name || "#{AshPostgres.table(resource)}_#{attribute}_fkey")
end
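# Maps Ash types to migration column types; types without a special case
# fall back to their `Ash.Type.storage_type/1`, with `:string` widened to
# `:text`.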
defp migration_type({:array, type}), do: {:array, migration_type(type)}
defp migration_type(Ash.Type.CiString), do: :citext
defp migration_type(Ash.Type.UUID), do: :uuid
defp migration_type(Ash.Type.Integer), do: :bigint
defp migration_type(other), do: migration_type_from_storage_type(Ash.Type.storage_type(other))
defp migration_type_from_storage_type(:string), do: :text
defp migration_type_from_storage_type(storage_type), do: storage_type
defp foreign_key?(relationship) do
Ash.DataLayer.data_layer(relationship.source) == AshPostgres.DataLayer &&
AshPostgres.repo(relationship.source) == AshPostgres.repo(relationship.destination)
end
defp identities(resource) do
resource
|> Ash.Resource.Info.identities()
|> case do
[] ->
[]
identities ->
base_filter = Ash.Resource.Info.base_filter(resource)
if base_filter && !AshPostgres.base_filter_sql(resource) do
raise """
Cannot create a unique index for a resource with a base filter without also configuring `base_filter_sql`.
You must provide the `base_filter_sql` option, or skip unique indexes with `skip_unique_indexes`.
"""
end
identities
end
|> Enum.reject(fn identity ->
identity.name in AshPostgres.skip_unique_indexes?(resource)
end)
|> Enum.filter(fn identity ->
Enum.all?(identity.keys, fn key ->
2021-02-23 17:53:18 +13:00
Ash.Resource.Info.attribute(resource, key)
end)
end)
|> Enum.map(fn identity ->
%{identity | keys: Enum.sort(identity.keys)}
end)
|> Enum.sort_by(& &1.name)
|> Enum.map(&Map.take(&1, [:name, :keys]))
|> Enum.map(&Map.put(&1, :base_filter, AshPostgres.base_filter_sql(resource)))
end
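# Function and tuple defaults cannot be serialized into a migration, so a few
# well-known functions are translated to SQL fragments and anything else
# becomes `"nil"` in the generated column definition.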
@uuid_functions [&Ash.UUID.generate/0, &Ecto.UUID.generate/0]
defp default(%{default: default}, repo) when is_function(default) do
cond do
default in @uuid_functions && "uuid-ossp" in (repo.config()[:installed_extensions] || []) ->
~S[fragment("uuid_generate_v4()")]
default == (&DateTime.utc_now/0) ->
~S[fragment("now()")]
true ->
"nil"
end
end
defp default(%{default: {_, _, _}}, _), do: "nil"
defp default(%{default: nil}, _), do: "nil"
defp default(%{default: value, type: type}, _) do
case Ash.Type.dump_to_native(type, value) do
{:ok, value} -> inspect(value)
_ -> "nil"
end
end
defp snapshot_to_binary(snapshot) do
snapshot
|> Map.update!(:attributes, fn attributes ->
Enum.map(attributes, fn attribute ->
%{attribute | type: sanitize_type(attribute.type)}
end)
end)
|> Jason.encode!(pretty: true)
end
defp sanitize_type({:array, type}) do
["array", sanitize_type(type)]
end
defp sanitize_type(type) do
type
end
defp load_snapshot(json) do
json
|> Jason.decode!(keys: :atoms!)
|> sanitize_snapshot()
end
defp sanitize_snapshot(snapshot) do
snapshot
|> Map.put_new(:has_create_action, true)
|> Map.update!(:identities, fn identities ->
Enum.map(identities, &load_identity/1)
end)
|> Map.update!(:attributes, fn attributes ->
Enum.map(attributes, &load_attribute(&1, snapshot.table))
end)
|> Map.put_new(:check_constraints, [])
|> Map.update!(:check_constraints, &load_check_constraints/1)
|> Map.update!(:repo, &String.to_atom/1)
|> Map.put_new(:multitenancy, %{
attribute: nil,
strategy: nil,
global: nil
})
|> Map.update!(:multitenancy, &load_multitenancy/1)
end
defp load_check_constraints(constraints) do
Enum.map(constraints, fn constraint ->
Map.update!(constraint, :attribute, fn attribute ->
attribute
|> List.wrap()
|> Enum.map(&String.to_atom/1)
end)
end)
end
defp load_multitenancy(multitenancy) do
multitenancy
|> Map.update!(:strategy, fn strategy -> strategy && String.to_atom(strategy) end)
|> Map.update!(:attribute, fn attribute -> attribute && String.to_atom(attribute) end)
end
defp load_attribute(attribute, table) do
attribute
|> Map.update!(:type, &load_type/1)
|> Map.update!(:name, &String.to_atom/1)
|> Map.put_new(:default, "nil")
|> Map.update!(:default, &(&1 || "nil"))
|> Map.update!(:references, fn
nil ->
nil
references ->
references
|> Map.update!(:destination_field, &String.to_atom/1)
|> Map.put_new(:on_delete, nil)
|> Map.put_new(:on_update, nil)
|> Map.update!(:on_delete, &(&1 && String.to_atom(&1)))
|> Map.update!(:on_update, &(&1 && String.to_atom(&1)))
|> Map.put(:name, Map.get(references, :name) || "#{table}_#{attribute.name}_fkey")
|> Map.put_new(:multitenancy, %{
attribute: nil,
strategy: nil,
global: nil
})
|> Map.update!(:multitenancy, &load_multitenancy/1)
end)
end
defp load_type(["array", type]) do
{:array, load_type(type)}
end
defp load_type(type) do
String.to_atom(type)
end
defp load_identity(identity) do
identity
|> Map.update!(:name, &String.to_atom/1)
|> Map.update!(:keys, fn keys ->
keys
|> Enum.map(&String.to_atom/1)
|> Enum.sort()
end)
|> Map.put_new(:base_filter, nil)
end
end