commit 6fb41026f82fa48adfbc15e6edcc9bab57e6b924 Author: Zach Daniel Date: Fri Sep 22 22:52:22 2023 -0400 init: copy and gouge ash_postgres diff --git a/.check.exs b/.check.exs new file mode 100644 index 0000000..91e14cb --- /dev/null +++ b/.check.exs @@ -0,0 +1,21 @@ +[ + ## all available options with default values (see `mix check` docs for description) + # parallel: true, + # skipped: true, + retry: false, + ## list of tools (see `mix check` docs for defaults) + tools: [ + ## curated tools may be disabled (e.g. the check for compilation warnings) + # {:compiler, false}, + + ## ...or adjusted (e.g. use one-line formatter for more compact credo output) + # {:credo, "mix credo --format oneline"}, + + {:check_formatter, command: "mix spark.formatter --check"}, + {:check_migrations, command: "mix test.check_migrations"} + ## custom new tools may be added (mix tasks or arbitrary commands) + # {:my_mix_task, command: "mix release", env: %{"MIX_ENV" => "prod"}}, + # {:my_arbitrary_tool, command: "npm test", cd: "assets"}, + # {:my_arbitrary_script, command: ["my_script", "argument with spaces"], cd: "scripts"} + ] +] diff --git a/.credo.exs b/.credo.exs new file mode 100644 index 0000000..a986fd0 --- /dev/null +++ b/.credo.exs @@ -0,0 +1,184 @@ +# This file contains the configuration for Credo and you are probably reading +# this after creating it with `mix credo.gen.config`. +# +# If you find anything wrong or unclear in this file, please report an +# issue on GitHub: https://github.com/rrrene/credo/issues +# +%{ + # + # You can have as many configs as you like in the `configs:` field. + configs: [ + %{ + # + # Run any config using `mix credo -C `. If no config name is given + # "default" is used. + # + name: "default", + # + # These are the files included in the analysis: + files: %{ + # + # You can give explicit globs or simply directories. + # In the latter case `**/*.{ex,exs}` will be used. + # + included: [ + "lib/", + "src/", + "test/", + "web/", + "apps/*/lib/", + "apps/*/src/", + "apps/*/test/", + "apps/*/web/" + ], + excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"] + }, + # + # Load and configure plugins here: + # + plugins: [], + # + # If you create your own checks, you must specify the source files for + # them here, so they can be loaded by Credo before running the analysis. + # + requires: [], + # + # If you want to enforce a style guide and need a more traditional linting + # experience, you can change `strict` to `true` below: + # + strict: false, + # + # To modify the timeout for parsing files, change this value: + # + parse_timeout: 5000, + # + # If you want to use uncolored output by default, you can change `color` + # to `false` below: + # + color: true, + # + # You can customize the parameters of any check by adding a second element + # to the tuple. + # + # To disable a check put `false` as second element: + # + # {Credo.Check.Design.DuplicatedCode, false} + # + checks: [ + # + ## Consistency Checks + # + {Credo.Check.Consistency.ExceptionNames, []}, + {Credo.Check.Consistency.LineEndings, []}, + {Credo.Check.Consistency.ParameterPatternMatching, []}, + {Credo.Check.Consistency.SpaceAroundOperators, false}, + {Credo.Check.Consistency.SpaceInParentheses, []}, + {Credo.Check.Consistency.TabsOrSpaces, []}, + + # + ## Design Checks + # + # You can customize the priority of any check + # Priority values are: `low, normal, high, higher` + # + {Credo.Check.Design.AliasUsage, false}, + # You can also customize the exit_status of each check. 
+ # If you don't want TODO comments to cause `mix credo` to fail, just + # set this value to 0 (zero). + # + {Credo.Check.Design.TagTODO, false}, + {Credo.Check.Design.TagFIXME, []}, + + # + ## Readability Checks + # + {Credo.Check.Readability.AliasOrder, []}, + {Credo.Check.Readability.FunctionNames, []}, + {Credo.Check.Readability.LargeNumbers, []}, + {Credo.Check.Readability.MaxLineLength, [priority: :low, max_length: 120]}, + {Credo.Check.Readability.ModuleAttributeNames, []}, + {Credo.Check.Readability.ModuleDoc, []}, + {Credo.Check.Readability.ModuleNames, []}, + {Credo.Check.Readability.ParenthesesInCondition, false}, + {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, + {Credo.Check.Readability.PredicateFunctionNames, []}, + {Credo.Check.Readability.PreferImplicitTry, []}, + {Credo.Check.Readability.RedundantBlankLines, []}, + {Credo.Check.Readability.Semicolons, []}, + {Credo.Check.Readability.SpaceAfterCommas, []}, + {Credo.Check.Readability.StringSigils, []}, + {Credo.Check.Readability.TrailingBlankLine, []}, + {Credo.Check.Readability.TrailingWhiteSpace, []}, + {Credo.Check.Readability.UnnecessaryAliasExpansion, []}, + {Credo.Check.Readability.VariableNames, []}, + + # + ## Refactoring Opportunities + # + {Credo.Check.Refactor.CondStatements, []}, + {Credo.Check.Refactor.CyclomaticComplexity, false}, + {Credo.Check.Refactor.FunctionArity, []}, + {Credo.Check.Refactor.LongQuoteBlocks, []}, + {Credo.Check.Refactor.MapInto, []}, + {Credo.Check.Refactor.MatchInCondition, []}, + {Credo.Check.Refactor.NegatedConditionsInUnless, []}, + {Credo.Check.Refactor.NegatedConditionsWithElse, []}, + {Credo.Check.Refactor.Nesting, [max_nesting: 5]}, + {Credo.Check.Refactor.UnlessWithElse, []}, + {Credo.Check.Refactor.WithClauses, []}, + + # + ## Warnings + # + {Credo.Check.Warning.BoolOperationOnSameValues, []}, + {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, + {Credo.Check.Warning.IExPry, []}, + {Credo.Check.Warning.IoInspect, []}, + {Credo.Check.Warning.LazyLogging, []}, + {Credo.Check.Warning.MixEnv, false}, + {Credo.Check.Warning.OperationOnSameValues, []}, + {Credo.Check.Warning.OperationWithConstantResult, []}, + {Credo.Check.Warning.RaiseInsideRescue, []}, + {Credo.Check.Warning.UnusedEnumOperation, []}, + {Credo.Check.Warning.UnusedFileOperation, []}, + {Credo.Check.Warning.UnusedKeywordOperation, []}, + {Credo.Check.Warning.UnusedListOperation, []}, + {Credo.Check.Warning.UnusedPathOperation, []}, + {Credo.Check.Warning.UnusedRegexOperation, []}, + {Credo.Check.Warning.UnusedStringOperation, []}, + {Credo.Check.Warning.UnusedTupleOperation, []}, + {Credo.Check.Warning.UnsafeExec, []}, + + # + # Checks scheduled for next check update (opt-in for now, just replace `false` with `[]`) + + # + # Controversial and experimental checks (opt-in, just replace `false` with `[]`) + # + {Credo.Check.Readability.StrictModuleLayout, false}, + {Credo.Check.Consistency.MultiAliasImportRequireUse, false}, + {Credo.Check.Consistency.UnusedVariableNames, false}, + {Credo.Check.Design.DuplicatedCode, false}, + {Credo.Check.Readability.AliasAs, false}, + {Credo.Check.Readability.MultiAlias, false}, + {Credo.Check.Readability.Specs, false}, + {Credo.Check.Readability.SinglePipe, false}, + {Credo.Check.Readability.WithCustomTaggedTuple, false}, + {Credo.Check.Refactor.ABCSize, false}, + {Credo.Check.Refactor.AppendSingleItem, false}, + {Credo.Check.Refactor.DoubleBooleanNegation, false}, + {Credo.Check.Refactor.ModuleDependencies, false}, + {Credo.Check.Refactor.NegatedIsNil, false}, + 
{Credo.Check.Refactor.PipeChainStart, false}, + {Credo.Check.Refactor.VariableRebinding, false}, + {Credo.Check.Warning.LeakyEnvironment, false}, + {Credo.Check.Warning.MapGetUnsafePass, false}, + {Credo.Check.Warning.UnsafeToAtom, false} + + # + # Custom checks can be created using `mix credo.gen.check`. + # + ] + } + ] +} diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 0000000..16242f7 --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,54 @@ +spark_locals_without_parens = [ + base_filter_sql: 1, + check: 1, + check_constraint: 2, + check_constraint: 3, + code?: 1, + concurrently: 1, + create?: 1, + deferrable: 1, + down: 1, + exclusion_constraint_names: 1, + foreign_key_names: 1, + identity_index_names: 1, + ignore?: 1, + include: 1, + index: 1, + index: 2, + message: 1, + migrate?: 1, + migration_defaults: 1, + migration_ignore_attributes: 1, + migration_types: 1, + name: 1, + on_delete: 1, + on_update: 1, + polymorphic?: 1, + polymorphic_name: 1, + polymorphic_on_delete: 1, + polymorphic_on_update: 1, + prefix: 1, + reference: 1, + reference: 2, + repo: 1, + schema: 1, + skip_unique_indexes: 1, + statement: 1, + statement: 2, + table: 1, + template: 1, + unique: 1, + unique_index_names: 1, + up: 1, + update?: 1, + using: 1, + where: 1 +] + +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"], + locals_without_parens: spark_locals_without_parens, + export: [ + locals_without_parens: spark_locals_without_parens + ] +] diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..7aa6f74 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at zach@zachdaniel.dev. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000..f537454 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,2 @@ +# Contributing Guidelines +Contributing guidelines can be found in the core project, [ash](https://github.com/ash-project/ash/blob/main/.github/CONTRIBUTING.md) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..1f47341 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: bug, needs review +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. If you are not sure if the bug is related to `ash` or an extension, log it with [ash](https://github.com/ash-project/ash/issues/new) and we will move it. + +**To Reproduce** +A minimal set of resource definitions and calls that can reproduce the bug. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +** Runtime + - Elixir version + - Erlang version + - OS + - Ash version + - any related extension versions + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..f347dcb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,36 @@ +--- +name: Proposal +about: Suggest an idea for this project +title: '' +labels: enhancement, needs review +assignees: '' + +--- + +**Is your feature request related to a problem? 
Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Express the feature either with a change to resource syntax, or with a change to the resource interface** + +For example + +```elixir + attributes do + attribute :foo, :integer, bar: 10 # <- Adding `bar` here would cause + end +``` + +Or + +```elixir + Api.read(:resource, bar: 10) # <- Adding `bar` here would cause +``` + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..8c13744 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,4 @@ +### Contributor checklist + +- [ ] Bug fixes include regression tests +- [ ] Features include unit/acceptance tests diff --git a/.github/workflows/elixir.yml b/.github/workflows/elixir.yml new file mode 100644 index 0000000..e015e71 --- /dev/null +++ b/.github/workflows/elixir.yml @@ -0,0 +1,15 @@ +name: CI +on: + push: + tags: + - "v*" + branches: [main] + pull_request: + branches: [main] +jobs: + ash-ci: + uses: ash-project/ash/.github/workflows/ash-ci.yml@main + with: + sqlite: true + secrets: + hex_api_key: ${{ secrets.HEX_API_KEY }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2a07a23 --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +ash_sqlite-*.tar + +test_migration_path +test_snapshots_path + diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000..44acbf0 --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +erlang 26.0.2 +elixir 1.15.4 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..3f9c00f --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,7 @@ +{ + "cSpell.words": [ + "citext", + "mapset", + "strpos" + ] +} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4eb51a5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Zachary Scott Daniel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6c7352d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,52 @@
+# AshSqlite
+
+![Elixir CI](https://github.com/ash-project/ash_sqlite/workflows/Elixir%20CI/badge.svg)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![Coverage Status](https://coveralls.io/repos/github/ash-project/ash_sqlite/badge.svg?branch=main)](https://coveralls.io/github/ash-project/ash_sqlite?branch=main)
+[![Hex version badge](https://img.shields.io/hexpm/v/ash_sqlite.svg)](https://hex.pm/packages/ash_sqlite)
+
+## DSL
+
+See `AshSqlite.DataLayer` for DSL documentation.
+
+## Usage
+
+Add `ash_sqlite` to your `mix.exs` file.
+
+```elixir
+{:ash_sqlite, "~> 0.1.0"}
+```
+
+To use this data layer, change your Ecto repos from `use Ecto.Repo` to
+`use AshSqlite.Repo`, because AshSqlite adds functionality to Ecto repos.
+
+Then, configure each of your `Ash.Resource` resources by adding `use Ash.Resource, data_layer: AshSqlite.DataLayer` like so:
+
+```elixir
+defmodule MyApp.SomeResource do
+  use Ash.Resource, data_layer: AshSqlite.DataLayer
+
+  sqlite do
+    repo MyApp.Repo
+    table "table_name"
+  end
+
+  attributes do
+    # ... Attribute definitions
+  end
+end
+```
+
+## Generating Migrations
+
+See the documentation for `Mix.Tasks.AshSqlite.GenerateMigrations` for how to generate migrations from your resources.
+
+# Contributors
+
+Ash is made possible by its excellent community!
+
+
+
+
+
+[Become a contributor](https://ash-hq.org/docs/guides/ash/latest/how_to/contribute.md)
diff --git a/config/config.exs b/config/config.exs
new file mode 100644
index 0000000..5bc925f
--- /dev/null
+++ b/config/config.exs
@@ -0,0 +1,52 @@
+import Config
+
+config :ash, :use_all_identities_in_manage_relationship?, false
+
+if Mix.env() == :dev do
+  config :git_ops,
+    mix_project: AshSqlite.MixProject,
+    changelog_file: "CHANGELOG.md",
+    repository_url: "https://github.com/ash-project/ash_sqlite",
+    # Instructs the tool to manage your mix version in your `mix.exs` file
+    # See below for more information
+    manage_mix_version?: true,
+    # Instructs the tool to manage the version in your README.md
+    # Pass in `true` to use `"README.md"` or a string to customize
+    manage_readme_version: ["README.md", "documentation/tutorials/get-started-with-sqlite.md"],
+    version_tag_prefix: "v"
+end
+
+if Mix.env() == :test do
+  config :ash, :validate_api_resource_inclusion?, false
+  config :ash, :validate_api_config_inclusion?, false
+
+  config :ash_sqlite, AshSqlite.TestRepo,
+    username: "sqlite",
+    database: "ash_sqlite",
+    hostname: "localhost",
+    pool: Ecto.Adapters.SQL.Sandbox
+
+  # sobelow_skip ["Config.Secrets"]
+  config :ash_sqlite, AshSqlite.TestRepo, password: "sqlite"
+
+  config :ash_sqlite, AshSqlite.TestRepo, migration_primary_key: [name: :id, type: :binary_id]
+
+  config :ash_sqlite, AshSqlite.TestNoSandboxRepo,
+    username: "sqlite",
+    database: "ash_sqlite_test",
+    hostname: "localhost"
+
+  # sobelow_skip ["Config.Secrets"]
+  config :ash_sqlite, AshSqlite.TestNoSandboxRepo, password: "sqlite"
+
+  config :ash_sqlite, AshSqlite.TestNoSandboxRepo,
+    migration_primary_key: [name: :id, type: :binary_id]
+
+  config :ash_sqlite,
+    ecto_repos: [AshSqlite.TestRepo, AshSqlite.TestNoSandboxRepo],
+    ash_apis: [
+      AshSqlite.Test.Api
+    ]
+
+  config :logger, level: :warning
+end
diff --git a/documentation/how_to/join-manual-relationships.md b/documentation/how_to/join-manual-relationships.md
new file mode 100644
index 0000000..ad431cf
--- /dev/null
+++ b/documentation/how_to/join-manual-relationships.md
@@ -0,0 +1,87 @@
+# Join Manual Relationships
+
+See [Defining Manual Relationships](https://hexdocs.pm/ash/defining-manual-relationships.html) for an idea of manual relationships in general.
+Manual relationships allow for expressing complex/non-typical relationships between resources in a standard way.
+Individual data layers may interact with manual relationships in their own way, so see their corresponding guides.
+
+## Example
+
+```elixir
+# in the resource
+
+relationships do
+  has_many :tickets_above_threshold, Helpdesk.Support.Ticket do
+    manual Helpdesk.Support.Ticket.Relationships.TicketsAboveThreshold
+  end
+end
+
+# implementation
+defmodule Helpdesk.Support.Ticket.Relationships.TicketsAboveThreshold do
+  use Ash.Resource.ManualRelationship
+  use AshSqlite.ManualRelationship
+
+  require Ash.Query
+  require Ecto.Query
+
+  def load(records, _opts, %{query: query, actor: actor, authorize?: authorize?}) do
+    # Use existing records to limit results
+    rep_ids = Enum.map(records, & &1.id)
+    # Using Ash to get the destination records is ideal, so you can authorize access like normal,
+    # but if you need to use a raw Ecto query here, you can, as long as you return the right structure.
+
+    {:ok,
+     query
+     |> Ash.Query.filter(representative_id in ^rep_ids)
+     |> Ash.Query.filter(priority > representative.priority_threshold)
+     |> Helpdesk.Support.read!(actor: actor, authorize?: authorize?)
+     # Return the items grouped by the primary key of the source, i.e. representative.id => [...tickets above threshold]
+     |> Enum.group_by(& &1.representative_id)}
+  end
+
+  # query is the "source" query that is being built.
+
+  # _opts are options provided to the manual relationship, i.e. `{Manual, opt: :val}`
+
+  # current_binding is what the source of the relationship is bound to. Access fields with `as(^current_binding).field`
+
+  # as_binding is the binding that your join should create. When you join, make sure you say `as: ^as_binding` on the
+  # part of the query that represents the destination of the relationship
+
+  # type is `:inner` or `:left`.
+  # destination_query is what you should join to in order to add the destination to the query, i.e. `join: dest in ^destination_query`
+  def ash_sqlite_join(query, _opts, current_binding, as_binding, :inner, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in query,
+       join: dest in ^destination_query,
+       as: ^as_binding,
+       on: dest.representative_id == as(^current_binding).id,
+       on: dest.priority > as(^current_binding).priority_threshold
+     )}
+  end
+
+  def ash_sqlite_join(query, _opts, current_binding, as_binding, :left, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in query,
+       left_join: dest in ^destination_query,
+       as: ^as_binding,
+       on: dest.representative_id == as(^current_binding).id,
+       on: dest.priority > as(^current_binding).priority_threshold
+     )}
+  end
+
+  # _opts are options provided to the manual relationship, i.e. `{Manual, opt: :val}`
+
+  # current_binding is what the source of the relationship is bound to. Access fields with `parent_as(^current_binding).field`
+
+  # as_binding is the binding that has already been created for your join. Access fields on it via `as(^as_binding)`
+
+  # destination_query is what you should use as the basis of your query
+  def ash_sqlite_subquery(_opts, current_binding, as_binding, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in destination_query,
+       where: parent_as(^current_binding).id == as(^as_binding).representative_id,
+       where: as(^as_binding).priority > parent_as(^current_binding).priority_threshold
+     )}
+  end
+end
+```
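+
+With those callbacks in place, the manual relationship can be loaded and filtered like any other relationship. A minimal sketch, assuming the `Helpdesk.Support` API used in the example above (the threshold value is just a placeholder):
+
+```elixir
+require Ash.Query
+
+# Uses `load/3` above to load the related tickets
+Helpdesk.Support.Representative
+|> Ash.Query.load(:tickets_above_threshold)
+|> Helpdesk.Support.read!()
+
+# Uses the `ash_sqlite_join/6` / `ash_sqlite_subquery/4` callbacks above to filter in the database
+Helpdesk.Support.Representative
+|> Ash.Query.filter(tickets_above_threshold.priority > 10)
+|> Helpdesk.Support.read!()
+```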
diff --git a/documentation/how_to/test-with-sqlite.md b/documentation/how_to/test-with-sqlite.md
new file mode 100644
index 0000000..772c887
--- /dev/null
+++ b/documentation/how_to/test-with-sqlite.md
@@ -0,0 +1,11 @@
+# Testing With Sqlite
+
+Testing resources with SQLite generally requires passing `async: false` to
+your tests, due to SQLite's limitation of having a single write transaction
+open at any one time.
+
+This should be coupled with the following configuration, to make sure that Ash does not spawn any tasks:
+
+```elixir
+config :ash, :disable_async?, true
+```
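+
+For example, a test module for SQLite-backed resources might look like the following (the module and test names here are just placeholders):
+
+```elixir
+defmodule MyApp.SupportTest do
+  # SQLite only allows one open write transaction at a time, so run these tests serially
+  use ExUnit.Case, async: false
+
+  test "resources backed by AshSqlite can be exercised" do
+    # interact with your AshSqlite-backed resources as usual here
+    assert true
+  end
+end
+```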
diff --git a/documentation/how_to/using-fragments.md b/documentation/how_to/using-fragments.md
new file mode 100644
index 0000000..f03d881
--- /dev/null
+++ b/documentation/how_to/using-fragments.md
@@ -0,0 +1,25 @@
+# Using Fragments
+
+Fragments allow you to use arbitrary SQLite expressions in your queries. Fragments can often be an escape hatch, allowing you to do things that aren't officially supported by Ash.
+
+## Examples
+
+Use simple expressions
+
+```elixir
+fragment("? / ?", points, count)
+```
+
+Call functions
+
+```elixir
+fragment("upper(?)", name)
+```
+
+Use entire queries
+
+```elixir
+fragment("points > (SELECT SUM(points) FROM games WHERE user_id = ? AND id != ?)", user_id, id)
+```
+
+Using entire queries like the above is a last resort, but can often help us avoid having to add extra structure unnecessarily.
diff --git a/documentation/topics/migrations_and_tasks.md b/documentation/topics/migrations_and_tasks.md
new file mode 100644
index 0000000..0f89637
--- /dev/null
+++ b/documentation/topics/migrations_and_tasks.md
@@ -0,0 +1,106 @@
+# Migrations
+
+## Migration Generator Primer
+
+
+
+## Tasks
+
+The available tasks are:
+
+- `mix ash_sqlite.generate_migrations`
+- `mix ash_sqlite.create`
+- `mix ash_sqlite.migrate`
+- `mix ash_sqlite.drop`
+
+AshSqlite is built on top of Ecto, so much of its behavior is pass-through/orchestration of that tooling.
+
+## Basic Workflow
+
+- Make resource changes
+- Run `mix ash_sqlite.generate_migrations` to generate migrations and resource snapshots
+- Run `mix ash_sqlite.migrate` to run those migrations
+
+For more information on generating migrations, see the module documentation here:
+`Mix.Tasks.AshSqlite.GenerateMigrations`, or run `mix help ash_sqlite.generate_migrations`.
+
+For running your migrations, there is a mix task that will find all of the repos configured in your apis and run their
+migrations. It is a thin wrapper around `mix ecto.migrate`. Ours is called `mix ash_sqlite.migrate`.
+
+If you want to run or roll back individual migrations, use the corresponding `mix ecto.migrate` and `mix ecto.rollback` tasks directly.
+
+### Regenerating Migrations
+
+Often, you will run into a situation where you want to make a slight change to a resource after you've already generated and run migrations. If you are using git and would like to undo those changes and then regenerate the migrations, this script may prove useful:
+
+```bash
+#!/bin/bash
+
+# Get count of untracked migrations
+N_MIGRATIONS=$(git ls-files --others priv/repo/migrations | wc -l)
+
+# Rollback untracked migrations
+mix ecto.rollback -n $N_MIGRATIONS
+
+# Delete untracked migrations and snapshots
+git ls-files --others priv/repo/migrations | xargs rm
+git ls-files --others priv/resource_snapshots | xargs rm
+
+# Regenerate migrations
+mix ash_sqlite.generate_migrations
+
+# Run migrations if flag
+if echo $* | grep -e "-m" -q
+then
+  mix ecto.migrate
+fi
+```
+
+After saving this file to something like `regen.sh`, make it executable with `chmod +x regen.sh`. Now you can run it with `./regen.sh`. If you would like the migrations to automatically run after regeneration, add the `-m` flag: `./regen.sh -m`.
+
+## Multiple Repos
+
+If you are using multiple repos, you will likely need to use `mix ecto.migrate` and manage it separately for each repo, as the options would
+be applied to both repos, which wouldn't make sense.
+
+## Running Migrations in Production
+
+Define a module similar to the following:
+
+```elixir
+defmodule MyApp.Release do
+  @moduledoc """
+  Houses tasks that need to be executed in the released application (because mix is not present in releases).
+  """
+  @app :my_app
+
+  def migrate do
+    load_app()
+
+    for repo <- repos() do
+      {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
+    end
+  end
+
+  def rollback(repo, version) do
+    load_app()
+    {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
+  end
+
+  defp repos do
+    apis()
+    |> Enum.flat_map(fn api ->
+      api
+      |> Ash.Api.Info.resources()
+      |> Enum.map(&AshSqlite.repo/1)
+    end)
+    |> Enum.uniq()
+  end
+
+  defp apis do
+    Application.fetch_env!(:my_app, :ash_apis)
+  end
+
+  defp load_app do
+    Application.load(@app)
+  end
+end
+```
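+
+Then, assuming a release named `my_app` (adjust the release, app, and repo names to match your own), the migrations can be run against the released application with `eval`:
+
+```bash
+# Run all pending migrations
+bin/my_app eval "MyApp.Release.migrate()"
+
+# Roll a specific repo back to a given version (placeholder version number)
+bin/my_app eval "MyApp.Release.rollback(MyApp.Repo, 20230922000000)"
+```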
+ """ + @app :my_ap + def migrate do + load_app() + + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + load_app() + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + apis() + |> Enum.flat_map(fn api -> + api + |> Ash.Api.Info.resources() + |> Enum.map(&AshSqlite.repo/1) + end) + |> Enum.uniq() + end + + defp apis do + Application.fetch_env!(:my_app, :ash_apis) + end + + defp load_app do + Application.load(@app) + end +end +``` diff --git a/documentation/topics/polymorphic_resources.md b/documentation/topics/polymorphic_resources.md new file mode 100644 index 0000000..d4cea32 --- /dev/null +++ b/documentation/topics/polymorphic_resources.md @@ -0,0 +1,82 @@ +# Polymorphic Resources + +To support leveraging the same resource backed by multiple tables (useful for things like polymorphic associations), AshSqlite supports setting the `data_layer.table` context for a given resource. For this example, lets assume that you have a `MyApp.Post` resource and a `MyApp.Comment` resource. For each of those resources, users can submit `reactions`. However, you want a separate table for `post_reactions` and `comment_reactions`. You could accomplish that like so: + +```elixir +defmodule MyApp.Reaction do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + polymorphic? true # Without this, `table` is a required configuration + end + + attributes do + attribute(:resource_id, :uuid) + end + + ... +end +``` + +Then, in your related resources, you set the table context like so: + +```elixir +defmodule MyApp.Post do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + ... + + relationships do + has_many :reactions, MyApp.Reaction, + relationship_context: %{data_layer: %{table: "post_reactions"}}, + destination_attribute: :resource_id + end +end + +defmodule MyApp.Comment do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + ... + + relationships do + has_many :reactions, MyApp.Reaction, + relationship_context: %{data_layer: %{table: "comment_reactions"}}, + destination_attribute: :resource_id + end +end +``` + +With this, when loading or editing related data, ash will automatically set that context. +For managing related data, see `Ash.Changeset.manage_relationship/4` and other relationship functions +in `Ash.Changeset` + +## Table specific actions + +To make actions use a specific table, you can use the `set_context` query preparation/change. + +For example: + +```elixir +defmodule MyApp.Reaction do + actions do + read :for_comments do + prepare set_context(%{data_layer: %{table: "comment_reactions"}}) + end + + read :for_posts do + prepare set_context(%{data_layer: %{table: "post_reactions"}}) + end + end +end +``` + +## Migrations + +When a migration is marked as `polymorphic? true`, the migration generator will look at +all resources that are related to it, that set the `%{data_layer: %{table: "table"}}` context. +For each of those, a migration is generated/managed automatically. This means that adding reactions +to a new resource is as easy as adding the relationship and table context, and then running +`mix ash_sqlite.generate_migrations`. 
diff --git a/documentation/topics/references.md b/documentation/topics/references.md
new file mode 100644
index 0000000..ceecb61
--- /dev/null
+++ b/documentation/topics/references.md
@@ -0,0 +1,23 @@
+# References
+
+To configure the foreign keys on a resource, we use the `references` block.
+
+For example:
+
+```elixir
+references do
+  reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
+end
+```
+
+## Important
+
+No resource logic is applied with these operations! No authorization rules or validations take place, and no notifications are issued. This operation happens *directly* in the database.
+
+## Nothing vs Restrict
+
+The difference between `:nothing` and `:restrict` is subtle and, if you are unsure, choose `:nothing` (the default behavior). `:restrict` will prevent the deletion from happening *before* the end of the database transaction, whereas `:nothing` allows the transaction to complete before doing so. This allows for things like updating or deleting the destination row and *then* updating or deleting the reference (as long as you are in a transaction).
+
+## On Delete
+
+This option is called `on_delete`, instead of `on_destroy`, because it is hooking into the database-level deletion, *not* a `destroy` action in your resource.
diff --git a/documentation/topics/sqlite-expressions.md b/documentation/topics/sqlite-expressions.md
new file mode 100644
index 0000000..8f900da
--- /dev/null
+++ b/documentation/topics/sqlite-expressions.md
@@ -0,0 +1,24 @@
+# Sqlite Expressions
+
+In addition to the expressions listed in the [Ash expressions guide](https://hexdocs.pm/ash/expressions.html), AshSqlite provides the following expressions.
+
+## Fragments
+
+`fragment` allows you to embed raw SQL into the query. Use question marks to interpolate values from the outer expression.
+
+For example:
+
+```elixir
+Ash.Query.filter(User, fragment("? IS NOT NULL", first_name))
+```
+
+## Like
+
+This wraps the built-in SQLite `LIKE` operator.
+
+Please be aware, these match *patterns*, not raw text. Use `contains/1` if you want to match text without supporting patterns, i.e. `%` and `_` have semantic meaning!
+
+For example:
+
+```elixir
+Ash.Query.filter(User, like(name, "%obo%")) # name contains obo anywhere in the string, case-sensitively
+```
diff --git a/documentation/tutorials/get-started-with-sqlite.md b/documentation/tutorials/get-started-with-sqlite.md
new file mode 100644
index 0000000..df0841c
--- /dev/null
+++ b/documentation/tutorials/get-started-with-sqlite.md
@@ -0,0 +1,286 @@
+# Get Started With Sqlite
+
+## Goals
+
+In this guide we will:
+
+1. Set up AshSqlite, which includes setting up [Ecto](https://hexdocs.pm/ecto/Ecto.html)
+2. Add AshSqlite to the resources created in [the Ash getting started guide](https://hexdocs.pm/ash/get-started.html)
+3. Show how the various features of AshSqlite can help you work quickly and cleanly against a sqlite database
+4. Highlight some of the more advanced features you can use when using AshSqlite.
+5.
Point you to additional resources you may need on your journey + +## Requirements + +- A working SQLite installation, with a sufficiently permissive user +- If you would like to follow along, you will need to add begin with [the Ash getting started guide](https://hexdocs.pm/ash/get-started.html) + +## Steps + +### Add AshSqlite + +Add the `:ash_sqlite` dependency to your application + +`{:ash_sqlite, "~> 1.3.6"}` + +Add `:ash_sqlite` to your `.formatter.exs` file + +```elixir +[ + # import the formatter rules from `:ash_sqlite` + import_deps: [..., :ash_sqlite], + inputs: [...] +] +``` + +### Create and configure your Repo + +Create `lib/helpdesk/repo.ex` with the following contents. `AshSqlite.Repo` is a thin wrapper around `Ecto.Repo`, so see their documentation for how to use it if you need to use it directly. For standard Ash usage, all you will need to do is configure your resources to use your repo. + +```elixir +# in lib/helpdesk/repo.ex + +defmodule Helpdesk.Repo do + use AshSqlite.Repo, otp_app: :helpdesk +end +``` + +Next we will need to create configuration files for various environments. Run the following to create the configuration files we need. + +```bash +mkdir -p config +touch config/config.exs +touch config/dev.exs +touch config/runtime.exs +touch config/test.exs +``` + +Place the following contents in those files, ensuring that the credentials match the user you created for your database. For most conventional installations this will work out of the box. If you've followed other guides before this one, they may have had you create these files already, so just make sure these contents are there. + +```elixir +# in config/config.exs +import Config + +# This should already have been added in the first +# getting started guide +config :helpdesk, + ash_apis: [Helpdesk.Support] + +config :helpdesk, + ecto_repos: [Helpdesk.Repo] + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. +import_config "#{config_env()}.exs" +``` + +```elixir +# in config/dev.exs + +import Config + +# Configure your database +config :helpdesk, Helpdesk.Repo, + username: "sqlite", + password: "sqlite", + hostname: "localhost", + database: "helpdesk_dev", + port: 5432, + show_sensitive_data_on_connection_error: true, + pool_size: 10 +``` + +```elixir +# in config/runtime.exs + +import Config + +if config_env() == :prod do + database_url = + System.get_env("DATABASE_URL") || + raise """ + environment variable DATABASE_URL is missing. + For example: ecto://USER:PASS@HOST/DATABASE + """ + + config :helpdesk, Helpdesk.Repo, + url: database_url, + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10") +end +``` + +```elixir +# in config/test.exs + +import Config + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :helpdesk, Helpdesk.Repo, + username: "sqlite", + password: "sqlite", + hostname: "localhost", + database: "helpdesk_test#{System.get_env("MIX_TEST_PARTITION")}", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 10 +``` + +And finally, add the repo to your application + +```elixir +# in lib/helpdesk/application.ex + + def start(_type, _args) do + children = [ + # Starts a worker by calling: Helpdesk.Worker.start_link(arg) + # {Helpdesk.Worker, arg} + Helpdesk.Repo + ] + + ... 
+``` + +### Add AshSqlite to our resources + +Now we can add the data layer to our resources. The basic configuration for a resource requires the `d:AshSqlite.sqlite|table` and the `d:AshSqlite.sqlite|repo`. + +```elixir +# in lib/helpdesk/support/resources/ticket.ex + + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "tickets" + repo Helpdesk.Repo + end +``` + +```elixir +# in lib/helpdesk/support/resources/representative.ex + + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "representatives" + repo Helpdesk.Repo + end +``` + +### Create the database and tables + +First, we'll create the database with `mix ash_sqlite.create`. + +Then we will generate database migrations. This is one of the many ways that AshSqlite can save time and reduce complexity. + +```bash +mix ash_sqlite.generate_migrations --name add_tickets_and_representatives +``` + +If you are unfamiliar with database migrations, it is a good idea to get a rough idea of what they are and how they work. See the links at the bottom of this guide for more. A rough overview of how migrations work is that each time you need to make changes to your database, they are saved as small, reproducible scripts that can be applied in order. This is necessary both for clean deploys as well as working with multiple developers making changes to the structure of a single database. + +Typically, you need to write these by hand. AshSqlite, however, will store snapshots each time you run the command to generate migrations and will figure out what migrations need to be created. + +You should always look at the generated migrations to ensure that they look correct. Do so now by looking at the generated file in `priv/repo/migrations`. + +Finally, we will create the local database and apply the generated migrations: + +```bash +mix ash_sqlite.create +mix ash_sqlite.migrate +``` + +### Try it out + +And now we're ready to try it out! Run the following in iex: + +Lets create some data. We'll make a representative and give them some open and some closed tickets. + +```elixir +require Ash.Query + +representative = ( + Helpdesk.Support.Representative + |> Ash.Changeset.for_create(:create, %{name: "Joe Armstrong"}) + |> Helpdesk.Support.create!() +) + +for i <- 0..5 do + ticket = + Helpdesk.Support.Ticket + |> Ash.Changeset.for_create(:open, %{subject: "Issue #{i}"}) + |> Helpdesk.Support.create!() + |> Ash.Changeset.for_update(:assign, %{representative_id: representative.id}) + |> Helpdesk.Support.update!() + + if rem(i, 2) == 0 do + ticket + |> Ash.Changeset.for_update(:close) + |> Helpdesk.Support.update!() + end +end +``` + +And now we can read that data. You should see some debug logs that show the sql queries AshSqlite is generating. + +```elixir +require Ash.Query + +# Show the tickets where the subject contains "2" +Helpdesk.Support.Ticket +|> Ash.Query.filter(contains(subject, "2")) +|> Helpdesk.Support.read!() +``` + +```elixir +require Ash.Query + +# Show the tickets that are closed and their subject does not contain "4" +Helpdesk.Support.Ticket +|> Ash.Query.filter(status == :closed and not(contains(subject, "4"))) +|> Helpdesk.Support.read!() +``` + +And, naturally, now that we are storing this in sqlite, this database is persisted even if we stop/start our application. The nice thing, however, is that this was the _exact_ same code that we ran against our resources when they were backed by ETS. + +### Calculations + +Calculations can be pushed down into SQL using expressions. 
+ +For example, we can determine the percentage of tickets that are open: + +```elixir +# in lib/helpdesk/support/resources/representative.ex + + calculations do + calculate :percent_open, :float, expr(open_tickets / total_tickets ) + end +``` + +Calculations can be loaded. + +```elixir +require Ash.Query + +Helpdesk.Support.Representative +|> Ash.Query.filter(percent_open > 0.25) +|> Ash.Query.sort(:percent_open) +|> Ash.Query.load(:percent_open) +|> Helpdesk.Support.read!() +``` + +### Rich Configuration Options + +Take a look at the DSL documentation for more information on what you can configure. You can add check constraints, configure the behavior of foreign keys and more! + +### What next? + +- Check out the data layer docs: `AshSqlite.DataLayer` + +- [Ecto's documentation](https://hexdocs.pm/ecto/Ecto.html). AshSqlite (and much of Ash itself) is made possible by the amazing Ecto. If you find yourself looking for escape hatches when using Ash or ways to work directly with your database, you will want to know how Ecto works. Ash and AshSqlite intentionally do not hide Ecto, and in fact encourages its use whenever you need an escape hatch. + +- [Ecto's Migration documentation](https://hexdocs.pm/ecto_sql/Ecto.Migration.html) read more about migrations. Even with the ash_sqlite migration generator, you will very likely need to modify your own migrations some day. diff --git a/lib/ash_sqlite.ex b/lib/ash_sqlite.ex new file mode 100644 index 0000000..59404f2 --- /dev/null +++ b/lib/ash_sqlite.ex @@ -0,0 +1,7 @@ +defmodule AshSqlite do + @moduledoc """ + The AshSqlite extension gives you tools to map a resource to a sqlite database table. + + For more, check out the [getting started guide](/documentation/tutorials/get-started-with-sqlite.md) + """ +end diff --git a/lib/calculation.ex b/lib/calculation.ex new file mode 100644 index 0000000..e1a400e --- /dev/null +++ b/lib/calculation.ex @@ -0,0 +1,73 @@ +defmodule AshSqlite.Calculation do + @moduledoc false + + require Ecto.Query + + def add_calculations(query, [], _, _), do: {:ok, query} + + def add_calculations(query, calculations, resource, source_binding) do + query = AshSqlite.DataLayer.default_bindings(query, resource) + + query = + if query.select do + query + else + Ecto.Query.select_merge(query, %{}) + end + + dynamics = + Enum.map(calculations, fn {calculation, expression} -> + type = + AshSqlite.Types.parameterized_type( + calculation.type, + Map.get(calculation, :constraints, []) + ) + + expr = + AshSqlite.Expr.dynamic_expr( + query, + expression, + query.__ash_bindings__, + false, + type + ) + + expr = + if type do + Ecto.Query.dynamic(type(^expr, ^type)) + else + expr + end + + {calculation.load, calculation.name, expr} + end) + + {:ok, add_calculation_selects(query, dynamics)} + end + + defp add_calculation_selects(query, dynamics) do + {in_calculations, in_body} = + Enum.split_with(dynamics, fn {load, _name, _dynamic} -> is_nil(load) end) + + calcs = + in_body + |> Map.new(fn {load, _, dynamic} -> + {load, dynamic} + end) + + calcs = + if Enum.empty?(in_calculations) do + calcs + else + Map.put( + calcs, + :calculations, + Map.new(in_calculations, fn {_, name, dynamic} -> + {name, dynamic} + end) + ) + end + + Ecto.Query.select_merge(query, ^calcs) + end +end diff --git a/lib/check_constraint.ex b/lib/check_constraint.ex new file mode 100644 index 0000000..629ef7d --- /dev/null +++ b/lib/check_constraint.ex @@ -0,0 +1,30 @@ +defmodule AshSqlite.CheckConstraint do + @moduledoc "Represents a configured check constraint on the 
table backing a resource" + + defstruct [:attribute, :name, :message, :check] + + def schema do + [ + attribute: [ + type: :any, + doc: + "The attribute or list of attributes to which an error will be added if the check constraint fails", + required: true + ], + name: [ + type: :string, + required: true, + doc: "The name of the constraint" + ], + message: [ + type: :string, + doc: "The message to be added if the check constraint fails" + ], + check: [ + type: :string, + doc: + "The contents of the check. If this is set, the migration generator will include it when generating migrations" + ] + ] + end +end diff --git a/lib/custom_extension.ex b/lib/custom_extension.ex new file mode 100644 index 0000000..62cdaad --- /dev/null +++ b/lib/custom_extension.ex @@ -0,0 +1,20 @@ +defmodule AshSqlite.CustomExtension do + @moduledoc """ + A custom extension implementation. + """ + + @callback install(version :: integer) :: String.t() + + @callback uninstall(version :: integer) :: String.t() + + defmacro __using__(name: name, latest_version: latest_version) do + quote do + @behaviour AshSqlite.CustomExtension + + @extension_name unquote(name) + @extension_latest_version unquote(latest_version) + + def extension, do: {@extension_name, @extension_latest_version, &install/1, &uninstall/1} + end + end +end diff --git a/lib/custom_index.ex b/lib/custom_index.ex new file mode 100644 index 0000000..915ec92 --- /dev/null +++ b/lib/custom_index.ex @@ -0,0 +1,120 @@ +defmodule AshSqlite.CustomIndex do + @moduledoc "Represents a custom index on the table backing a resource" + @fields [ + :table, + :fields, + :name, + :unique, + :concurrently, + :using, + :prefix, + :where, + :include, + :message + ] + + defstruct @fields + + def fields, do: @fields + + @schema [ + fields: [ + type: {:wrap_list, {:or, [:atom, :string]}}, + doc: "The fields to include in the index." + ], + name: [ + type: :string, + doc: "the name of the index. Defaults to \"\#\{table\}_\#\{column\}_index\"." + ], + unique: [ + type: :boolean, + doc: "indicates whether the index should be unique.", + default: false + ], + concurrently: [ + type: :boolean, + doc: "indicates whether the index should be created/dropped concurrently.", + default: false + ], + using: [ + type: :string, + doc: "configures the index type." + ], + prefix: [ + type: :string, + doc: "specify an optional prefix for the index." + ], + where: [ + type: :string, + doc: "specify conditions for a partial index." + ], + message: [ + type: :string, + doc: "A custom message to use for unique indexes that have been violated" + ], + include: [ + type: {:list, :string}, + doc: + "specify fields for a covering index. This is not supported by all databases. For more information on SQLite support, please read the official docs." + ] + ] + + def schema, do: @schema + + # sobelow_skip ["DOS.StringToAtom"] + def transform(%__MODULE__{fields: fields} = index) do + index = %{ + index + | fields: + Enum.map(fields, fn field -> + if is_atom(field) do + field + else + String.to_atom(field) + end + end) + } + + cond do + index.name -> + if Regex.match?(~r/^[0-9a-zA-Z_]+$/, index.name) do + {:ok, index} + else + {:error, + "Custom index name #{index.name} is not valid. Must have letters, numbers and underscores only"} + end + + mismatched_field = + Enum.find(index.fields, fn field -> + !Regex.match?(~r/^[0-9a-zA-Z_]+$/, to_string(field)) + end) -> + {:error, + """ + Custom index field #{mismatched_field} contains invalid index name characters. 
+ + A name must be set manually, i.e + + `name: "your_desired_index_name"` + + Index names must have letters, numbers and underscores only + """} + + true -> + {:ok, index} + end + end + + def name(_resource, %{name: name}) when is_binary(name) do + name + end + + # sobelow_skip ["DOS.StringToAtom"] + def name(table, %{fields: fields}) do + [table, fields, "index"] + |> List.flatten() + |> Enum.map(&to_string(&1)) + |> Enum.map(&String.replace(&1, ~r"[^\w_]", "_")) + |> Enum.map_join("_", &String.replace_trailing(&1, "_", "")) + |> String.to_atom() + end +end diff --git a/lib/data_layer.ex b/lib/data_layer.ex new file mode 100644 index 0000000..526dbd1 --- /dev/null +++ b/lib/data_layer.ex @@ -0,0 +1,1829 @@ +defmodule AshSqlite.DataLayer do + @index %Spark.Dsl.Entity{ + name: :index, + describe: """ + Add an index to be managed by the migration generator. + """, + examples: [ + "index [\"column\", \"column2\"], unique: true, where: \"thing = TRUE\"" + ], + target: AshSqlite.CustomIndex, + schema: AshSqlite.CustomIndex.schema(), + transform: {AshSqlite.CustomIndex, :transform, []}, + args: [:fields] + } + + @custom_indexes %Spark.Dsl.Section{ + name: :custom_indexes, + describe: """ + A section for configuring indexes to be created by the migration generator. + + In general, prefer to use `identities` for simple unique constraints. This is a tool to allow + for declaring more complex indexes. + """, + examples: [ + """ + custom_indexes do + index [:column1, :column2], unique: true, where: "thing = TRUE" + end + """ + ], + entities: [ + @index + ] + } + + @statement %Spark.Dsl.Entity{ + name: :statement, + describe: """ + Add a custom statement for migrations. + """, + examples: [ + """ + statement :pgweb_idx do + up "CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', title || ' ' || body));" + down "DROP INDEX pgweb_idx;" + end + """ + ], + target: AshSqlite.Statement, + schema: AshSqlite.Statement.schema(), + args: [:name] + } + + @custom_statements %Spark.Dsl.Section{ + name: :custom_statements, + describe: """ + A section for configuring custom statements to be added to migrations. + + Changing custom statements may require manual intervention, because Ash can't determine what order they should run + in (i.e if they depend on table structure that you've added, or vice versa). As such, any `down` statements we run + for custom statements happen first, and any `up` statements happen last. + + Additionally, when changing a custom statement, we must make some assumptions, i.e that we should migrate + the old structure down using the previously configured `down` and recreate it. + + This may not be desired, and so what you may end up doing is simply modifying the old migration and deleting whatever was + generated by the migration generator. As always: read your migrations after generating them! + """, + examples: [ + """ + custom_statements do + # the name is used to detect if you remove or modify the statement + statement :pgweb_idx do + up "CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', title || ' ' || body));" + down "DROP INDEX pgweb_idx;" + end + end + """ + ], + entities: [ + @statement + ] + } + + @reference %Spark.Dsl.Entity{ + name: :reference, + describe: """ + Configures the reference for a relationship in resource migrations. + + Keep in mind that multiple relationships can theoretically involve the same destination and foreign keys. + In those cases, you only need to configure the `reference` behavior for one of them. 
Any conflicts will result + in an error, across this resource and any other resources that share a table with this one. For this reason, + instead of adding a reference configuration for `:nothing`, its best to just leave the configuration out, as that + is the default behavior if *no* relationship anywhere has configured the behavior of that reference. + """, + examples: [ + "reference :post, on_delete: :delete, on_update: :update, name: \"comments_to_posts_fkey\"" + ], + args: [:relationship], + target: AshSqlite.Reference, + schema: AshSqlite.Reference.schema() + } + + @references %Spark.Dsl.Section{ + name: :references, + describe: """ + A section for configuring the references (foreign keys) in resource migrations. + + This section is only relevant if you are using the migration generator with this resource. + Otherwise, it has no effect. + """, + examples: [ + """ + references do + reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey" + end + """ + ], + entities: [@reference], + schema: [ + polymorphic_on_delete: [ + type: {:one_of, [:delete, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables." + ], + polymorphic_on_update: [ + type: {:one_of, [:update, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables." + ], + polymorphic_name: [ + type: {:one_of, [:update, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables." + ] + ] + } + + @check_constraint %Spark.Dsl.Entity{ + name: :check_constraint, + describe: """ + Add a check constraint to be validated. + + If a check constraint exists on the table but not in this section, and it produces an error, a runtime error will be raised. + + Provide a list of attributes instead of a single attribute to add the message to multiple attributes. + + By adding the `check` option, the migration generator will include it when generating migrations. + """, + examples: [ + """ + check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive" + """ + ], + args: [:attribute, :name], + target: AshSqlite.CheckConstraint, + schema: AshSqlite.CheckConstraint.schema() + } + + @check_constraints %Spark.Dsl.Section{ + name: :check_constraints, + describe: """ + A section for configuring the check constraints for a given table. + + This can be used to automatically create those check constraints, or just to provide message when they are raised + """, + examples: [ + """ + check_constraints do + check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive" + end + """ + ], + entities: [@check_constraint] + } + + @references %Spark.Dsl.Section{ + name: :references, + describe: """ + A section for configuring the references (foreign keys) in resource migrations. + + This section is only relevant if you are using the migration generator with this resource. + Otherwise, it has no effect. 
+ """, + examples: [ + """ + references do + reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey" + end + """ + ], + entities: [@reference], + schema: [ + polymorphic_on_delete: [ + type: {:one_of, [:delete, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables." + ], + polymorphic_on_update: [ + type: {:one_of, [:update, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables." + ], + polymorphic_name: [ + type: {:one_of, [:update, :nilify, :nothing, :restrict]}, + doc: + "For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables." + ] + ] + } + + @sqlite %Spark.Dsl.Section{ + name: :sqlite, + describe: """ + Sqlite data layer configuration + """, + sections: [ + @custom_indexes, + @custom_statements, + @references, + @check_constraints + ], + modules: [ + :repo + ], + examples: [ + """ + sqlite do + repo MyApp.Repo + table "organizations" + end + """ + ], + schema: [ + repo: [ + type: :atom, + required: true, + doc: + "The repo that will be used to fetch your data. See the `AshSqlite.Repo` documentation for more" + ], + migrate?: [ + type: :boolean, + default: true, + doc: + "Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations`" + ], + migration_types: [ + type: :keyword_list, + default: [], + doc: + "A keyword list of attribute names to the ecto migration type that should be used for that attribute. Only necessary if you need to override the defaults." + ], + migration_defaults: [ + type: :keyword_list, + default: [], + doc: """ + A keyword list of attribute names to the ecto migration default that should be used for that attribute. The string you use will be placed verbatim in the migration. Use fragments like `fragment(\\\\"now()\\\\")`, or for `nil`, use `\\\\"nil\\\\"`. + """ + ], + base_filter_sql: [ + type: :string, + doc: + "A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter" + ], + skip_unique_indexes: [ + type: {:wrap_list, :atom}, + default: false, + doc: "Skip generating unique indexes when generating migrations" + ], + unique_index_names: [ + type: + {:list, + {:or, + [{:tuple, [{:list, :atom}, :string]}, {:tuple, [{:list, :atom}, :string, :string]}]}}, + default: [], + doc: """ + A list of unique index names that could raise errors that are not configured in identities, or an mfa to a function that takes a changeset and returns the list. In the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}` + """ + ], + exclusion_constraint_names: [ + type: :any, + default: [], + doc: """ + A list of exclusion constraint names that could raise errors. Must be in the format `{:affected_key, "name_of_constraint"}` or `{:affected_key, "name_of_constraint", "custom error message"}` + """ + ], + identity_index_names: [ + type: :any, + default: [], + doc: """ + A keyword list of identity names to the unique index name that they should use when being managed by the migration generator. 
+ """ + ], + foreign_key_names: [ + type: {:list, {:or, [{:tuple, [:atom, :string]}, {:tuple, [:string, :string]}]}}, + default: [], + doc: """ + A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns a list. In the format: `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}` + """ + ], + migration_ignore_attributes: [ + type: {:list, :atom}, + default: [], + doc: """ + A list of attributes that will be ignored when generating migrations. + """ + ], + table: [ + type: :string, + doc: """ + The table to store and read the resource from. If this is changed, the migration generator will not remove the old table. + """ + ], + polymorphic?: [ + type: :boolean, + default: false, + doc: """ + Declares this resource as polymorphic. See the [polymorphic resources guide](/documentation/topics/polymorphic_resources.md) for more. + """ + ] + ] + } + + alias Ash.Filter + alias Ash.Query.{BooleanExpression, Not, Ref} + + @behaviour Ash.DataLayer + + @sections [@sqlite] + + @moduledoc """ + A sqlite data layer that leverages Ecto's sqlite capabilities. + """ + + use Spark.Dsl.Extension, + sections: @sections, + transformers: [ + AshSqlite.Transformers.ValidateReferences, + AshSqlite.Transformers.VerifyRepo, + AshSqlite.Transformers.EnsureTableOrPolymorphic + ] + + def migrate(args) do + # TODO: take args that we care about + Mix.Task.run("ash_sqlite.migrate", args) + end + + def codegen(args) do + # TODO: take args that we care about + Mix.Task.run("ash_sqlite.generate_migrations", args) + end + + def setup(args) do + # TODO: take args that we care about + Mix.Task.run("ash_sqlite.create", args) + Mix.Task.run("ash_sqlite.migrate", args) + end + + def tear_down(args) do + # TODO: take args that we care about + Mix.Task.run("ash_sqlite.drop", args) + end + + import Ecto.Query, only: [from: 2, subquery: 1] + + @impl true + def can?(_, :async_engine), do: true + def can?(_, :bulk_create), do: true + def can?(_, {:lock, :for_update}), do: true + + def can?(_, {:lock, string}) do + string = String.trim_trailing(string, " NOWAIT") + + String.upcase(string) in [ + "FOR UPDATE", + "FOR NO KEY UPDATE", + "FOR SHARE", + "FOR KEY SHARE" + ] + end + + def can?(_, :transact), do: true + def can?(_, :composite_primary_key), do: true + def can?(_, {:atomic, :update}), do: true + def can?(_, :upsert), do: true + def can?(_, :changeset_filter), do: true + + def can?(resource, {:join, other_resource}) do + data_layer = Ash.DataLayer.data_layer(resource) + other_data_layer = Ash.DataLayer.data_layer(other_resource) + + data_layer == other_data_layer and + AshSqlite.DataLayer.Info.repo(resource) == AshSqlite.DataLayer.Info.repo(other_resource) + end + + def can?(resource, {:lateral_join, _}) do + false + end + + def can?(_, :boolean_filter), do: true + + def can?(_, {:aggregate, _type}), do: false + + def can?(_, :aggregate_filter), do: false + def can?(_, :aggregate_sort), do: false + def can?(_, :expression_calculation), do: true + def can?(_, :expression_calculation_sort), do: true + def can?(_, :create), do: true + def can?(_, :select), do: true + def can?(_, :read), do: true + + def can?(resource, action) when action in ~w[update destroy]a do + resource + |> Ash.Resource.Info.primary_key() + |> Enum.any?() + end + + def can?(_, :filter), do: true + def can?(_, :limit), do: true + def can?(_, :offset), do: true + def can?(_, :multitenancy), do: false + + def can?(_, {:filter_relationship, %{manual: {module, _}}}) do + 
Spark.implements_behaviour?(module, AshSqlite.ManualRelationship) + end + + def can?(_, {:filter_relationship, _}), do: true + + def can?(_, {:aggregate_relationship, _}), do: false + + def can?(_, :timeout), do: true + def can?(_, {:filter_expr, _}), do: true + def can?(_, :nested_expressions), do: true + def can?(_, {:query_aggregate, :count}), do: false + def can?(_, :sort), do: true + def can?(_, :distinct_sort), do: true + def can?(_, :distinct), do: true + def can?(_, {:sort, _}), do: true + def can?(_, _), do: false + + @impl true + def in_transaction?(resource) do + AshSqlite.DataLayer.Info.repo(resource).in_transaction?() + end + + @impl true + def limit(query, nil, _), do: {:ok, query} + + def limit(query, limit, _resource) do + {:ok, from(row in query, limit: ^limit)} + end + + @impl true + def source(resource) do + AshSqlite.DataLayer.Info.table(resource) || "" + end + + @impl true + def set_context(resource, data_layer_query, context) do + start_bindings = context[:data_layer][:start_bindings_at] || 0 + data_layer_query = from(row in data_layer_query, as: ^start_bindings) + + data_layer_query = + if context[:data_layer][:table] do + %{ + data_layer_query + | from: %{data_layer_query.from | source: {context[:data_layer][:table], resource}} + } + else + data_layer_query + end + + data_layer_query = + if context[:data_layer][:schema] do + Ecto.Query.put_query_prefix(data_layer_query, to_string(context[:data_layer][:schema])) + else + data_layer_query + end + + default_bindings(data_layer_query, resource, context) + end + + @impl true + def offset(query, nil, _), do: query + + def offset(%{offset: old_offset} = query, 0, _resource) when old_offset in [0, nil] do + {:ok, query} + end + + def offset(query, offset, _resource) do + {:ok, from(row in query, offset: ^offset)} + end + + @impl true + def run_query(query, resource) do + query = default_bindings(query, resource) + + with_sort_applied = + if query.__ash_bindings__[:sort_applied?] do + {:ok, query} + else + apply_sort(query, query.__ash_bindings__[:sort], resource) + end + + case with_sort_applied do + {:error, error} -> + {:error, error} + + {:ok, query} -> + query = + if query.__ash_bindings__[:__order__?] 
&& query.windows[:order] do + if query.distinct do + query_with_order = + from(row in query, select_merge: %{__order__: over(row_number(), :order)}) + + query_without_limit_and_offset = + query_with_order + |> Ecto.Query.exclude(:limit) + |> Ecto.Query.exclude(:offset) + + from(row in subquery(query_without_limit_and_offset), + select: row, + order_by: row.__order__ + ) + |> Map.put(:limit, query.limit) + |> Map.put(:offset, query.offset) + else + order_by = %{query.windows[:order] | expr: query.windows[:order].expr[:order_by]} + + %{ + query + | windows: Keyword.delete(query.windows, :order), + order_bys: [order_by] + } + end + else + %{query | windows: Keyword.delete(query.windows, :order)} + end + + if AshSqlite.DataLayer.Info.polymorphic?(resource) && no_table?(query) do + raise_table_error!(resource, :read) + else + {:ok, dynamic_repo(resource, query).all(query, repo_opts(nil, nil, resource))} + end + end + rescue + e -> + handle_raised_error(e, __STACKTRACE__, query, resource) + end + + defp no_table?(%{from: %{source: {"", _}}}), do: true + defp no_table?(_), do: false + + defp repo_opts(timeout, nil, resource) do + if schema = AshSqlite.DataLayer.Info.schema(resource) do + [prefix: schema] + else + [] + end + |> add_timeout(timeout) + end + + defp repo_opts(timeout, _resource) do + add_timeout([], timeout) + end + + defp add_timeout(opts, timeout) when not is_nil(timeout) do + Keyword.put(opts, :timeout, timeout) + end + + defp add_timeout(opts, _), do: opts + + @impl true + def functions(_resource) do + [ + AshSqlite.Functions.Fragment, + AshSqlite.Functions.Like, + ] + end + + defp add_exists_aggs(result, resource, query, exists) do + repo = dynamic_repo(resource, query) + repo_opts = repo_opts(nil, nil, resource) + + Enum.reduce(exists, result, fn agg, result -> + {:ok, filtered} = + case agg do + %{query: %{filter: filter}} when not is_nil(filter) -> + filter(query, filter, resource) + + _ -> + {:ok, query} + end + + Map.put( + result || %{}, + agg.name, + repo.exists?(filtered, repo_opts) + ) + end) + end + + @impl true + def resource_to_query(resource, _) do + from(row in {AshSqlite.DataLayer.Info.table(resource) || "", resource}, []) + end + + @impl true + def bulk_create(resource, stream, options) do + opts = repo_opts(nil, resource) + + opts = + if options.return_records? do + Keyword.put(opts, :returning, true) + else + opts + end + + opts = + if options[:upsert?] do + opts + |> Keyword.put(:on_conflict, {:replace, options[:upsert_fields] || []}) + |> Keyword.put( + :conflict_target, + conflict_target( + resource, + options[:upsert_keys] || Ash.Resource.Info.primary_key(resource) + ) + ) + else + opts + end + + changesets = Enum.to_list(stream) + + ecto_changesets = Enum.map(changesets, & &1.attributes) + + source = + if table = Enum.at(changesets, 0).context[:data_layer][:table] do + {table, resource} + else + resource + end + + repo = dynamic_repo(resource, Enum.at(changesets, 0)) + + source + |> repo.insert_all(ecto_changesets, opts) + |> case do + {_, nil} -> + :ok + + {_, results} -> + if options[:single?] 
do + {:ok, results} + else + {:ok, + Stream.zip_with(results, changesets, fn result, changeset -> + + Ash.Resource.put_metadata( + result, + :bulk_create_index, + changeset.context.bulk_create.index + ) + end)} + end + end + rescue + e -> + changeset = Ash.Changeset.new(resource) + + handle_raised_error( + e, + __STACKTRACE__, + {:bulk_create, ecto_changeset(changeset.data, changeset, :create, false)}, + resource + ) + end + + @impl true + def create(resource, changeset) do + changeset = %{ + changeset + | data: + Map.update!( + changeset.data, + :__meta__, + &Map.put(&1, :source, table(resource, changeset)) + ) + } + + case bulk_create(resource, [changeset], %{ + single?: true, + return_records?: true + }) do + {:ok, [result]} -> + {:ok, result} + + {:error, error} -> + {:error, error} + end + end + + defp handle_errors({:error, %Ecto.Changeset{errors: errors}}) do + {:error, Enum.map(errors, &to_ash_error/1)} + end + + defp to_ash_error({field, {message, vars}}) do + Ash.Error.Changes.InvalidAttribute.exception( + field: field, + message: message, + private_vars: vars + ) + end + + defp ecto_changeset(record, changeset, type, table_error? \\ true) do + filters = + if changeset.action_type == :create do + %{} + else + Map.get(changeset, :filters, %{}) + end + + filters = + if changeset.action_type == :create do + filters + else + changeset.resource + |> Ash.Resource.Info.primary_key() + |> Enum.reduce(filters, fn key, filters -> + Map.put(filters, key, Map.get(record, key)) + end) + end + + attributes = + changeset.resource + |> Ash.Resource.Info.attributes() + |> Enum.map(& &1.name) + + attributes_to_change = + Enum.reject(attributes, fn attribute -> + Keyword.has_key?(changeset.atomics, attribute) + end) + + ecto_changeset = + record + |> to_ecto() + |> set_table(changeset, type, table_error?) 
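+      # Attributes being set via atomics are excluded above; only the remaining
+      # attribute changes are applied through the Ecto changeset here.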
+ |> Ecto.Changeset.change(Map.take(changeset.attributes, attributes_to_change)) + |> Map.update!(:filters, &Map.merge(&1, filters)) + |> add_configured_foreign_key_constraints(record.__struct__) + |> add_unique_indexes(record.__struct__, changeset) + |> add_check_constraints(record.__struct__) + |> add_exclusion_constraints(record.__struct__) + + case type do + :create -> + ecto_changeset + |> add_my_foreign_key_constraints(record.__struct__) + + type when type in [:upsert, :update] -> + ecto_changeset + |> add_my_foreign_key_constraints(record.__struct__) + |> add_related_foreign_key_constraints(record.__struct__) + + :delete -> + ecto_changeset + |> add_related_foreign_key_constraints(record.__struct__) + end + end + + defp handle_raised_error( + %Ecto.StaleEntryError{changeset: %{data: %resource{}, filters: filters}}, + stacktrace, + context, + resource + ) do + handle_raised_error( + Ash.Error.Changes.StaleRecord.exception(resource: resource, filters: filters), + stacktrace, + context, + resource + ) + end + + defp handle_raised_error(%Ecto.Query.CastError{} = e, stacktrace, context, resource) do + handle_raised_error( + Ash.Error.Query.InvalidFilterValue.exception(value: e.value, context: context), + stacktrace, + context, + resource + ) + end + + defp handle_raised_error(error, stacktrace, _ecto_changeset, _resource) do + {:error, Ash.Error.to_ash_error(error, stacktrace)} + end + + defp constraints_to_errors(%{constraints: user_constraints} = changeset, action, constraints) do + Enum.map(constraints, fn {type, constraint} -> + user_constraint = + Enum.find(user_constraints, fn c -> + case {c.type, c.constraint, c.match} do + {^type, ^constraint, :exact} -> true + {^type, cc, :suffix} -> String.ends_with?(constraint, cc) + {^type, cc, :prefix} -> String.starts_with?(constraint, cc) + {^type, %Regex{} = r, _match} -> Regex.match?(r, constraint) + _ -> false + end + end) + + case user_constraint do + %{field: field, error_message: error_message, type: type, constraint: constraint} -> + Ash.Error.Changes.InvalidAttribute.exception( + field: field, + message: error_message, + private_vars: [ + constraint: constraint, + constraint_type: type + ] + ) + + nil -> + Ecto.ConstraintError.exception( + action: action, + type: type, + constraint: constraint, + changeset: changeset + ) + end + end) + end + + defp set_table(record, changeset, operation, table_error?) do + if AshSqlite.DataLayer.Info.polymorphic?(record.__struct__) do + table = + changeset.context[:data_layer][:table] || + AshSqlite.DataLayer.Info.table(record.__struct__) + + record = + if table do + Ecto.put_meta(record, source: table) + else + if table_error? 
do + raise_table_error!(changeset.resource, operation) + else + record + end + end + + prefix = + changeset.context[:data_layer][:schema] || + AshSqlite.DataLayer.Info.schema(record.__struct__) + + if prefix do + Ecto.put_meta(record, prefix: table) + else + record + end + else + record + end + end + + def from_ecto({:ok, result}), do: {:ok, from_ecto(result)} + def from_ecto({:error, _} = other), do: other + + def from_ecto(nil), do: nil + + def from_ecto(value) when is_list(value) do + Enum.map(value, &from_ecto/1) + end + + def from_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + empty = struct(resource) + + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + case Map.get(record, relationship.name) do + %Ecto.Association.NotLoaded{} -> + Map.put(record, relationship.name, Map.get(empty, relationship.name)) + + value -> + Map.put(record, relationship.name, from_ecto(value)) + end + end) + else + record + end + end + + def from_ecto(other), do: other + + def to_ecto(nil), do: nil + + def to_ecto(value) when is_list(value) do + Enum.map(value, &to_ecto/1) + end + + def to_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + value = + case Map.get(record, relationship.name) do + %Ash.NotLoaded{} -> + %Ecto.Association.NotLoaded{ + __field__: relationship.name, + __cardinality__: relationship.cardinality + } + + value -> + to_ecto(value) + end + + Map.put(record, relationship.name, value) + end) + else + record + end + end + + def to_ecto(other), do: other + + defp add_check_constraints(changeset, resource) do + resource + |> AshSqlite.DataLayer.Info.check_constraints() + |> Enum.reduce(changeset, fn constraint, changeset -> + constraint.attribute + |> List.wrap() + |> Enum.reduce(changeset, fn attribute, changeset -> + Ecto.Changeset.check_constraint(changeset, attribute, + name: constraint.name, + message: constraint.message || "is invalid" + ) + end) + end) + end + + defp add_exclusion_constraints(changeset, resource) do + resource + |> AshSqlite.DataLayer.Info.exclusion_constraint_names() + |> Enum.reduce(changeset, fn constraint, changeset -> + case constraint do + {key, name} -> + Ecto.Changeset.exclusion_constraint(changeset, key, name: name) + + {key, name, message} -> + Ecto.Changeset.exclusion_constraint(changeset, key, name: name, message: message) + end + end) + end + + defp add_related_foreign_key_constraints(changeset, resource) do + # TODO: this doesn't guarantee us to get all of them, because if something is related to this + # schema and there is no back-relation, then this won't catch it's foreign key constraints + resource + |> Ash.Resource.Info.relationships() + |> Enum.map(& &1.destination) + |> Enum.uniq() + |> Enum.flat_map(fn related -> + related + |> Ash.Resource.Info.relationships() + |> Enum.filter(&(&1.destination == resource)) + |> Enum.map(&Map.take(&1, [:source, :source_attribute, :destination_attribute, :name])) + end) + |> Enum.reduce(changeset, fn %{ + source: source, + source_attribute: source_attribute, + destination_attribute: destination_attribute, + name: relationship_name + }, + changeset -> + case AshSqlite.DataLayer.Info.reference(resource, relationship_name) do + %{name: name} when not is_nil(name) -> + Ecto.Changeset.foreign_key_constraint(changeset, destination_attribute, + name: name, + message: "would leave records behind" + ) + + _ -> + 
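+          # No reference name was configured for this relationship, so fall back
+          # to the conventional "<source table>_<source_attribute>_fkey" name.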
Ecto.Changeset.foreign_key_constraint(changeset, destination_attribute, + name: "#{AshSqlite.DataLayer.Info.table(source)}_#{source_attribute}_fkey", + message: "would leave records behind" + ) + end + end) + end + + defp add_my_foreign_key_constraints(changeset, resource) do + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(changeset, &Ecto.Changeset.foreign_key_constraint(&2, &1.source_attribute)) + end + + defp add_configured_foreign_key_constraints(changeset, resource) do + resource + |> AshSqlite.DataLayer.Info.foreign_key_names() + |> case do + {m, f, a} -> List.wrap(apply(m, f, [changeset | a])) + value -> List.wrap(value) + end + |> Enum.reduce(changeset, fn + {key, name}, changeset -> + Ecto.Changeset.foreign_key_constraint(changeset, key, name: name) + + {key, name, message}, changeset -> + Ecto.Changeset.foreign_key_constraint(changeset, key, name: name, message: message) + end) + end + + defp add_unique_indexes(changeset, resource, ash_changeset) do + changeset = + resource + |> Ash.Resource.Info.identities() + |> Enum.reduce(changeset, fn identity, changeset -> + name = + AshSqlite.DataLayer.Info.identity_index_names(resource)[identity.name] || + "#{table(resource, ash_changeset)}_#{identity.name}_index" + + opts = + if Map.get(identity, :message) do + [name: name, message: identity.message] + else + [name: name] + end + + Ecto.Changeset.unique_constraint(changeset, identity.keys, opts) + end) + + changeset = + resource + |> AshSqlite.DataLayer.Info.custom_indexes() + |> Enum.reduce(changeset, fn index, changeset -> + opts = + if index.message do + [name: index.name, message: index.message] + else + [name: index.name] + end + + Ecto.Changeset.unique_constraint(changeset, index.fields, opts) + end) + + names = + resource + |> AshSqlite.DataLayer.Info.unique_index_names() + |> case do + {m, f, a} -> List.wrap(apply(m, f, [changeset | a])) + value -> List.wrap(value) + end + + names = + case Ash.Resource.Info.primary_key(resource) do + [] -> + names + + fields -> + if table = table(resource, ash_changeset) do + [{fields, table <> "_pkey"} | names] + else + [] + end + end + + Enum.reduce(names, changeset, fn + {keys, name}, changeset -> + Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name) + + {keys, name, message}, changeset -> + Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name, message: message) + end) + end + + @impl true + def upsert(resource, changeset, keys \\ nil) do + keys = keys || Ash.Resource.Info.primary_key(resource) + + explicitly_changing_attributes = + Enum.map( + Map.keys(changeset.attributes) -- Map.get(changeset, :defaults, []) -- keys, + fn key -> + {key, Ash.Changeset.get_attribute(changeset, key)} + end + ) + + on_conflict = + changeset + |> update_defaults() + |> Keyword.merge(explicitly_changing_attributes) + + case bulk_create(resource, [changeset], %{ + single?: true, + upsert?: true, + upsert_keys: keys, + upsert_fields: Keyword.keys(on_conflict), + return_records?: true + }) do + {:ok, [result]} -> + {:ok, result} + + {:error, error} -> + {:error, error} + end + end + + defp conflict_target(resource, keys) do + if Ash.Resource.Info.base_filter(resource) do + base_filter_sql = + AshSqlite.DataLayer.Info.base_filter_sql(resource) || + raise """ + Cannot use upserts with resources that have a base_filter without also adding `base_filter_sql` in the sqlite section. 
+ """ + + sources = + Enum.map(keys, fn key -> + ~s("#{Ash.Resource.Info.attribute(resource, key).source || key}") + end) + + {:unsafe_fragment, "(" <> Enum.join(sources, ", ") <> ") WHERE (#{base_filter_sql})"} + else + keys + end + end + + defp update_defaults(changeset) do + attributes = + changeset.resource + |> Ash.Resource.Info.attributes() + |> Enum.reject(&is_nil(&1.update_default)) + + attributes + |> static_defaults() + |> Enum.concat(lazy_matching_defaults(attributes)) + |> Enum.concat(lazy_non_matching_defaults(attributes)) + end + + defp static_defaults(attributes) do + attributes + |> Enum.reject(&get_default_fun(&1)) + |> Enum.map(&{&1.name, &1.update_default}) + end + + defp lazy_non_matching_defaults(attributes) do + attributes + |> Enum.filter(&(!&1.match_other_defaults? && get_default_fun(&1))) + |> Enum.map(fn attribute -> + default_value = + case attribute.update_default do + function when is_function(function) -> + function.() + + {m, f, a} when is_atom(m) and is_atom(f) and is_list(a) -> + apply(m, f, a) + end + + {attribute.name, default_value} + end) + end + + defp lazy_matching_defaults(attributes) do + attributes + |> Enum.filter(&(&1.match_other_defaults? && get_default_fun(&1))) + |> Enum.group_by(& &1.update_default) + |> Enum.flat_map(fn {default_fun, attributes} -> + default_value = + case default_fun do + function when is_function(function) -> + function.() + + {m, f, a} when is_atom(m) and is_atom(f) and is_list(a) -> + apply(m, f, a) + end + + Enum.map(attributes, &{&1.name, default_value}) + end) + end + + defp get_default_fun(attribute) do + if is_function(attribute.update_default) or match?({_, _, _}, attribute.update_default) do + attribute.update_default + end + end + + @impl true + def update(resource, changeset) do + ecto_changeset = + changeset.data + |> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset))) + |> ecto_changeset(changeset, :update) + + try do + query = from(row in resource, as: ^0) + + select = Keyword.keys(changeset.atomics) ++ Ash.Resource.Info.primary_key(resource) + + query = + query + |> default_bindings(resource, changeset.context) + |> Ecto.Query.select(^select) + + query = + Enum.reduce(ecto_changeset.filters, query, fn {key, value}, query -> + from(row in query, + where: field(row, ^key) == ^value + ) + end) + + atomics_result = + Enum.reduce_while(changeset.atomics, {:ok, query, []}, fn {field, expr}, + {:ok, query, set} -> + used_calculations = + Ash.Filter.used_calculations( + expr, + resource + ) + + with {:ok, query} <- + AshSqlite.Join.join_all_relationships( + query, + %Ash.Filter{ + resource: resource, + expression: expr + }, + left_only?: true + ), + dynamic <- + AshSqlite.Expr.dynamic_expr(query, expr, query.__ash_bindings__) do + {:cont, {:ok, query, Keyword.put(set, field, dynamic)}} + else + other -> + {:halt, other} + end + end) + + case atomics_result do + {:ok, query, dynamics} -> + {params, set, count} = + ecto_changeset.changes + |> Map.to_list() + |> Enum.reduce({[], [], 0}, fn {key, value}, {params, set, count} -> + {[{value, {0, key}} | params], [{key, {:^, [], [count]}} | set], count + 1} + end) + + {params, set, _} = + Enum.reduce(dynamics, {params, set, count}, fn {key, value}, {params, set, count} -> + case AshSqlite.Expr.dynamic_expr(query, value, query.__ash_bindings__) do + %Ecto.Query.DynamicExpr{} = dynamic -> + result = + Ecto.Query.Builder.Dynamic.partially_expand( + :select, + query, + dynamic, + params, + count + ) + + expr = elem(result, 0) + new_params = elem(result, 
1) + + new_count = + result |> Tuple.to_list() |> List.last() + + {new_params, [{key, expr} | set], new_count} + + other -> + {[{other, {0, key}} | params], [{key, {:^, [], [count]}} | set], count + 1} + end + end) + + case set do + [] -> + {:ok, changeset.data} + + set -> + query = + Map.put(query, :updates, [ + %Ecto.Query.QueryExpr{ + # why do I have to reverse the `set`??? + # it breaks if I don't + expr: [set: Enum.reverse(set)], + params: Enum.reverse(params) + } + ]) + + repo_opts = repo_opts(changeset.timeout, changeset.resource) + + repo_opts = + Keyword.put(repo_opts, :returning, Keyword.keys(changeset.atomics)) + + result = + dynamic_repo(resource, changeset).update_all( + query, + [], + repo_opts + ) + + case result do + {0, []} -> + {:error, + Ash.Error.Changes.StaleRecord.exception( + resource: resource, + filters: ecto_changeset.filters + )} + + {1, [result]} -> + record = + changeset.data + |> Map.merge(changeset.attributes) + |> Map.merge(Map.take(result, Keyword.keys(changeset.atomics))) + + {:ok, record} + end + end + + {:error, error} -> + {:error, error} + end + rescue + e -> + handle_raised_error(e, __STACKTRACE__, ecto_changeset, resource) + end + end + + @impl true + def destroy(resource, %{data: record} = changeset) do + ecto_changeset = ecto_changeset(record, changeset, :delete) + + try do + ecto_changeset + |> dynamic_repo(resource, changeset).delete( + repo_opts(changeset.timeout, changeset.resource) + ) + |> from_ecto() + |> case do + {:ok, _record} -> + :ok + + {:error, error} -> + handle_errors({:error, error}) + end + rescue + e -> + handle_raised_error(e, __STACKTRACE__, ecto_changeset, resource) + end + end + + @impl true + def lock(query, :for_update, _) do + if query.distinct do + new_query = + Ecto.Query.lock(%{query | distinct: nil}, [{^0, a}], fragment("FOR UPDATE OF ?", a)) + + q = from(row in subquery(new_query), []) + {:ok, %{q | distinct: query.distinct}} + else + {:ok, Ecto.Query.lock(query, [{^0, a}], fragment("FOR UPDATE OF ?", a))} + end + end + + @locks [ + "FOR UPDATE", + "FOR NO KEY UPDATE", + "FOR SHARE", + "FOR KEY SHARE" + ] + + for lock <- @locks do + frag = "#{lock} OF ?" + + def lock(query, unquote(lock), _) do + {:ok, Ecto.Query.lock(query, [{^0, a}], fragment(unquote(frag), a))} + end + + frag = "#{lock} OF ? NOWAIT" + lock = "#{lock} NOWAIT" + + def lock(query, unquote(lock), _) do + {:ok, Ecto.Query.lock(query, [{^0, a}], fragment(unquote(frag), a))} + end + end + + @impl true + def sort(query, sort, _resource) do + {:ok, Map.update!(query, :__ash_bindings__, &Map.put(&1, :sort, sort))} + end + + @impl true + def select(query, select, resource) do + query = default_bindings(query, resource) + + {:ok, + from(row in query, + select: struct(row, ^Enum.uniq(select)) + )} + end + + @impl true + def distinct_sort(query, sort, _) when sort in [nil, []] do + {:ok, query} + end + + def distinct_sort(query, sort, _) do + {:ok, Map.update!(query, :__ash_bindings__, &Map.put(&1, :distinct_sort, sort))} + end + + # If the order by does not match the initial sort clause, then we use a subquery + # to limit to only distinct rows. This may not perform that well, so we may need + # to come up with alternatives here. 
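+  # The approach below joins the source rows back to a subquery of the distinct
+  # rows on the primary key, so full records can still be returned in order.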
+ @impl true + def distinct(query, empty, resource) when empty in [nil, []] do + query |> apply_sort(query.__ash_bindings__[:sort], resource) + end + + def distinct(query, distinct_on, resource) do + case get_distinct_statement(query, distinct_on) do + {:ok, distinct_statement} -> + %{query | distinct: distinct_statement} + |> apply_sort(query.__ash_bindings__[:sort], resource) + + {:error, distinct_statement} -> + query + |> Ecto.Query.exclude(:order_by) + |> default_bindings(resource) + |> Map.put(:distinct, distinct_statement) + |> apply_sort( + query.__ash_bindings__[:distinct_sort] || query.__ash_bindings__[:sort], + resource, + true + ) + |> case do + {:ok, distinct_query} -> + on = + Enum.reduce(Ash.Resource.Info.primary_key(resource), nil, fn key, dynamic -> + if dynamic do + Ecto.Query.dynamic( + [row, distinct], + ^dynamic and field(row, ^key) == field(distinct, ^key) + ) + else + Ecto.Query.dynamic([row, distinct], field(row, ^key) == field(distinct, ^key)) + end + end) + + joined_query_source = + Enum.reduce( + [ + :join, + :order_by, + :group_by, + :having, + :distinct, + :select, + :combinations, + :with_ctes, + :limit, + :offset, + :lock, + :preload, + :update, + :where + ], + query, + &Ecto.Query.exclude(&2, &1) + ) + + joined_query = + from(row in joined_query_source, + join: distinct in subquery(distinct_query), + on: ^on + ) + + from([row, distinct] in joined_query, + select: distinct + ) + |> default_bindings(resource) + |> apply_sort(query.__ash_bindings__[:sort], resource) + |> case do + {:ok, joined_query} -> + {:ok, + Map.update!( + joined_query, + :__ash_bindings__, + &Map.put(&1, :__order__?, query.__ash_bindings__[:__order__?] || false) + )} + + {:error, error} -> + {:error, error} + end + + {:error, error} -> + {:error, error} + end + end + end + + defp apply_sort(query, sort, resource, directly? \\ false) + + defp apply_sort(query, sort, _resource, _) when sort in [nil, []] do + {:ok, query |> set_sort_applied()} + end + + defp apply_sort(query, sort, resource, directly?) do + query + |> AshSqlite.Sort.sort(sort, resource, [], 0, directly?) + |> case do + {:ok, sort} when directly? -> + {:ok, query |> Ecto.Query.order_by(^sort) |> set_sort_applied()} + + {:ok, query} -> + {:ok, query |> set_sort_applied()} + + {:error, error} -> + {:error, error} + end + end + + defp set_sort_applied(query) do + Map.update!(query, :__ash_bindings__, &Map.put(&1, :sort_applied?, true)) + end + + defp get_distinct_statement(query, distinct_on) do + has_distinct_sort? = match?(%{__ash_bindings__: %{distinct_sort: _}}, query) + + if has_distinct_sort? 
do + {:error, default_distinct_statement(query, distinct_on)} + else + sort = query.__ash_bindings__[:sort] || [] + + distinct = + query.distinct || + %Ecto.Query.QueryExpr{ + expr: [], + params: [] + } + + if sort == [] do + {:ok, default_distinct_statement(query, distinct_on)} + else + distinct_on + |> Enum.reduce_while({sort, [], [], Enum.count(distinct.params)}, fn + _, {[], _distinct_statement, _, _count} -> + {:halt, :error} + + distinct_on, {[order_by | rest_order_by], distinct_statement, params, count} -> + case order_by do + {^distinct_on, order} -> + {distinct_expr, params, count} = + distinct_on_expr(query, distinct_on, params, count) + + {:cont, + {rest_order_by, [{order, distinct_expr} | distinct_statement], params, count}} + + _ -> + {:halt, :error} + end + end) + |> case do + :error -> + {:error, default_distinct_statement(query, distinct_on)} + + {_, result, params, _} -> + {:ok, + %{ + distinct + | expr: distinct.expr ++ Enum.reverse(result), + params: distinct.params ++ Enum.reverse(params) + }} + end + end + end + end + + defp default_distinct_statement(query, distinct_on) do + distinct = + query.distinct || + %Ecto.Query.QueryExpr{ + expr: [] + } + + {expr, params, _} = + Enum.reduce(distinct_on, {[], [], Enum.count(distinct.params)}, fn + {distinct_on_field, order}, {expr, params, count} -> + {distinct_expr, params, count} = + distinct_on_expr(query, distinct_on_field, params, count) + + {[{order, distinct_expr} | expr], params, count} + + distinct_on_field, {expr, params, count} -> + {distinct_expr, params, count} = + distinct_on_expr(query, distinct_on_field, params, count) + + {[{:asc, distinct_expr} | expr], params, count} + end) + + %{ + distinct + | expr: distinct.expr ++ Enum.reverse(expr), + params: distinct.params ++ Enum.reverse(params) + } + end + + defp distinct_on_expr(query, field, params, count) do + resource = query.__ash_bindings__.resource + + ref = + case field do + %Ash.Query.Calculation{} = calc -> + %Ref{attribute: calc, relationship_path: [], resource: resource} + + field -> + %Ref{ + attribute: Ash.Resource.Info.field(resource, field), + relationship_path: [], + resource: resource + } + end + + dynamic = AshSqlite.Expr.dynamic_expr(query, ref, query.__ash_bindings__) + + result = + Ecto.Query.Builder.Dynamic.partially_expand( + :distinct, + query, + dynamic, + params, + count + ) + + expr = elem(result, 0) + new_params = elem(result, 1) + new_count = result |> Tuple.to_list() |> List.last() + + {expr, new_params, new_count} + end + + @impl true + def filter(query, filter, resource, opts \\ []) do + query = default_bindings(query, resource) + + used_calculations = + Ash.Filter.used_calculations( + filter, + resource + ) + + query + |> AshSqlite.Join.join_all_relationships(filter, opts) + |> case do + {:ok, query} -> + {:ok, add_filter_expression(query, filter)} + + {:error, error} -> + {:error, error} + end + end + + @doc false + def default_bindings(query, resource, context \\ %{}) do + start_bindings = context[:data_layer][:start_bindings_at] || 0 + + Map.put_new(query, :__ash_bindings__, %{ + resource: resource, + current: Enum.count(query.joins) + 1 + start_bindings, + in_group?: false, + calculations: %{}, + parent_resources: [], + context: context, + bindings: %{start_bindings => %{path: [], type: :root, source: resource}} + }) + end + + @impl true + def add_calculations(query, calculations, resource) do + AshSqlite.Calculation.add_calculations(query, calculations, resource, 0) + end + + @doc false + def get_binding(resource, path, query, 
type, name_match \\ nil) + + def get_binding(resource, path, %{__ash_bindings__: _} = query, type, name_match) do + types = List.wrap(type) + + Enum.find_value(query.__ash_bindings__.bindings, fn + {binding, %{path: candidate_path, type: binding_type} = data} -> + if binding_type in types do + if name_match do + if data[:name] == name_match do + if Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) do + binding + end + end + else + if Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) do + binding + else + false + end + end + end + + _ -> + nil + end) + end + + def get_binding(_, _, _, _, _), do: nil + + defp add_filter_expression(query, filter) do + filter + |> split_and_statements() + |> Enum.reduce(query, fn filter, query -> + dynamic = AshSqlite.Expr.dynamic_expr(query, filter, query.__ash_bindings__) + + Ecto.Query.where(query, ^dynamic) + end) + end + + defp split_and_statements(%Filter{expression: expression}) do + split_and_statements(expression) + end + + defp split_and_statements(%BooleanExpression{op: :and, left: left, right: right}) do + split_and_statements(left) ++ split_and_statements(right) + end + + defp split_and_statements(%Not{expression: %Not{expression: expression}}) do + split_and_statements(expression) + end + + defp split_and_statements(%Not{ + expression: %BooleanExpression{op: :or, left: left, right: right} + }) do + split_and_statements(%BooleanExpression{ + op: :and, + left: %Not{expression: left}, + right: %Not{expression: right} + }) + end + + defp split_and_statements(other), do: [other] + + @doc false + def add_binding(query, data, additional_bindings \\ 0) do + current = query.__ash_bindings__.current + bindings = query.__ash_bindings__.bindings + + new_ash_bindings = %{ + query.__ash_bindings__ + | bindings: Map.put(bindings, current, data), + current: current + 1 + additional_bindings + } + + %{query | __ash_bindings__: new_ash_bindings} + end + + def add_known_binding(query, data, known_binding) do + bindings = query.__ash_bindings__.bindings + + new_ash_bindings = %{ + query.__ash_bindings__ + | bindings: Map.put(bindings, known_binding, data) + } + + %{query | __ash_bindings__: new_ash_bindings} + end + + @impl true + def transaction(resource, func, timeout \\ nil, reason \\ %{type: :custom, metadata: %{}}) do + repo = + case reason[:data_layer_context] do + %{repo: repo} when not is_nil(repo) -> + repo + + _ -> + AshSqlite.DataLayer.Info.repo(resource) + end + + func = fn -> + repo.on_transaction_begin(reason) + func.() + end + + if timeout do + repo.transaction(func, timeout: timeout) + else + repo.transaction(func) + end + end + + @impl true + def rollback(resource, term) do + AshSqlite.DataLayer.Info.repo(resource).rollback(term) + end + + defp table(resource, changeset) do + changeset.context[:data_layer][:table] || AshSqlite.DataLayer.Info.table(resource) + end + + defp raise_table_error!(resource, operation) do + if AshSqlite.DataLayer.Info.polymorphic?(resource) do + raise """ + Could not determine table for #{operation} on #{inspect(resource)}. + + Polymorphic resources require that the `data_layer[:table]` context is provided. + See the guide on polymorphic resources for more information. + """ + else + raise """ + Could not determine table for #{operation} on #{inspect(resource)}. 
+      """
+    end
+  end
+
+  defp dynamic_repo(resource, %{__ash_bindings__: %{context: %{data_layer: %{repo: repo}}}}) do
+    repo || AshSqlite.DataLayer.Info.repo(resource)
+  end
+
+  defp dynamic_repo(resource, %{context: %{data_layer: %{repo: repo}}}) do
+    repo || AshSqlite.DataLayer.Info.repo(resource)
+  end
+
+  defp dynamic_repo(resource, _) do
+    AshSqlite.DataLayer.Info.repo(resource)
+  end
+end
diff --git a/lib/data_layer/info.ex b/lib/data_layer/info.ex
new file mode 100644
index 0000000..d1c4e05
--- /dev/null
+++ b/lib/data_layer/info.ex
@@ -0,0 +1,128 @@
+defmodule AshSqlite.DataLayer.Info do
+  @moduledoc "Introspection functions for the AshSqlite data layer"
+
+  alias Spark.Dsl.Extension
+
+  @doc "The configured repo for a resource"
+  def repo(resource) do
+    Extension.get_opt(resource, [:sqlite], :repo, nil, true)
+  end
+
+  @doc "The configured table for a resource"
+  def table(resource) do
+    Extension.get_opt(resource, [:sqlite], :table, nil, true)
+  end
+
+  @doc "The configured schema for a resource"
+  def schema(resource) do
+    Extension.get_opt(resource, [:sqlite], :schema, nil, true)
+  end
+
+  @doc "The configured references for a resource"
+  def references(resource) do
+    Extension.get_entities(resource, [:sqlite, :references])
+  end
+
+  @doc "The configured reference for a given relationship of a resource"
+  def reference(resource, relationship) do
+    resource
+    |> Extension.get_entities([:sqlite, :references])
+    |> Enum.find(&(&1.relationship == relationship))
+  end
+
+  @doc "A keyword list of customized migration types"
+  def migration_types(resource) do
+    Extension.get_opt(resource, [:sqlite], :migration_types, [])
+  end
+
+  @doc "A keyword list of customized migration defaults"
+  def migration_defaults(resource) do
+    Extension.get_opt(resource, [:sqlite], :migration_defaults, [])
+  end
+
+  @doc "A list of attributes to be ignored when generating migrations"
+  def migration_ignore_attributes(resource) do
+    Extension.get_opt(resource, [:sqlite], :migration_ignore_attributes, [])
+  end
+
+  @doc "The configured check_constraints for a resource"
+  def check_constraints(resource) do
+    Extension.get_entities(resource, [:sqlite, :check_constraints])
+  end
+
+  @doc "The configured custom_indexes for a resource"
+  def custom_indexes(resource) do
+    Extension.get_entities(resource, [:sqlite, :custom_indexes])
+  end
+
+  @doc "The configured custom_statements for a resource"
+  def custom_statements(resource) do
+    Extension.get_entities(resource, [:sqlite, :custom_statements])
+  end
+
+  @doc "The configured polymorphic_on_delete for a resource"
+  def polymorphic_on_delete(resource) do
+    Extension.get_opt(resource, [:sqlite, :references], :polymorphic_on_delete, nil, true)
+  end
+
+  @doc "The configured polymorphic_on_update for a resource"
+  def polymorphic_on_update(resource) do
+    Extension.get_opt(resource, [:sqlite, :references], :polymorphic_on_update, nil, true)
+  end
+
+  @doc "The configured polymorphic_name for a resource"
+  def polymorphic_name(resource) do
+    Extension.get_opt(resource, [:sqlite, :references], :polymorphic_name, nil, true)
+  end
+
+  @doc "The configured polymorphic?
for a resource" + def polymorphic?(resource) do + Extension.get_opt(resource, [:sqlite], :polymorphic?, nil, true) + end + + @doc "The configured unique_index_names" + def unique_index_names(resource) do + Extension.get_opt(resource, [:sqlite], :unique_index_names, [], true) + end + + @doc "The configured exclusion_constraint_names" + def exclusion_constraint_names(resource) do + Extension.get_opt(resource, [:sqlite], :exclusion_constraint_names, [], true) + end + + @doc "The configured identity_index_names" + def identity_index_names(resource) do + Extension.get_opt(resource, [:sqlite], :identity_index_names, [], true) + end + + @doc "Identities not to include in the migrations" + def skip_identities(resource) do + Extension.get_opt(resource, [:sqlite], :skip_identities, [], true) + end + + @doc "The configured foreign_key_names" + def foreign_key_names(resource) do + Extension.get_opt(resource, [:sqlite], :foreign_key_names, [], true) + end + + @doc "Whether or not the resource should be included when generating migrations" + def migrate?(resource) do + Extension.get_opt(resource, [:sqlite], :migrate?, nil, true) + end + + @doc "A list of keys to always include in upserts." + def global_upsert_keys(resource) do + Extension.get_opt(resource, [:sqlite], :global_upsert_keys, []) + end + + @doc "A stringified version of the base_filter, to be used in a where clause when generating unique indexes" + def base_filter_sql(resource) do + Extension.get_opt(resource, [:sqlite], :base_filter_sql, nil) + end + + @doc "Skip generating unique indexes when generating migrations" + def skip_unique_indexes(resource) do + Extension.get_opt(resource, [:sqlite], :skip_unique_indexes, []) + end + +end diff --git a/lib/ecto_migration_default.ex b/lib/ecto_migration_default.ex new file mode 100644 index 0000000..dd31318 --- /dev/null +++ b/lib/ecto_migration_default.ex @@ -0,0 +1,89 @@ +defprotocol EctoMigrationDefault do + @moduledoc """ + Allows configuring how values are translated to default values in migrations. + + Still a work in progress, but covers most standard values aside from maps. + """ + @fallback_to_any true + @doc "Returns the text (elixir code) that will be placed into a migration as the default value" + def to_default(value) +end + +defimpl EctoMigrationDefault, for: Any do + require Logger + + def to_default(value) do + Logger.warning(""" + You have specified a default value for a type that cannot be explicitly + converted to an Ecto default: + + `#{inspect(value)}` + + The default value in the migration will be set to `nil` and you can edit + your migration accordingly. + + To prevent this warning, implement the `EctoMigrationDefault` protocol + for the appropriate Elixir type in your Ash project, or configure its + default value in `migration_defaults` in the sqlite section. Use `\\\"nil\\\"` + for no default. 
+ """) + + "nil" + end +end + +defimpl EctoMigrationDefault, for: Integer do + def to_default(value) do + to_string(value) + end +end + +defimpl EctoMigrationDefault, for: Float do + def to_default(value) do + to_string(value) + end +end + +defimpl EctoMigrationDefault, for: Decimal do + def to_default(value) do + ~s["#{value}"] + end +end + +defimpl EctoMigrationDefault, for: BitString do + def to_default(value) do + inspect(value) + end +end + +defimpl EctoMigrationDefault, for: DateTime do + def to_default(value) do + ~s[fragment("'#{to_string(value)}'")] + end +end + +defimpl EctoMigrationDefault, for: NaiveDateTime do + def to_default(value) do + ~s[fragment("'#{to_string(value)}'")] + end +end + +defimpl EctoMigrationDefault, for: Date do + def to_default(value) do + ~s[fragment("'#{to_string(value)}'")] + end +end + +defimpl EctoMigrationDefault, for: Time do + def to_default(value) do + ~s[fragment("'#{to_string(value)}'")] + end +end + +defimpl EctoMigrationDefault, for: Atom do + def to_default(value) when value in [nil, true, false], do: inspect(value) + + def to_default(value) do + inspect(to_string(value)) + end +end diff --git a/lib/expr.ex b/lib/expr.ex new file mode 100644 index 0000000..541a641 --- /dev/null +++ b/lib/expr.ex @@ -0,0 +1,1459 @@ +defmodule AshSqlite.Expr do + @moduledoc false + + alias Ash.Filter + alias Ash.Query.{BooleanExpression, Exists, Not, Ref} + alias Ash.Query.Operator.IsNil + + alias Ash.Query.Function.{ + Ago, + At, + Contains, + DateAdd, + DateTimeAdd, + FromNow, + GetPath, + If, + Length, + Now, + StringJoin, + StringSplit, + Today, + Type + } + + alias AshSqlite.Functions.{Fragment, Like} + + require Ecto.Query + + def dynamic_expr(query, expr, bindings, embedded? \\ false, type \\ nil) + + def dynamic_expr(query, %Filter{expression: expression}, bindings, embedded?, type) do + dynamic_expr(query, expression, bindings, embedded?, type) + end + + # A nil filter means "everything" + def dynamic_expr(_, nil, _, _, _), do: true + # A true filter means "everything" + def dynamic_expr(_, true, _, _, _), do: true + # A false filter means "nothing" + def dynamic_expr(_, false, _, _, _), do: false + + def dynamic_expr(query, expression, bindings, embedded?, type) do + do_dynamic_expr(query, expression, bindings, embedded?, type) + end + + defp do_dynamic_expr(query, expr, bindings, embedded?, type \\ nil) + + defp do_dynamic_expr(_, {:embed, other}, _bindings, _true, _type) do + other + end + + defp do_dynamic_expr(query, %Not{expression: expression}, bindings, embedded?, _type) do + new_expression = do_dynamic_expr(query, expression, bindings, embedded?, :boolean) + Ecto.Query.dynamic(not (^new_expression)) + end + + defp do_dynamic_expr( + query, + %TrigramSimilarity{arguments: [arg1, arg2], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) do + arg1 = do_dynamic_expr(query, arg1, bindings, pred_embedded? || embedded?, :string) + arg2 = do_dynamic_expr(query, arg2, bindings, pred_embedded? || embedded?, :string) + + Ecto.Query.dynamic(fragment("similarity(?, ?)", ^arg1, ^arg2)) + end + + defp do_dynamic_expr( + query, + %Like{arguments: [arg1, arg2], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) do + arg1 = do_dynamic_expr(query, arg1, bindings, pred_embedded? || embedded?, :string) + arg2 = do_dynamic_expr(query, arg2, bindings, pred_embedded? 
|| embedded?, :string) + + Ecto.Query.dynamic(like(^arg1, ^arg2)) + end + + defp do_dynamic_expr( + query, + %IsNil{left: left, right: right, embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) do + left_expr = do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?) + right_expr = do_dynamic_expr(query, right, bindings, pred_embedded? || embedded?, :boolean) + Ecto.Query.dynamic(is_nil(^left_expr) == ^right_expr) + end + + defp do_dynamic_expr( + query, + %Ago{arguments: [left, right], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) + when is_binary(right) or is_atom(right) do + left = do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?, :integer) + + Ecto.Query.dynamic( + fragment("(?)", datetime_add(^DateTime.utc_now(), ^left * -1, ^to_string(right))) + ) + end + + defp do_dynamic_expr( + query, + %At{arguments: [left, right], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) do + left = do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?, :integer) + right = do_dynamic_expr(query, right, bindings, pred_embedded? || embedded?, :integer) + + if is_integer(right) do + Ecto.Query.dynamic(fragment("(?)[?]", ^left, ^(right + 1))) + else + Ecto.Query.dynamic(fragment("(?)[? + 1]", ^left, ^right)) + end + end + + defp do_dynamic_expr( + query, + %FromNow{arguments: [left, right], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) + when is_binary(right) or is_atom(right) do + left = do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?, :integer) + + Ecto.Query.dynamic( + fragment("(?)", datetime_add(^DateTime.utc_now(), ^left, ^to_string(right))) + ) + end + + defp do_dynamic_expr( + query, + %DateTimeAdd{arguments: [datetime, amount, interval], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) + when is_binary(interval) or is_atom(interval) do + datetime = do_dynamic_expr(query, datetime, bindings, pred_embedded? || embedded?) + amount = do_dynamic_expr(query, amount, bindings, pred_embedded? || embedded?, :integer) + Ecto.Query.dynamic(fragment("(?)", datetime_add(^datetime, ^amount, ^to_string(interval)))) + end + + defp do_dynamic_expr( + query, + %DateAdd{arguments: [date, amount, interval], embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) + when is_binary(interval) or is_atom(interval) do + date = do_dynamic_expr(query, date, bindings, pred_embedded? || embedded?) + amount = do_dynamic_expr(query, amount, bindings, pred_embedded? || embedded?, :integer) + Ecto.Query.dynamic(fragment("(?)", datetime_add(^date, ^amount, ^to_string(interval)))) + end + + defp do_dynamic_expr( + query, + %GetPath{ + arguments: [%Ref{attribute: %{type: type}}, right] + } = get_path, + bindings, + embedded?, + nil + ) + when is_atom(type) and is_list(right) do + if Ash.Type.embedded_type?(type) do + type = determine_type_at_path(type, right) + + do_get_path(query, get_path, bindings, embedded?, type) + else + do_get_path(query, get_path, bindings, embedded?) + end + end + + defp do_dynamic_expr( + query, + %GetPath{ + arguments: [%Ref{attribute: %{type: {:array, type}}}, right] + } = get_path, + bindings, + embedded?, + nil + ) + when is_atom(type) and is_list(right) do + if Ash.Type.embedded_type?(type) do + type = determine_type_at_path(type, right) + do_get_path(query, get_path, bindings, embedded?, type) + else + do_get_path(query, get_path, bindings, embedded?) 
+ end + end + + defp do_dynamic_expr( + query, + %GetPath{} = get_path, + bindings, + embedded?, + type + ) do + do_get_path(query, get_path, bindings, embedded?, type) + end + + defp do_dynamic_expr( + query, + %Contains{arguments: [left, %Ash.CiString{} = right], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + if "citext" in AshSqlite.DataLayer.Info.repo(query.__ash_bindings__.resource).installed_extensions() do + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "(strpos((", + expr: left, + raw: "::citext), (", + expr: right, + raw: ")) > 0)" + ] + }, + bindings, + embedded?, + type + ) + else + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "(strpos(lower(", + expr: left, + raw: "), lower(", + expr: right, + raw: ")) > 0)" + ] + }, + bindings, + embedded?, + type + ) + end + end + + defp do_dynamic_expr( + query, + %Contains{arguments: [left, right], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "(strpos((", + expr: left, + raw: "), (", + expr: right, + raw: ")) > 0)" + ] + }, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %Length{arguments: [list], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "array_length((", + expr: list, + raw: "), 1)" + ] + }, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %If{arguments: [condition, when_true, when_false], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + [condition_type, when_true_type, when_false_type] = + case AshSqlite.Types.determine_types(If, [condition, when_true, when_false]) do + [condition_type, when_true] -> + [condition_type, when_true, nil] + + [condition_type, when_true, when_false] -> + [condition_type, when_true, when_false] + end + |> case do + [condition_type, nil, nil] -> + [condition_type, type, type] + + [condition_type, when_true, nil] -> + [condition_type, when_true, type] + + [condition_type, nil, when_false] -> + [condition_type, type, when_false] + + [condition_type, when_true, when_false] -> + [condition_type, when_true, when_false] + end + + condition = + do_dynamic_expr(query, condition, bindings, pred_embedded? || embedded?, condition_type) + + when_true = + do_dynamic_expr(query, when_true, bindings, pred_embedded? || embedded?, when_true_type) + + when_false = + do_dynamic_expr( + query, + when_false, + bindings, + pred_embedded? 
|| embedded?, + when_false_type + ) + + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "(CASE WHEN ", + casted_expr: condition, + raw: " THEN ", + casted_expr: when_true, + raw: " ELSE ", + casted_expr: when_false, + raw: " END)" + ] + }, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %StringJoin{arguments: [values, joiner], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: + Enum.reduce(values, [raw: "(concat_ws(", expr: joiner], fn value, acc -> + acc ++ [raw: ", ", expr: value] + end) ++ [raw: "))"] + }, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %StringSplit{arguments: [string, delimiter, options], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + if options[:trim?] do + require_ash_functions!(query, "string_split(..., trim?: true)") + + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "ash_trim_whitespace(string_to_array(", + expr: string, + raw: ", NULLIF(", + expr: delimiter, + raw: ", '')))" + ] + }, + bindings, + embedded?, + type + ) + else + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "string_to_array(", + expr: string, + raw: ", NULLIF(", + expr: delimiter, + raw: ", ''))" + ] + }, + bindings, + embedded?, + type + ) + end + end + + defp do_dynamic_expr( + query, + %StringJoin{arguments: [values], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: + [raw: "(concat("] ++ + (values + |> Enum.reduce([], fn value, acc -> + acc ++ [expr: value] + end) + |> Enum.intersperse({:raw, ", "})) ++ + [raw: "))"] + }, + bindings, + embedded?, + type + ) + end + + # Sorry :( + # This is bad to do, but is the only reasonable way I could find. + defp do_dynamic_expr( + query, + %Fragment{arguments: arguments, embedded?: pred_embedded?}, + bindings, + embedded?, + _type + ) do + arguments = + case arguments do + [{:raw, _} | _] -> + arguments + + arguments -> + [{:raw, ""} | arguments] + end + + arguments = + case List.last(arguments) do + nil -> + arguments + + {:raw, _} -> + arguments + + _ -> + arguments ++ [{:raw, ""}] + end + + {params, fragment_data, _} = + Enum.reduce(arguments, {[], [], 0}, fn + {:raw, str}, {params, fragment_data, count} -> + {params, [{:raw, str} | fragment_data], count} + + {:casted_expr, dynamic}, {params, fragment_data, count} -> + {item, params, count} = + {{:^, [], [count]}, [{dynamic, :any} | params], count + 1} + + {params, [{:expr, item} | fragment_data], count} + + {:expr, expr}, {params, fragment_data, count} -> + dynamic = do_dynamic_expr(query, expr, bindings, pred_embedded? || embedded?) 
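+          # Literal binary arguments are parameterized as :string; any other
+          # expression is passed through with an unspecified (:any) type.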
+ + type = + if is_binary(expr) do + :string + else + :any + end + + {item, params, count} = + {{:^, [], [count]}, [{dynamic, type} | params], count + 1} + + {params, [{:expr, item} | fragment_data], count} + end) + + %Ecto.Query.DynamicExpr{ + fun: fn _query -> + {{:fragment, [], Enum.reverse(fragment_data)}, Enum.reverse(params), [], %{}} + end, + binding: [], + file: __ENV__.file, + line: __ENV__.line + } + end + + defp do_dynamic_expr( + query, + %BooleanExpression{op: op, left: left, right: right}, + bindings, + embedded?, + _type + ) do + left_expr = do_dynamic_expr(query, left, bindings, embedded?, :boolean) + right_expr = do_dynamic_expr(query, right, bindings, embedded?, :boolean) + + case op do + :and -> + Ecto.Query.dynamic(^left_expr and ^right_expr) + + :or -> + Ecto.Query.dynamic(^left_expr or ^right_expr) + end + end + + defp do_dynamic_expr( + query, + %Ash.Query.Function.Minus{arguments: [arg], embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + [determined_type] = AshSqlite.Types.determine_types(Ash.Query.Function.Minus, [arg]) + + expr = + do_dynamic_expr(query, arg, bindings, pred_embedded? || embedded?, determined_type || type) + + Ecto.Query.dynamic(-(^expr)) + end + + # Honestly we need to either 1. not type cast or 2. build in type compatibility concepts + # instead of `:same` we need an `ANY COMPATIBLE` equivalent. + @cast_operands_for [:<>] + + defp do_dynamic_expr( + query, + %mod{ + __predicate__?: _, + left: left, + right: right, + embedded?: pred_embedded?, + operator: operator + }, + bindings, + embedded?, + type + ) do + [left_type, right_type] = + mod + |> AshSqlite.Types.determine_types([left, right]) + + left_expr = + if left_type && operator in @cast_operands_for do + left_expr = do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?) + + Ecto.Query.dynamic(type(^left_expr, ^left_type)) + else + do_dynamic_expr(query, left, bindings, pred_embedded? || embedded?, left_type) + end + + right_expr = + if right_type && operator in @cast_operands_for do + right_expr = do_dynamic_expr(query, right, bindings, pred_embedded? || embedded?) + Ecto.Query.dynamic(type(^right_expr, ^right_type)) + else + do_dynamic_expr(query, right, bindings, pred_embedded? 
|| embedded?, right_type) + end + + case operator do + :== -> + Ecto.Query.dynamic(^left_expr == ^right_expr) + + :!= -> + Ecto.Query.dynamic(^left_expr != ^right_expr) + + :> -> + Ecto.Query.dynamic(^left_expr > ^right_expr) + + :< -> + Ecto.Query.dynamic(^left_expr < ^right_expr) + + :>= -> + Ecto.Query.dynamic(^left_expr >= ^right_expr) + + :<= -> + Ecto.Query.dynamic(^left_expr <= ^right_expr) + + :in -> + Ecto.Query.dynamic(^left_expr in ^right_expr) + + :+ -> + Ecto.Query.dynamic(^left_expr + ^right_expr) + + :- -> + Ecto.Query.dynamic(^left_expr - ^right_expr) + + :/ -> + Ecto.Query.dynamic(type(^left_expr, :decimal) / type(^right_expr, :decimal)) + + :* -> + Ecto.Query.dynamic(^left_expr * ^right_expr) + + :<> -> + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "(", + casted_expr: left_expr, + raw: " || ", + casted_expr: right_expr, + raw: ")" + ] + }, + bindings, + embedded?, + type + ) + + :|| -> + require_ash_functions!(query, "||") + + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "ash_elixir_or(", + casted_expr: left_expr, + raw: ", ", + casted_expr: right_expr, + raw: ")" + ] + }, + bindings, + embedded?, + type + ) + + :&& -> + require_ash_functions!(query, "&&") + + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "ash_elixir_and(", + casted_expr: left_expr, + raw: ", ", + casted_expr: right_expr, + raw: ")" + ] + }, + bindings, + embedded?, + type + ) + + other -> + raise "Operator not implemented #{other}" + end + end + + defp do_dynamic_expr(query, %MapSet{} = mapset, bindings, embedded?, type) do + do_dynamic_expr(query, Enum.to_list(mapset), bindings, embedded?, type) + end + + defp do_dynamic_expr( + query, + %Ash.CiString{string: string} = expression, + bindings, + embedded?, + type + ) do + string = do_dynamic_expr(query, string, bindings, embedded?) + + require_extension!(query, "citext", expression) + + do_dynamic_expr( + query, + %Fragment{ + embedded?: embedded?, + arguments: [ + raw: "", + casted_expr: string, + raw: "::citext" + ] + }, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %Ref{ + attribute: %Ash.Query.Calculation{} = calculation, + relationship_path: [], + resource: resource + } = type_expr, + bindings, + embedded?, + _type + ) do + calculation = %{calculation | load: calculation.name} + + type = + AshSqlite.Types.parameterized_type( + calculation.type, + Map.get(calculation, :constraints, []) + ) + + validate_type!(query, type, type_expr) + + case Ash.Filter.hydrate_refs( + calculation.module.expression(calculation.opts, calculation.context), + %{ + resource: resource, + calculations: %{}, + public?: false + } + ) do + {:ok, expression} -> + do_dynamic_expr( + query, + expression, + bindings, + embedded?, + type + ) + + {:error, error} -> + raise """ + Failed to hydrate references in #{inspect(calculation.module.expression(calculation.opts, calculation.context))} + + #{inspect(error)} + """ + end + end + + defp do_dynamic_expr( + _query, + %Ref{ + attribute: %Ash.Resource.Calculation{} = calculation + }, + _bindings, + _embedded?, + _type + ) do + raise "cannot build expression from resource calculation! 
#{calculation.name}" + end + + defp do_dynamic_expr( + query, + %Ref{ + attribute: %Ash.Query.Calculation{} = calculation, + relationship_path: relationship_path + } = ref, + bindings, + embedded?, + _type + ) do + binding_to_replace = + Enum.find_value(bindings.bindings, fn {i, binding} -> + if binding.path == relationship_path do + i + end + end) + + temp_bindings = + bindings.bindings + |> Map.delete(0) + |> Map.update!(binding_to_replace, &Map.merge(&1, %{path: [], type: :root})) + + type = + AshSqlite.Types.parameterized_type( + calculation.type, + Map.get(calculation, :constraints, []) + ) + + validate_type!(query, type, ref) + + case Ash.Filter.hydrate_refs( + calculation.module.expression(calculation.opts, calculation.context), + %{ + resource: ref.resource, + calculations: %{}, + public?: false + } + ) do + {:ok, hydrated} -> + expr = + do_dynamic_expr( + query, + hydrated, + %{bindings | bindings: temp_bindings}, + embedded?, + type + ) + + if type do + Ecto.Query.dynamic(type(^expr, ^type)) + else + expr + end + + _ -> + raise "Failed to hydrate references in #{inspect(calculation.module.expression(calculation.opts, calculation.context))}" + end + end + + defp do_dynamic_expr( + query, + %Type{arguments: [arg1, arg2, constraints]}, + bindings, + embedded?, + _type + ) do + arg2 = Ash.Type.get_type(arg2) + arg1 = maybe_uuid_to_binary(arg2, arg1, arg1) + type = AshSqlite.Types.parameterized_type(arg2, constraints) + + if type do + Ecto.Query.dynamic(type(^do_dynamic_expr(query, arg1, bindings, embedded?, type), ^type)) + else + do_dynamic_expr(query, arg1, bindings, embedded?, type) + end + end + + defp do_dynamic_expr( + query, + %Now{embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + DateTime.utc_now(), + bindings, + embedded? || pred_embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %Today{embedded?: pred_embedded?}, + bindings, + embedded?, + type + ) do + do_dynamic_expr( + query, + Date.utc_today(), + bindings, + embedded? || pred_embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %Ash.Query.Parent{expr: expr}, + bindings, + embedded?, + type + ) do + parent? = Map.get(bindings.parent_bindings, :parent_is_parent_as?, true) + + do_dynamic_expr( + %{ + query + | __ash_bindings__: Map.put(bindings.parent_bindings, :parent?, parent?) 
+ }, + expr, + bindings, + embedded?, + type + ) + end + + defp do_dynamic_expr( + query, + %Exists{at_path: at_path, path: [first | rest], expr: expr}, + bindings, + _embedded?, + _type + ) do + resource = Ash.Resource.Info.related(query.__ash_bindings__.resource, at_path) + first_relationship = Ash.Resource.Info.relationship(resource, first) + + last_relationship = + Enum.reduce(rest, first_relationship, fn name, relationship -> + Ash.Resource.Info.relationship(relationship.destination, name) + end) + + {:ok, expr} = + Ash.Filter.hydrate_refs(expr, %{ + resource: last_relationship.destination, + parent_stack: [ + query.__ash_bindings__.resource + | query.__ash_bindings__[:parent_resources] || [] + ], + calculations: %{}, + public?: false + }) + + filter = + %Ash.Filter{expression: expr, resource: first_relationship.destination} + |> nest_expression(rest) + + {:ok, source} = + AshSqlite.Join.maybe_get_resource_query( + first_relationship.destination, + first_relationship, + query, + [first_relationship.name] + ) + + used_calculations = + Ash.Filter.used_calculations( + filter, + first_relationship.destination, + [] + ) + + {:ok, filtered} = + source + |> set_parent_path(query) + |> AshSqlite.DataLayer.filter( + filter, + first_relationship.destination, + no_this?: true + ) + + free_binding = filtered.__ash_bindings__.current + + exists_query = + cond do + Map.get(first_relationship, :manual) -> + {module, opts} = first_relationship.manual + + [pkey_attr | _] = Ash.Resource.Info.primary_key(first_relationship.destination) + + pkey_attr = Ash.Resource.Info.attribute(first_relationship.destination, pkey_attr) + + source_ref = + ref_binding( + %Ref{ + attribute: pkey_attr, + relationship_path: at_path, + resource: resource + }, + bindings + ) + + {:ok, subquery} = + module.ash_sqlite_subquery( + opts, + source_ref, + 0, + filtered + ) + + subquery + + first_relationship.type == :many_to_many -> + source_ref = + ref_binding( + %Ref{ + attribute: + Ash.Resource.Info.attribute(resource, first_relationship.source_attribute), + relationship_path: at_path, + resource: resource + }, + bindings + ) + + through_relationship = + Ash.Resource.Info.relationship(resource, first_relationship.join_relationship) + + through_bindings = + query + |> Map.delete(:__ash_bindings__) + |> AshSqlite.DataLayer.default_bindings( + query.__ash_bindings__.resource, + query.__ash_bindings__.context + ) + |> Map.get(:__ash_bindings__) + |> Map.put(:bindings, %{ + free_binding => %{path: [], source: first_relationship.through, type: :root} + }) + + {:ok, through} = + AshSqlite.Join.maybe_get_resource_query( + first_relationship.through, + through_relationship, + query, + [first_relationship.join_relationship], + through_bindings, + nil, + false + ) + + Ecto.Query.from(destination in filtered, + join: through in ^through, + as: ^free_binding, + on: + field(through, ^first_relationship.destination_attribute_on_join_resource) == + field(destination, ^first_relationship.destination_attribute), + on: + field(parent_as(^source_ref), ^first_relationship.source_attribute) == + field(through, ^first_relationship.source_attribute_on_join_resource) + ) + + Map.get(first_relationship, :no_attributes?) 
-> + filtered + + true -> + source_ref = + ref_binding( + %Ref{ + attribute: + Ash.Resource.Info.attribute(resource, first_relationship.source_attribute), + relationship_path: at_path, + resource: resource + }, + bindings + ) + + Ecto.Query.from(destination in filtered, + where: + field(parent_as(^source_ref), ^first_relationship.source_attribute) == + field(destination, ^first_relationship.destination_attribute) + ) + end + + exists_query = + exists_query + |> Ecto.Query.exclude(:select) + |> Ecto.Query.select(1) + + Ecto.Query.dynamic(exists(Ecto.Query.subquery(exists_query))) + end + + defp do_dynamic_expr( + query, + %Ref{ + attribute: %Ash.Resource.Attribute{ + name: name, + type: attr_type, + constraints: constraints + } + } = ref, + bindings, + _embedded?, + expr_type + ) do + ref_binding = ref_binding(ref, bindings) + + if is_nil(ref_binding) do + raise "Error while building reference: #{inspect(ref)}" + end + + constraints = + if attr_type do + constraints + end + + case AshSqlite.Types.parameterized_type(attr_type || expr_type, constraints) do + nil -> + if query.__ash_bindings__[:parent?] do + Ecto.Query.dynamic(field(parent_as(^ref_binding), ^name)) + else + Ecto.Query.dynamic(field(as(^ref_binding), ^name)) + end + + type -> + validate_type!(query, type, ref) + + if query.__ash_bindings__[:parent?] do + Ecto.Query.dynamic(type(field(parent_as(^ref_binding), ^name), ^type)) + else + Ecto.Query.dynamic(type(field(as(^ref_binding), ^name), ^type)) + end + end + end + + defp do_dynamic_expr(query, value, bindings, embedded?, _type) + when is_map(value) and not is_struct(value) do + Map.new(value, fn {key, value} -> + {key, do_dynamic_expr(query, value, bindings, embedded?)} + end) + end + + defp do_dynamic_expr(query, other, bindings, true, type) do + if other && is_atom(other) && !is_boolean(other) do + to_string(other) + else + if Ash.Filter.TemplateHelpers.expr?(other) do + if is_list(other) do + list_expr(query, other, bindings, true, type) + else + raise "Unsupported expression in AshSqlite query: #{inspect(other)}" + end + else + maybe_sanitize_list(query, other, bindings, true, type) + end + end + end + + defp do_dynamic_expr(query, value, bindings, embedded?, {:in, type}) when is_list(value) do + list_expr(query, value, bindings, embedded?, {:array, type}) + end + + defp do_dynamic_expr(query, value, bindings, embedded?, type) + when not is_nil(value) and is_atom(value) and not is_boolean(value) do + do_dynamic_expr(query, to_string(value), bindings, embedded?, type) + end + + defp do_dynamic_expr(query, value, bindings, false, type) when type == nil or type == :any do + if is_list(value) do + list_expr(query, value, bindings, false, type) + else + maybe_sanitize_list(query, value, bindings, true, type) + end + end + + defp do_dynamic_expr(query, value, bindings, false, type) do + if Ash.Filter.TemplateHelpers.expr?(value) do + if is_list(value) do + list_expr(query, value, bindings, false, type) + else + raise "Unsupported expression in AshSqlite query: #{inspect(value)}" + end + else + case maybe_sanitize_list(query, value, bindings, true, type) do + ^value -> + if type do + validate_type!(query, type, value) + + Ecto.Query.dynamic(type(^value, ^type)) + else + value + end + + value -> + value + end + end + end + + defp list_expr(query, value, bindings, embedded?, type) do + type = + case type do + {:array, type} -> type + {:in, type} -> type + _ -> nil + end + + {params, exprs, _} = + Enum.reduce(value, {[], [], 0}, fn value, {params, data, count} -> + case 
do_dynamic_expr(query, value, bindings, embedded?, type) do + %Ecto.Query.DynamicExpr{} = dynamic -> + result = + Ecto.Query.Builder.Dynamic.partially_expand( + :select, + query, + dynamic, + params, + count + ) + + expr = elem(result, 0) + new_params = elem(result, 1) + new_count = result |> Tuple.to_list() |> List.last() + + {new_params, [expr | data], new_count} + + other -> + {params, [other | data], count} + end + end) + + %Ecto.Query.DynamicExpr{ + fun: fn _query -> + {Enum.reverse(exprs), Enum.reverse(params), [], []} + end, + binding: [], + file: __ENV__.file, + line: __ENV__.line + } + end + + defp maybe_uuid_to_binary({:array, type}, value, _original_value) when is_list(value) do + Enum.map(value, &maybe_uuid_to_binary(type, &1, &1)) + end + + defp maybe_uuid_to_binary(type, value, original_value) + when type in [ + Ash.Type.UUID.EctoType, + :uuid + ] and is_binary(value) do + case Ecto.UUID.dump(value) do + {:ok, encoded} -> encoded + _ -> original_value + end + end + + defp maybe_uuid_to_binary(_type, _value, original_value), do: original_value + + @doc false + def validate_type!(_query, _type, _context) do + :ok + end + + defp maybe_type(dynamic, nil, _query), do: dynamic + + defp maybe_type(dynamic, type, query) do + validate_type!(query, type, type) + + Ecto.Query.dynamic(type(^dynamic, ^type)) + end + + defp maybe_sanitize_list(query, value, bindings, embedded?, type) do + if is_list(value) do + Enum.map(value, &do_dynamic_expr(query, &1, bindings, embedded?, type)) + else + value + end + end + + defp ref_binding(%{attribute: %Ash.Resource.Attribute{}} = ref, bindings) do + Enum.find_value(bindings.bindings, fn {binding, data} -> + data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding + end) + end + + defp do_get_path( + query, + %GetPath{arguments: [left, right], embedded?: pred_embedded?} = get_path, + bindings, + embedded?, + type \\ nil + ) do + path = Enum.map(right, &to_string/1) + + path_frags = + path + |> Enum.flat_map(fn item -> + [expr: item, raw: "::text,"] + end) + |> :lists.droplast() + |> Enum.concat(raw: "::text)") + + expr = + do_dynamic_expr( + query, + %Fragment{ + embedded?: pred_embedded?, + arguments: + [ + raw: "jsonb_extract_path_text(", + expr: left, + raw: "::jsonb," + ] ++ path_frags + }, + bindings, + embedded? + ) + + if type do + validate_type!(query, type, get_path) + + Ecto.Query.dynamic(type(^expr, ^type)) + else + expr + end + end + + defp require_ash_functions!(query, operator) do + installed_extensions = + AshSqlite.DataLayer.Info.repo(query.__ash_bindings__.resource).installed_extensions() + + unless "ash-functions" in installed_extensions do + raise """ + Cannot use `#{operator}` without adding the extension `ash-functions` to your repo. + + Add it to the list in `installed_extensions/0` and generate migrations. + """ + end + end + + defp require_extension!(query, extension, context) do + repo = AshSqlite.DataLayer.Info.repo(query.__ash_bindings__.resource) + + unless extension in repo.installed_extensions() do + raise Ash.Error.Query.InvalidExpression, + expression: context, + message: + "The #{extension} extension needs to be installed before #{inspect(context)} can be used. Please add \"#{extension}\" to the list of installed_extensions in #{inspect(repo)}." 
+ end + end + + defp determine_type_at_path(type, path) do + path + |> Enum.reject(&is_integer/1) + |> do_determine_type_at_path(type) + |> case do + nil -> + nil + + {type, constraints} -> + AshSqlite.Types.parameterized_type(type, constraints) + end + end + + defp do_determine_type_at_path([], _), do: nil + + defp do_determine_type_at_path([item], type) do + case Ash.Resource.Info.attribute(type, item) do + nil -> + nil + + %{type: {:array, type}, constraints: constraints} -> + constraints = constraints[:items] || [] + + {type, constraints} + + %{type: type, constraints: constraints} -> + {type, constraints} + end + end + + defp do_determine_type_at_path([item | rest], type) do + case Ash.Resource.Info.attribute(type, item) do + nil -> + nil + + %{type: {:array, type}} -> + if Ash.Type.embedded_type?(type) do + type + else + nil + end + + %{type: type} -> + if Ash.Type.embedded_type?(type) do + type + else + nil + end + end + |> case do + nil -> + nil + + type -> + do_determine_type_at_path(rest, type) + end + end + + defp set_parent_path(query, parent) do + # This is a stupid name. Its actually the path we *remove* when stepping up a level. I.e the child's path + Map.update!(query, :__ash_bindings__, fn ash_bindings -> + ash_bindings + |> Map.put(:parent_bindings, parent.__ash_bindings__) + |> Map.put(:parent_resources, [ + parent.__ash_bindings__.resource | parent.__ash_bindings__[:parent_resources] || [] + ]) + end) + end + + defp nest_expression(expression, relationship_path) do + case expression do + {key, value} when is_atom(key) -> + {key, nest_expression(value, relationship_path)} + + %Not{expression: expression} = not_expr -> + %{not_expr | expression: nest_expression(expression, relationship_path)} + + %BooleanExpression{left: left, right: right} = expression -> + %{ + expression + | left: nest_expression(left, relationship_path), + right: nest_expression(right, relationship_path) + } + + %{__operator__?: true, left: left, right: right} = op -> + left = nest_expression(left, relationship_path) + right = nest_expression(right, relationship_path) + %{op | left: left, right: right} + + %Ref{} = ref -> + add_to_ref_path(ref, relationship_path) + + %{__function__?: true, arguments: args} = func -> + %{func | arguments: Enum.map(args, &nest_expression(&1, relationship_path))} + + %Ash.Query.Exists{} = exists -> + %{exists | at_path: relationship_path ++ exists.at_path} + + %Ash.Query.Parent{} = parent -> + parent + + %Ash.Query.Call{args: args} = call -> + %{call | args: Enum.map(args, &nest_expression(&1, relationship_path))} + + %Ash.Filter{expression: expression} = filter -> + %{filter | expression: nest_expression(expression, relationship_path)} + + other -> + other + end + end + + defp add_to_ref_path(%Ref{relationship_path: relationship_path} = ref, to_add) do + %{ref | relationship_path: to_add ++ relationship_path} + end +end diff --git a/lib/functions/fragment.ex b/lib/functions/fragment.ex new file mode 100644 index 0000000..1b0130e --- /dev/null +++ b/lib/functions/fragment.ex @@ -0,0 +1,72 @@ +defmodule AshSqlite.Functions.Fragment do + @moduledoc """ + A function that maps to ecto's `fragment` function + + https://hexdocs.pm/ecto/Ecto.Query.API.html#fragment/1 + """ + + use Ash.Query.Function, name: :fragment + + def private?, do: true + + # Varargs is special, and should only be used in rare circumstances (like this one) + # no type casting or help can be provided for these functions. 
+ def args, do: :var_args + + def new([fragment | _]) when not is_binary(fragment) do + {:error, "First argument to `fragment` must be a string."} + end + + def new([fragment | rest]) do + split = split_fragment(fragment) + + if Enum.count(split, &(&1 == :slot)) != length(rest) do + {:error, + "fragment(...) expects extra arguments in the same amount of question marks in string. " <> + "It received #{Enum.count(split, &(&1 == :slot))} extra argument(s) but expected #{length(rest)}"} + else + {:ok, %__MODULE__{arguments: merge_fragment(split, rest)}} + end + end + + def casted_new([fragment | _]) when not is_binary(fragment) do + {:error, "First argument to `fragment` must be a string."} + end + + def casted_new([fragment | rest]) do + split = split_fragment(fragment) + + if Enum.count(split, &(&1 == :slot)) != length(rest) do + {:error, + "fragment(...) expects extra arguments in the same amount of question marks in string. " <> + "It received #{Enum.count(split, &(&1 == :slot))} extra argument(s) but expected #{length(rest)}"} + else + {:ok, %__MODULE__{arguments: merge_fragment(split, rest, :casted_expr)}} + end + end + + defp merge_fragment(expr, args, tag \\ :expr) + defp merge_fragment([], [], _tag), do: [] + + defp merge_fragment([:slot | rest], [arg | rest_args], tag) do + [{tag, arg} | merge_fragment(rest, rest_args, tag)] + end + + defp merge_fragment([val | rest], rest_args, tag) do + [{:raw, val} | merge_fragment(rest, rest_args, tag)] + end + + defp split_fragment(frag, consumed \\ "") + + defp split_fragment(<<>>, consumed), + do: [consumed] + + defp split_fragment(<>, consumed), + do: [consumed, :slot | split_fragment(rest, "")] + + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) + + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) +end diff --git a/lib/functions/like.ex b/lib/functions/like.ex new file mode 100644 index 0000000..442b807 --- /dev/null +++ b/lib/functions/like.ex @@ -0,0 +1,9 @@ +defmodule AshSqlite.Functions.Like do + @moduledoc """ + Maps to the builtin sqlite function `like`. + """ + + use Ash.Query.Function, name: :like + + def args, do: [[:string, :string]] +end diff --git a/lib/join.ex b/lib/join.ex new file mode 100644 index 0000000..50b2d0f --- /dev/null +++ b/lib/join.ex @@ -0,0 +1,775 @@ +defmodule AshSqlite.Join do + @moduledoc false + import Ecto.Query, only: [from: 2, subquery: 1] + + alias Ash.Query.{BooleanExpression, Not, Ref} + + @known_inner_join_operators [ + Eq, + GreaterThan, + GreaterThanOrEqual, + In, + LessThanOrEqual, + LessThan, + NotEq + ] + |> Enum.map(&Module.concat(Ash.Query.Operator, &1)) + + @known_inner_join_functions [ + Ago, + Contains + ] + |> Enum.map(&Module.concat(Ash.Query.Function, &1)) + + @known_inner_join_predicates @known_inner_join_functions ++ @known_inner_join_operators + + def join_all_relationships( + query, + filter, + opts \\ [], + relationship_paths \\ nil, + path \\ [], + source \\ nil + ) do + relationship_paths = + cond do + relationship_paths -> + relationship_paths + + opts[:no_this?] 
-> + filter + |> Ash.Filter.map(fn + %Ash.Query.Parent{} -> + # Removing any `This` from the filter + nil + + other -> + other + end) + |> Ash.Filter.relationship_paths() + |> to_joins(filter) + + true -> + filter + |> Ash.Filter.relationship_paths() + |> to_joins(filter) + end + + Enum.reduce_while(relationship_paths, {:ok, query}, fn + {_join_type, []}, {:ok, query} -> + {:cont, {:ok, query}} + + {join_type, [relationship | rest_rels]}, {:ok, query} -> + source = source || relationship.source + + current_path = path ++ [relationship] + + current_join_type = join_type + + look_for_join_types = + case join_type do + :left -> + [:left, :inner] + + :inner -> + [:left, :inner] + + other -> + [other] + end + + case get_binding(source, Enum.map(current_path, & &1.name), query, look_for_join_types) do + binding when is_integer(binding) -> + case join_all_relationships( + query, + filter, + opts, + [{join_type, rest_rels}], + current_path, + source + ) do + {:ok, query} -> + {:cont, {:ok, query}} + + {:error, error} -> + {:halt, {:error, error}} + end + + nil -> + case join_relationship( + query, + relationship, + Enum.map(path, & &1.name), + current_join_type, + source, + filter + ) do + {:ok, joined_query} -> + joined_query_with_distinct = add_distinct(relationship, join_type, joined_query) + + case join_all_relationships( + joined_query_with_distinct, + filter, + opts, + [{join_type, rest_rels}], + current_path, + source + ) do + {:ok, query} -> + {:cont, {:ok, query}} + + {:error, error} -> + {:halt, {:error, error}} + end + + {:error, error} -> + {:halt, {:error, error}} + end + end + end) + end + + defp to_joins(paths, filter) do + paths + |> Enum.map(fn path -> + if can_inner_join?(path, filter) do + {:inner, + AshSqlite.Join.relationship_path_to_relationships( + filter.resource, + path + )} + else + {:left, + AshSqlite.Join.relationship_path_to_relationships( + filter.resource, + path + )} + end + end) + end + + def relationship_path_to_relationships(resource, path, acc \\ []) + def relationship_path_to_relationships(_resource, [], acc), do: Enum.reverse(acc) + + def relationship_path_to_relationships(resource, [relationship | rest], acc) do + relationship = Ash.Resource.Info.relationship(resource, relationship) + + relationship_path_to_relationships(relationship.destination, rest, [relationship | acc]) + end + + def maybe_get_resource_query( + resource, + relationship, + root_query, + path \\ [], + bindings \\ nil, + start_binding \\ nil, + is_subquery? \\ true + ) do + resource + |> Ash.Query.new(nil, base_filter?: false) + |> Ash.Query.set_context(%{data_layer: %{start_bindings_at: start_binding}}) + |> Ash.Query.set_context((bindings || root_query.__ash_bindings__).context) + |> Ash.Query.set_context(relationship.context) + |> case do + %{valid?: true} = query -> + ash_query = query + + initial_query = %{ + AshSqlite.DataLayer.resource_to_query(resource, nil) + | prefix: Map.get(root_query, :prefix) + } + + case Ash.Query.data_layer_query(query, + initial_query: initial_query + ) do + {:ok, query} -> + query = + query + |> do_base_filter( + root_query, + ash_query, + resource, + path, + bindings + ) + |> do_relationship_filter( + relationship, + root_query, + ash_query, + resource, + path, + bindings, + is_subquery? 
+ ) + + {:ok, query} + + {:error, error} -> + {:error, error} + end + + query -> + {:error, query} + end + end + + defp do_relationship_filter(query, %{filter: nil}, _, _, _, _, _, _), do: query + + defp do_relationship_filter( + query, + relationship, + root_query, + ash_query, + resource, + path, + bindings, + is_subquery? + ) do + filter = + resource + |> Ash.Filter.parse!( + relationship.filter, + ash_query.calculations, + Map.update( + ash_query.context, + :parent_stack, + [relationship.source], + &[&1 | relationship.source] + ) + ) + + base_bindings = bindings || query.__ash_bindings__ + + parent_binding = + case :lists.droplast(path) do + [] -> + base_bindings.bindings + |> Enum.find_value(fn {key, %{type: type}} -> + if type == :root do + key + end + end) + + path -> + get_binding( + root_query.__ash_bindings__.resource, + path, + %{query | __ash_bindings__: base_bindings}, + [ + :inner, + :left + ] + ) + end + + parent_bindings = %{ + base_bindings + | resource: relationship.source, + calculations: %{}, + parent_resources: [], + context: relationship.context, + current: parent_binding + 1 + } + + parent_bindings = + if bindings do + Map.put(parent_bindings, :parent_is_parent_as?, !is_subquery?) + else + parent_bindings + |> Map.update!(:bindings, &Map.take(&1, [parent_binding])) + end + + has_bindings? = not is_nil(bindings) + + bindings = + base_bindings + |> Map.put(:parent_bindings, parent_bindings) + |> Map.put(:parent_resources, [ + relationship.source | parent_bindings[:parent_resources] || [] + ]) + + dynamic = + if has_bindings? do + filter = + if is_subquery? do + Ash.Filter.move_to_relationship_path(filter, path) + else + filter + end + + AshSqlite.Expr.dynamic_expr(root_query, filter, bindings, true) + else + AshSqlite.Expr.dynamic_expr(query, filter, bindings, true) + end + + {:ok, query} = join_all_relationships(query, filter) + from(row in query, where: ^dynamic) + end + + defp do_base_filter(query, root_query, ash_query, resource, path, bindings) do + case Ash.Resource.Info.base_filter(resource) do + nil -> + query + + filter -> + filter = + resource + |> Ash.Filter.parse!( + filter, + ash_query.calculations, + ash_query.context + ) + + dynamic = + if bindings do + filter = Ash.Filter.move_to_relationship_path(filter, path) + + AshSqlite.Expr.dynamic_expr(root_query, filter, bindings, true) + else + AshSqlite.Expr.dynamic_expr(query, filter, query.__ash_bindings__, true) + end + + from(row in query, where: ^dynamic) + end + end + + def set_join_prefix(join_query, query, resource) do + %{ + join_query + | prefix: + AshSqlite.DataLayer.Info.schema(resource) || + AshSqlite.DataLayer.Info.repo(resource).config()[:default_prefix] || + "public" + } + end + + defp can_inner_join?(path, expr, seen_an_or? \\ false) + + defp can_inner_join?(path, %{expression: expr}, seen_an_or?), + do: can_inner_join?(path, expr, seen_an_or?) + + defp can_inner_join?(_path, expr, _seen_an_or?) when expr in [nil, true, false], do: true + + defp can_inner_join?(path, %BooleanExpression{op: :and, left: left, right: right}, seen_an_or?) do + can_inner_join?(path, left, seen_an_or?) || can_inner_join?(path, right, seen_an_or?) 
+ end + + defp can_inner_join?(path, %BooleanExpression{op: :or, left: left, right: right}, _) do + can_inner_join?(path, left, true) && can_inner_join?(path, right, true) + end + + defp can_inner_join?( + _, + %Not{}, + _ + ) do + false + end + + defp can_inner_join?( + search_path, + %struct{__operator__?: true, left: %Ref{relationship_path: relationship_path}}, + seen_an_or? + ) + when search_path == relationship_path and struct in @known_inner_join_predicates do + not seen_an_or? + end + + defp can_inner_join?( + search_path, + %struct{__operator__?: true, right: %Ref{relationship_path: relationship_path}}, + seen_an_or? + ) + when search_path == relationship_path and struct in @known_inner_join_predicates do + not seen_an_or? + end + + defp can_inner_join?( + search_path, + %struct{__function__?: true, arguments: arguments}, + seen_an_or? + ) + when struct in @known_inner_join_predicates do + if Enum.any?(arguments, &match?(%Ref{relationship_path: ^search_path}, &1)) do + not seen_an_or? + else + true + end + end + + defp can_inner_join?(_, _, _), do: false + + @doc false + def get_binding(resource, candidate_path, %{__ash_bindings__: _} = query, types) do + types = List.wrap(types) + + Enum.find_value(query.__ash_bindings__.bindings, fn + {binding, %{path: path, source: source, type: type}} -> + if type in types && + Ash.SatSolver.synonymous_relationship_paths?(resource, path, candidate_path, source) do + binding + end + + _ -> + nil + end) + end + + def get_binding(_, _, _, _), do: nil + + defp add_distinct(relationship, _join_type, joined_query) do + if !joined_query.__ash_bindings__.in_group? && + (relationship.cardinality == :many || Map.get(relationship, :from_many?)) && + !joined_query.distinct do + from(row in joined_query, + distinct: ^Ash.Resource.Info.primary_key(joined_query.__ash_bindings__.resource) + ) + else + joined_query + end + end + + defp join_relationship( + query, + relationship, + path, + join_type, + source, + filter + ) do + case Map.get(query.__ash_bindings__.bindings, path) do + %{type: existing_join_type} when join_type != existing_join_type -> + raise "unreachable?" + + nil -> + do_join_relationship( + query, + relationship, + path, + join_type, + source, + filter + ) + + _ -> + {:ok, query} + end + end + + defp do_join_relationship( + query, + %{manual: {module, opts}} = relationship, + path, + kind, + source, + filter + ) do + full_path = path ++ [relationship.name] + initial_ash_bindings = query.__ash_bindings__ + + binding_data = %{type: kind, path: full_path, source: source} + + query = AshSqlite.DataLayer.add_binding(query, binding_data) + + used_calculations = + Ash.Filter.used_calculations( + filter, + relationship.destination, + full_path + ) + + use_root_query_bindings? = true + + root_bindings = + if use_root_query_bindings? 
do + query.__ash_bindings__ + end + + case maybe_get_resource_query( + relationship.destination, + relationship, + query, + full_path, + root_bindings + ) do + {:error, error} -> + {:error, error} + + {:ok, relationship_destination} -> + relationship_destination = + relationship_destination + |> Ecto.Queryable.to_query() + |> set_join_prefix(query, relationship.destination) + + binding_kinds = + case kind do + :left -> + [:left, :inner] + + :inner -> + [:left, :inner] + + other -> + [other] + end + + current_binding = + Enum.find_value(initial_ash_bindings.bindings, 0, fn {binding, data} -> + if data.type in binding_kinds && data.path == path do + binding + end + end) + + module.ash_sqlite_join( + query, + opts, + current_binding, + initial_ash_bindings.current, + kind, + relationship_destination + ) + end + rescue + e in UndefinedFunctionError -> + if e.function == :ash_sqlite_join do + reraise """ + AshSqlite cannot join to a manual relationship #{inspect(module)} that does not implement the `AshSqlite.ManualRelationship` behaviour. + """, + __STACKTRACE__ + else + reraise e, __STACKTRACE__ + end + end + + defp do_join_relationship( + query, + %{type: :many_to_many} = relationship, + path, + kind, + source, + filter + ) do + join_relationship = + Ash.Resource.Info.relationship(relationship.source, relationship.join_relationship) + + join_path = path ++ [join_relationship.name] + + full_path = path ++ [relationship.name] + + initial_ash_bindings = query.__ash_bindings__ + + binding_data = %{type: kind, path: full_path, source: source} + + query = + query + |> AshSqlite.DataLayer.add_binding(%{ + path: join_path, + type: :left, + source: source + }) + |> AshSqlite.DataLayer.add_binding(binding_data) + + used_calculations = + Ash.Filter.used_calculations( + filter, + relationship.destination, + full_path + ) + + use_root_query_bindings? = true + + root_bindings = + if use_root_query_bindings? 
do + query.__ash_bindings__ + end + + with {:ok, relationship_through} <- + maybe_get_resource_query( + relationship.through, + join_relationship, + query, + join_path, + root_bindings + ), + {:ok, relationship_destination} <- + maybe_get_resource_query( + relationship.destination, + relationship, + query, + path, + root_bindings + ) do + relationship_through = + relationship_through + |> Ecto.Queryable.to_query() + |> set_join_prefix(query, relationship.through) + + relationship_destination = + relationship_destination + |> Ecto.Queryable.to_query() + |> set_join_prefix(query, relationship.destination) + + binding_kinds = + case kind do + :left -> + [:left, :inner] + + :inner -> + [:left, :inner] + + other -> + [other] + end + + current_binding = + Enum.find_value(initial_ash_bindings.bindings, 0, fn {binding, data} -> + if data.type in binding_kinds && data.path == path do + binding + end + end) + + query = + case kind do + :inner -> + from([{row, current_binding}] in query, + join: through in ^relationship_through, + as: ^initial_ash_bindings.current, + on: + field(row, ^relationship.source_attribute) == + field(through, ^relationship.source_attribute_on_join_resource), + join: destination in ^relationship_destination, + as: ^(initial_ash_bindings.current + 1), + on: + field(destination, ^relationship.destination_attribute) == + field(through, ^relationship.destination_attribute_on_join_resource) + ) + + _ -> + from([{row, current_binding}] in query, + left_join: through in ^relationship_through, + as: ^initial_ash_bindings.current, + on: + field(row, ^relationship.source_attribute) == + field(through, ^relationship.source_attribute_on_join_resource), + left_join: destination in ^relationship_destination, + as: ^(initial_ash_bindings.current + 1), + on: + field(destination, ^relationship.destination_attribute) == + field(through, ^relationship.destination_attribute_on_join_resource) + ) + end + + {:ok, query} + end + end + + defp do_join_relationship( + query, + relationship, + path, + kind, + source, + filter + ) do + full_path = path ++ [relationship.name] + initial_ash_bindings = query.__ash_bindings__ + + binding_data = %{type: kind, path: full_path, source: source} + + query = AshSqlite.DataLayer.add_binding(query, binding_data) + + used_calculations = + Ash.Filter.used_calculations( + filter, + relationship.destination, + full_path + ) + + use_root_query_bindings? = true + + root_bindings = + if use_root_query_bindings? 
do + query.__ash_bindings__ + end + + case maybe_get_resource_query( + relationship.destination, + relationship, + query, + full_path, + root_bindings + ) do + {:error, error} -> + {:error, error} + + {:ok, relationship_destination} -> + relationship_destination = + relationship_destination + |> Ecto.Queryable.to_query() + |> set_join_prefix(query, relationship.destination) + + binding_kinds = + case kind do + :left -> + [:left, :inner] + + :inner -> + [:left, :inner] + + other -> + [other] + end + + current_binding = + Enum.find_value(initial_ash_bindings.bindings, 0, fn {binding, data} -> + if data.type in binding_kinds && data.path == path do + binding + end + end) + + query = + case {kind, Map.get(relationship, :no_attributes?)} do + {:inner, true} -> + from([{row, current_binding}] in query, + join: destination in ^relationship_destination, + as: ^initial_ash_bindings.current, + on: true + ) + + {_, true} -> + from([{row, current_binding}] in query, + left_join: destination in ^relationship_destination, + as: ^initial_ash_bindings.current, + on: true + ) + + {:inner, _} -> + from([{row, current_binding}] in query, + join: destination in ^relationship_destination, + as: ^initial_ash_bindings.current, + on: + field(row, ^relationship.source_attribute) == + field( + destination, + ^relationship.destination_attribute + ) + ) + + _ -> + from([{row, current_binding}] in query, + left_join: destination in ^relationship_destination, + as: ^initial_ash_bindings.current, + on: + field(row, ^relationship.source_attribute) == + field( + destination, + ^relationship.destination_attribute + ) + ) + end + + {:ok, query} + end + end +end diff --git a/lib/manual_relationship.ex b/lib/manual_relationship.ex new file mode 100644 index 0000000..8d26497 --- /dev/null +++ b/lib/manual_relationship.ex @@ -0,0 +1,25 @@ +defmodule AshSqlite.ManualRelationship do + @moduledoc "A behavior for sqlite-specific manual relationship functionality" + + @callback ash_sqlite_join( + source_query :: Ecto.Query.t(), + opts :: Keyword.t(), + current_binding :: term, + destination_binding :: term, + type :: :inner | :left, + destination_query :: Ecto.Query.t() + ) :: {:ok, Ecto.Query.t()} | {:error, term} + + @callback ash_sqlite_subquery( + opts :: Keyword.t(), + current_binding :: term, + destination_binding :: term, + destination_query :: Ecto.Query.t() + ) :: {:ok, Ecto.Query.t()} | {:error, term} + + defmacro __using__(_) do + quote do + @behaviour AshSqlite.ManualRelationship + end + end +end diff --git a/lib/migration_generator/migration_generator.ex b/lib/migration_generator/migration_generator.ex new file mode 100644 index 0000000..03fef30 --- /dev/null +++ b/lib/migration_generator/migration_generator.ex @@ -0,0 +1,3044 @@ +defmodule AshSqlite.MigrationGenerator do + @moduledoc false + + require Logger + + import Mix.Generator + + alias AshSqlite.MigrationGenerator.{Operation, Phase} + + defstruct snapshot_path: nil, + migration_path: nil, + name: nil, + quiet: false, + current_snapshots: nil, + answers: [], + no_shell?: false, + format: true, + dry_run: false, + check: false, + drop_columns: false + + def generate(apis, opts \\ []) do + apis = List.wrap(apis) + opts = opts(opts) + + all_resources = Enum.uniq(Enum.flat_map(apis, &Ash.Api.Info.resources/1)) + + snapshots = + all_resources + |> Enum.filter(fn resource -> + Ash.DataLayer.data_layer(resource) == AshSqlite.DataLayer && + AshSqlite.DataLayer.Info.migrate?(resource) + end) + |> Enum.flat_map(&get_snapshots(&1, all_resources)) + + repos = + snapshots + 
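+      # one set of migrations is generated per distinct repo referenced by the snapshots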
|> Enum.map(& &1.repo) + |> Enum.uniq() + + Mix.shell().info("\nExtension Migrations: ") + create_extension_migrations(repos, opts) + Mix.shell().info("\nGenerating Migrations:") + create_migrations(snapshots, opts) + end + + @doc """ + A work in progress utility for getting snapshots. + + Does not support everything supported by the migration generator. + """ + def take_snapshots(api, repo, only_resources \\ nil) do + all_resources = api |> Ash.Api.Info.resources() |> Enum.uniq() + + all_resources + |> Enum.filter(fn resource -> + Ash.DataLayer.data_layer(resource) == AshSqlite.DataLayer && + AshSqlite.DataLayer.Info.repo(resource) == repo && + (is_nil(only_resources) || resource in only_resources) + end) + |> Enum.flat_map(&get_snapshots(&1, all_resources)) + end + + @doc """ + A work in progress utility for getting operations between snapshots. + + Does not support everything supported by the migration generator. + """ + def get_operations_from_snapshots(old_snapshots, new_snapshots, opts \\ []) do + opts = %{opts(opts) | no_shell?: true} + + old_snapshots = + old_snapshots + |> Enum.map(&sanitize_snapshot/1) + + new_snapshots + |> deduplicate_snapshots(opts, old_snapshots) + |> fetch_operations(opts) + |> Enum.flat_map(&elem(&1, 1)) + |> Enum.uniq() + |> organize_operations() + end + + defp add_references_primary_key(snapshot, snapshots) do + %{ + snapshot + | attributes: + snapshot.attributes + |> Enum.map(fn + %{references: references} = attribute when not is_nil(references) -> + if is_nil(Map.get(references, :primary_key?)) do + %{ + attribute + | references: + Map.put( + references, + :primary_key?, + find_references_primary_key( + references, + snapshots + ) + ) + } + else + attribute + end + + attribute -> + attribute + end) + } + end + + defp find_references_primary_key(references, snapshots) do + Enum.find_value(snapshots, false, fn snapshot -> + if snapshot && references && snapshot.table == references.table do + Enum.any?(snapshot.attributes, fn attribute -> + attribute.source == references.destination_attribute && attribute.primary_key? + end) + end + end) + end + + defp opts(opts) do + case struct(__MODULE__, opts) do + %{check: true} = opts -> + %{opts | dry_run: true} + + opts -> + opts + end + end + + defp snapshot_path(%{snapshot_path: snapshot_path}, _) when not is_nil(snapshot_path), + do: snapshot_path + + defp snapshot_path(_config, repo) do + # Copied from ecto's mix task, thanks Ecto ❤️ + config = repo.config() + + app = Keyword.fetch!(config, :otp_app) + Path.join([Mix.Project.deps_paths()[app] || File.cwd!(), "priv", "resource_snapshots"]) + end + + @latest_ash_functions_version 1 + + defp create_extension_migrations(repos, opts) do + for repo <- repos do + snapshot_path = snapshot_path(opts, repo) + snapshot_file = Path.join(snapshot_path, "extensions.json") + + installed_extensions = + if File.exists?(snapshot_file) do + snapshot_file + |> File.read!() + |> Jason.decode!(keys: :atoms!) 
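+          # extensions.json records which extensions already have install migrations generated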
+ else + [] + end + + {extensions_snapshot, installed_extensions} = + case installed_extensions do + installed when is_list(installed) -> + {%{ + installed: installed + }, installed} + + other -> + {other, other.installed} + end + + requesteds = + repo.installed_extensions() + |> Enum.map(fn + extension_module when is_atom(extension_module) -> + {ext_name, version, _up_fn, _down_fn} = extension = extension_module.extension() + + {"#{ext_name}_v#{version}", extension} + + extension_name -> + {extension_name, extension_name} + end) + + to_install = + requesteds + |> Enum.filter(fn {name, _extension} -> !Enum.member?(installed_extensions, name) end) + |> Enum.map(fn {_name, extension} -> extension end) + + to_install = + if "ash-functions" in requesteds && + extensions_snapshot[:ash_functions_version] != + @latest_ash_functions_version do + Enum.uniq(["ash-functions" | to_install]) + else + to_install + end + + if Enum.empty?(to_install) do + Mix.shell().info("No extensions to install") + :ok + else + {module, migration_name} = + case to_install do + [{ext_name, version, _up_fn, _down_fn}] -> + {"install_#{ext_name}_v#{version}", + "#{timestamp(true)}_install_#{ext_name}_v#{version}_extension"} + + [single] -> + {"install_#{single}", "#{timestamp(true)}_install_#{single}_extension"} + + multiple -> + {"install_#{Enum.count(multiple)}_extensions", + "#{timestamp(true)}_install_#{Enum.count(multiple)}_extensions"} + end + + migration_file = + opts + |> migration_path(repo) + |> Path.join(migration_name <> ".exs") + + sanitized_module = + module + |> String.replace("-", "_") + |> Macro.camelize() + + module_name = Module.concat([repo, Migrations, sanitized_module]) + + install = + Enum.map_join(to_install, "\n", fn + "ash-functions" -> + install_ash_functions(extensions_snapshot[:ash_functions_version]) + + {_ext_name, version, up_fn, _down_fn} when is_function(up_fn, 1) -> + up_fn.(version) + + extension -> + "execute(\"CREATE EXTENSION IF NOT EXISTS \\\"#{extension}\\\"\")" + end) + + uninstall = + Enum.map_join(to_install, "\n", fn + "ash-functions" -> + "execute(\"DROP FUNCTION IF EXISTS ash_elixir_and(BOOLEAN, ANYCOMPATIBLE), ash_elixir_and(ANYCOMPATIBLE, ANYCOMPATIBLE), ash_elixir_or(ANYCOMPATIBLE, ANYCOMPATIBLE), ash_elixir_or(BOOLEAN, ANYCOMPATIBLE)\")" + + {_ext_name, version, _up_fn, down_fn} when is_function(down_fn, 1) -> + down_fn.(version) + + extension -> + "# execute(\"DROP EXTENSION IF EXISTS \\\"#{extension}\\\"\")" + end) + + contents = """ + defmodule #{inspect(module_name)} do + @moduledoc \"\"\" + Installs any extensions that are mentioned in the repo's `installed_extensions/0` callback + + This file was autogenerated with `mix ash_sqlite.generate_migrations` + \"\"\" + + use Ecto.Migration + + def up do + #{install} + end + + def down do + # Uncomment this if you actually want to uninstall the extensions + # when this migration is rolled back: + #{uninstall} + end + end + """ + + installed = Enum.map(requesteds, fn {name, _extension} -> name end) + + snapshot_contents = + Jason.encode!( + %{ + installed: installed + } + |> set_ash_functions(installed), + pretty: true + ) + + contents = format(contents, opts) + create_file(snapshot_file, snapshot_contents, force: true) + create_file(migration_file, contents) + end + end + end + + defp install_ash_functions(nil) do + """ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_elixir_or(left BOOLEAN, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) + AS $$ SELECT COALESCE(NULLIF($1, FALSE), $2) $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") 
+ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_elixir_or(left ANYCOMPATIBLE, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) + AS $$ SELECT COALESCE($1, $2) $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_elixir_and(left BOOLEAN, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) AS $$ + SELECT CASE + WHEN $1 IS TRUE THEN $2 + ELSE $1 + END $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_elixir_and(left ANYCOMPATIBLE, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) AS $$ + SELECT CASE + WHEN $1 IS NOT NULL THEN $2 + ELSE $1 + END $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_trim_whitespace(arr text[]) + RETURNS text[] AS $$ + DECLARE + start_index INT = 1; + end_index INT = array_length(arr, 1); + BEGIN + WHILE start_index <= end_index AND arr[start_index] = '' LOOP + start_index := start_index + 1; + END LOOP; + + WHILE end_index >= start_index AND arr[end_index] = '' LOOP + end_index := end_index - 1; + END LOOP; + + IF start_index > end_index THEN + RETURN ARRAY[]::text[]; + ELSE + RETURN arr[start_index : end_index]; + END IF; + END; $$ + LANGUAGE plpgsql + IMMUTABLE; + \"\"\") + """ + end + + defp install_ash_functions(0) do + """ + execute(\"\"\" + ALTER FUNCTION ash_elixir_or(left BOOLEAN, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) IMMUTABLE + \"\"\") + + execute(\"\"\" + ALTER FUNCTION ash_elixir_or(left ANYCOMPATIBLE, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) IMMUTABLE + \"\"\") + + execute(\"\"\" + ALTER FUNCTION ash_elixir_and(left BOOLEAN, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) IMMUTABLE + \"\"\") + + execute(\"\"\" + ALTER FUNCTION ash_elixir_and(left ANYCOMPATIBLE, in right ANYCOMPATIBLE, out f1 ANYCOMPATIBLE) IMMUTABLE + \"\"\") + + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_trim_whitespace(arr text[]) + RETURNS text[] AS $$ + DECLARE + start_index INT = 1; + end_index INT = array_length(arr, 1); + BEGIN + WHILE start_index <= end_index AND arr[start_index] = '' LOOP + start_index := start_index + 1; + END LOOP; + + WHILE end_index >= start_index AND arr[end_index] = '' LOOP + end_index := end_index - 1; + END LOOP; + + IF start_index > end_index THEN + RETURN ARRAY[]::text[]; + ELSE + RETURN arr[start_index : end_index]; + END IF; + END; $$ + LANGUAGE plpgsql + IMMUTABLE; + \"\"\") + """ + end + + defp set_ash_functions(snapshot, installed_extensions) do + if "ash-functions" in installed_extensions do + Map.put(snapshot, :ash_functions_version, @latest_ash_functions_version) + else + snapshot + end + end + + defp create_migrations(snapshots, opts) do + snapshots + |> Enum.group_by(& &1.repo) + |> Enum.each(fn {repo, snapshots} -> + deduped = deduplicate_snapshots(snapshots, opts) + + snapshots_with_operations = + deduped + |> fetch_operations(opts) + |> Enum.map(&add_order_to_operations/1) + + snapshots = Enum.map(snapshots_with_operations, &elem(&1, 0)) + + snapshots_with_operations + |> Enum.flat_map(&elem(&1, 1)) + |> Enum.uniq() + |> case do + [] -> + Mix.shell().info( + "No changes detected, so no migrations or snapshots have been created." + ) + + :ok + + operations -> + if opts.check do + IO.puts(""" + Migrations would have been generated, but the --check flag was provided. + + To see what migration would have been generated, run with the `--dry-run` + option instead. To generate those migrations, run without either flag. 
+ """) + + exit({:shutdown, 1}) + end + + operations + |> split_into_migrations() + |> Enum.each(fn operations -> + run_without_transaction? = + Enum.any?(operations, fn + %Operation.AddCustomIndex{index: %{concurrently: true}} -> + true + + _ -> + false + end) + + operations + |> organize_operations + |> build_up_and_down() + |> write_migration!(repo, opts, run_without_transaction?) + end) + + create_new_snapshot(snapshots, repo_name(repo), opts) + end + end) + end + + defp split_into_migrations(operations) do + operations + |> Enum.split_with(fn + %Operation.AddCustomIndex{index: %{concurrently: true}} -> + true + + _ -> + false + end) + |> case do + {[], ops} -> + [ops] + + {concurrent_indexes, ops} -> + [ops, concurrent_indexes] + end + end + + defp add_order_to_operations({snapshot, operations}) do + operations_with_order = Enum.map(operations, &add_order_to_operation(&1, snapshot.attributes)) + + {snapshot, operations_with_order} + end + + defp add_order_to_operation(%{attribute: attribute} = op, attributes) do + order = Enum.find_index(attributes, &(&1.source == attribute.source)) + attribute = Map.put(attribute, :order, order) + + %{op | attribute: attribute} + end + + defp add_order_to_operation(%{new_attribute: attribute} = op, attributes) do + order = Enum.find_index(attributes, &(&1.source == attribute.source)) + attribute = Map.put(attribute, :order, order) + + %{op | new_attribute: attribute} + end + + defp add_order_to_operation(op, _), do: op + + defp organize_operations([]), do: [] + + defp organize_operations(operations) do + operations + |> sort_operations() + |> streamline() + |> group_into_phases() + |> clean_phases() + end + + defp clean_phases(phases) do + phases + |> Enum.flat_map(fn + %{operations: []} -> + [] + + %{operations: operations} = phase -> + if Enum.all?(operations, &match?(%{commented?: true}, &1)) do + [%{phase | commented?: true}] + else + [phase] + end + + op -> + [op] + end) + end + + defp deduplicate_snapshots(snapshots, opts, existing_snapshots \\ []) do + grouped = + snapshots + |> Enum.group_by(fn snapshot -> + {snapshot.table, snapshot.schema} + end) + + old_snapshots = + Map.new(grouped, fn {key, [snapshot | _]} -> + old_snapshot = + if opts.no_shell? 
do + Enum.find(existing_snapshots, &(&1.table == snapshot.table)) + else + get_existing_snapshot(snapshot, opts) + end + + { + key, + old_snapshot + } + end) + + old_snapshots_list = Map.values(old_snapshots) + + old_snapshots = + Map.new(old_snapshots, fn {key, old_snapshot} -> + if old_snapshot do + {key, add_references_primary_key(old_snapshot, old_snapshots_list)} + else + {key, old_snapshot} + end + end) + + grouped + |> Enum.map(fn {key, [snapshot | _] = snapshots} -> + existing_snapshot = old_snapshots[key] + + {primary_key, identities} = merge_primary_keys(existing_snapshot, snapshots, opts) + + attributes = Enum.flat_map(snapshots, & &1.attributes) + + count_with_create = + snapshots + |> Enum.filter(& &1.has_create_action) + |> Enum.count() + + new_snapshot = %{ + snapshot + | attributes: merge_attributes(attributes, snapshot.table, count_with_create), + identities: snapshots |> Enum.flat_map(& &1.identities) |> Enum.uniq(), + custom_indexes: snapshots |> Enum.flat_map(& &1.custom_indexes) |> Enum.uniq(), + custom_statements: snapshots |> Enum.flat_map(& &1.custom_statements) |> Enum.uniq() + } + + all_identities = + new_snapshot.identities + |> Kernel.++(identities) + |> Enum.sort_by(& &1.name) + # We sort the identities by there being an identity with a matching name in the existing snapshot + # so that we prefer identities that currently exist over new ones + |> Enum.sort_by(fn identity -> + existing_snapshot + |> Kernel.||(%{}) + |> Map.get(:identities, []) + |> Enum.any?(fn existing_identity -> + existing_identity.name == identity.name + end) + |> Kernel.!() + end) + |> Enum.uniq_by(fn identity -> + {Enum.sort(identity.keys), identity.base_filter} + end) + + new_snapshot = %{new_snapshot | identities: all_identities} + + { + %{ + new_snapshot + | attributes: + Enum.map(new_snapshot.attributes, fn attribute -> + if attribute.source in primary_key do + %{attribute | primary_key?: true} + else + %{attribute | primary_key?: false} + end + end) + }, + existing_snapshot + } + end) + end + + defp merge_attributes(attributes, table, count) do + attributes + |> Enum.with_index() + |> Enum.map(fn {attr, i} -> Map.put(attr, :order, i) end) + |> Enum.group_by(& &1.source) + |> Enum.map(fn {source, attributes} -> + size = + attributes + |> Enum.map(& &1.size) + |> Enum.filter(& &1) + |> case do + [] -> + nil + + sizes -> + Enum.max(sizes) + end + + %{ + source: source, + type: merge_types(Enum.map(attributes, & &1.type), source, table), + size: size, + default: merge_defaults(Enum.map(attributes, & &1.default)), + allow_nil?: Enum.any?(attributes, & &1.allow_nil?) 
|| Enum.count(attributes) < count, + generated?: Enum.any?(attributes, & &1.generated?), + references: merge_references(Enum.map(attributes, & &1.references), source, table), + primary_key?: false, + order: attributes |> Enum.map(& &1.order) |> Enum.min() + } + end) + |> Enum.sort(&(&1.order < &2.order)) + |> Enum.map(&Map.drop(&1, [:order])) + end + + defp merge_references(references, name, table) do + references + |> Enum.reject(&is_nil/1) + |> Enum.uniq() + |> case do + [] -> + nil + + references -> + %{ + destination_attribute: merge_uniq!(references, table, :destination_attribute, name), + deferrable: merge_uniq!(references, table, :deferrable, name), + destination_attribute_default: + merge_uniq!(references, table, :destination_attribute_default, name), + destination_attribute_generated: + merge_uniq!(references, table, :destination_attribute_generated, name), + multitenancy: merge_uniq!(references, table, :multitenancy, name), + primary_key?: merge_uniq!(references, table, :primary_key?, name), + on_delete: merge_uniq!(references, table, :on_delete, name), + on_update: merge_uniq!(references, table, :on_update, name), + name: merge_uniq!(references, table, :name, name), + table: merge_uniq!(references, table, :table, name), + schema: merge_uniq!(references, table, :schema, name) + } + end + end + + defp merge_uniq!(references, table, field, attribute) do + references + |> Enum.map(&Map.get(&1, field)) + |> Enum.reject(&is_nil/1) + |> Enum.uniq() + |> case do + [] -> + nil + + [value] -> + value + + values -> + values = Enum.map_join(values, "\n", &" * #{inspect(&1)}") + + raise """ + Conflicting configurations for references for #{table}.#{attribute}: + + Values: + + #{values} + """ + end + end + + defp merge_types(types, name, table) do + types + |> Enum.uniq() + |> case do + [type] -> + type + + types -> + raise "Conflicting types for table `#{table}.#{name}`: #{inspect(types)}" + end + end + + defp merge_defaults(defaults) do + defaults + |> Enum.uniq() + |> case do + [default] -> default + _ -> "nil" + end + end + + defp merge_primary_keys(nil, [snapshot | _] = snapshots, opts) do + snapshots + |> Enum.map(&pkey_names(&1.attributes)) + |> Enum.uniq() + |> case do + [pkey_names] -> + {pkey_names, []} + + unique_primary_keys -> + unique_primary_key_names = + unique_primary_keys + |> Enum.with_index() + |> Enum.map_join("\n", fn {pkey, index} -> + "#{index}: #{inspect(pkey)}" + end) + + choice = + if opts.no_shell? do + raise "Unimplemented: cannot resolve primary key ambiguity without shell input" + else + message = """ + Which primary key should be used for the table `#{snapshot.table}` (enter the number)? + + #{unique_primary_key_names} + """ + + message + |> Mix.shell().prompt() + |> String.to_integer() + end + + identities = + unique_primary_keys + |> List.delete_at(choice) + |> Enum.map(fn pkey_names -> + pkey_name_string = Enum.join(pkey_names, "_") + name = snapshot.table <> "_" <> pkey_name_string + + %{ + keys: pkey_names, + name: name + } + end) + + primary_key = Enum.sort(Enum.at(unique_primary_keys, choice)) + + identities = + Enum.reject(identities, fn identity -> + Enum.sort(identity.keys) == primary_key + end) + + {primary_key, identities} + end + end + + defp merge_primary_keys(existing_snapshot, snapshots, opts) do + pkey_names = pkey_names(existing_snapshot.attributes) + + one_pkey_exists? = + Enum.any?(snapshots, fn snapshot -> + pkey_names(snapshot.attributes) == pkey_names + end) + + if one_pkey_exists? 
do + identities = + snapshots + |> Enum.map(&pkey_names(&1.attributes)) + |> Enum.uniq() + |> Enum.reject(&(&1 == pkey_names)) + |> Enum.map(fn pkey_names -> + pkey_name_string = Enum.join(pkey_names, "_") + name = existing_snapshot.table <> "_" <> pkey_name_string + + %{ + keys: pkey_names, + name: name + } + end) + + {pkey_names, identities} + else + merge_primary_keys(nil, snapshots, opts) + end + end + + defp pkey_names(attributes) do + attributes + |> Enum.filter(& &1.primary_key?) + |> Enum.map(& &1.source) + |> Enum.sort() + end + + defp migration_path(opts, repo) do + repo_name = repo_name(repo) + # Copied from ecto's mix task, thanks Ecto ❤️ + config = repo.config() + app = Keyword.fetch!(config, :otp_app) + + if opts.migration_path do + opts.migration_path + else + Path.join([Mix.Project.deps_paths()[app] || File.cwd!(), "priv"]) + end + |> Path.join(repo_name) + |> Path.join("migrations") + end + + defp repo_name(repo) do + repo |> Module.split() |> List.last() |> Macro.underscore() + end + + defp write_migration!({up, down}, repo, opts, run_without_transaction?) do + migration_path = migration_path(opts, repo) + + {migration_name, last_part} = + if opts.name do + {"#{timestamp(true)}_#{opts.name}", "#{opts.name}"} + else + count = + migration_path + |> Path.join("*_migrate_resources*") + |> Path.wildcard() + |> Enum.map(fn path -> + path + |> Path.basename() + |> String.split("_migrate_resources", parts: 2) + |> Enum.at(1) + |> Integer.parse() + |> case do + {integer, _} -> + integer + + _ -> + 0 + end + end) + |> Enum.max(fn -> 0 end) + |> Kernel.+(1) + + {"#{timestamp(true)}_migrate_resources#{count}", "migrate_resources#{count}"} + end + + migration_file = + migration_path + |> Path.join(migration_name <> ".exs") + + module_name = + Module.concat([repo, Migrations, Macro.camelize(last_part)]) + + module_attributes = + if run_without_transaction? do + """ + @disable_ddl_transaction true + @disable_migration_lock true + """ + end + + contents = """ + defmodule #{inspect(module_name)} do + @moduledoc \"\"\" + Updates resources based on their most recent snapshots. + + This file was autogenerated with `mix ash_sqlite.generate_migrations` + \"\"\" + + use Ecto.Migration + + #{module_attributes} + + def up do + #{up} + end + + def down do + #{down} + end + end + """ + + try do + contents = format(contents, opts) + + if opts.dry_run do + Mix.shell().info(contents) + else + create_file(migration_file, contents) + end + rescue + exception -> + reraise( + """ + Exception while formatting generated code: + #{Exception.format(:error, exception, __STACKTRACE__)} + + Code: + + #{add_line_numbers(contents)} + + To generate it unformatted anyway, but manually fix it, use the `--no-format` option. 
+ """, + __STACKTRACE__ + ) + end + end + + defp add_line_numbers(contents) do + lines = String.split(contents, "\n") + + digits = String.length(to_string(Enum.count(lines))) + + lines + |> Enum.with_index() + |> Enum.map_join("\n", fn {line, index} -> + "#{String.pad_trailing(to_string(index), digits, " ")} | #{line}" + end) + end + + defp create_new_snapshot(snapshots, repo_name, opts) do + unless opts.dry_run do + Enum.each(snapshots, fn snapshot -> + snapshot_binary = snapshot_to_binary(snapshot) + + snapshot_folder = + opts + |> snapshot_path(snapshot.repo) + |> Path.join(repo_name) + + snapshot_file = Path.join(snapshot_folder, "#{snapshot.table}/#{timestamp()}.json") + + File.mkdir_p(Path.dirname(snapshot_file)) + File.write!(snapshot_file, snapshot_binary, []) + + old_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}.json") + + if File.exists?(old_snapshot_folder) do + new_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}/initial.json") + File.rename(old_snapshot_folder, new_snapshot_folder) + end + end) + end + end + + @doc false + def build_up_and_down(phases) do + up = + Enum.map_join(phases, "\n", fn phase -> + phase + |> phase.__struct__.up() + |> Kernel.<>("\n") + |> maybe_comment(phase) + end) + + down = + phases + |> Enum.reverse() + |> Enum.map_join("\n", fn phase -> + phase + |> phase.__struct__.down() + |> Kernel.<>("\n") + |> maybe_comment(phase) + end) + + {up, down} + end + + defp maybe_comment(text, %{commented?: true}) do + text + |> String.split("\n") + |> Enum.map_join("\n", fn line -> + if String.starts_with?(line, "#") do + line + else + "# #{line}" + end + end) + end + + defp maybe_comment(text, _), do: text + + defp format(string, opts) do + if opts.format do + Code.format_string!(string, locals_without_parens: ecto_sql_locals_without_parens()) + else + string + end + rescue + exception -> + IO.puts(""" + Exception while formatting: + + #{inspect(exception)} + + #{inspect(string)} + """) + + reraise exception, __STACKTRACE__ + end + + defp ecto_sql_locals_without_parens do + path = File.cwd!() |> Path.join("deps/ecto_sql/.formatter.exs") + + if File.exists?(path) do + {opts, _} = Code.eval_file(path) + Keyword.get(opts, :locals_without_parens, []) + else + [] + end + end + + defp streamline(ops, acc \\ []) + defp streamline([], acc), do: Enum.reverse(acc) + + defp streamline( + [ + %Operation.AddAttribute{ + attribute: %{ + source: name + }, + schema: schema, + table: table + } = add + | rest + ], + acc + ) do + rest + |> Enum.take_while(fn + %custom{} when custom in [Operation.AddCustomStatement, Operation.RemoveCustomStatement] -> + false + + op -> + op.table == table && op.schema == schema + end) + |> Enum.with_index() + |> Enum.find(fn + {%Operation.AlterAttribute{ + new_attribute: %{source: ^name, references: references}, + old_attribute: %{source: ^name} + }, _} + when not is_nil(references) -> + true + + _ -> + false + end) + |> case do + nil -> + streamline(rest, [add | acc]) + + {alter, index} -> + new_attribute = Map.put(add.attribute, :references, alter.new_attribute.references) + streamline(List.delete_at(rest, index), [%{add | attribute: new_attribute} | acc]) + end + end + + defp streamline([first | rest], acc) do + streamline(rest, [first | acc]) + end + + defp group_into_phases(ops, current \\ nil, acc \\ []) + + defp group_into_phases([], nil, acc), do: Enum.reverse(acc) + + defp group_into_phases([], phase, acc) do + phase = %{phase | operations: Enum.reverse(phase.operations)} + Enum.reverse([phase | acc]) + end + 
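+  # A `CreateTable` operation opens a `Phase.Create` for its table/schema. The
+  # clauses below fold subsequent attribute operations for the same table into the
+  # open phase, and the catch-all clause at the bottom closes the phase again when
+  # an unrelated operation appears.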
+ defp group_into_phases( + [ + %Operation.CreateTable{table: table, schema: schema, multitenancy: multitenancy} | rest + ], + nil, + acc + ) do + group_into_phases( + rest, + %Phase.Create{table: table, schema: schema, multitenancy: multitenancy}, + acc + ) + end + + defp group_into_phases( + [%Operation.AddAttribute{table: table, schema: schema} = op | rest], + %{table: table, schema: schema} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.AlterAttribute{table: table, schema: schema} = op | rest], + %Phase.Alter{table: table, schema: schema} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.RenameAttribute{table: table, schema: schema} = op | rest], + %Phase.Alter{table: table, schema: schema} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.RemoveAttribute{table: table, schema: schema} = op | rest], + %{table: table, schema: schema} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases([%{no_phase: true} = op | rest], nil, acc) do + group_into_phases(rest, nil, [op | acc]) + end + + defp group_into_phases([operation | rest], nil, acc) do + phase = %Phase.Alter{ + operations: [operation], + multitenancy: operation.multitenancy, + table: operation.table, + schema: operation.schema + } + + group_into_phases(rest, phase, acc) + end + + defp group_into_phases(operations, phase, acc) do + phase = %{phase | operations: Enum.reverse(phase.operations)} + group_into_phases(operations, nil, [phase | acc]) + end + + defp sort_operations(ops, acc \\ []) + defp sort_operations([], acc), do: acc + + defp sort_operations([op | rest], []), do: sort_operations(rest, [op]) + + defp sort_operations([op | rest], acc) do + acc = Enum.reverse(acc) + + after_index = Enum.find_index(acc, &after?(op, &1)) + + new_acc = + if after_index do + acc + |> List.insert_at(after_index, op) + |> Enum.reverse() + else + [op | Enum.reverse(acc)] + end + + sort_operations(rest, new_acc) + end + + defp after?(_, %Operation.AlterDeferrability{direction: :down}), do: true + defp after?(%Operation.AlterDeferrability{direction: :up}, _), do: true + + defp after?( + %Operation.RemovePrimaryKey{}, + %Operation.DropForeignKey{} + ), + do: true + + defp after?( + %Operation.DropForeignKey{}, + %Operation.RemovePrimaryKey{} + ), + do: false + + defp after?(%Operation.RemovePrimaryKey{}, _), do: false + defp after?(_, %Operation.RemovePrimaryKey{}), do: true + defp after?(%Operation.RemovePrimaryKeyDown{}, _), do: true + defp after?(_, %Operation.RemovePrimaryKeyDown{}), do: false + + defp after?( + %Operation.AddCustomStatement{}, + _ + ), + do: true + + defp after?( + _, + %Operation.RemoveCustomStatement{} + ), + do: true + + defp after?( + %Operation.AddAttribute{attribute: %{order: l}, table: table, schema: schema}, + %Operation.AddAttribute{attribute: %{order: r}, table: table, schema: schema} + ), + do: l > r + + defp after?( + %Operation.RenameUniqueIndex{ + table: table, + schema: schema + }, + %{table: table, schema: schema} + ) do + true + end + + defp after?( + %Operation.AddUniqueIndex{ + table: table, + schema: schema + }, + %{table: table, schema: schema} + ) do + true + end + + defp after?( + %Operation.AddCheckConstraint{ + constraint: 
%{attribute: attribute_or_attributes}, + table: table, + multitenancy: multitenancy, + schema: schema + }, + %Operation.AddAttribute{table: table, attribute: %{source: source}, schema: schema} + ) do + source in List.wrap(attribute_or_attributes) || + (multitenancy.attribute && multitenancy.attribute in List.wrap(attribute_or_attributes)) + end + + defp after?( + %Operation.AddCustomIndex{ + table: table, + schema: schema + }, + %Operation.AddAttribute{table: table, schema: schema} + ) do + true + end + + defp after?( + %Operation.AddCustomIndex{ + table: table, + schema: schema, + index: %{ + concurrently: true + } + }, + %Operation.AddCustomIndex{ + table: table, + schema: schema, + index: %{ + concurrently: false + } + } + ) do + true + end + + defp after?( + %Operation.AddCheckConstraint{table: table, schema: schema, constraint: %{name: name}}, + %Operation.RemoveCheckConstraint{ + table: table, + schema: schema, + constraint: %{ + name: name + } + } + ), + do: true + + defp after?( + %Operation.RemoveCheckConstraint{ + table: table, + schema: schema, + constraint: %{ + name: name + } + }, + %Operation.AddCheckConstraint{table: table, schema: schema, constraint: %{name: name}} + ), + do: false + + defp after?( + %Operation.AddCheckConstraint{ + constraint: %{attribute: attribute_or_attributes}, + table: table, + schema: schema + }, + %Operation.AlterAttribute{table: table, new_attribute: %{source: source}, schema: schema} + ) do + source in List.wrap(attribute_or_attributes) + end + + defp after?( + %Operation.AddCheckConstraint{ + constraint: %{attribute: attribute_or_attributes}, + table: table, + schema: schema + }, + %Operation.RenameAttribute{ + table: table, + new_attribute: %{source: source}, + schema: schema + } + ) do + source in List.wrap(attribute_or_attributes) + end + + defp after?( + %Operation.RemoveUniqueIndex{table: table, schema: schema}, + %Operation.AddUniqueIndex{table: table, schema: schema} + ) do + false + end + + defp after?( + %Operation.RemoveUniqueIndex{table: table, schema: schema}, + %{table: table, schema: schema} + ) do + true + end + + defp after?( + %Operation.RemoveCheckConstraint{ + constraint: %{attribute: attributes}, + table: table, + schema: schema + }, + %Operation.RemoveAttribute{table: table, attribute: %{source: source}, schema: schema} + ) do + source in List.wrap(attributes) + end + + defp after?( + %Operation.RemoveCheckConstraint{ + constraint: %{attribute: attributes}, + table: table, + schema: schema + }, + %Operation.RenameAttribute{ + table: table, + old_attribute: %{source: source}, + schema: schema + } + ) do + source in List.wrap(attributes) + end + + defp after?(%Operation.AlterAttribute{table: table, schema: schema}, %Operation.DropForeignKey{ + table: table, + schema: schema, + direction: :up + }), + do: true + + defp after?( + %Operation.AlterAttribute{table: table, schema: schema}, + %Operation.DropForeignKey{ + table: table, + schema: schema, + direction: :down + } + ), + do: false + + defp after?( + %Operation.DropForeignKey{ + table: table, + schema: schema, + direction: :down + }, + %Operation.AlterAttribute{table: table, schema: schema} + ), + do: true + + defp after?(%Operation.AddAttribute{table: table, schema: schema}, %Operation.CreateTable{ + table: table, + schema: schema + }) do + true + end + + defp after?( + %Operation.AddAttribute{ + attribute: %{ + references: %{table: table, destination_attribute: name} + } + }, + %Operation.AddAttribute{table: table, attribute: %{source: name}} + ), + do: true + + defp 
after?( + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: false + } + }, + %Operation.AddAttribute{schema: schema, table: table, attribute: %{primary_key?: true}} + ), + do: true + + defp after?( + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: true + } + }, + %Operation.RemoveAttribute{ + schema: schema, + table: table, + attribute: %{primary_key?: true} + } + ), + do: true + + defp after?( + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: true + } + }, + %Operation.AlterAttribute{ + schema: schema, + table: table, + new_attribute: %{primary_key?: false}, + old_attribute: %{primary_key?: true} + } + ), + do: true + + defp after?( + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: true + } + }, + %Operation.AlterAttribute{ + schema: schema, + table: table, + new_attribute: %{primary_key?: false}, + old_attribute: %{primary_key?: true} + } + ), + do: true + + defp after?( + %Operation.RemoveAttribute{ + schema: schema, + table: table, + attribute: %{primary_key?: true} + }, + %Operation.AlterAttribute{ + table: table, + schema: schema, + new_attribute: %{ + primary_key?: true + }, + old_attribute: %{ + primary_key?: false + } + } + ), + do: true + + defp after?( + %Operation.AlterAttribute{ + schema: schema, + table: table, + new_attribute: %{primary_key?: false}, + old_attribute: %{ + primary_key?: true + } + }, + %Operation.AlterAttribute{ + table: table, + schema: schema, + new_attribute: %{ + primary_key?: true + }, + old_attribute: %{ + primary_key?: false + } + } + ), + do: true + + defp after?( + %Operation.AlterAttribute{ + schema: schema, + table: table, + new_attribute: %{primary_key?: false}, + old_attribute: %{ + primary_key?: true + } + }, + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: true + } + } + ), + do: false + + defp after?( + %Operation.AlterAttribute{ + table: table, + schema: schema, + new_attribute: %{primary_key?: false}, + old_attribute: %{primary_key?: true} + }, + %Operation.AddAttribute{ + table: table, + schema: schema, + attribute: %{ + primary_key?: true + } + } + ), + do: true + + defp after?( + %Operation.AlterAttribute{ + new_attribute: %{ + references: %{destination_attribute: destination_attribute, table: table} + } + }, + %Operation.AddUniqueIndex{identity: %{keys: keys}, table: table} + ) do + destination_attribute in keys + end + + defp after?( + %Operation.AlterAttribute{ + new_attribute: %{references: %{table: table, destination_attribute: source}} + }, + %Operation.AlterAttribute{ + new_attribute: %{ + source: source + }, + table: table + } + ) do + true + end + + defp after?( + %Operation.AlterAttribute{ + new_attribute: %{ + source: source + }, + table: table + }, + %Operation.AlterAttribute{ + new_attribute: %{references: %{table: table, destination_attribute: source}} + } + ) do + false + end + + defp after?( + %Operation.RemoveAttribute{attribute: %{source: source}, table: table}, + %Operation.AlterAttribute{ + old_attribute: %{ + references: %{table: table, destination_attribute: source} + } + } + ), + do: true + + defp after?( + %Operation.AlterAttribute{ + new_attribute: %{ + references: %{table: table, destination_attribute: name} + } + }, + %Operation.AddAttribute{table: table, attribute: %{source: name}} + ), + do: true + + defp after?(%Operation.AddCheckConstraint{table: table, schema: schema}, %Operation.CreateTable{ + 
table: table, + schema: schema + }) do + true + end + + defp after?( + %Operation.AlterAttribute{new_attribute: %{references: references}, table: table}, + %{table: table} + ) + when not is_nil(references), + do: true + + defp after?(%Operation.AddCheckConstraint{}, _), do: true + defp after?(%Operation.RemoveCheckConstraint{}, _), do: true + + defp after?(_, _), do: false + + defp fetch_operations(snapshots, opts) do + snapshots + |> Enum.map(fn {snapshot, existing_snapshot} -> + {snapshot, do_fetch_operations(snapshot, existing_snapshot, opts)} + end) + |> Enum.reject(fn {_, ops} -> + Enum.empty?(ops) + end) + end + + defp do_fetch_operations(snapshot, existing_snapshot, opts, acc \\ []) + + defp do_fetch_operations( + %{schema: new_schema} = snapshot, + %{schema: old_schema}, + opts, + [] + ) + when new_schema != old_schema do + do_fetch_operations(snapshot, nil, opts, []) + end + + defp do_fetch_operations(snapshot, nil, opts, acc) do + empty_snapshot = %{ + attributes: [], + identities: [], + schema: nil, + custom_indexes: [], + custom_statements: [], + check_constraints: [], + table: snapshot.table, + repo: snapshot.repo, + base_filter: nil, + empty?: true, + multitenancy: %{ + attribute: nil, + strategy: nil, + global: nil + } + } + + do_fetch_operations(snapshot, empty_snapshot, opts, [ + %Operation.CreateTable{ + table: snapshot.table, + schema: snapshot.schema, + multitenancy: snapshot.multitenancy, + old_multitenancy: empty_snapshot.multitenancy + } + | acc + ]) + end + + defp do_fetch_operations(snapshot, old_snapshot, opts, acc) do + attribute_operations = attribute_operations(snapshot, old_snapshot, opts) + pkey_operations = pkey_operations(snapshot, old_snapshot, attribute_operations) + + rewrite_all_identities? = changing_multitenancy_affects_identities?(snapshot, old_snapshot) + + custom_statements_to_add = + snapshot.custom_statements + |> Enum.reject(fn statement -> + Enum.any?(old_snapshot.custom_statements, &(&1.name == statement.name)) + end) + |> Enum.map(&%Operation.AddCustomStatement{statement: &1, table: snapshot.table}) + + custom_statements_to_remove = + old_snapshot.custom_statements + |> Enum.reject(fn old_statement -> + Enum.any?(snapshot.custom_statements, &(&1.name == old_statement.name)) + end) + |> Enum.map(&%Operation.RemoveCustomStatement{statement: &1, table: snapshot.table}) + + custom_statements_to_alter = + snapshot.custom_statements + |> Enum.flat_map(fn statement -> + old_statement = Enum.find(old_snapshot.custom_statements, &(&1.name == statement.name)) + + if old_statement && + (old_statement.code? != statement.code? || + old_statement.up != statement.up || old_statement.down != statement.down) do + [ + %Operation.RemoveCustomStatement{statement: old_statement, table: snapshot.table}, + %Operation.AddCustomStatement{statement: statement, table: snapshot.table} + ] + else + [] + end + end) + + custom_indexes_to_add = + Enum.filter(snapshot.custom_indexes, fn index -> + !Enum.find(old_snapshot.custom_indexes, fn old_custom_index -> + indexes_match?(snapshot.table, old_custom_index, index) + end) + end) + |> Enum.map(fn custom_index -> + %Operation.AddCustomIndex{ + index: custom_index, + table: snapshot.table, + schema: snapshot.schema, + multitenancy: snapshot.multitenancy, + base_filter: snapshot.base_filter + } + end) + + custom_indexes_to_remove = + Enum.filter(old_snapshot.custom_indexes, fn old_custom_index -> + rewrite_all_identities? 
|| + !Enum.find(snapshot.custom_indexes, fn index -> + indexes_match?(snapshot.table, old_custom_index, index) + end) + end) + |> Enum.map(fn custom_index -> + %Operation.RemoveCustomIndex{ + index: custom_index, + table: old_snapshot.table, + schema: old_snapshot.schema, + multitenancy: old_snapshot.multitenancy, + base_filter: old_snapshot.base_filter + } + end) + + unique_indexes_to_remove = + if rewrite_all_identities? do + old_snapshot.identities + else + Enum.reject(old_snapshot.identities, fn old_identity -> + Enum.find(snapshot.identities, fn identity -> + identity.name == old_identity.name && + Enum.sort(old_identity.keys) == Enum.sort(identity.keys) && + old_identity.base_filter == identity.base_filter + end) + end) + end + |> Enum.map(fn identity -> + %Operation.RemoveUniqueIndex{ + identity: identity, + table: snapshot.table, + schema: snapshot.schema + } + end) + + unique_indexes_to_rename = + if rewrite_all_identities? do + [] + else + snapshot.identities + |> Enum.map(fn identity -> + Enum.find_value(old_snapshot.identities, fn old_identity -> + if old_identity.name == identity.name && + old_identity.index_name != identity.index_name do + {old_identity, identity} + end + end) + end) + |> Enum.filter(& &1) + end + |> Enum.map(fn {old_identity, new_identity} -> + %Operation.RenameUniqueIndex{ + old_identity: old_identity, + new_identity: new_identity, + schema: snapshot.schema, + table: snapshot.table + } + end) + + unique_indexes_to_add = + if rewrite_all_identities? do + snapshot.identities + else + Enum.reject(snapshot.identities, fn identity -> + Enum.find(old_snapshot.identities, fn old_identity -> + old_identity.name == identity.name && + Enum.sort(old_identity.keys) == Enum.sort(identity.keys) && + old_identity.base_filter == identity.base_filter + end) + end) + end + |> Enum.map(fn identity -> + %Operation.AddUniqueIndex{ + identity: identity, + schema: snapshot.schema, + table: snapshot.table + } + end) + + constraints_to_add = + snapshot.check_constraints + |> Enum.reject(fn constraint -> + Enum.find(old_snapshot.check_constraints, fn old_constraint -> + old_constraint.check == constraint.check && old_constraint.name == constraint.name + end) + end) + |> Enum.map(fn constraint -> + %Operation.AddCheckConstraint{ + constraint: constraint, + table: snapshot.table, + schema: snapshot.schema + } + end) + + constraints_to_remove = + old_snapshot.check_constraints + |> Enum.reject(fn old_constraint -> + Enum.find(snapshot.check_constraints, fn constraint -> + old_constraint.check == constraint.check && old_constraint.name == constraint.name + end) + end) + |> Enum.map(fn old_constraint -> + %Operation.RemoveCheckConstraint{ + constraint: old_constraint, + table: old_snapshot.table, + schema: old_snapshot.schema + } + end) + + [ + pkey_operations, + unique_indexes_to_remove, + attribute_operations, + unique_indexes_to_add, + unique_indexes_to_rename, + constraints_to_remove, + constraints_to_add, + custom_indexes_to_add, + custom_indexes_to_remove, + custom_statements_to_add, + custom_statements_to_remove, + custom_statements_to_alter, + acc + ] + |> Enum.concat() + |> Enum.map(&Map.put(&1, :multitenancy, snapshot.multitenancy)) + |> Enum.map(&Map.put(&1, :old_multitenancy, old_snapshot.multitenancy)) + end + + defp indexes_match?(table, left, right) do + left = + left + |> Map.update!(:fields, fn fields -> + Enum.map(fields, &to_string/1) + end) + |> add_custom_index_name(table) + + right = + right + |> Map.update!(:fields, fn fields -> + Enum.map(fields, &to_string/1) + 
end) + |> add_custom_index_name(table) + + left == right + end + + defp add_custom_index_name(custom_index, table) do + custom_index + |> Map.put_new_lazy(:name, fn -> + AshSqlite.CustomIndex.name(table, %{fields: custom_index.fields}) + end) + |> Map.update!( + :name, + &(&1 || AshSqlite.CustomIndex.name(table, %{fields: custom_index.fields})) + ) + end + + defp pkey_operations(snapshot, old_snapshot, attribute_operations) do + if old_snapshot[:empty?] do + [] + else + must_drop_pkey? = + Enum.any?( + attribute_operations, + fn + %Operation.AlterAttribute{ + old_attribute: %{primary_key?: old_primary_key}, + new_attribute: %{primary_key?: new_primary_key} + } + when old_primary_key != new_primary_key -> + true + + %Operation.AddAttribute{ + attribute: %{primary_key?: true} + } -> + true + + _ -> + false + end + ) + + if must_drop_pkey? do + [ + %Operation.RemovePrimaryKey{schema: snapshot.schema, table: snapshot.table}, + %Operation.RemovePrimaryKeyDown{schema: snapshot.schema, table: snapshot.table} + ] + else + [] + end + end + end + + defp attribute_operations(snapshot, old_snapshot, opts) do + attributes_to_add = + Enum.reject(snapshot.attributes, fn attribute -> + Enum.find(old_snapshot.attributes, &(&1.source == attribute.source)) + end) + + attributes_to_remove = + Enum.reject(old_snapshot.attributes, fn attribute -> + Enum.find(snapshot.attributes, &(&1.source == attribute.source)) + end) + + {attributes_to_add, attributes_to_remove, attributes_to_rename} = + resolve_renames(snapshot.table, attributes_to_add, attributes_to_remove, opts) + + attributes_to_alter = + snapshot.attributes + |> Enum.map(fn attribute -> + {attribute, + Enum.find( + old_snapshot.attributes, + &(&1.source == attribute.source && + attributes_unequal?(&1, attribute, snapshot.repo, old_snapshot, snapshot)) + )} + end) + |> Enum.filter(&elem(&1, 1)) + + rename_attribute_events = + Enum.map(attributes_to_rename, fn {new, old} -> + %Operation.RenameAttribute{ + new_attribute: new, + old_attribute: old, + table: snapshot.table, + schema: snapshot.schema + } + end) + + add_attribute_events = + Enum.flat_map(attributes_to_add, fn attribute -> + if attribute.references do + reference_ops = + if attribute.references.deferrable do + [ + %Operation.AlterDeferrability{ + table: snapshot.table, + schema: snapshot.schema, + references: attribute.references, + direction: :up + }, + %Operation.AlterDeferrability{ + table: snapshot.table, + schema: snapshot.schema, + references: Map.get(attribute, :references), + direction: :down + } + ] + else + [] + end + + [ + %Operation.AddAttribute{ + attribute: Map.delete(attribute, :references), + schema: snapshot.schema, + table: snapshot.table + }, + %Operation.AlterAttribute{ + old_attribute: Map.delete(attribute, :references), + new_attribute: attribute, + schema: snapshot.schema, + table: snapshot.table + }, + %Operation.DropForeignKey{ + attribute: attribute, + table: snapshot.table, + schema: snapshot.schema, + multitenancy: Map.get(attribute, :multitenancy), + direction: :down + } + ] ++ reference_ops + else + [ + %Operation.AddAttribute{ + attribute: attribute, + table: snapshot.table, + schema: snapshot.schema + } + ] + end + end) + + alter_attribute_events = + Enum.flat_map(attributes_to_alter, fn {new_attribute, old_attribute} -> + deferrable_ops = + if differently_deferrable?(new_attribute, old_attribute) do + [ + %Operation.AlterDeferrability{ + table: snapshot.table, + schema: snapshot.schema, + references: new_attribute.references, + direction: :up + }, + 
%Operation.AlterDeferrability{ + table: snapshot.table, + schema: snapshot.schema, + references: Map.get(old_attribute, :references), + direction: :down + } + ] + else + [] + end + + if has_reference?(old_snapshot.multitenancy, old_attribute) and + Map.get(old_attribute, :references) != Map.get(new_attribute, :references) do + redo_deferrability = + if differently_deferrable?(new_attribute, old_attribute) do + [] + else + [ + %Operation.AlterDeferrability{ + table: snapshot.table, + schema: snapshot.schema, + references: new_attribute.references, + direction: :up + } + ] + end + + old_and_alter = + [ + %Operation.DropForeignKey{ + attribute: old_attribute, + table: snapshot.table, + schema: snapshot.schema, + multitenancy: old_snapshot.multitenancy, + direction: :up + }, + %Operation.AlterAttribute{ + new_attribute: new_attribute, + old_attribute: old_attribute, + schema: snapshot.schema, + table: snapshot.table + } + ] ++ redo_deferrability + + if has_reference?(snapshot.multitenancy, new_attribute) do + reference_ops = [ + %Operation.DropForeignKey{ + attribute: new_attribute, + table: snapshot.table, + schema: snapshot.schema, + multitenancy: snapshot.multitenancy, + direction: :down + } + ] + + old_and_alter ++ + reference_ops + else + old_and_alter + end + else + [ + %Operation.AlterAttribute{ + new_attribute: Map.delete(new_attribute, :references), + old_attribute: Map.delete(old_attribute, :references), + schema: snapshot.schema, + table: snapshot.table + } + ] + end + |> Enum.concat(deferrable_ops) + end) + + remove_attribute_events = + Enum.map(attributes_to_remove, fn attribute -> + %Operation.RemoveAttribute{ + attribute: attribute, + table: snapshot.table, + schema: snapshot.schema, + commented?: !opts.drop_columns + } + end) + + add_attribute_events ++ + alter_attribute_events ++ remove_attribute_events ++ rename_attribute_events + end + + defp differently_deferrable?(%{references: %{deferrable: left}}, %{ + references: %{deferrable: right} + }) + when left != right do + true + end + + defp differently_deferrable?(%{references: %{deferrable: same}}, %{ + references: %{deferrable: same} + }) do + false + end + + defp differently_deferrable?(%{references: %{deferrable: left}}, _) when left != false, do: true + + defp differently_deferrable?(_, %{references: %{deferrable: right}}) when right != false, + do: true + + defp differently_deferrable?(_, _), do: false + + # This exists to handle the fact that the remapping of the key name -> source caused attributes + # to be considered unequal. We ignore things that only differ in that way using this function. 
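+  # Illustrative example: an older snapshot may carry the column as `name: "title"`
+  # while a freshly generated one carries `source: :title`; `clean_for_equality/2`
+  # below normalizes both to string `name`/`source` values, so the pair compares as
+  # equal and no spurious `AlterAttribute` operation is generated.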
+ defp attributes_unequal?(left, right, repo, _old_snapshot, _new_snapshot) do + left = clean_for_equality(left, repo) + + right = clean_for_equality(right, repo) + + left != right + end + + defp clean_for_equality(attribute, repo) do + cond do + attribute[:source] -> + Map.put(attribute, :name, attribute[:source]) + |> Map.update!(:source, &to_string/1) + |> Map.update!(:name, &to_string/1) + + attribute[:name] -> + attribute + |> Map.put(:source, attribute[:name]) + |> Map.update!(:source, &to_string/1) + |> Map.update!(:name, &to_string/1) + + true -> + attribute + end + |> add_schema(repo) + |> add_ignore() + |> then(fn + # only :integer cares about `destination_attribute_generated` + # so we clean it here to avoid generating unnecessary snapshots + # during the transitionary period of adding it + %{type: type, references: references} = attribute + when not is_nil(references) and type != :integer -> + Map.update!(attribute, :references, &Map.delete(&1, :destination_attribute_generated)) + + attribute -> + attribute + end) + end + + defp add_ignore(%{references: references} = attribute) when is_map(references) do + %{attribute | references: Map.put_new(references, :ignore?, false)} + end + + defp add_ignore(attribute) do + attribute + end + + defp add_schema(%{references: references} = attribute, repo) when is_map(references) do + schema = Map.get(references, :schema) || repo.config()[:default_prefix] || "public" + + %{ + attribute + | references: Map.put(references, :schema, schema) + } + end + + defp add_schema(attribute, _) do + attribute + end + + def changing_multitenancy_affects_identities?(snapshot, old_snapshot) do + snapshot.multitenancy != old_snapshot.multitenancy || + snapshot.base_filter != old_snapshot.base_filter + end + + def has_reference?(multitenancy, attribute) do + not is_nil(Map.get(attribute, :references)) + end + + def get_existing_snapshot(snapshot, opts) do + repo_name = snapshot.repo |> Module.split() |> List.last() |> Macro.underscore() + + folder = + opts + |> snapshot_path(snapshot.repo) + |> Path.join(repo_name) + + snapshot_folder = Path.join(folder, snapshot.table) + + if File.exists?(snapshot_folder) do + snapshot_folder + |> File.ls!() + |> Enum.filter(&String.ends_with?(&1, ".json")) + |> Enum.map(&String.trim_trailing(&1, ".json")) + |> Enum.map(&Integer.parse/1) + |> Enum.filter(fn + {_int, remaining} -> + remaining == "" + + :error -> + false + end) + |> Enum.map(&elem(&1, 0)) + |> case do + [] -> + get_old_snapshot(folder, snapshot) + + timestamps -> + timestamp = Enum.max(timestamps) + snapshot_file = Path.join(snapshot_folder, "#{timestamp}.json") + + snapshot_file + |> File.read!() + |> load_snapshot() + end + else + get_old_snapshot(folder, snapshot) + end + end + + defp get_old_snapshot(folder, snapshot) do + old_snapshot_file = Path.join(folder, "#{snapshot.table}.json") + # This is adapter code for the old version, where migrations were stored in a flat directory + if File.exists?(old_snapshot_file) do + old_snapshot_file + |> File.read!() + |> load_snapshot() + end + end + + defp resolve_renames(_table, adding, [], _opts), do: {adding, [], []} + + defp resolve_renames(_table, [], removing, _opts), do: {[], removing, []} + + defp resolve_renames(table, [adding], [removing], opts) do + if renaming_to?(table, removing.source, adding.source, opts) do + {[], [], [{adding, removing}]} + else + {[adding], [removing], []} + end + end + + defp resolve_renames(table, adding, [removing | rest], opts) do + {new_adding, new_removing, new_renames} = + 
if renaming?(table, removing, opts) do + new_attribute = + if opts.no_shell? do + raise "Unimplemented: Cannot get new_attribute without the shell!" + else + get_new_attribute(adding) + end + + {adding -- [new_attribute], [], [{new_attribute, removing}]} + else + {adding, [removing], []} + end + + {rest_adding, rest_removing, rest_renames} = resolve_renames(table, new_adding, rest, opts) + + {new_adding ++ rest_adding, new_removing ++ rest_removing, rest_renames ++ new_renames} + end + + defp renaming_to?(table, removing, adding, opts) do + if opts.no_shell? do + raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing} to #{table}.#{adding}? without shell input" + else + Mix.shell().yes?("Are you renaming #{table}.#{removing} to #{table}.#{adding}?") + end + end + + defp renaming?(table, removing, opts) do + if opts.no_shell? do + raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing.source}? without shell input" + else + Mix.shell().yes?("Are you renaming #{table}.#{removing.source}?") + end + end + + defp get_new_attribute(adding, tries \\ 3) + + defp get_new_attribute(_adding, 0) do + raise "Could not get matching name after 3 attempts." + end + + defp get_new_attribute(adding, tries) do + name = + Mix.shell().prompt( + "What are you renaming it to?: #{Enum.map_join(adding, ", ", & &1.source)}" + ) + + name = + if name do + String.trim(name) + else + nil + end + + case Enum.find(adding, &(to_string(&1.source) == name)) do + nil -> get_new_attribute(adding, tries - 1) + new_attribute -> new_attribute + end + end + + defp timestamp(require_unique? \\ false) do + # Alright, this is silly I know. But migration ids need to be unique + # and "synthesizing" that behavior is significantly more annoying than + # just waiting a bit, ensuring the migration versions are unique. 
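+  # The generated version only has second resolution (e.g. "20230922225222"), so
+  # when uniqueness is required we sleep 1.5s first to guarantee distinct versions
+  # within a single run.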
+ if require_unique?, do: :timer.sleep(1500) + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + + defp pad(i) when i < 10, do: <> + defp pad(i), do: to_string(i) + + def get_snapshots(resource, all_resources) do + Code.ensure_compiled!(AshSqlite.DataLayer.Info.repo(resource)) + + if AshSqlite.DataLayer.Info.polymorphic?(resource) do + all_resources + |> Enum.flat_map(&Ash.Resource.Info.relationships/1) + |> Enum.filter(&(&1.destination == resource)) + |> Enum.reject(&(&1.type == :belongs_to)) + |> Enum.filter(& &1.context[:data_layer][:table]) + |> Enum.uniq() + |> Enum.map(fn relationship -> + resource + |> do_snapshot( + relationship.context[:data_layer][:table], + relationship.context[:data_layer][:schema] + ) + |> Map.update!(:identities, fn identities -> + identity_index_names = AshSqlite.DataLayer.Info.identity_index_names(resource) + + Enum.map(identities, fn identity -> + Map.put( + identity, + :index_name, + identity_index_names[identity.name] || + "#{relationship.context[:data_layer][:table]}_#{identity.name}_index" + ) + end) + end) + |> Map.update!(:attributes, fn attributes -> + Enum.map(attributes, fn attribute -> + destination_attribute_source = + relationship.destination + |> Ash.Resource.Info.attribute(relationship.destination_attribute) + |> Map.get(:source) + + if attribute.source == destination_attribute_source do + source_attribute = + Ash.Resource.Info.attribute(relationship.source, relationship.source_attribute) + + Map.put(attribute, :references, %{ + destination_attribute: source_attribute.source, + destination_attribute_default: + default( + source_attribute, + relationship.destination, + AshSqlite.DataLayer.Info.repo(relationship.destination) + ), + deferrable: false, + destination_attribute_generated: source_attribute.generated?, + multitenancy: multitenancy(relationship.source), + table: AshSqlite.DataLayer.Info.table(relationship.source), + schema: AshSqlite.DataLayer.Info.schema(relationship.source), + on_delete: AshSqlite.DataLayer.Info.polymorphic_on_delete(relationship.source), + on_update: AshSqlite.DataLayer.Info.polymorphic_on_update(relationship.source), + primary_key?: source_attribute.primary_key?, + name: + AshSqlite.DataLayer.Info.polymorphic_name(relationship.source) || + "#{relationship.context[:data_layer][:table]}_#{destination_attribute_source}_fkey" + }) + else + attribute + end + end) + end) + end) + else + [do_snapshot(resource, AshSqlite.DataLayer.Info.table(resource))] + end + end + + defp do_snapshot(resource, table, schema \\ nil) do + snapshot = %{ + attributes: attributes(resource, table), + identities: identities(resource), + table: table || AshSqlite.DataLayer.Info.table(resource), + schema: schema || AshSqlite.DataLayer.Info.schema(resource), + check_constraints: check_constraints(resource), + custom_indexes: custom_indexes(resource), + custom_statements: custom_statements(resource), + repo: AshSqlite.DataLayer.Info.repo(resource), + multitenancy: multitenancy(resource), + base_filter: AshSqlite.DataLayer.Info.base_filter_sql(resource), + has_create_action: has_create_action?(resource) + } + + hash = + :sha256 + |> :crypto.hash(inspect(snapshot)) + |> Base.encode16() + + Map.put(snapshot, :hash, hash) + end + + defp has_create_action?(resource) do + resource + |> Ash.Resource.Info.actions() + |> Enum.any?(&(&1.type == :create)) + end + + defp check_constraints(resource) do + resource + |> AshSqlite.DataLayer.Info.check_constraints() + |> Enum.filter(& &1.check) 
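+    # Only constraints that actually define `check` SQL are kept; a constraint
+    # without it has nothing to emit in a migration.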
+ |> case do + [] -> + [] + + constraints -> + base_filter = Ash.Resource.Info.base_filter(resource) + + if base_filter && !AshSqlite.DataLayer.Info.base_filter_sql(resource) do + raise """ + Cannot create a check constraint for a resource with a base filter without also configuring `base_filter_sql`. + + You must provide the `base_filter_sql` option, or manually create add the check constraint to your migrations. + """ + end + + constraints + end + |> Enum.map(fn constraint -> + attributes = + constraint.attribute + |> List.wrap() + |> Enum.map(fn attribute -> + attr = + resource + |> Ash.Resource.Info.attribute(attribute) + + attr.source || attr.name + end) + + %{ + name: constraint.name, + attribute: attributes, + check: constraint.check, + base_filter: AshSqlite.DataLayer.Info.base_filter_sql(resource) + } + end) + end + + defp custom_indexes(resource) do + resource + |> AshSqlite.DataLayer.Info.custom_indexes() + |> Enum.map(fn custom_index -> + Map.take(custom_index, AshSqlite.CustomIndex.fields()) + end) + end + + defp custom_statements(resource) do + resource + |> AshSqlite.DataLayer.Info.custom_statements() + |> Enum.map(fn custom_statement -> + Map.take(custom_statement, AshSqlite.Statement.fields()) + end) + end + + defp multitenancy(resource) do + strategy = Ash.Resource.Info.multitenancy_strategy(resource) + attribute = Ash.Resource.Info.multitenancy_attribute(resource) + global = Ash.Resource.Info.multitenancy_global?(resource) + + %{ + strategy: strategy, + attribute: attribute, + global: global + } + end + + defp attributes(resource, table) do + repo = AshSqlite.DataLayer.Info.repo(resource) + ignored = AshSqlite.DataLayer.Info.migration_ignore_attributes(resource) || [] + + resource + |> Ash.Resource.Info.attributes() + |> Enum.reject(&(&1.name in ignored)) + |> Enum.map( + &Map.take(&1, [ + :name, + :source, + :type, + :default, + :allow_nil?, + :generated?, + :primary_key?, + :constraints + ]) + ) + |> Enum.map(fn attribute -> + default = default(attribute, resource, repo) + + type = + AshSqlite.DataLayer.Info.migration_types(resource)[attribute.name] || + migration_type(attribute.type, attribute.constraints) + + type = + if :erlang.function_exported(repo, :override_migration_type, 1) do + repo.override_migration_type(type) + else + type + end + + {type, size} = + case type do + {:varchar, size} -> + {:varchar, size} + + {:binary, size} -> + {:binary, size} + + {other, size} when is_atom(other) and is_integer(size) -> + {other, size} + + other -> + {other, nil} + end + + attribute + |> Map.put(:default, default) + |> Map.put(:size, size) + |> Map.put(:type, type) + |> Map.put(:source, attribute.source || attribute.name) + |> Map.drop([:name, :constraints]) + end) + |> Enum.map(fn attribute -> + references = find_reference(resource, table, attribute) + + Map.put(attribute, :references, references) + end) + end + + defp find_reference(resource, table, attribute) do + Enum.find_value(Ash.Resource.Info.relationships(resource), fn relationship -> + source_attribute_name = + relationship.source + |> Ash.Resource.Info.attribute(relationship.source_attribute) + |> then(fn attribute -> + attribute.source || attribute.name + end) + + if attribute.source == source_attribute_name && relationship.type == :belongs_to && + foreign_key?(relationship) do + configured_reference = + configured_reference(resource, table, attribute.source || attribute.name, relationship) + + unless Map.get(configured_reference, :ignore?) 
do + destination_attribute = + Ash.Resource.Info.attribute( + relationship.destination, + relationship.destination_attribute + ) + + destination_attribute_source = + destination_attribute.source || destination_attribute.name + + %{ + destination_attribute: destination_attribute_source, + deferrable: configured_reference.deferrable, + multitenancy: multitenancy(relationship.destination), + on_delete: configured_reference.on_delete, + on_update: configured_reference.on_update, + name: configured_reference.name, + primary_key?: destination_attribute.primary_key?, + schema: + relationship.context[:data_layer][:schema] || + AshSqlite.DataLayer.Info.schema(relationship.destination) || + AshSqlite.DataLayer.Info.repo(relationship.destination).config()[ + :default_prefix + ], + table: + relationship.context[:data_layer][:table] || + AshSqlite.DataLayer.Info.table(relationship.destination) + } + end + end + end) + end + + defp configured_reference(resource, table, attribute, relationship) do + ref = + resource + |> AshSqlite.DataLayer.Info.references() + |> Enum.find(&(&1.relationship == relationship.name)) + |> Kernel.||(%{ + on_delete: nil, + on_update: nil, + deferrable: false, + schema: + relationship.context[:data_layer][:schema] || + AshSqlite.DataLayer.Info.schema(relationship.destination) || + AshSqlite.DataLayer.Info.repo(relationship.destination).config()[:default_prefix], + name: nil, + ignore?: false + }) + + ref + |> Map.put(:name, ref.name || "#{table}_#{attribute}_fkey") + |> Map.put( + :primary_key?, + Ash.Resource.Info.attribute( + relationship.destination, + relationship.destination_attribute + ).primary_key? + ) + end + + def get_migration_type(type, constraints), do: migration_type(type, constraints) + + defp migration_type({:array, type}, constraints), + do: {:array, migration_type(type, constraints)} + + defp migration_type(Ash.Type.CiString, _), do: :citext + defp migration_type(Ash.Type.UUID, _), do: :uuid + defp migration_type(Ash.Type.Integer, _), do: :bigint + + defp migration_type(other, constraints) do + type = Ash.Type.get_type(other) + + if Ash.Type.NewType.new_type?(type) do + migration_type( + Ash.Type.NewType.subtype_of(type), + Ash.Type.NewType.constraints(type, constraints) + ) + else + migration_type_from_storage_type(Ash.Type.storage_type(other, constraints)) + end + end + + defp migration_type_from_storage_type(:string), do: :text + defp migration_type_from_storage_type(storage_type), do: storage_type + + defp foreign_key?(relationship) do + Ash.DataLayer.data_layer(relationship.source) == AshSqlite.DataLayer && + AshSqlite.DataLayer.Info.repo(relationship.source) == + AshSqlite.DataLayer.Info.repo(relationship.destination) + end + + defp identities(resource) do + identity_index_names = AshSqlite.DataLayer.Info.identity_index_names(resource) + + resource + |> Ash.Resource.Info.identities() + |> case do + [] -> + [] + + identities -> + base_filter = Ash.Resource.Info.base_filter(resource) + + if base_filter && !AshSqlite.DataLayer.Info.base_filter_sql(resource) do + raise """ + Cannot create a unique index for a resource with a base filter without also configuring `base_filter_sql`. 
+ + You must provide the `base_filter_sql` option, or skip unique indexes with `skip_unique_indexes`" + """ + end + + identities + end + |> Enum.reject(fn identity -> + identity.name in AshSqlite.DataLayer.Info.skip_unique_indexes(resource) + end) + |> Enum.filter(fn identity -> + Enum.all?(identity.keys, fn key -> + Ash.Resource.Info.attribute(resource, key) + end) + end) + |> Enum.sort_by(& &1.name) + |> Enum.map(&Map.take(&1, [:name, :keys])) + |> Enum.map(fn %{keys: keys} = identity -> + %{ + identity + | keys: + Enum.map(keys, fn key -> + attribute = Ash.Resource.Info.attribute(resource, key) + attribute.source || attribute.name + end) + } + end) + |> Enum.map(fn identity -> + Map.put( + identity, + :index_name, + identity_index_names[identity.name] || + "#{AshSqlite.DataLayer.Info.table(resource)}_#{identity.name}_index" + ) + end) + |> Enum.map(&Map.put(&1, :base_filter, AshSqlite.DataLayer.Info.base_filter_sql(resource))) + end + + @uuid_functions [&Ash.UUID.generate/0, &Ecto.UUID.generate/0] + + defp default(%{name: name, default: default}, resource, repo) when is_function(default) do + configured_default(resource, name) || + cond do + default in @uuid_functions && "uuid-ossp" in (repo.config()[:installed_extensions] || []) -> + ~S[fragment("uuid_generate_v4()")] + + default == (&DateTime.utc_now/0) -> + ~S[fragment("now()")] + + true -> + "nil" + end + end + + defp default(%{name: name, default: {_, _, _}}, resource, _), + do: configured_default(resource, name) || "nil" + + defp default(%{name: name, default: nil}, resource, _), + do: configured_default(resource, name) || "nil" + + defp default(%{name: name, default: []}, resource, _), + do: configured_default(resource, name) || "[]" + + defp default(%{name: name, default: default}, resource, _) when default == %{}, + do: configured_default(resource, name) || "%{}" + + defp default(%{name: name, default: value, type: type} = attr, resource, _) do + case configured_default(resource, name) do + nil -> + case migration_default(type, Map.get(attr, :constraints, []), value) do + {:ok, default} -> + default + + :error -> + EctoMigrationDefault.to_default(value) + end + + default -> + default + end + end + + defp migration_default(type, constraints, value) do + type = + type + |> unwrap_type() + |> Ash.Type.get_type() + + if function_exported?(type, :value_to_sqlite_default, 3) do + type.value_to_sqlite_default(type, constraints, value) + else + :error + end + end + + defp unwrap_type({:array, type}), do: unwrap_type(type) + defp unwrap_type(type), do: type + + defp configured_default(resource, attribute) do + AshSqlite.DataLayer.Info.migration_defaults(resource)[attribute] + end + + defp snapshot_to_binary(snapshot) do + snapshot + |> Map.update!(:attributes, fn attributes -> + Enum.map(attributes, fn attribute -> + %{attribute | type: sanitize_type(attribute.type, attribute[:size])} + end) + end) + |> Jason.encode!(pretty: true) + end + + defp sanitize_type({:array, type}, size) do + ["array", sanitize_type(type, size)] + end + + defp sanitize_type(:varchar, size) when not is_nil(size) do + ["varchar", size] + end + + defp sanitize_type(:binary, size) when not is_nil(size) do + ["binary", size] + end + + defp sanitize_type(type, size) when is_atom(type) and is_integer(size) do + [sanitize_type(type, nil), size] + end + + defp sanitize_type(type, _) do + type + end + + defp load_snapshot(json) do + json + |> Jason.decode!(keys: :atoms!) 
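+    # `keys: :atoms!` converts keys with `String.to_existing_atom/1`, so loading a
+    # snapshot cannot create new atoms from arbitrary JSON keys.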
+ |> sanitize_snapshot() + end + + defp sanitize_snapshot(snapshot) do + snapshot + |> Map.put_new(:has_create_action, true) + |> Map.put_new(:schema, nil) + |> Map.update!(:identities, fn identities -> + Enum.map(identities, &load_identity(&1, snapshot.table)) + end) + |> Map.update!(:attributes, fn attributes -> + Enum.map(attributes, fn attribute -> + attribute = load_attribute(attribute, snapshot.table) + + if is_map(Map.get(attribute, :references)) do + %{ + attribute + | references: rewrite(attribute.references, :ignore, :ignore?) + } + else + attribute + end + end) + end) + |> Map.put_new(:custom_indexes, []) + |> Map.update!(:custom_indexes, &load_custom_indexes/1) + |> Map.put_new(:custom_statements, []) + |> Map.update!(:custom_statements, &load_custom_statements/1) + |> Map.put_new(:check_constraints, []) + |> Map.update!(:check_constraints, &load_check_constraints/1) + |> Map.update!(:repo, &String.to_atom/1) + |> Map.put_new(:multitenancy, %{ + attribute: nil, + strategy: nil, + global: nil + }) + |> Map.update!(:multitenancy, &load_multitenancy/1) + |> Map.put_new(:base_filter, nil) + end + + defp load_check_constraints(constraints) do + Enum.map(constraints, fn constraint -> + Map.update!(constraint, :attribute, fn attribute -> + attribute + |> List.wrap() + |> Enum.map(&String.to_atom/1) + end) + end) + end + + defp load_custom_indexes(custom_indexes) do + Enum.map(custom_indexes || [], fn custom_index -> + custom_index + |> Map.put_new(:fields, []) + |> Map.put_new(:include, []) + |> Map.put_new(:message, nil) + end) + end + + defp load_custom_statements(statements) do + Enum.map(statements || [], fn statement -> + Map.update!(statement, :name, &String.to_atom/1) + end) + end + + defp load_multitenancy(multitenancy) do + multitenancy + |> Map.update!(:strategy, fn strategy -> strategy && String.to_atom(strategy) end) + |> Map.update!(:attribute, fn attribute -> attribute && String.to_atom(attribute) end) + end + + defp load_attribute(attribute, table) do + type = load_type(attribute.type) + + {type, size} = + case type do + {:varchar, size} -> + {:varchar, size} + + {:binary, size} -> + {:binary, size} + + {other, size} when is_atom(other) and is_integer(size) -> + {other, size} + + other -> + {other, nil} + end + + attribute = + if Map.has_key?(attribute, :name) do + Map.put(attribute, :source, String.to_atom(attribute.name)) + else + Map.update!(attribute, :source, &String.to_atom/1) + end + + attribute + |> Map.put(:type, type) + |> Map.put(:size, size) + |> Map.put_new(:default, "nil") + |> Map.update!(:default, &(&1 || "nil")) + |> Map.update!(:references, fn + nil -> + nil + + references -> + references + |> rewrite( + destination_field: :destination_attribute, + destination_field_default: :destination_attribute_default, + destination_field_generated: :destination_attribute_generated + ) + |> Map.delete(:ignore) + |> rewrite(:ignore?, :ignore) + |> Map.update!(:destination_attribute, &String.to_atom/1) + |> Map.put_new(:deferrable, false) + |> Map.update!(:deferrable, fn + "initially" -> :initially + other -> other + end) + |> Map.put_new(:schema, nil) + |> Map.put_new(:destination_attribute_default, "nil") + |> Map.put_new(:destination_attribute_generated, false) + |> Map.put_new(:on_delete, nil) + |> Map.put_new(:on_update, nil) + |> Map.update!(:on_delete, &(&1 && String.to_atom(&1))) + |> Map.update!(:on_update, &(&1 && String.to_atom(&1))) + |> Map.put( + :name, + Map.get(references, :name) || "#{table}_#{attribute.source}_fkey" + ) + |> 
Map.put_new(:multitenancy, %{ + attribute: nil, + strategy: nil, + global: nil + }) + |> Map.update!(:multitenancy, &load_multitenancy/1) + |> sanitize_name(table) + end) + end + + defp rewrite(map, keys) do + Enum.reduce(keys, map, fn {key, to}, map -> + rewrite(map, key, to) + end) + end + + defp rewrite(map, key, to) do + if Map.has_key?(map, key) do + map + |> Map.put(to, Map.get(map, key)) + |> Map.delete(key) + else + map + end + end + + defp sanitize_name(reference, table) do + if String.starts_with?(reference.name, "_") do + Map.put(reference, :name, "#{table}#{reference.name}") + else + reference + end + end + + defp load_type(["array", type]) do + {:array, load_type(type)} + end + + defp load_type(["varchar", size]) do + {:varchar, size} + end + + defp load_type(["binary", size]) do + {:binary, size} + end + + defp load_type([string, size]) when is_binary(string) and is_integer(size) do + {String.to_existing_atom(string), size} + end + + defp load_type(type) do + String.to_atom(type) + end + + defp load_identity(identity, table) do + identity + |> Map.update!(:name, &String.to_atom/1) + |> Map.update!(:keys, fn keys -> + keys + |> Enum.map(&String.to_atom/1) + |> Enum.sort() + end) + |> add_index_name(table) + |> Map.put_new(:base_filter, nil) + end + + defp add_index_name(%{name: name} = index, table) do + Map.put_new(index, :index_name, "#{table}_#{name}_unique_index") + end +end diff --git a/lib/migration_generator/operation.ex b/lib/migration_generator/operation.ex new file mode 100644 index 0000000..2365750 --- /dev/null +++ b/lib/migration_generator/operation.ex @@ -0,0 +1,902 @@ +defmodule AshSqlite.MigrationGenerator.Operation do + @moduledoc false + + defmodule Helper do + @moduledoc false + def join(list), + do: + list + |> List.flatten() + |> Enum.reject(&is_nil/1) + |> Enum.join(", ") + |> String.replace(", )", ")") + + def maybe_add_default("nil"), do: nil + def maybe_add_default(value), do: "default: #{value}" + + def maybe_add_primary_key(true), do: "primary_key: true" + def maybe_add_primary_key(_), do: nil + + def maybe_add_null(false), do: "null: false" + def maybe_add_null(_), do: nil + + def maybe_add_prefix(nil), do: nil + def maybe_add_prefix(prefix), do: "prefix: #{prefix}" + + def in_quotes(nil), do: nil + def in_quotes(value), do: "\"#{value}\"" + + def as_atom(value) when is_atom(value), do: Macro.inspect_atom(:remote_call, value) + # sobelow_skip ["DOS.StringToAtom"] + def as_atom(value), do: Macro.inspect_atom(:remote_call, String.to_atom(value)) + + def option(key, value) do + if value do + "#{as_atom(key)}: #{inspect(value)}" + end + end + + def on_delete(%{on_delete: on_delete}) when on_delete in [:delete, :nilify] do + "on_delete: :#{on_delete}_all" + end + + def on_delete(%{on_delete: on_delete}) when is_atom(on_delete) and not is_nil(on_delete) do + "on_delete: :#{on_delete}" + end + + def on_delete(_), do: nil + + def on_update(%{on_update: on_update}) when on_update in [:update, :nilify] do + "on_update: :#{on_update}_all" + end + + def on_update(%{on_update: on_update}) when is_atom(on_update) and not is_nil(on_update) do + "on_update: :#{on_update}" + end + + def on_update(_), do: nil + + def reference_type( + %{type: :integer}, + %{destination_attribute_generated: true, destination_attribute_default: "nil"} + ) do + :bigint + end + + def reference_type(%{type: type}, _) do + type + end + end + + defmodule CreateTable do + @moduledoc false + defstruct [:table, :schema, :multitenancy, :old_multitenancy] + end + + defmodule AddAttribute do + 
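+    # Renders the `add ...` instruction for a single column; when the attribute
+    # carries `references`, the column is emitted via `references(...)` with the
+    # constraint name, type, prefix and on_delete/on_update options. `down/1`
+    # delegates to `RemoveAttribute.up/1`.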
@moduledoc false + defstruct [:attribute, :table, :schema, :multitenancy, :old_multitenancy] + + import Helper + + def up(%{ + multitenancy: %{strategy: :attribute, attribute: source_attribute}, + attribute: + %{ + references: + %{ + table: table, + destination_attribute: reference_attribute, + schema: destination_schema, + multitenancy: %{strategy: :attribute, attribute: destination_attribute} + } = reference + } = attribute + }) do + with_match = + if destination_attribute != reference_attribute do + "with: [#{as_atom(source_attribute)}: :#{as_atom(destination_attribute)}], match: :full" + end + + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "references(:#{as_atom(table)}", + [ + "column: #{inspect(reference_attribute)}", + with_match, + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + option("prefix", destination_schema), + on_delete(reference), + on_update(reference), + size + ], + ")", + maybe_add_default(attribute.default), + maybe_add_primary_key(attribute.primary_key?), + maybe_add_null(attribute.allow_nil?) + ] + |> join() + end + + def up(%{ + attribute: + %{ + references: + %{ + table: table, + schema: destination_schema, + destination_attribute: destination_attribute + } = reference + } = attribute + }) do + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "references(:#{as_atom(table)}", + [ + "column: #{inspect(destination_attribute)}", + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + option("prefix", destination_schema), + size, + on_delete(reference), + on_update(reference) + ], + ")", + maybe_add_default(attribute.default), + maybe_add_primary_key(attribute.primary_key?), + maybe_add_null(attribute.allow_nil?) + ] + |> join() + end + + def up(%{attribute: %{type: :bigint, default: "nil", generated?: true} = attribute}) do + [ + "add #{inspect(attribute.source)}", + ":bigserial", + maybe_add_null(attribute.allow_nil?), + maybe_add_primary_key(attribute.primary_key?) + ] + |> join() + end + + def up(%{attribute: %{type: :integer, default: "nil", generated?: true} = attribute}) do + [ + "add #{inspect(attribute.source)}", + ":serial", + maybe_add_null(attribute.allow_nil?), + maybe_add_primary_key(attribute.primary_key?) + ] + |> join() + end + + def up(%{attribute: attribute}) do + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "#{inspect(attribute.type)}", + maybe_add_null(attribute.allow_nil?), + maybe_add_default(attribute.default), + size, + maybe_add_primary_key(attribute.primary_key?) 
+ ] + |> join() + end + + def down( + %{ + attribute: attribute, + table: table, + multitenancy: multitenancy + } = op + ) do + AshSqlite.MigrationGenerator.Operation.RemoveAttribute.up(%{ + op + | attribute: attribute, + table: table, + multitenancy: multitenancy + }) + end + end + + defmodule AlterDeferrability do + @moduledoc false + defstruct [:table, :schema, :references, :direction, no_phase: true] + + def up(%{direction: :up, table: table, references: %{name: name, deferrable: true}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} DEFERRABLE INITIALLY IMMEDIATE\");" + end + + def up(%{direction: :up, table: table, references: %{name: name, deferrable: :initially}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} DEFERRABLE INITIALLY DEFERRED\");" + end + + def up(%{direction: :up, table: table, references: %{name: name}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} NOT DEFERRABLE\");" + end + + def up(_), do: "" + + def down(%{direction: :down} = data), do: up(%{data | direction: :up}) + def down(_), do: "" + end + + defmodule AlterAttribute do + @moduledoc false + defstruct [ + :old_attribute, + :new_attribute, + :table, + :schema, + :multitenancy, + :old_multitenancy + ] + + import Helper + + defp alter_opts(attribute, old_attribute) do + primary_key = + cond do + attribute.primary_key? and !old_attribute.primary_key? -> + ", primary_key: true" + + old_attribute.primary_key? and !attribute.primary_key? -> + ", primary_key: false" + + true -> + nil + end + + default = + if attribute.default != old_attribute.default do + if is_nil(attribute.default) do + ", default: nil" + else + ", default: #{attribute.default}" + end + end + + null = + if attribute.allow_nil? != old_attribute.allow_nil? do + ", null: #{attribute.allow_nil?}" + end + + "#{null}#{default}#{primary_key}" + end + + def up(%{ + multitenancy: multitenancy, + old_attribute: old_attribute, + new_attribute: attribute, + schema: schema + }) do + type_or_reference = + if AshSqlite.MigrationGenerator.has_reference?(multitenancy, attribute) and + Map.get(old_attribute, :references) != Map.get(attribute, :references) do + reference(multitenancy, attribute, schema) + else + inspect(attribute.type) + end + + "modify #{inspect(attribute.source)}, #{type_or_reference}#{alter_opts(attribute, old_attribute)}" + end + + defp reference( + %{strategy: :attribute, attribute: source_attribute}, + %{ + references: + %{ + multitenancy: %{strategy: :attribute, attribute: destination_attribute}, + table: table, + schema: destination_schema, + destination_attribute: reference_attribute + } = reference + } = attribute, + schema + ) do + destination_schema = + if schema != destination_schema do + destination_schema + end + + with_match = + if destination_attribute != reference_attribute do + "with: [#{as_atom(source_attribute)}: :#{as_atom(destination_attribute)}], match: :full" + end + + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + join([ + "references(:#{as_atom(table)}, column: #{inspect(reference_attribute)}", + with_match, + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + size, + option("prefix", destination_schema), + on_delete(reference), + on_update(reference), + ")" + ]) + end + + defp reference( + _, + %{ + references: + %{ + table: table, + destination_attribute: destination_attribute, + schema: destination_schema + } = reference + } = attribute, + schema + ) do + destination_schema = + if schema != 
destination_schema do + destination_schema + end + + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + join([ + "references(:#{as_atom(table)}, column: #{inspect(destination_attribute)}", + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + size, + option("prefix", destination_schema), + on_delete(reference), + on_update(reference), + ")" + ]) + end + + def down(op) do + up(%{ + op + | old_attribute: op.new_attribute, + new_attribute: op.old_attribute, + old_multitenancy: op.multitenancy, + multitenancy: op.old_multitenancy + }) + end + end + + defmodule DropForeignKey do + @moduledoc false + # We only run this migration in one direction, based on the input + # This is because the creation of a foreign key is handled by `references/3` + # We only need to drop it before altering an attribute with `references/3` + defstruct [:attribute, :schema, :table, :multitenancy, :direction, no_phase: true] + + import Helper + + def up(%{table: table, schema: schema, attribute: %{references: reference}, direction: :up}) do + "drop constraint(:#{as_atom(table)}, #{join([inspect(reference.name), option("prefix", schema)])})" + end + + def up(_) do + "" + end + + def down(%{ + table: table, + schema: schema, + attribute: %{references: reference}, + direction: :down + }) do + "drop constraint(:#{as_atom(table)}, #{join([inspect(reference.name), option("prefix", schema)])})" + end + + def down(_) do + "" + end + end + + defmodule RenameAttribute do + @moduledoc false + defstruct [ + :old_attribute, + :new_attribute, + :table, + :schema, + :multitenancy, + :old_multitenancy, + no_phase: true + ] + + import Helper + + def up(%{ + old_attribute: old_attribute, + new_attribute: new_attribute, + schema: schema, + table: table + }) do + table_statement = join([":#{as_atom(table)}", option("prefix", schema)]) + + "rename table(#{table_statement}), #{inspect(old_attribute.source)}, to: #{inspect(new_attribute.source)}" + end + + def down( + %{ + old_attribute: old_attribute, + new_attribute: new_attribute + } = data + ) do + up(%{data | new_attribute: old_attribute, old_attribute: new_attribute}) + end + end + + defmodule RemoveAttribute do + @moduledoc false + defstruct [:attribute, :schema, :table, :multitenancy, :old_multitenancy, commented?: true] + + def up(%{attribute: attribute, commented?: true}) do + """ + # Attribute removal has been commented out to avoid data loss. 
See the migration generator documentation for more + # If you uncomment this, be sure to also uncomment the corresponding attribute *addition* in the `down` migration + # remove #{inspect(attribute.source)} + """ + end + + def up(%{attribute: attribute}) do + "remove #{inspect(attribute.source)}" + end + + def down(%{attribute: attribute, multitenancy: multitenancy, commented?: true}) do + prefix = """ + # This is the `down` migration of the statement: + # + # remove #{inspect(attribute.source)} + # + """ + + contents = + %AshSqlite.MigrationGenerator.Operation.AddAttribute{ + attribute: attribute, + multitenancy: multitenancy + } + |> AshSqlite.MigrationGenerator.Operation.AddAttribute.up() + |> String.split("\n") + |> Enum.map_join("\n", &"# #{&1}") + + prefix <> "\n" <> contents + end + + def down(%{attribute: attribute, multitenancy: multitenancy, table: table, schema: schema}) do + AshSqlite.MigrationGenerator.Operation.AddAttribute.up( + %AshSqlite.MigrationGenerator.Operation.AddAttribute{ + attribute: attribute, + table: table, + schema: schema, + multitenancy: multitenancy + } + ) + end + end + + defmodule AddUniqueIndex do + @moduledoc false + defstruct [:identity, :table, :schema, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + + def up(%{ + identity: %{name: name, keys: keys, base_filter: base_filter, index_name: index_name}, + table: table, + schema: schema, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + if base_filter do + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], where: \"#{base_filter}\", #{join(["name: \"#{index_name}\"", option("prefix", schema)])})" + else + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option("prefix", schema)])})" + end + end + + def down(%{ + identity: %{name: name, keys: keys, index_name: index_name}, + table: table, + schema: schema, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + "drop_if_exists unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option("prefix", schema)])})" + end + end + + defmodule AddCustomStatement do + @moduledoc false + defstruct [:statement, :table, no_phase: true] + + def up(%{statement: %{up: up, code?: false}}) do + """ + execute(\"\"\" + #{String.trim(up)} + \"\"\") + """ + end + + def up(%{statement: %{up: up, code?: true}}) do + up + end + + def down(%{statement: %{down: down, code?: false}}) do + """ + execute(\"\"\" + #{String.trim(down)} + \"\"\") + """ + end + + def down(%{statement: %{down: down, code?: true}}) do + down + end + end + + defmodule RemoveCustomStatement do + @moduledoc false + defstruct [:statement, :table, no_phase: true] + + def up(%{statement: statement, table: table}) do + AddCustomStatement.down(%AddCustomStatement{statement: statement, table: table}) + end + + def down(%{statement: statement, table: table}) do + AddCustomStatement.up(%AddCustomStatement{statement: statement, table: table}) + end + end + + defmodule AddCustomIndex do + @moduledoc false + defstruct [:table, :schema, :index, :base_filter, :multitenancy, no_phase: true] + import Helper + + def up(%{ + index: index, 
+ table: table, + schema: schema, + base_filter: base_filter, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + index = + if index.where && base_filter do + %{index | where: base_filter <> " AND " <> index.where} + else + index + end + + opts = + join([ + option(:name, index.name), + option(:unique, index.unique), + option(:concurrently, index.concurrently), + option(:using, index.using), + option(:prefix, index.prefix), + option(:where, index.where), + option(:include, index.include), + option(:prefix, schema) + ]) + + if opts == "", + do: "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}])", + else: + "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{opts})" + end + + def down(%{schema: schema, index: index, table: table, multitenancy: multitenancy}) do + index_name = AshSqlite.CustomIndex.name(table, index) + + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + "drop_if_exists index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option(:prefix, schema)])})" + end + end + + defmodule RemovePrimaryKey do + @moduledoc false + defstruct [:schema, :table, no_phase: true] + + def up(%{schema: schema, table: table}) do + if schema do + "drop constraint(#{inspect(table)}, \"#{table}_pkey\", prefix: \"#{schema}\")" + else + "drop constraint(#{inspect(table)}, \"#{table}_pkey\")" + end + end + + def down(_) do + "" + end + end + + defmodule RemovePrimaryKeyDown do + @moduledoc false + defstruct [:schema, :table, no_phase: true] + + def up(_) do + "" + end + + def down(%{schema: schema, table: table}) do + if schema do + "drop constraint(#{inspect(table)}, \"#{table}_pkey\", prefix: \"#{schema}\")" + else + "drop constraint(#{inspect(table)}, \"#{table}_pkey\")" + end + end + end + + defmodule RemoveCustomIndex do + @moduledoc false + defstruct [:schema, :table, :index, :base_filter, :multitenancy, no_phase: true] + import Helper + + def up(%{index: index, table: table, multitenancy: multitenancy, schema: schema}) do + index_name = AshSqlite.CustomIndex.name(table, index) + + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + "drop_if_exists index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option(:prefix, schema)])})" + end + + def down(%{ + index: index, + table: table, + schema: schema, + base_filter: base_filter, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + index = + if index.where && base_filter do + %{index | where: base_filter <> " AND " <> index.where} + else + index + end + + opts = + join([ + option(:name, index.name), + option(:unique, index.unique), + option(:concurrently, index.concurrently), + option(:using, index.using), + option(:prefix, index.prefix), + option(:where, index.where), + option(:include, index.include), + option(:prefix, schema) + ]) + + if opts == "" do + "create 
index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}])" + else + "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{opts})" + end + end + end + + defmodule RenameUniqueIndex do + @moduledoc false + defstruct [ + :new_identity, + :old_identity, + :table, + :schema, + :multitenancy, + :old_multitenancy, + no_phase: true + ] + + defp prefix_name(name, prefix) do + if prefix do + "#{prefix}.#{name}" + else + name + end + end + + def up(%{ + old_identity: %{index_name: old_index_name, name: old_name}, + new_identity: %{index_name: new_index_name}, + schema: schema, + table: table + }) do + old_index_name = old_index_name || "#{table}_#{old_name}_index" + + "execute(\"ALTER INDEX #{prefix_name(old_index_name, schema)} " <> + "RENAME TO #{prefix_name(new_index_name, schema)}\")\n" + end + + def down(%{ + old_identity: %{index_name: old_index_name, name: old_name}, + new_identity: %{index_name: new_index_name}, + schema: schema, + table: table + }) do + old_index_name = old_index_name || "#{table}_#{old_name}_index" + + "execute(\"ALTER INDEX #{prefix_name(new_index_name, schema)} " <> + "RENAME TO #{prefix_name(old_index_name, schema)}\")\n" + end + end + + defmodule RemoveUniqueIndex do + @moduledoc false + defstruct [:identity, :schema, :table, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + + def up(%{ + identity: %{name: name, keys: keys, index_name: index_name}, + table: table, + schema: schema, + old_multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + "drop_if_exists unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option(:prefix, schema)])})" + end + + def down(%{ + identity: %{name: name, keys: keys, base_filter: base_filter, index_name: index_name}, + table: table, + schema: schema, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + if base_filter do + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], where: \"#{base_filter}\", #{join(["name: \"#{index_name}\"", option(:prefix, schema)])})" + else + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\"", option(:prefix, schema)])})" + end + end + end + + defmodule AddCheckConstraint do + @moduledoc false + defstruct [:table, :schema, :constraint, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + + def up(%{ + schema: schema, + constraint: %{ + name: name, + check: check, + base_filter: base_filter + }, + table: table + }) do + if base_filter do + "create constraint(:#{as_atom(table)}, :#{as_atom(name)}, #{join(["check: \"#{base_filter} AND #{check}\")", option(:prefix, schema)])}" + else + "create constraint(:#{as_atom(table)}, :#{as_atom(name)}, #{join(["check: \"#{check}\")", option(:prefix, schema)])}" + end + end + + def down(%{ + constraint: %{name: name}, + schema: schema, + table: table + }) do + "drop_if_exists constraint(:#{as_atom(table)}, #{join([":#{as_atom(name)}", option(:prefix, schema)])})" + end + end + + defmodule RemoveCheckConstraint do + @moduledoc false + defstruct [:table, :schema, :constraint, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + 
+ def up(%{constraint: %{name: name}, schema: schema, table: table}) do + "drop_if_exists constraint(:#{as_atom(table)}, #{join([":#{as_atom(name)}", option(:prefix, schema)])})" + end + + def down(%{ + constraint: %{ + name: name, + check: check, + base_filter: base_filter + }, + schema: schema, + table: table + }) do + if base_filter do + "create constraint(:#{as_atom(table)}, :#{as_atom(name)}, #{join(["check: \"#{base_filter} AND #{check}\")", option(:prefix, schema)])}" + else + "create constraint(:#{as_atom(table)}, :#{as_atom(name)}, #{join(["check: \"#{check}\")", option(:prefix, schema)])}" + end + end + end +end diff --git a/lib/migration_generator/phase.ex b/lib/migration_generator/phase.ex new file mode 100644 index 0000000..a1cc8f6 --- /dev/null +++ b/lib/migration_generator/phase.ex @@ -0,0 +1,86 @@ +defmodule AshSqlite.MigrationGenerator.Phase do + @moduledoc false + + defmodule Create do + @moduledoc false + defstruct [:table, :schema, :multitenancy, operations: [], commented?: false] + + import AshSqlite.MigrationGenerator.Operation.Helper, only: [as_atom: 1] + + def up(%{schema: schema, table: table, operations: operations, multitenancy: multitenancy}) do + opts = + if schema do + ", prefix: \"#{schema}\"" + else + "" + end + + "create table(:#{as_atom(table)}, primary_key: false#{opts}) do\n" <> + Enum.map_join(operations, "\n", fn operation -> operation.__struct__.up(operation) end) <> + "\nend" + end + + def down(%{schema: schema, table: table, multitenancy: multitenancy}) do + opts = + if schema do + ", prefix: \"#{schema}\"" + else + "" + end + + "drop table(:#{as_atom(table)}#{opts})" + end + end + + defmodule Alter do + @moduledoc false + defstruct [:schema, :table, :multitenancy, operations: [], commented?: false] + + import AshSqlite.MigrationGenerator.Operation.Helper, only: [as_atom: 1] + + def up(%{table: table, schema: schema, operations: operations, multitenancy: multitenancy}) do + body = + operations + |> Enum.map_join("\n", fn operation -> operation.__struct__.up(operation) end) + |> String.trim() + + if body == "" do + "" + else + opts = + if schema do + ", prefix: \"#{schema}\"" + else + "" + end + + "alter table(:#{as_atom(table)}#{opts}) do\n" <> + body <> + "\nend" + end + end + + def down(%{table: table, schema: schema, operations: operations, multitenancy: multitenancy}) do + body = + operations + |> Enum.reverse() + |> Enum.map_join("\n", fn operation -> operation.__struct__.down(operation) end) + |> String.trim() + + if body == "" do + "" + else + opts = + if schema do + ", prefix: \"#{schema}\"" + else + "" + end + + "alter table(:#{as_atom(table)}#{opts}) do\n" <> + body <> + "\nend" + end + end + end +end diff --git a/lib/mix/helpers.ex b/lib/mix/helpers.ex new file mode 100644 index 0000000..cb2696d --- /dev/null +++ b/lib/mix/helpers.ex @@ -0,0 +1,133 @@ +defmodule AshSqlite.MixHelpers do + @moduledoc false + def apis!(opts, args) do + apps = + if apps_paths = Mix.Project.apps_paths() do + apps_paths |> Map.keys() |> Enum.sort() + else + [Mix.Project.config()[:app]] + end + + configured_apis = Enum.flat_map(apps, &Application.get_env(&1, :ash_apis, [])) + + apis = + if opts[:apis] && opts[:apis] != "" do + opts[:apis] + |> Kernel.||("") + |> String.split(",") + |> Enum.flat_map(fn + "" -> + [] + + api -> + [Module.concat([api])] + end) + else + configured_apis + end + + apis + |> Enum.map(&ensure_compiled(&1, args)) + |> case do + [] -> + raise "must supply the --apis argument, or set `config :my_app, ash_apis: [...]` in config" + + apis -> 
+ apis + end + end + + def repos!(opts, args) do + apis = apis!(opts, args) + + resources = + apis + |> Enum.flat_map(&Ash.Api.Info.resources/1) + |> Enum.filter(&(Ash.DataLayer.data_layer(&1) == AshSqlite.DataLayer)) + |> case do + [] -> + raise """ + No resources with `data_layer: AshSqlite.DataLayer` found in the apis #{Enum.map_join(apis, ",", &inspect/1)}. + + Must be able to find at least one resource with `data_layer: AshSqlite.DataLayer`. + """ + + resources -> + resources + end + + resources + |> Enum.map(&AshSqlite.DataLayer.Info.repo(&1)) + |> Enum.uniq() + |> case do + [] -> + raise """ + No repos could be found configured on the resources in the apis: #{Enum.map_join(apis, ",", &inspect/1)} + + At least one resource must have a repo configured. + + The following resources were found with `data_layer: AshSqlite.DataLayer`: + + #{Enum.map_join(resources, "\n", &"* #{inspect(&1)}")} + """ + + repos -> + repos + end + end + + def delete_flag(args, arg) do + case Enum.split_while(args, &(&1 != arg)) do + {left, [_ | rest]} -> + left ++ rest + + _ -> + args + end + end + + def delete_arg(args, arg) do + case Enum.split_while(args, &(&1 != arg)) do + {left, [_, _ | rest]} -> + left ++ rest + + _ -> + args + end + end + + defp ensure_compiled(api, args) do + if Code.ensure_loaded?(Mix.Tasks.App.Config) do + Mix.Task.run("app.config", args) + else + Mix.Task.run("loadpaths", args) + "--no-compile" not in args && Mix.Task.run("compile", args) + end + + case Code.ensure_compiled(api) do + {:module, _} -> + api + |> Ash.Api.Info.resources() + |> Enum.each(&Code.ensure_compiled/1) + + # TODO: We shouldn't need to make sure that the resources are compiled + + api + + {:error, error} -> + Mix.raise("Could not load #{inspect(api)}, error: #{inspect(error)}. ") + end + end + + def migrations_path(opts, repo) do + opts[:migrations_path] || repo.config()[:migrations_path] || derive_migrations_path(repo) + end + + + def derive_migrations_path(repo) do + config = repo.config() + priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}" + app = Keyword.fetch!(config, :otp_app) + Application.app_dir(app, Path.join(priv, "migrations")) + end +end diff --git a/lib/mix/tasks/ash_sqlite.create.ex b/lib/mix/tasks/ash_sqlite.create.ex new file mode 100644 index 0000000..9ea1b18 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.create.ex @@ -0,0 +1,48 @@ +defmodule Mix.Tasks.AshSqlite.Create do + use Mix.Task + + @shortdoc "Creates the repository storage" + + @switches [ + quiet: :boolean, + apis: :string, + no_compile: :boolean, + no_deps_check: :boolean + ] + + @aliases [ + q: :quiet + ] + + @moduledoc """ + Create the storage for repos in all resources for the given (or configured) apis. + + ## Examples + + mix ash_sqlite.create + mix ash_sqlite.create --apis MyApp.Api1,MyApp.Api2 + + ## Command line options + + * `--apis` - the apis who's repos you want to migrate. 
+    * `--quiet` - do not log output
+    * `--no-compile` - do not compile before creating
+    * `--no-deps-check` - do not check dependencies before creating
+  """
+
+  @doc false
+  def run(args) do
+    {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases)
+
+    repos = AshSqlite.MixHelpers.repos!(opts, args)
+
+    repo_args =
+      Enum.flat_map(repos, fn repo ->
+        ["-r", to_string(repo)]
+      end)
+
+    rest_opts = AshSqlite.MixHelpers.delete_arg(args, "--apis")
+
+    Mix.Task.run("ecto.create", repo_args ++ rest_opts)
+  end
+end
diff --git a/lib/mix/tasks/ash_sqlite.drop.ex b/lib/mix/tasks/ash_sqlite.drop.ex
new file mode 100644
index 0000000..25e1ed3
--- /dev/null
+++ b/lib/mix/tasks/ash_sqlite.drop.ex
@@ -0,0 +1,56 @@
+defmodule Mix.Tasks.AshSqlite.Drop do
+  use Mix.Task
+
+  @shortdoc "Drops the repository storage for the repos in the specified (or configured) apis"
+  @default_opts [force: false, force_drop: false]
+
+  @aliases [
+    f: :force,
+    q: :quiet
+  ]
+
+  @switches [
+    force: :boolean,
+    force_drop: :boolean,
+    quiet: :boolean,
+    apis: :string,
+    no_compile: :boolean,
+    no_deps_check: :boolean
+  ]
+
+  @moduledoc """
+  Drop the storage for the given repository.
+
+  ## Examples
+
+      mix ash_sqlite.drop
+      mix ash_sqlite.drop --apis MyApp.Api1,MyApp.Api2
+
+  ## Command line options
+
+    * `--apis` - the apis whose repos should be dropped
+    * `-q`, `--quiet` - run the command quietly
+    * `-f`, `--force` - do not ask for confirmation when dropping the database.
+      Confirmation is asked only when `:start_permanent` is set to true
+      (typically in production)
+    * `--no-compile` - do not compile before dropping
+    * `--no-deps-check` - do not check dependencies before dropping
+  """
+
+  @doc false
+  def run(args) do
+    {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases)
+    opts = Keyword.merge(@default_opts, opts)
+
+    repos = AshSqlite.MixHelpers.repos!(opts, args)
+
+    repo_args =
+      Enum.flat_map(repos, fn repo ->
+        ["-r", to_string(repo)]
+      end)
+
+    rest_opts = AshSqlite.MixHelpers.delete_arg(args, "--apis")
+
+    Mix.Task.run("ecto.drop", repo_args ++ rest_opts)
+  end
+end
diff --git a/lib/mix/tasks/ash_sqlite.generate_migrations.ex b/lib/mix/tasks/ash_sqlite.generate_migrations.ex
new file mode 100644
index 0000000..37d2b68
--- /dev/null
+++ b/lib/mix/tasks/ash_sqlite.generate_migrations.ex
@@ -0,0 +1,96 @@
+defmodule Mix.Tasks.AshSqlite.GenerateMigrations do
+  @moduledoc """
+  Generates migrations, and stores a snapshot of your resources.
+
+  Options:
+
+  * `apis` - a comma separated list of API modules, for which migrations will be generated
+  * `snapshot-path` - a custom path to store the snapshots, defaults to "priv/resource_snapshots"
+  * `migration-path` - a custom path to store the migrations, defaults to "priv".
+    Migrations are stored in a folder for each repo, so `priv/repo_name/migrations`
+  * `drop-columns` - whether or not to drop columns as attributes are removed. See below for more
+  * `name` -
+    names the generated migrations, prepending with the timestamp. The default is `migrate_resources_<n>`,
+    where `<n>` is the count of migrations matching `*migrate_resources*` plus one.
+    For example, `--name add_special_column` would get a name like `20210708181402_add_special_column.exs`
+
+  Flags:
+
+  * `quiet` - messages for file creations will not be printed
+  * `no-format` - files that are created will not be formatted with the code formatter
+  * `dry-run` - no files are created, instead the new migration is printed
+  * `check` - no files are created, returns an exit(1) code if the current snapshots and resources don't fit
+
+  #### Snapshots
+
+  Snapshots are stored in a folder for each table that migrations are generated for. Each snapshot is
+  stored in a file with a timestamp of when it was generated.
+  This is important because it allows for simultaneous work to be done on separate branches, and for rolling back
+  changes more easily, e.g. removing a generated migration and deleting the most recent snapshot, without having to redo
+  all of it.
+
+  #### Dropping columns
+
+  Generally speaking, it is bad practice to drop columns when you deploy a change that
+  would remove an attribute. The main reasons for this are backwards compatibility and rolling restarts.
+  If you deploy an attribute removal and run migrations, then regardless of your deployment strategy, you
+  won't be able to roll back, because the data has been deleted. In a rolling restart situation, some of
+  the machines/pods/whatever may still be running after the column has been deleted, causing errors. With
+  this in mind, it's best not to delete those columns until later, after the data has been confirmed unnecessary.
+  To that end, the migration generator leaves the column dropping code commented. You can pass `--drop-columns`
+  to tell it to uncomment those statements. Additionally, you can just uncomment that code on a case by case
+  basis.
+
+  #### Conflicts/Multiple Resources
+
+  The migration generator can support multiple schemas using the same table.
+  It will raise on conflicts that it can't resolve, like the same field with different
+  types. It will prompt to resolve conflicts that can be resolved with human input.
+  For example, if you remove an attribute and add an attribute, it will ask you if you are renaming
+  the column in question. If not, it will remove one column and add the other.
+
+  Additionally, it lowers things to the database where possible:
+
+  #### Defaults
+
+  Currently, the following anonymous functions will translate to database-specific defaults:
+
+  * `&DateTime.utc_now/0`
+
+  Non-function default values will be dumped to their native type and inspected. This may not work for some types,
+  and may require manual intervention/patches to the migration generator code.
+
+  #### Identities
+
+  Identities will cause the migration generator to generate unique constraints. If multiple
+  resources target the same table, you will be asked to select the primary key, and any others
+  will be added as unique constraints.
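+
+  For example, an identity like the following (the resource, table, and attribute names here are illustrative):
+
+  ```elixir
+  identities do
+    identity :unique_email, [:email]
+  end
+  ```
+
+  would result in a statement along the lines of
+  `create unique_index(:users, [:email], name: "users_unique_email_unique_index")`
+  in the generated migration.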
+  """
+  use Mix.Task
+
+  @shortdoc "Generates migrations, and stores a snapshot of your resources"
+  def run(args) do
+    {opts, _} =
+      OptionParser.parse!(args,
+        strict: [
+          apis: :string,
+          snapshot_path: :string,
+          migration_path: :string,
+          quiet: :boolean,
+          name: :string,
+          no_format: :boolean,
+          dry_run: :boolean,
+          check: :boolean,
+          drop_columns: :boolean
+        ]
+      )
+
+    apis = AshSqlite.MixHelpers.apis!(opts, args)
+
+    opts =
+      opts
+      |> Keyword.put(:format, !opts[:no_format])
+      |> Keyword.delete(:no_format)
+
+    AshSqlite.MigrationGenerator.generate(apis, opts)
+  end
+end
diff --git a/lib/mix/tasks/ash_sqlite.migrate.ex b/lib/mix/tasks/ash_sqlite.migrate.ex
new file mode 100644
index 0000000..3462b7d
--- /dev/null
+++ b/lib/mix/tasks/ash_sqlite.migrate.ex
@@ -0,0 +1,115 @@
+defmodule Mix.Tasks.AshSqlite.Migrate do
+  use Mix.Task
+
+  import AshSqlite.MixHelpers,
+    only: [migrations_path: 2]
+
+  @shortdoc "Runs the repository migrations for all repositories in the provided (or configured) apis"
+
+  @aliases [
+    n: :step
+  ]
+
+  @switches [
+    all: :boolean,
+    step: :integer,
+    to: :integer,
+    quiet: :boolean,
+    prefix: :string,
+    pool_size: :integer,
+    log_sql: :boolean,
+    strict_version_order: :boolean,
+    apis: :string,
+    no_compile: :boolean,
+    no_deps_check: :boolean,
+    migrations_path: :keep
+  ]
+
+  @moduledoc """
+  Runs the pending migrations for the given repository.
+
+  Migrations are expected at "priv/YOUR_REPO/migrations" directory
+  of the current application, where "YOUR_REPO" is the last segment
+  in your repository name. For example, the repository `MyApp.Repo`
+  will use "priv/repo/migrations". The repository `Whatever.MyRepo`
+  will use "priv/my_repo/migrations".
+
+  This task runs all pending migrations by default. To migrate up to a
+  specific version number, supply `--to version_number`. To migrate a
+  specific number of times, use `--step n`.
+
+  This is only really useful if your api or apis only use a single repo.
+  If you have multiple repos and you want to run a single migration and/or
+  migrate/roll them back to different points, you will need to use the
+  ecto specific task, `mix ecto.migrate` and provide your repo name.
+
+  If a repository has not yet been started, one will be started outside
+  your application supervision tree and shutdown afterwards.
+
+  ## Examples
+
+      mix ash_sqlite.migrate
+      mix ash_sqlite.migrate --apis MyApp.Api1,MyApp.Api2
+
+      mix ash_sqlite.migrate -n 3
+      mix ash_sqlite.migrate --step 3
+
+      mix ash_sqlite.migrate --to 20080906120000
+
+  ## Command line options
+
+    * `--apis` - the apis whose repos should be migrated
+
+    * `--all` - run all pending migrations
+
+    * `--step`, `-n` - run n number of pending migrations
+
+    * `--to` - run all migrations up to and including version
+
+    * `--quiet` - do not log migration commands
+
+    * `--pool-size` - the pool size if the repository is started only for the task (defaults to 2)
+
+    * `--log-sql` - log the raw sql migrations are running
+
+    * `--strict-version-order` - abort when applying a migration with old timestamp
+
+    * `--no-compile` - does not compile applications before migrating
+
+    * `--no-deps-check` - does not check dependencies before migrating
+
+    * `--migrations-path` - the path to load the migrations from, defaults to
+      `"priv/repo/migrations"`. This option may be given multiple times in which case the migrations
+      are loaded from all the given directories and sorted as if they were in the same one.
+
+      Note, if you have migrations paths e.g.
`a/` and `b/`, and run + `mix ecto.migrate --migrations-path a/`, the latest migrations from `a/` will be run (even + if `b/` contains the overall latest migrations.) + """ + + @impl true + def run(args) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + repos = AshSqlite.MixHelpers.repos!(opts, args) + + repo_args = + Enum.flat_map(repos, fn repo -> + ["-r", to_string(repo)] + end) + + rest_opts = + args + |> AshSqlite.MixHelpers.delete_arg("--apis") + |> AshSqlite.MixHelpers.delete_arg("--migrations-path") + + for repo <- repos do + Mix.Task.run( + "ecto.migrate", + repo_args ++ rest_opts ++ ["--migrations-path", migrations_path(opts, repo)] + ) + + Mix.Task.reenable("ecto.migrate") + end + end +end diff --git a/lib/mix/tasks/ash_sqlite.rollback.ex b/lib/mix/tasks/ash_sqlite.rollback.ex new file mode 100644 index 0000000..0835b68 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.rollback.ex @@ -0,0 +1,81 @@ +defmodule Mix.Tasks.AshSqlite.Rollback do + use Mix.Task + + import AshSqlite.MixHelpers, + only: [migrations_path: 2] + + @shortdoc "Rolls back the repository migrations for all repositories in the provided (or configured) apis" + + @moduledoc """ + Reverts applied migrations in the given repository. + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application but it can be configured by specifying + the `:priv` key under the repository configuration. + Runs the latest applied migration by default. To roll back to + a version number, supply `--to version_number`. To roll back a + specific number of times, use `--step n`. To undo all applied + migrations, provide `--all`. + + This is only really useful if your api or apis only use a single repo. + If you have multiple repos and you want to run a single migration and/or + migrate/roll them back to different points, you will need to use the + ecto specific task, `mix ecto.migrate` and provide your repo name. 
+
+  ## Examples
+
+      mix ash_sqlite.rollback
+      mix ash_sqlite.rollback -r Custom.Repo
+      mix ash_sqlite.rollback -n 3
+      mix ash_sqlite.rollback --step 3
+      mix ash_sqlite.rollback -v 20080906120000
+      mix ash_sqlite.rollback --to 20080906120000
+
+  ## Command line options
+
+    * `--apis` - the apis whose repos should be rolled back
+    * `--all` - revert all applied migrations
+    * `--step` / `-n` - revert n number of applied migrations
+    * `--to` / `-v` - revert all migrations down to and including version
+    * `--quiet` - do not log migration commands
+    * `--prefix` - the prefix to run migrations on
+    * `--pool-size` - the pool size if the repository is started only for the task (defaults to 1)
+    * `--log-sql` - log the raw sql migrations are running
+  """
+
+  @doc false
+  def run(args) do
+    {opts, _, _} =
+      OptionParser.parse(args,
+        switches: [
+          all: :boolean,
+          step: :integer,
+          to: :integer,
+          start: :boolean,
+          quiet: :boolean,
+          prefix: :string,
+          pool_size: :integer,
+          log_sql: :boolean
+        ],
+        aliases: [n: :step, v: :to]
+      )
+
+    repos = AshSqlite.MixHelpers.repos!(opts, args)
+
+    repo_args =
+      Enum.flat_map(repos, fn repo ->
+        ["-r", to_string(repo)]
+      end)
+
+    rest_opts =
+      args
+      |> AshSqlite.MixHelpers.delete_arg("--apis")
+      |> AshSqlite.MixHelpers.delete_arg("--migrations-path")
+
+    for repo <- repos do
+      Mix.Task.run(
+        "ecto.rollback",
+        repo_args ++ rest_opts ++ ["--migrations-path", migrations_path(opts, repo)]
+      )
+
+      Mix.Task.reenable("ecto.rollback")
+    end
+  end
+end
diff --git a/lib/reference.ex b/lib/reference.ex
new file mode 100644
index 0000000..275bfd7
--- /dev/null
+++ b/lib/reference.ex
@@ -0,0 +1,43 @@
+defmodule AshSqlite.Reference do
+  @moduledoc "Represents the configuration of a reference (i.e. foreign key)."
+  defstruct [:relationship, :on_delete, :on_update, :name, :deferrable, ignore?: false]
+
+  def schema do
+    [
+      relationship: [
+        type: :atom,
+        required: true,
+        doc: "The relationship to be configured"
+      ],
+      ignore?: [
+        type: :boolean,
+        doc:
+          "If set to true, no reference is created for the given relationship. This is useful if you need to define it in some custom way"
+      ],
+      on_delete: [
+        type: {:one_of, [:delete, :nilify, :nothing, :restrict]},
+        doc: """
+        What should happen to records of this resource when the referenced record of the *destination* resource is deleted.
+        """
+      ],
+      on_update: [
+        type: {:one_of, [:update, :nilify, :nothing, :restrict]},
+        doc: """
+        What should happen to records of this resource when the referenced destination_attribute of the *destination* record is updated.
+        """
+      ],
+      deferrable: [
+        type: {:one_of, [false, true, :initially]},
+        default: false,
+        doc: """
+        Whether or not the constraint is deferrable. This only affects the migration generator.
+        """
+      ],
+      name: [
+        type: :string,
+        doc:
+          "The name of the foreign key to generate in the database. Defaults to `<table>_<source_attribute>_fkey`"
+      ]
+    ]
+  end
+end
diff --git a/lib/repo.ex b/lib/repo.ex
new file mode 100644
index 0000000..58e3ef2
--- /dev/null
+++ b/lib/repo.ex
@@ -0,0 +1,172 @@
+defmodule AshSqlite.Repo do
+  @moduledoc """
+  Resources that use `AshSqlite.DataLayer` use a `Repo` to access the database.
+
+  This repo is a thin wrapper around an `Ecto.Repo`.
+
+  You can use `Ecto.Repo`'s `init/2` to configure your repo like normal, but
+  instead of returning `{:ok, config}`, use `super(config)` to pass the
+  configuration to the `AshSqlite.Repo` implementation.
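+
+  For example, a repo might look like this (the module name and the logging hook are illustrative):
+
+  ```elixir
+  defmodule MyApp.Repo do
+    use AshSqlite.Repo, otp_app: :my_app
+
+    # Overridable hook: invoked whenever Ash begins a transaction
+    def on_transaction_begin(reason) do
+      require Logger
+      Logger.debug(inspect(reason))
+    end
+  end
+  ```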
+ + ## Transaction Hooks + + You can define `on_transaction_begin/1`, which will be invoked whenever a transaction is started for Ash. + + This will be invoked with a map containing a `type` key and metadata. + + ```elixir + %{type: :create, %{resource: YourApp.YourResource, action: :action}} + ``` + """ + + @doc "Use this to inform the data layer about what extensions are installed" + @callback installed_extensions() :: [String.t()] + + @doc """ + Use this to inform the data layer about the oldest potential sqlite version it will be run on. + + Must be an integer greater than or equal to 13. + """ + @callback min_pg_version() :: integer() + + @callback on_transaction_begin(reason :: Ash.DataLayer.transaction_reason()) :: term + + @doc "The path where your migrations are stored" + @callback migrations_path() :: String.t() | nil + @doc "Allows overriding a given migration type for *all* fields, for example if you wanted to always use :timestamptz for :utc_datetime fields" + @callback override_migration_type(atom) :: atom + + defmacro __using__(opts) do + quote bind_quoted: [opts: opts] do + otp_app = opts[:otp_app] || raise("Must configure OTP app") + + use Ecto.Repo, + adapter: Ecto.Adapters.SQLite3, + otp_app: otp_app + + @behaviour AshSqlite.Repo + + defoverridable insert: 2, insert: 1, insert!: 2, insert!: 1 + + def installed_extensions, do: [] + def migrations_path, do: nil + def default_prefix, do: "public" + def override_migration_type(type), do: type + def min_pg_version, do: 10 + + def init(_, config) do + new_config = + config + |> Keyword.put(:installed_extensions, installed_extensions()) + |> Keyword.put(:migrations_path, migrations_path()) + |> Keyword.put(:default_prefix, default_prefix()) + + {:ok, new_config} + end + + def on_transaction_begin(_reason), do: :ok + + def insert(struct_or_changeset, opts \\ []) do + struct_or_changeset + |> to_ecto() + |> then(fn value -> + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert( + __MODULE__, + repo, + value, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end) + |> from_ecto() + end + + def insert!(struct_or_changeset, opts \\ []) do + struct_or_changeset + |> to_ecto() + |> then(fn value -> + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert!( + __MODULE__, + repo, + value, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end) + |> from_ecto() + end + + def from_ecto({:ok, result}), do: {:ok, from_ecto(result)} + def from_ecto({:error, _} = other), do: other + + def from_ecto(nil), do: nil + + def from_ecto(value) when is_list(value) do + Enum.map(value, &from_ecto/1) + end + + def from_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + empty = struct(resource) + + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + case Map.get(record, relationship.name) do + %Ecto.Association.NotLoaded{} -> + Map.put(record, relationship.name, Map.get(empty, relationship.name)) + + value -> + Map.put(record, relationship.name, from_ecto(value)) + end + end) + else + record + end + end + + def from_ecto(other), do: other + + def to_ecto(nil), do: nil + + def to_ecto(value) when is_list(value) do + Enum.map(value, &to_ecto/1) + end + + def to_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + value = + case Map.get(record, relationship.name) do + %Ash.NotLoaded{} -> + 
%Ecto.Association.NotLoaded{ + __field__: relationship.name, + __cardinality__: relationship.cardinality + } + + value -> + to_ecto(value) + end + + Map.put(record, relationship.name, value) + end) + else + record + end + end + + def to_ecto(other), do: other + + defoverridable init: 2, + on_transaction_begin: 1, + installed_extensions: 0, + default_prefix: 0, + override_migration_type: 1, + min_pg_version: 0 + end + end +end diff --git a/lib/sort.ex b/lib/sort.ex new file mode 100644 index 0000000..96e62bb --- /dev/null +++ b/lib/sort.ex @@ -0,0 +1,139 @@ +defmodule AshSqlite.Sort do + @moduledoc false + require Ecto.Query + + def sort( + query, + sort, + resource, + relationship_path \\ [], + binding \\ 0, + return_order_by? \\ false + ) do + query = AshSqlite.DataLayer.default_bindings(query, resource) + + sort + |> sanitize_sort() + |> Enum.reduce_while({:ok, []}, fn + {order, %Ash.Query.Calculation{} = calc}, {:ok, query_expr} -> + type = + if calc.type do + AshSqlite.Types.parameterized_type(calc.type, calc.constraints) + else + nil + end + + calc.opts + |> calc.module.expression(calc.context) + |> Ash.Filter.hydrate_refs(%{ + resource: resource, + calculations: %{}, + public?: false + }) + |> Ash.Filter.move_to_relationship_path(relationship_path) + |> case do + {:ok, expr} -> + expr = + AshSqlite.Expr.dynamic_expr(query, expr, query.__ash_bindings__, false, type) + + {:cont, {:ok, query_expr ++ [{order, expr}]}} + + {:error, error} -> + {:halt, {:error, error}} + end + + {order, sort}, {:ok, query_expr} -> + expr = + Ecto.Query.dynamic(field(as(^binding), ^sort)) + + {:cont, {:ok, query_expr ++ [{order, expr}]}} + end) + |> case do + {:ok, []} -> + {:ok, query} + + {:ok, sort_exprs} -> + if return_order_by? do + {:ok, order_to_fragments(sort_exprs)} + else + new_query = Ecto.Query.order_by(query, ^sort_exprs) + + sort_expr = List.last(new_query.order_bys) + + new_query = + new_query + |> Map.update!(:windows, fn windows -> + order_by_expr = %{sort_expr | expr: [order_by: sort_expr.expr]} + Keyword.put(windows, :order, order_by_expr) + end) + |> Map.update!(:__ash_bindings__, &Map.put(&1, :__order__?, true)) + + {:ok, new_query} + end + + {:error, error} -> + {:error, error} + end + end + + def order_to_fragments([]), do: [] + + def order_to_fragments(order) when is_list(order) do + Enum.map(order, &do_order_to_fragments(&1)) + end + + def do_order_to_fragments({order, sort}) do + case order do + :asc -> + Ecto.Query.dynamic([row], fragment("? ASC", ^sort)) + + :desc -> + Ecto.Query.dynamic([row], fragment("? DESC", ^sort)) + + :asc_nulls_last -> + Ecto.Query.dynamic([row], fragment("? ASC NULLS LAST", ^sort)) + + :asc_nulls_first -> + Ecto.Query.dynamic([row], fragment("? ASC NULLS FIRST", ^sort)) + + :desc_nulls_first -> + Ecto.Query.dynamic([row], fragment("? DESC NULLS FIRST", ^sort)) + + :desc_nulls_last -> + Ecto.Query.dynamic([row], fragment("? 
DESC NULLS LAST", ^sort)) + "DESC NULLS LAST" + end + end + + def order_to_sqlite_order(dir) do + case dir do + :asc -> nil + :asc_nils_last -> " ASC NULLS LAST" + :asc_nils_first -> " ASC NULLS FIRST" + :desc -> " DESC" + :desc_nils_last -> " DESC NULLS LAST" + :desc_nils_first -> " DESC NULLS FIRST" + end + end + + defp sanitize_sort(sort) do + sort + |> List.wrap() + |> Enum.map(fn + {sort, {order, context}} -> + {ash_to_ecto_order(order), {sort, context}} + + {sort, order} -> + {ash_to_ecto_order(order), sort} + + sort -> + sort + end) + end + + defp ash_to_ecto_order(:asc_nils_last), do: :asc_nulls_last + defp ash_to_ecto_order(:asc_nils_first), do: :asc_nulls_first + defp ash_to_ecto_order(:desc_nils_last), do: :desc_nulls_last + defp ash_to_ecto_order(:desc_nils_first), do: :desc_nulls_first + defp ash_to_ecto_order(other), do: other +end diff --git a/lib/statement.ex b/lib/statement.ex new file mode 100644 index 0000000..506c963 --- /dev/null +++ b/lib/statement.ex @@ -0,0 +1,45 @@ +defmodule AshSqlite.Statement do + @moduledoc "Represents a custom statement to be run in generated migrations" + + @fields [ + :name, + :up, + :down, + :code? + ] + + defstruct @fields + + def fields, do: @fields + + @schema [ + name: [ + type: :atom, + required: true, + doc: """ + The name of the statement, must be unique within the resource + """ + ], + code?: [ + type: :boolean, + default: false, + doc: """ + By default, we place the strings inside of ecto migration's `execute/1` function and assume they are sql. Use this option if you want to provide custom elixir code to be placed directly in the migrations + """ + ], + up: [ + type: :string, + doc: """ + How to create the structure of the statement + """, + required: true + ], + down: [ + type: :string, + doc: "How to tear down the structure of the statement", + required: true + ] + ] + + def schema, do: @schema +end diff --git a/lib/transformers/ensure_table_or_polymorphic.ex b/lib/transformers/ensure_table_or_polymorphic.ex new file mode 100644 index 0000000..b6a4dd4 --- /dev/null +++ b/lib/transformers/ensure_table_or_polymorphic.ex @@ -0,0 +1,30 @@ +defmodule AshSqlite.Transformers.EnsureTableOrPolymorphic do + @moduledoc false + use Spark.Dsl.Transformer + alias Spark.Dsl.Transformer + + def transform(dsl) do + if Transformer.get_option(dsl, [:sqlite], :polymorphic?) || + Transformer.get_option(dsl, [:sqlite], :table) do + {:ok, dsl} + else + resource = Transformer.get_persisted(dsl, :module) + + raise Spark.Error.DslError, + module: resource, + message: """ + Must configure a table for #{inspect(resource)}. 
+
+          For example:
+
+          ```elixir
+          sqlite do
+            table "the_table"
+            repo YourApp.Repo
+          end
+          ```
+          """,
+          path: [:sqlite, :table]
+      end
+    end
+  end
+end
diff --git a/lib/transformers/validate_references.ex b/lib/transformers/validate_references.ex
new file mode 100644
index 0000000..0d63a0b
--- /dev/null
+++ b/lib/transformers/validate_references.ex
@@ -0,0 +1,23 @@
+defmodule AshSqlite.Transformers.ValidateReferences do
+  @moduledoc false
+  use Spark.Dsl.Transformer
+  alias Spark.Dsl.Transformer
+
+  def after_compile?, do: true
+
+  def transform(dsl) do
+    dsl
+    |> AshSqlite.DataLayer.Info.references()
+    |> Enum.each(fn reference ->
+      unless Ash.Resource.Info.relationship(dsl, reference.relationship) do
+        raise Spark.Error.DslError,
+          path: [:sqlite, :references, reference.relationship],
+          module: Transformer.get_persisted(dsl, :module),
+          message:
+            "Found reference configuration for relationship `#{reference.relationship}`, but no such relationship exists"
+      end
+    end)
+
+    {:ok, dsl}
+  end
+end
diff --git a/lib/transformers/verify_repo.ex b/lib/transformers/verify_repo.ex
new file mode 100644
index 0000000..eb2fb44
--- /dev/null
+++ b/lib/transformers/verify_repo.ex
@@ -0,0 +1,22 @@
+defmodule AshSqlite.Transformers.VerifyRepo do
+  @moduledoc false
+  use Spark.Dsl.Transformer
+  alias Spark.Dsl.Transformer
+
+  def after_compile?, do: true
+
+  def transform(dsl) do
+    repo = Transformer.get_option(dsl, [:sqlite], :repo)
+
+    cond do
+      match?({:error, _}, Code.ensure_compiled(repo)) ->
+        {:error, "Could not find repo module #{repo}"}
+
+      repo.__adapter__() != Ecto.Adapters.SQLite3 ->
+        {:error, "Expected a repo using the sqlite adapter `Ecto.Adapters.SQLite3`"}
+
+      true ->
+        {:ok, dsl}
+    end
+  end
+end
diff --git a/lib/type.ex b/lib/type.ex
new file mode 100644
index 0000000..1be8904
--- /dev/null
+++ b/lib/type.ex
@@ -0,0 +1,19 @@
+defmodule AshSqlite.Type do
+  @moduledoc """
+  Sqlite specific callbacks for `Ash.Type`.
+
+  Use this in addition to `Ash.Type`.
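+
+  For example, a custom type could opt into SQLite-specific default handling like this
+  (the module name and the naive quoting of the default are illustrative):
+
+  ```elixir
+  defmodule MyApp.Type.Slug do
+    use Ash.Type
+    use AshSqlite.Type
+
+    @impl true
+    def storage_type(_), do: :text
+
+    @impl true
+    defdelegate cast_input(value, constraints), to: Ash.Type.String
+    @impl true
+    defdelegate cast_stored(value, constraints), to: Ash.Type.String
+    @impl true
+    defdelegate dump_to_native(value, constraints), to: Ash.Type.String
+
+    # Render a literal default for this type when generating migrations
+    @impl AshSqlite.Type
+    def value_to_sqlite_default(_type, _constraints, value) when is_binary(value) do
+      {:ok, "'" <> value <> "'"}
+    end
+
+    def value_to_sqlite_default(_, _, _), do: :error
+  end
+  ```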
+ """ + + @callback value_to_sqlite_default(Ash.Type.t(), Ash.Type.constraints(), term) :: + {:ok, String.t()} | :error + + defmacro __using__(_) do + quote do + @behaviour AshSqlite.Type + def value_to_sqlite_default(_, _, _), do: :error + + defoverridable value_to_sqlite_default: 3 + end + end +end diff --git a/lib/types/ci_string_wrapper copy.ex b/lib/types/ci_string_wrapper copy.ex new file mode 100644 index 0000000..72f850d --- /dev/null +++ b/lib/types/ci_string_wrapper copy.ex @@ -0,0 +1,14 @@ +defmodule Ash.Type.CiStringWrapper do + @moduledoc false + use Ash.Type + + @impl true + def storage_type(_), do: :citext + + @impl true + defdelegate cast_input(value, constraints), to: Ash.Type.CiString + @impl true + defdelegate cast_stored(value, constraints), to: Ash.Type.CiString + @impl true + defdelegate dump_to_native(value, constraints), to: Ash.Type.CiString +end diff --git a/lib/types/ci_string_wrapper.ex b/lib/types/ci_string_wrapper.ex new file mode 100644 index 0000000..82d067d --- /dev/null +++ b/lib/types/ci_string_wrapper.ex @@ -0,0 +1,14 @@ +defmodule Ash.Type.StringWrapper do + @moduledoc false + use Ash.Type + + @impl true + def storage_type(_), do: :text + + @impl true + defdelegate cast_input(value, constraints), to: Ash.Type.String + @impl true + defdelegate cast_stored(value, constraints), to: Ash.Type.String + @impl true + defdelegate dump_to_native(value, constraints), to: Ash.Type.String +end diff --git a/lib/types/types.ex b/lib/types/types.ex new file mode 100644 index 0000000..4bfe051 --- /dev/null +++ b/lib/types/types.ex @@ -0,0 +1,190 @@ +defmodule AshSqlite.Types do + @moduledoc false + + alias Ash.Query.Ref + + def parameterized_type({:parameterized, _, _} = type, _) do + type + end + + def parameterized_type({:in, type}, constraints) do + parameterized_type({:array, type}, constraints) + end + + def parameterized_type({:array, type}, constraints) do + case parameterized_type(type, constraints[:items] || []) do + nil -> + nil + + type -> + {:array, type} + end + end + + def parameterized_type(Ash.Type.CiString, constraints) do + parameterized_type(Ash.Type.CiStringWrapper, constraints) + end + + def parameterized_type(Ash.Type.String.EctoType, constraints) do + parameterized_type(Ash.Type.StringWrapper, constraints) + end + + def parameterized_type(type, _constraints) when type in [Ash.Type.Map, Ash.Type.Map.EctoType], + do: nil + + def parameterized_type(type, constraints) do + if Ash.Type.ash_type?(type) do + cast_in_query? = + if function_exported?(Ash.Type, :cast_in_query?, 2) do + Ash.Type.cast_in_query?(type, constraints) + else + Ash.Type.cast_in_query?(type) + end + + if cast_in_query? 
do + parameterized_type(Ash.Type.ecto_type(type), constraints) + else + nil + end + else + if is_atom(type) && :erlang.function_exported(type, :type, 1) do + {:parameterized, type, constraints || []} + else + type + end + end + end + + def determine_types(mod, values) do + Code.ensure_compiled(mod) + + cond do + :erlang.function_exported(mod, :types, 0) -> + mod.types() + + :erlang.function_exported(mod, :args, 0) -> + mod.args() + + true -> + [:any] + end + |> Enum.map(fn types -> + case types do + :same -> + types = + for _ <- values do + :same + end + + closest_fitting_type(types, values) + + :any -> + for _ <- values do + :any + end + + types -> + closest_fitting_type(types, values) + end + end) + |> Enum.filter(fn types -> + Enum.all?(types, &(vagueness(&1) == 0)) + end) + |> case do + [type] -> + if type == :any || type == {:in, :any} do + nil + else + type + end + + # There are things we could likely do here + # We only say "we know what types these are" when we explicitly know + _ -> + Enum.map(values, fn _ -> nil end) + end + end + + defp closest_fitting_type(types, values) do + types_with_values = Enum.zip(types, values) + + types_with_values + |> fill_in_known_types() + |> clarify_types() + end + + defp clarify_types(types) do + basis = + types + |> Enum.map(&elem(&1, 0)) + |> Enum.min_by(&vagueness(&1)) + + Enum.map(types, fn {type, _value} -> + replace_same(type, basis) + end) + end + + defp replace_same({:in, type}, basis) do + {:in, replace_same(type, basis)} + end + + defp replace_same(:same, :same) do + :any + end + + defp replace_same(:same, {:in, :same}) do + {:in, :any} + end + + defp replace_same(:same, basis) do + basis + end + + defp replace_same(other, _basis) do + other + end + + defp fill_in_known_types(types) do + Enum.map(types, &fill_in_known_type/1) + end + + defp fill_in_known_type( + {vague_type, %Ref{attribute: %{type: type, constraints: constraints}}} = ref + ) + when vague_type in [:any, :same] do + if Ash.Type.ash_type?(type) do + type = type |> parameterized_type(constraints) |> array_to_in() + + {type || :any, ref} + else + type = + if is_atom(type) && :erlang.function_exported(type, :type, 1) do + {:parameterized, type, []} |> array_to_in() + else + type |> array_to_in() + end + + {type, ref} + end + end + + defp fill_in_known_type( + {{:array, type}, %Ref{attribute: %{type: {:array, type}} = attribute} = ref} + ) do + {:in, fill_in_known_type({type, %{ref | attribute: %{attribute | type: type}}})} + end + + defp fill_in_known_type({type, value}), do: {array_to_in(type), value} + + defp array_to_in({:array, v}), do: {:in, array_to_in(v)} + + defp array_to_in({:parameterized, type, constraints}), + do: {:parameterized, array_to_in(type), constraints} + + defp array_to_in(v), do: v + + defp vagueness({:in, type}), do: vagueness(type) + defp vagueness(:same), do: 2 + defp vagueness(:any), do: 1 + defp vagueness(_), do: 0 +end diff --git a/logos/small-logo.png b/logos/small-logo.png new file mode 100644 index 0000000..9fc9aa1 Binary files /dev/null and b/logos/small-logo.png differ diff --git a/mix.exs b/mix.exs new file mode 100644 index 0000000..bb1a103 --- /dev/null +++ b/mix.exs @@ -0,0 +1,231 @@ +defmodule AshSqlite.MixProject do + use Mix.Project + + @description """ + A sqlite data layer for `Ash` resources. Leverages Ecto's sqlite + support, and delegates to a configured repo. 
+ """ + + @version "0.1.0" + + def project do + [ + app: :ash_sqlite, + version: @version, + elixir: "~> 1.11", + start_permanent: Mix.env() == :prod, + deps: deps(), + description: @description, + test_coverage: [tool: ExCoveralls], + elixirc_paths: elixirc_paths(Mix.env()), + preferred_cli_env: [ + coveralls: :test, + "coveralls.github": :test, + "test.create": :test, + "test.migrate": :test, + "test.rollback": :test, + "test.check_migrations": :test, + "test.drop": :test, + "test.generate_migrations": :test, + "test.reset": :test + ], + dialyzer: [ + plt_add_apps: [:ecto, :ash, :mix] + ], + docs: docs(), + aliases: aliases(), + package: package(), + source_url: "https://github.com/ash-project/ash_sqlite", + homepage_url: "https://github.com/ash-project/ash_sqlite", + consolidate_protocols: Mix.env() != :test + ] + end + + if Mix.env() == :test do + def application() do + [applications: [:ecto, :ecto_sql, :jason, :ash, :postgrex], mod: {AshSqlite.TestApp, []}] + end + end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + defp package do + [ + name: :ash_sqlite, + licenses: ["MIT"], + files: ~w(lib .formatter.exs mix.exs README* LICENSE* + CHANGELOG* documentation), + links: %{ + GitHub: "https://github.com/ash-project/ash_sqlite" + } + ] + end + + defp extras() do + "documentation/**/*.{md,livemd,cheatmd}" + |> Path.wildcard() + |> Enum.map(fn path -> + title = + path + |> Path.basename(".md") + |> Path.basename(".livemd") + |> Path.basename(".cheatmd") + |> String.split(~r/[-_]/) + |> Enum.map_join(" ", &capitalize/1) + |> case do + "F A Q" -> + "FAQ" + + other -> + other + end + + {String.to_atom(path), + [ + title: title + ]} + end) + end + + defp capitalize(string) do + string + |> String.split(" ") + |> Enum.map(fn string -> + [hd | tail] = String.graphemes(string) + String.capitalize(hd) <> Enum.join(tail) + end) + end + + defp groups_for_extras() do + [ + Tutorials: [ + ~r'documentation/tutorials' + ], + "How To": ~r'documentation/how_to', + Topics: ~r'documentation/topics', + DSLs: ~r'documentation/dsls' + ] + end + + defp docs do + [ + main: "get-started-with-sqlite", + source_ref: "v#{@version}", + logo: "logos/small-logo.png", + extras: extras(), + spark: [ + mix_tasks: [ + SQLite: [ + Mix.Tasks.AshSqlite.GenerateMigrations, + Mix.Tasks.AshSqlite.Create, + Mix.Tasks.AshSqlite.Drop, + Mix.Tasks.AshSqlite.Migrate, + Mix.Tasks.AshSqlite.Rollback + ] + ], + extensions: [ + %{ + module: AshSqlite.DataLayer, + name: "AshSqlite", + target: "Ash.Resource", + type: "DataLayer" + } + ] + ], + groups_for_extras: groups_for_extras(), + groups_for_modules: [ + AshSqlite: [ + AshSqlite, + AshSqlite.Repo, + AshSqlite.DataLayer + ], + Utilities: [ + AshSqlite.ManualRelationship + ], + Introspection: [ + AshSqlite.DataLayer.Info, + AshSqlite.CheckConstraint, + AshSqlite.CustomExtension, + AshSqlite.CustomIndex, + AshSqlite.Reference, + AshSqlite.Statement + ], + Types: [ + AshSqlite.Type + ], + "Sqlite Migrations": [ + EctoMigrationDefault + ], + Expressions: [ + AshSqlite.Functions.Fragment, + AshSqlite.Functions.Like + ], + Internals: ~r/.*/ + ] + ] + end + + # Run "mix help deps" to learn about dependencies. 
+ defp deps do + [ + {:ecto_sql, "~> 3.9"}, + {:ecto_sqlite3, "~> 0.11"}, + {:ecto, "~> 3.9"}, + {:jason, "~> 1.0"}, + {:postgrex, ">= 0.0.0"}, + {:ash, ash_version("~> 2.14 and >= 2.14.18")}, + {:git_ops, "~> 2.5", only: [:dev, :test]}, + {:ex_doc, "~> 0.22", only: [:dev, :test], runtime: false}, + {:ex_check, "~> 0.14", only: [:dev, :test]}, + {:credo, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:dialyxir, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:sobelow, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:excoveralls, "~> 0.14", only: [:dev, :test]} + ] + end + + defp ash_version(default_version) do + case System.get_env("ASH_VERSION") do + nil -> + default_version + + "local" -> + [path: "../ash"] + + "main" -> + [git: "https://github.com/ash-project/ash.git"] + + version when is_binary(version) -> + "~> #{version}" + + version -> + version + end + end + + defp aliases do + [ + sobelow: + "sobelow --skip -i Config.Secrets --ignore-files lib/migration_generator/migration_generator.ex", + credo: "credo --strict", + docs: [ + "spark.cheat_sheets", + "docs", + "ash.replace_doc_links", + "spark.cheat_sheets_in_search" + ], + "spark.formatter": "spark.formatter --extensions AshSqlite.DataLayer", + "spark.cheat_sheets": "spark.cheat_sheets --extensions AshSqlite.DataLayer", + "spark.cheat_sheets_in_search": + "spark.cheat_sheets_in_search --extensions AshSqlite.DataLayer", + "test.generate_migrations": "ash_sqlite.generate_migrations", + "test.check_migrations": "ash_sqlite.generate_migrations --check", + "test.migrate": "ash_sqlite.migrate", + "test.rollback": "ash_sqlite.rollback", + "test.create": "ash_sqlite.create", + "test.reset": ["test.drop", "test.create", "test.migrate"], + "test.drop": "ash_sqlite.drop" + ] + end +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 0000000..47cc9c4 --- /dev/null +++ b/mix.lock @@ -0,0 +1,46 @@ +%{ + "ash": {:hex, :ash, "2.14.18", "ac2fd2f274f4989d3c71de3df9a603941bc47ac6c8d27006df78f78844114969", [:mix], [{:comparable, "~> 1.0", [hex: :comparable, repo: "hexpm", optional: false]}, {:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:earmark, "~> 1.4", [hex: :earmark, repo: "hexpm", optional: true]}, {:ecto, "~> 3.7", [hex: :ecto, repo: "hexpm", optional: false]}, {:ets, "~> 0.8", [hex: :ets, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: false]}, {:picosat_elixir, "~> 0.2", [hex: :picosat_elixir, repo: "hexpm", optional: false]}, {:plug, ">= 0.0.0", [hex: :plug, repo: "hexpm", optional: true]}, {:spark, ">= 1.1.20 and < 2.0.0-0", [hex: :spark, repo: "hexpm", optional: false]}, {:stream_data, "~> 0.5", [hex: :stream_data, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ec44ad258eb71a2dd5210f67bd882698ea112f6dad79505b156594be06e320e5"}, + "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm", "7af5c7e09fe1d40f76c8e4f9dd2be7cebd83909f31fee7cd0e9eadc567da8353"}, + "cc_precompiler": {:hex, :cc_precompiler, "0.1.8", "933a5f4da3b19ee56539a076076ce4d7716d64efc8db46fd066996a7e46e2bfd", [:mix], [{:elixir_make, "~> 0.7.3", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "176bdf4366956e456bf761b54ad70bc4103d0269ca9558fd7cee93d1b3f116db"}, + "certifi": {:hex, :certifi, "2.9.0", "6f2a475689dd47f19fb74334859d460a2dc4e3252a3324bd2111b8f0429e7e21", [:rebar3], [], "hexpm", 
"266da46bdb06d6c6d35fde799bcb28d36d985d424ad7c08b5bb48f5b5cdd4641"}, + "comparable": {:hex, :comparable, "1.0.0", "bb669e91cedd14ae9937053e5bcbc3c52bb2f22422611f43b6e38367d94a495f", [:mix], [{:typable, "~> 0.1", [hex: :typable, repo: "hexpm", optional: false]}], "hexpm", "277c11eeb1cd726e7cd41c6c199e7e52fa16ee6830b45ad4cdc62e51f62eb60c"}, + "credo": {:hex, :credo, "1.6.4", "ddd474afb6e8c240313f3a7b0d025cc3213f0d171879429bf8535d7021d9ad78", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "c28f910b61e1ff829bffa056ef7293a8db50e87f2c57a9b5c3f57eee124536b7"}, + "db_connection": {:hex, :db_connection, "2.5.0", "bb6d4f30d35ded97b29fe80d8bd6f928a1912ca1ff110831edcd238a1973652c", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c92d5ba26cd69ead1ff7582dbb860adeedfff39774105a4f1c92cbb654b55aa2"}, + "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, + "dialyxir": {:hex, :dialyxir, "1.1.0", "c5aab0d6e71e5522e77beff7ba9e08f8e02bad90dfbeffae60eaf0cb47e29488", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "07ea8e49c45f15264ebe6d5b93799d4dd56a44036cf42d0ad9c960bc266c0b9a"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.35", "437773ca9384edf69830e26e9e7b2e0d22d2596c4a6b17094a3b29f01ea65bb8", [:mix], [], "hexpm", "8652ba3cb85608d0d7aa2d21b45c6fad4ddc9a1f9a1f1b30ca3a246f0acc33f6"}, + "ecto": {:hex, :ecto, "3.10.3", "eb2ae2eecd210b4eb8bece1217b297ad4ff824b4384c0e3fdd28aaf96edd6135", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "44bec74e2364d491d70f7e42cd0d690922659d329f6465e89feb8a34e8cd3433"}, + "ecto_sql": {:hex, :ecto_sql, "3.10.2", "6b98b46534b5c2f8b8b5f03f126e75e2a73c64f3c071149d32987a5378b0fdbd", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.10.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 0.17.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "68c018debca57cb9235e3889affdaec7a10616a4e3a80c99fa1d01fdafaa9007"}, + "ecto_sqlite3": {:hex, :ecto_sqlite3, "0.11.0", "1e094ade9ff1bc7c33c5c6b114f8a5156d0b7c5ddf9038d61cb8fdd61e7c4c55", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.10", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.10", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:exqlite, "~> 0.9", [hex: :exqlite, repo: "hexpm", optional: false]}], "hexpm", "3d5b9a69b9a9547329413b278b4b072b9bbadf4fd599a746b3d6b0e174a418bb"}, + "elixir_make": {:hex, :elixir_make, "0.7.7", "7128c60c2476019ed978210c245badf08b03dbec4f24d05790ef791da11aa17c", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}], "hexpm", 
"5bc19fff950fad52bbe5f211b12db9ec82c6b34a9647da0c2224b8b8464c7e6c"}, + "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, + "ets": {:hex, :ets, "0.9.0", "79c6a6c205436780486f72d84230c6cba2f8a9920456750ddd1e47389107d5fd", [:mix], [], "hexpm", "2861fdfb04bcaeff370f1a5904eec864f0a56dcfebe5921ea9aadf2a481c822b"}, + "ex_check": {:hex, :ex_check, "0.14.0", "d6fbe0bcc51cf38fea276f5bc2af0c9ae0a2bb059f602f8de88709421dae4f0e", [:mix], [], "hexpm", "8a602e98c66e6a4be3a639321f1f545292042f290f91fa942a285888c6868af0"}, + "ex_doc": {:hex, :ex_doc, "0.30.6", "5f8b54854b240a2b55c9734c4b1d0dd7bdd41f71a095d42a70445c03cf05a281", [:mix], [{:earmark_parser, "~> 1.4.31", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "bd48f2ddacf4e482c727f9293d9498e0881597eae6ddc3d9562bd7923375109f"}, + "excoveralls": {:hex, :excoveralls, "0.14.4", "295498f1ae47bdc6dce59af9a585c381e1aefc63298d48172efaaa90c3d251db", [:mix], [{:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "e3ab02f2df4c1c7a519728a6f0a747e71d7d6e846020aae338173619217931c1"}, + "exqlite": {:hex, :exqlite, "0.14.0", "f275c6fe1ce35d383b4ed52461ca98c02354eeb2c651c13f5b4badcfd39b743f", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "e335eca54749d04dcdedcbc87be85e2176030aab3d7b74b6323fda7e3552ee4c"}, + "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"}, + "git_cli": {:hex, :git_cli, "0.3.0", "a5422f9b95c99483385b976f5d43f7e8233283a47cda13533d7c16131cb14df5", [:mix], [], "hexpm", "78cb952f4c86a41f4d3511f1d3ecb28edb268e3a7df278de2faa1bd4672eaf9b"}, + "git_ops": {:hex, :git_ops, "2.5.5", "4f8369f3c9347e06a7f289de98fadfc95194149156335c5292479a53eddbccd2", [:mix], [{:git_cli, "~> 0.2", [hex: :git_cli, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "3b1e3b12968f9da6f79b5e2b2274477206949376e3579d05a5f3d439eda0b746"}, + "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~>2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", 
[:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, + "makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, + "makeup_elixir": {:hex, :makeup_elixir, "0.16.1", "cc9e3ca312f1cfeccc572b37a09980287e243648108384b97ff2b76e505c3555", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "e127a341ad1b209bd80f7bd1620a15693a9908ed780c3b763bccf7d200c767c6"}, + "makeup_erlang": {:hex, :makeup_erlang, "0.1.2", "ad87296a092a46e03b7e9b0be7631ddcf64c790fa68a9ef5323b6cbb36affc72", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "f3f5a1ca93ce6e092d92b6d9c049bcda58a3b617a8d888f8e7231c85630e8108"}, + "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, + "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, + "nimble_options": {:hex, :nimble_options, "1.0.2", "92098a74df0072ff37d0c12ace58574d26880e522c22801437151a159392270e", [:mix], [], "hexpm", "fd12a8db2021036ce12a309f26f564ec367373265b53e25403f0ee697380f1b8"}, + "nimble_parsec": {:hex, :nimble_parsec, "1.3.1", "2c54013ecf170e249e9291ed0a62e5832f70a476c61da16f6aac6dca0189f2af", [:mix], [], "hexpm", "2682e3c0b2eb58d90c6375fc0cc30bc7be06f365bf72608804fb9cffa5e1b167"}, + "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, + "picosat_elixir": {:hex, :picosat_elixir, "0.2.3", "bf326d0f179fbb3b706bb2c15fbc367dacfa2517157d090fdfc32edae004c597", [:make, :mix], [{:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "f76c9db2dec9d2561ffaa9be35f65403d53e984e8cd99c832383b7ab78c16c66"}, + "postgrex": {:hex, :postgrex, "0.17.2", "a3ec9e3239d9b33f1e5841565c4eb200055c52cc0757a22b63ca2d529bbe764c", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "80a918a9e9531d39f7bd70621422f3ebc93c01618c645f2d91306f50041ed90c"}, + "sobelow": {:hex, :sobelow, "0.11.1", "23438964486f8112b41e743bbfd402da3e5b296fdc9eacab29914b79c48916dd", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "9897363a7eff96f4809304a90aad819e2ad5e5d24db547af502885146746a53c"}, + "sourceror": {:hex, :sourceror, "0.14.0", 
"b6b8552d0240400d66b6f107c1bab7ac1726e998efc797f178b7b517e928e314", [:mix], [], "hexpm", "809c71270ad48092d40bbe251a133e49ae229433ce103f762a2373b7a10a8d8b"}, + "spark": {:hex, :spark, "1.1.39", "f143b84a5b796bf2d83ec8fb4793ee9e66e67510c40d785f9a67050bb88e7677", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.5 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:sourceror, "~> 0.1", [hex: :sourceror, repo: "hexpm", optional: false]}], "hexpm", "d71bc26014c7e7abcdcf553f4cf7c5a5ff96f8365b1e20be3768ce503aafb203"}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, + "stream_data": {:hex, :stream_data, "0.6.0", "e87a9a79d7ec23d10ff83eb025141ef4915eeb09d4491f79e52f2562b73e5f47", [:mix], [], "hexpm", "b92b5031b650ca480ced047578f1d57ea6dd563f5b57464ad274718c9c29501c"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, + "typable": {:hex, :typable, "0.3.0", "0431e121d124cd26f312123e313d2689b9a5322b15add65d424c07779eaa3ca1", [:mix], [], "hexpm", "880a0797752da1a4c508ac48f94711e04c86156f498065a83d160eef945858f8"}, + "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, +} diff --git a/test/ash_sqlite_test.exs b/test/ash_sqlite_test.exs new file mode 100644 index 0000000..ad3f0da --- /dev/null +++ b/test/ash_sqlite_test.exs @@ -0,0 +1,14 @@ +defmodule AshSqliteTest do + use AshSqlite.RepoCase, async: false + + test "transaction metadata is given to on_transaction_begin" do + AshSqlite.Test.Post + |> Ash.Changeset.new(%{title: "title"}) + |> AshSqlite.Test.Api.create!() + + assert_receive %{ + type: :create, + metadata: %{action: :create, actor: nil, resource: AshSqlite.Test.Post} + } + end +end diff --git a/test/atomics_test.exs b/test/atomics_test.exs new file mode 100644 index 0000000..5420614 --- /dev/null +++ b/test/atomics_test.exs @@ -0,0 +1,59 @@ +defmodule AshSqlite.AtomicsTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + import Ash.Expr + + test "a basic atomic works" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Api.create!() + + assert %{price: 2} = + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:price, expr(price + 1)) + |> Api.update!() + end + + test "an atomic that violates a constraint will return the proper error" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Api.create!() + + assert_raise Ash.Error.Invalid, ~r/does not exist/, fn -> + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:organization_id, Ash.UUID.generate()) + |> Api.update!() + end + end + + test "an atomic can refer to a calculation" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Api.create!() + + post = + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:score, expr(score_after_winning)) + |> Api.update!() + + assert post.score == 1 + end + + test "an atomic can be attached to an action" do + 
post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Api.create!() + + assert Post.increment_score!(post, 2).score == 2 + + assert Post.increment_score!(post, 2).score == 4 + end +end diff --git a/test/bulk_create_test.exs b/test/bulk_create_test.exs new file mode 100644 index 0000000..394c2cd --- /dev/null +++ b/test/bulk_create_test.exs @@ -0,0 +1,239 @@ +defmodule AshSqlite.BulkCreateTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + describe "bulk creates" do + test "bulk creates insert each input" do + Api.bulk_create!([%{title: "fred"}, %{title: "george"}], Post, :create) + + assert [%{title: "fred"}, %{title: "george"}] = + Post + |> Ash.Query.sort(:title) + |> Api.read!() + end + + test "bulk creates can be streamed" do + assert [{:ok, %{title: "fred"}}, {:ok, %{title: "george"}}] = + Api.bulk_create!([%{title: "fred"}, %{title: "george"}], Post, :create, + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn {:ok, result} -> result.title end) + end + + test "bulk creates can upsert" do + assert [ + {:ok, %{title: "fred", uniq_one: "one", uniq_two: "two", price: 10}}, + {:ok, %{title: "george", uniq_one: "three", uniq_two: "four", price: 20}} + ] = + Api.bulk_create!( + [ + %{title: "fred", uniq_one: "one", uniq_two: "two", price: 10}, + %{title: "george", uniq_one: "three", uniq_two: "four", price: 20} + ], + Post, + :create, + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn {:ok, result} -> result.title end) + + assert [ + {:ok, %{title: "fred", uniq_one: "one", uniq_two: "two", price: 1000}}, + {:ok, %{title: "george", uniq_one: "three", uniq_two: "four", price: 20_000}} + ] = + Api.bulk_create!( + [ + %{title: "something", uniq_one: "one", uniq_two: "two", price: 1000}, + %{title: "else", uniq_one: "three", uniq_two: "four", price: 20_000} + ], + Post, + :create, + upsert?: true, + upsert_identity: :uniq_one_and_two, + upsert_fields: [:price], + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn + {:ok, result} -> + result.title + + _ -> + nil + end) + end + + # confirmed that this doesn't work because it can't. An upsert must map to a potentially successful insert. 
+ # leaving this test here for posterity + # test "bulk creates can upsert with id" do + # org_id = Ash.UUID.generate() + + # _new_org = + # Organization + # |> Ash.Changeset.for_create(:create, %{ + # id: org_id, + # title: "Avengers" + # }) + # |> Api.create!() + + # assert [ + # {:ok, + # %{ + # name: "Bruce Banner", + # code: "BB01", + # must_be_present: "I am Hulk", + # organization_id: org_id + # }}, + # {:ok, + # %{ + # name: "Tony Stark", + # code: "TS01", + # must_be_present: "I am Iron Man", + # organization_id: org_id + # }} + # ] = + # Api.bulk_create!( + # [ + # %{ + # name: "Tony Stark", + # code: "TS01", + # must_be_present: "I am Iron Man", + # organization_id: org_id + # }, + # %{ + # name: "Bruce Banner", + # code: "BB01", + # must_be_present: "I am Hulk", + # organization_id: org_id + # } + # ], + # Manager, + # :create, + # return_stream?: true, + # return_records?: true, + # return_errors?: true + # ) + # |> Enum.sort_by(fn {:ok, result} -> result.name end) + + # assert [ + # {:ok, + # %{ + # name: "Bruce Banner", + # code: "BB01", + # must_be_present: "I am Hulk", + # organization_id: org_id, + # role: "bone breaker" + # }}, + # {:ok, + # %{ + # name: "Tony Stark", + # code: "TS01", + # must_be_present: "I am Iron Man", + # organization_id: org_id, + # role: "master in chief" + # }} + # ] = + # Api.bulk_create!( + # [ + # %{ + # name: "Tony Stark", + # code: "TS01", + # organization_id: org_id, + # role: "master in chief" + # }, + # %{ + # name: "Brice Brenner", + # code: "BB01", + # organization_id: org_id, + # role: "bone breaker" + # } + # ], + # Manager, + # :create, + # upsert?: true, + # upsert_identity: :uniq_code, + # upsert_fields: [:role], + # return_stream?: true, + # return_records?: true, + # return_errors?: true + # ) + # |> Enum.sort_by(fn + # {:ok, result} -> + # result.name + + # _ -> + # nil + # end) + # end + + test "bulk creates can create relationships" do + Api.bulk_create!( + [%{title: "fred", rating: %{score: 5}}, %{title: "george", rating: %{score: 0}}], + Post, + :create + ) + + assert [ + %{title: "fred", ratings: [%{score: 5}]}, + %{title: "george", ratings: [%{score: 0}]} + ] = + Post + |> Ash.Query.sort(:title) + |> Ash.Query.load(:ratings) + |> Api.read!() + end + end + + describe "validation errors" do + test "skips invalid by default" do + assert %{records: [_], errors: [_]} = + Api.bulk_create!([%{title: "fred"}, %{title: "not allowed"}], Post, :create, + return_records?: true, + return_errors?: true + ) + end + + test "returns errors in the stream" do + assert [{:ok, _}, {:error, _}] = + Api.bulk_create!([%{title: "fred"}, %{title: "not allowed"}], Post, :create, + return_records?: true, + return_stream?: true, + return_errors?: true + ) + |> Enum.to_list() + end + end + + describe "database errors" do + test "database errors affect the entire batch" do + # assert %{records: [_], errors: [_]} = + Api.bulk_create( + [%{title: "fred"}, %{title: "george", organization_id: Ash.UUID.generate()}], + Post, + :create, + return_records?: true + ) + + assert [] = + Post + |> Ash.Query.sort(:title) + |> Api.read!() + end + + test "database errors don't affect other batches" do + Api.bulk_create( + [%{title: "george", organization_id: Ash.UUID.generate()}, %{title: "fred"}], + Post, + :create, + return_records?: true, + batch_size: 1 + ) + + assert [%{title: "fred"}] = + Post + |> Ash.Query.sort(:title) + |> Api.read!() + end + end +end diff --git a/test/calculation_test.exs b/test/calculation_test.exs new file mode 100644 index 0000000..d317c8f 
--- /dev/null +++ b/test/calculation_test.exs @@ -0,0 +1,381 @@ +defmodule AshSqlite.CalculationTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Account, Api, Author, Comment, Post, User} + + require Ash.Query + import Ash.Expr + + test "calculations can refer to embedded attributes" do + author = + Author + |> Ash.Changeset.for_create(:create, %{bio: %{title: "Mr.", bio: "Bones"}}) + |> Api.create!() + + assert %{title: "Mr."} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:title) + |> Api.read_one!() + end + + test "calculations can use the || operator" do + author = + Author + |> Ash.Changeset.for_create(:create, %{bio: %{title: "Mr.", bio: "Bones"}}) + |> Api.create!() + + assert %{first_name_or_bob: "bob"} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:first_name_or_bob) + |> Api.read_one!() + end + + test "calculations can use the && operator" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "fred", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{first_name_and_bob: "bob"} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:first_name_and_bob) + |> Api.read_one!() + end + + test "concat calculation can be filtered on" do + author = + Author + |> Ash.Changeset.new(%{first_name: "is", last_name: "match"}) + |> Api.create!() + + Author + |> Ash.Changeset.new(%{first_name: "not", last_name: "match"}) + |> Api.create!() + + author_id = author.id + + assert %{id: ^author_id} = + Author + |> Ash.Query.load(:full_name) + |> Ash.Query.filter(full_name == "is match") + |> Api.read_one!() + end + + test "conditional calculations can be filtered on" do + author = + Author + |> Ash.Changeset.new(%{first_name: "tom"}) + |> Api.create!() + + Author + |> Ash.Changeset.new(%{first_name: "tom", last_name: "holland"}) + |> Api.create!() + + author_id = author.id + + assert %{id: ^author_id} = + Author + |> Ash.Query.load([:conditional_full_name, :full_name]) + |> Ash.Query.filter(conditional_full_name == "(none)") + |> Api.read_one!() + end + + test "parameterized calculations can be filtered on" do + Author + |> Ash.Changeset.new(%{first_name: "tom", last_name: "holland"}) + |> Api.create!() + + assert %{param_full_name: "tom holland"} = + Author + |> Ash.Query.load(:param_full_name) + |> Api.read_one!() + + assert %{param_full_name: "tom~holland"} = + Author + |> Ash.Query.load(param_full_name: [separator: "~"]) + |> Api.read_one!() + + assert %{} = + Author + |> Ash.Query.filter(param_full_name(separator: "~") == "tom~holland") + |> Api.read_one!() + end + + test "parameterized related calculations can be filtered on" do + author = + Author + |> Ash.Changeset.new(%{first_name: "tom", last_name: "holland"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Api.create!() + + assert %{title: "match"} = + Comment + |> Ash.Query.filter(author.param_full_name(separator: "~") == "tom~holland") + |> Api.read_one!() + + assert %{title: "match"} = + Comment + |> Ash.Query.filter( + author.param_full_name(separator: "~") == "tom~holland" and + author.param_full_name(separator: " ") == "tom holland" + ) + |> Api.read_one!() + end + + test "parameterized calculations can be sorted on" do + Author + |> Ash.Changeset.new(%{first_name: "tom", last_name: "holland"}) + |> Api.create!() + + Author + |> Ash.Changeset.new(%{first_name: "abc", last_name: "def"}) + |> 
Api.create!() + + assert [%{first_name: "abc"}, %{first_name: "tom"}] = + Author + |> Ash.Query.sort(param_full_name: [separator: "~"]) + |> Api.read!() + end + + test "calculations using if and literal boolean results can run" do + Post + |> Ash.Query.load(:was_created_in_the_last_month) + |> Ash.Query.filter(was_created_in_the_last_month == true) + |> Api.read!() + end + + test "nested conditional calculations can be loaded" do + Author + |> Ash.Changeset.new(%{last_name: "holland"}) + |> Api.create!() + + Author + |> Ash.Changeset.new(%{first_name: "tom"}) + |> Api.create!() + + assert [%{nested_conditional: "No First Name"}, %{nested_conditional: "No Last Name"}] = + Author + |> Ash.Query.load(:nested_conditional) + |> Ash.Query.sort(:nested_conditional) + |> Api.read!() + end + + test "loading a calculation loads its dependent loads" do + user = + User + |> Ash.Changeset.for_create(:create, %{is_active: true}) + |> Api.create!() + + account = + Account + |> Ash.Changeset.for_create(:create, %{is_active: true}) + |> Ash.Changeset.manage_relationship(:user, user, type: :append_and_remove) + |> Api.create!() + |> Api.load!([:active]) + + assert account.active + end + + describe "string join expression" do + test "no nil values" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill", + last_name: "Jones", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{ + full_name_with_nils: "Bill Jones", + full_name_with_nils_no_joiner: "BillJones" + } = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:full_name_with_nils) + |> Ash.Query.load(:full_name_with_nils_no_joiner) + |> Api.read_one!() + end + + test "with nil value" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{ + full_name_with_nils: "Bill", + full_name_with_nils_no_joiner: "Bill" + } = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:full_name_with_nils) + |> Ash.Query.load(:full_name_with_nils_no_joiner) + |> Api.read_one!() + end + end + + test "arguments with cast_in_query?: false are not cast" do + Post + |> Ash.Changeset.new(%{title: "match", score: 42}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "not", score: 42}) + |> Api.create!() + + assert [post] = + Post + |> Ash.Query.filter(similarity(search: expr(query(search: "match")))) + |> Api.read!() + + assert post.title == "match" + end + + describe "string split expression" do + test "with the default delimiter" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill", + last_name: "Jones", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{ + split_full_name: ["Bill", "Jones"] + } = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:split_full_name) + |> Api.read_one!() + end + + test "trimming whitespace" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill ", + last_name: "Jones ", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{ + split_full_name_trim: ["Bill", "Jones"], + split_full_name: ["Bill", "Jones"] + } = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load([:split_full_name_trim, :split_full_name]) + |> Api.read_one!() + end + end + + describe "-/1" do + test "makes numbers negative" do + Post + |> Ash.Changeset.new(%{title: "match", score: 42}) + |> Api.create!() + + assert [%{negative_score: -42}] = + 
Post + |> Ash.Query.load(:negative_score) + |> Api.read!() + end + end + + describe "maps" do + test "maps can be constructed" do + Post + |> Ash.Changeset.new(%{title: "match", score: 42}) + |> Api.create!() + + assert [%{score_map: %{negative_score: %{foo: -42}}}] = + Post + |> Ash.Query.load(:score_map) + |> Api.read!() + end + end + + describe "at/2" do + test "selects items by index" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill ", + last_name: "Jones ", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %{ + first_name_from_split: "Bill" + } = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load([:first_name_from_split]) + |> Api.read_one!() + end + end + + test "dependent calc" do + post = + Post + |> Ash.Changeset.new(%{title: "match", price: 10_024}) + |> Api.create!() + + Post.get_by_id(post.id, + query: Post |> Ash.Query.select([:id]) |> Ash.Query.load([:price_string_with_currency_sign]) + ) + end + + test "nested get_path works" do + assert "thing" = + Post + |> Ash.Changeset.new(%{title: "match", price: 10_024, stuff: %{foo: %{bar: "thing"}}}) + |> Ash.Changeset.deselect(:stuff) + |> Api.create!() + |> Api.load!(:foo_bar_from_stuff) + |> Map.get(:foo_bar_from_stuff) + end + + test "runtime expression calcs" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill", + last_name: "Jones", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Api.create!() + + assert %AshSqlite.Test.Money{} = + Post + |> Ash.Changeset.new(%{title: "match", price: 10_024}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Api.create!() + |> Api.load!(:calc_returning_json) + |> Map.get(:calc_returning_json) + + assert [%AshSqlite.Test.Money{}] = + author + |> Api.load!(posts: :calc_returning_json) + |> Map.get(:posts) + |> Enum.map(&Map.get(&1, :calc_returning_json)) + end +end diff --git a/test/constraint_test.exs b/test/constraint_test.exs new file mode 100644 index 0000000..d79a88f --- /dev/null +++ b/test/constraint_test.exs @@ -0,0 +1,15 @@ +defmodule AshSqlite.ConstraintTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "constraint messages are properly raised" do + assert_raise Ash.Error.Invalid, ~r/yo, bad price/, fn -> + Post + |> Ash.Changeset.new(%{title: "title", price: -1}) + |> Api.create!() + end + end +end diff --git a/test/custom_index_test.exs b/test/custom_index_test.exs new file mode 100644 index 0000000..9a28653 --- /dev/null +++ b/test/custom_index_test.exs @@ -0,0 +1,24 @@ +defmodule AshSqlite.Test.CustomIndexTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "unique constraint errors are properly caught" do + Post + |> Ash.Changeset.new(%{title: "first", uniq_custom_one: "what", uniq_custom_two: "what2"}) + |> Api.create!() + + assert_raise Ash.Error.Invalid, + ~r/Invalid value provided for uniq_custom_one: dude what the heck/, + fn -> + Post + |> Ash.Changeset.new(%{ + title: "first", + uniq_custom_one: "what", + uniq_custom_two: "what2" + }) + |> Api.create!() + end + end +end diff --git a/test/distinct_test.exs b/test/distinct_test.exs new file mode 100644 index 0000000..408ce02 --- /dev/null +++ b/test/distinct_test.exs @@ -0,0 +1,171 @@ +defmodule AshSqlite.DistinctTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + setup do 
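+    # Seeds two posts titled "title" (score 1) and two titled "foo" (score 2),
+    # so the distinct queries below collapse each duplicated value to one row.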
+ Post + |> Ash.Changeset.new(%{title: "title", score: 1}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "title", score: 1}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "foo", score: 2}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "foo", score: 2}) + |> Api.create!() + + :ok + end + + test "records returned are distinct on the provided field" do + results = + Post + |> Ash.Query.distinct(:title) + |> Ash.Query.sort(:title) + |> Api.read!() + + assert [%{title: "foo"}, %{title: "title"}] = results + end + + test "distinct pairs well with sort" do + results = + Post + |> Ash.Query.distinct(:title) + |> Ash.Query.sort(title: :desc) + |> Api.read!() + + assert [%{title: "title"}, %{title: "foo"}] = results + end + + test "distinct pairs well with sort that does not match the distinct" do + results = + Post + |> Ash.Query.distinct(:title) + |> Ash.Query.sort(id: :desc) + |> Ash.Query.limit(3) + |> Api.read!() + + assert [_, _] = results + end + + test "distinct pairs well with sort that does not match the distinct using a limit" do + results = + Post + |> Ash.Query.distinct(:title) + |> Ash.Query.sort(id: :desc) + |> Ash.Query.limit(3) + |> Api.read!() + + assert [_, _] = results + end + + test "distinct pairs well with sort that does not match the distinct using a limit #2" do + results = + Post + |> Ash.Query.distinct(:title) + |> Ash.Query.sort(id: :desc) + |> Ash.Query.limit(1) + |> Api.read!() + + assert [_] = results + end + + test "distinct can use calculations sort that does not match the distinct using a limit #2" do + results = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.sort(:negative_score) + |> Ash.Query.load(:negative_score) + |> Api.read!() + + assert [ + %{title: "foo", negative_score: -2}, + %{title: "title", negative_score: -1} + ] = results + + results = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.sort(negative_score: :desc) + |> Ash.Query.load(:negative_score) + |> Api.read!() + + assert [ + %{title: "title", negative_score: -1}, + %{title: "foo", negative_score: -2} + ] = results + + results = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.sort(:title) + |> Ash.Query.load(:negative_score) + |> Api.read!() + + assert [ + %{title: "foo", negative_score: -2}, + %{title: "title", negative_score: -1} + ] = results + end + + test "distinct, join filters and sort can be combined" do + Post + |> Ash.Changeset.new(%{title: "a", score: 2}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "a", score: 1}) + |> Api.create!() + + assert [] = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.filter(author.first_name == "a") + |> Ash.Query.sort(:negative_score) + |> Api.read!() + end + + test "distinct sort is applied" do + Post + |> Ash.Changeset.new(%{title: "a", score: 2}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "a", score: 1}) + |> Api.create!() + + results = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.distinct_sort(:title) + |> Ash.Query.sort(:negative_score) + |> Ash.Query.load(:negative_score) + |> Api.read!() + + assert [ + %{title: "a", negative_score: -2}, + %{title: "a", negative_score: -1} + ] = results + + results = + Post + |> Ash.Query.distinct(:negative_score) + |> Ash.Query.distinct_sort(title: :desc) + |> Ash.Query.sort(:negative_score) + |> Ash.Query.load(:negative_score) + |> Api.read!() + + assert [ + %{title: "foo", negative_score: -2}, + %{title: "title", negative_score: -1} + ] = results + end +end diff 
--git a/test/ecto_compatibility_test.exs b/test/ecto_compatibility_test.exs new file mode 100644 index 0000000..dbe9bb6 --- /dev/null +++ b/test/ecto_compatibility_test.exs @@ -0,0 +1,12 @@ +defmodule AshSqlite.EctoCompatibilityTest do + use AshSqlite.RepoCase, async: false + require Ash.Query + + test "call Ecto.Repo.insert! via Ash Repo" do + org = + %AshSqlite.Test.Organization{name: "The Org"} + |> AshSqlite.TestRepo.insert!() + + assert org.name == "The Org" + end +end diff --git a/test/embeddable_resource_test.exs b/test/embeddable_resource_test.exs new file mode 100644 index 0000000..4385e00 --- /dev/null +++ b/test/embeddable_resource_test.exs @@ -0,0 +1,34 @@ +defmodule AshSqlite.EmbeddableResourceTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Author, Bio, Post} + + require Ash.Query + + setup do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + %{post: post} + end + + test "calculations can load json", %{post: post} do + assert %{calc_returning_json: %AshSqlite.Test.Money{amount: 100, currency: :usd}} = + Api.load!(post, :calc_returning_json) + end + + test "embeds with list attributes set to nil are loaded as nil" do + post = + Author + |> Ash.Changeset.new(%{bio: %Bio{list_of_strings: nil}}) + |> Api.create!() + + assert is_nil(post.bio.list_of_strings) + + post = Api.reload!(post) + + assert is_nil(post.bio.list_of_strings) + end +end diff --git a/test/enum_test.exs b/test/enum_test.exs new file mode 100644 index 0000000..b25cee5 --- /dev/null +++ b/test/enum_test.exs @@ -0,0 +1,13 @@ +defmodule AshSqlite.EnumTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "valid values are properly inserted" do + Post + |> Ash.Changeset.new(%{title: "title", status: :open}) + |> Api.create!() + end +end diff --git a/test/filter_test.exs b/test/filter_test.exs new file mode 100644 index 0000000..d5c9c36 --- /dev/null +++ b/test/filter_test.exs @@ -0,0 +1,850 @@ +defmodule AshSqlite.FilterTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Author, Comment, Post} + + require Ash.Query + + describe "with no filter applied" do + test "with no data" do + assert [] = Api.read!(Post) + end + + test "with data" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + assert [%Post{title: "title"}] = Api.read!(Post) + end + end + + describe "invalid uuid" do + test "with an invalid uuid, an invalid error is raised" do + assert_raise Ash.Error.Invalid, fn -> + Post + |> Ash.Query.filter(id == "foo") + |> Api.read!() + end + end + end + + describe "with a simple filter applied" do + test "with no data" do + results = + Post + |> Ash.Query.filter(title == "title") + |> Api.read!() + + assert [] = results + end + + test "with data that matches" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "title") + |> Api.read!() + + assert [%Post{title: "title"}] = results + end + + test "with some data that matches and some data that doesnt" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "no_title") + |> Api.read!() + + assert [] = results + end + + test "with related data that doesn't match" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "not match"}) + |> 
Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Api.read!() + + assert [] = results + end + + test "with related data two steps away that matches" do + author = + Author + |> Ash.Changeset.new(%{first_name: "match"}) + |> Api.create!() + + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "title2"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "not match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Api.create!() + + results = + Comment + |> Ash.Query.filter(author.posts.linked_posts.title == "title") + |> Api.read!() + + assert [_] = results + end + + test "with related data that does match" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Api.read!() + + assert [%Post{title: "title"}] = results + end + + test "with related data that does and doesn't match" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "not match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Api.read!() + + assert [%Post{title: "title"}] = results + end + end + + describe "in" do + test "it properly filters" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "title1"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "title2"}) + |> Api.create!() + + assert [%Post{title: "title1"}, %Post{title: "title2"}] = + Post + |> Ash.Query.filter(title in ["title1", "title2"]) + |> Ash.Query.sort(title: :asc) + |> Api.read!() + end + end + + describe "with a boolean filter applied" do + test "with no data" do + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Api.read!() + + assert [] = results + end + + test "with data that doesn't match" do + Post + |> Ash.Changeset.new(%{title: "no title", score: 2}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Api.read!() + + assert [] = results + end + + test "with data that matches both conditions" do + Post + |> Ash.Changeset.new(%{title: "title", score: 0}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{score: 1, title: "nothing"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Api.read!() + |> Enum.sort_by(& &1.score) + + assert [%Post{title: "title", score: 0}, %Post{title: "nothing", score: 1}] = results + end + + test "with data that matches one condition and data that matches nothing" do + Post + |> 
Ash.Changeset.new(%{title: "title", score: 0}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{score: 2, title: "nothing"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Api.read!() + |> Enum.sort_by(& &1.score) + + assert [%Post{title: "title", score: 0}] = results + end + + test "with related data in an or statement that matches, while basic filter doesn't match" do + post = + Post + |> Ash.Changeset.new(%{title: "doesn't match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "match" or comments.title == "match") + |> Api.read!() + + assert [%Post{title: "doesn't match"}] = results + end + + test "with related data in an or statement that doesn't match, while basic filter does match" do + post = + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "doesn't match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == "match" or comments.title == "match") + |> Api.read!() + + assert [%Post{title: "match"}] = results + end + + test "with related data and an inner join condition" do + post = + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(title == comments.title) + |> Api.read!() + + assert [%Post{title: "match"}] = results + + results = + Post + |> Ash.Query.filter(title != comments.title) + |> Api.read!() + + assert [] = results + end + end + + describe "accessing embeds" do + setup do + Author + |> Ash.Changeset.for_create(:create, + bio: %{title: "Dr.", bio: "Strange", years_of_experience: 10} + ) + |> Api.create!() + + Author + |> Ash.Changeset.for_create(:create, + bio: %{title: "Highlander", bio: "There can be only one."} + ) + |> Api.create!() + + :ok + end + + test "works using simple equality" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(bio[:title] == "Dr.") + |> Api.read!() + end + + test "works using simple equality for integers" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(bio[:years_of_experience] == 10) + |> Api.read!() + end + + test "works using an expression" do + assert [%{bio: %{title: "Highlander"}}] = + Author + |> Ash.Query.filter(contains(type(bio[:bio], :string), "only one.")) + |> Api.read!() + end + + test "calculations that use embeds can be filtered on" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(title == "Dr.") + |> Api.read!() + end + end + + describe "basic expressions" do + test "basic expressions work" do + Post + |> Ash.Changeset.new(%{title: "match", score: 4}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "non_match", score: 2}) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(score + 1 == 5) + |> Api.read!() + end + end + + describe "case insensitive fields" do + test "it matches case insensitively" do + Post + |> Ash.Changeset.new(%{title: "match", category: "FoObAr"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{category: "bazbuz"}) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(category == 
"fOoBaR") + |> Api.read!() + end + end + + describe "contains/2" do + test "it works when it matches" do + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "bazbuz"}) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(contains(title, "atc")) + |> Api.read!() + end + + test "it works when a case insensitive string is provided as a value" do + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "bazbuz"}) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(contains(title, ^%Ash.CiString{string: "ATC"})) + |> Api.read!() + end + + test "it works on a case insensitive column" do + Post + |> Ash.Changeset.new(%{category: "match"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{category: "bazbuz"}) + |> Api.create!() + + assert [%{category: %Ash.CiString{string: "match"}}] = + Post + |> Ash.Query.filter(contains(category, ^"ATC")) + |> Api.read!() + end + + test "it works on a case insensitive calculation" do + Post + |> Ash.Changeset.new(%{category: "match"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{category: "bazbuz"}) + |> Api.create!() + + assert [%{category: %Ash.CiString{string: "match"}}] = + Post + |> Ash.Query.filter(contains(category_label, ^"ATC")) + |> Api.read!() + end + + test "it works on related values" do + post = + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "abba"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + post2 = + Post + |> Ash.Changeset.new(%{title: "no_match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "acca"}) + |> Ash.Changeset.manage_relationship(:post, post2, type: :append_and_remove) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(contains(comments.title, ^"bb")) + |> Api.read!() + end + end + + describe "length/1" do + test "it works with a list attribute" do + author1 = + Author + |> Ash.Changeset.new(%{badges: [:author_of_the_year]}) + |> Api.create!() + + _author2 = + Author + |> Ash.Changeset.new(%{badges: []}) + |> Api.create!() + + author1_id = author1.id + + assert [%{id: ^author1_id}] = + Author + |> Ash.Query.filter(length(badges) > 0) + |> Api.read!() + end + + test "it works with nil" do + author1 = + Author + |> Ash.Changeset.new(%{badges: [:author_of_the_year]}) + |> Api.create!() + + _author2 = + Author + |> Ash.Changeset.new() + |> Api.create!() + + author1_id = author1.id + + assert [%{id: ^author1_id}] = + Author + |> Ash.Query.filter(length(badges || []) > 0) + |> Api.read!() + end + + test "it works with a list" do + author1 = + Author + |> Ash.Changeset.new() + |> Api.create!() + + author1_id = author1.id + + explicit_list = [:foo] + + assert [%{id: ^author1_id}] = + Author + |> Ash.Query.filter(length(^explicit_list) > 0) + |> Api.read!() + + assert [] = + Author + |> Ash.Query.filter(length(^explicit_list) > 1) + |> Api.read!() + end + + test "it raises with bad values" do + Author + |> Ash.Changeset.new() + |> Api.create!() + + assert_raise(Ash.Error.Unknown, fn -> + Author + |> Ash.Query.filter(length(first_name) > 0) + |> Api.read!() + end) + end + end + + describe "exists/2" do + test "it works with single relationships" do + post = + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "abba"}) + |> 
Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + post2 = + Post + |> Ash.Changeset.new(%{title: "no_match"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "acca"}) + |> Ash.Changeset.manage_relationship(:post, post2, type: :append_and_remove) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(exists(comments, title == ^"abba")) + |> Api.read!() + end + + test "it works with many to many relationships" do + post = + Post + |> Ash.Changeset.new(%{title: "a"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Api.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, title == ^"a")) + |> Api.read!() + end + + test "it works with join association relationships" do + post = + Post + |> Ash.Changeset.new(%{title: "a"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Api.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, title == ^"a")) + |> Api.read!() + end + + test "it works with nested relationships as the path" do + post = + Post + |> Ash.Changeset.new(%{title: "a"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Api.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts.comments, title == ^"comment")) + |> Api.read!() + end + + test "it works with an `at_path`" do + post = + Post + |> Ash.Changeset.new(%{title: "a"}) + |> Api.create!() + + other_post = + Post + |> Ash.Changeset.new(%{title: "other_a"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, other_post, type: :append_and_remove) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [other_post], type: :append_and_remove) + |> Api.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter( + linked_posts.title == "a" and + linked_posts.exists(comments, title == ^"comment") + ) + |> Api.read!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter( + linked_posts.title == "a" and + linked_posts.exists(comments, title == ^"comment") + ) + |> Api.read!() + end + + test "it works with nested relationships inside of exists" do + post = + Post + |> Ash.Changeset.new(%{title: "a"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Api.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, comments.title == ^"comment")) + |> 
Api.read!() + end + end + + describe "filtering on enum types" do + test "it allows simple filtering" do + Post + |> Ash.Changeset.new(status_enum: "open") + |> Api.create!() + + assert %{status_enum: :open} = + Post + |> Ash.Query.filter(status_enum == ^"open") + |> Api.read_one!() + end + + test "it allows simple filtering without casting" do + Post + |> Ash.Changeset.new(status_enum_no_cast: "open") + |> Api.create!() + + assert %{status_enum_no_cast: :open} = + Post + |> Ash.Query.filter(status_enum_no_cast == ^"open") + |> Api.read_one!() + end + end + + describe "atom filters" do + test "it works on matches" do + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + result = + Post + |> Ash.Query.filter(type == :sponsored) + |> Api.read!() + + assert [%Post{title: "match"}] = result + end + end + + describe "like and ilike" do + test "like builds and matches" do + Post + |> Ash.Changeset.new(%{title: "MaTcH"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(like(title, "%aTc%")) + |> Api.read!() + + assert [%Post{title: "MaTcH"}] = results + + results = + Post + |> Ash.Query.filter(like(title, "%atc%")) + |> Api.read!() + + assert [] = results + end + + test "ilike builds and matches" do + Post + |> Ash.Changeset.new(%{title: "MaTcH"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(ilike(title, "%aTc%")) + |> Api.read!() + + assert [%Post{title: "MaTcH"}] = results + + results = + Post + |> Ash.Query.filter(ilike(title, "%atc%")) + |> Api.read!() + + assert [%Post{title: "MaTcH"}] = results + end + end + + describe "trigram_similarity" do + test "it works on matches" do + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(trigram_similarity(title, "match") > 0.9) + |> Api.read!() + + assert [%Post{title: "match"}] = results + end + + test "it works on non-matches" do + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Api.create!() + + results = + Post + |> Ash.Query.filter(trigram_similarity(title, "match") < 0.1) + |> Api.read!() + + assert [] = results + end + end + + describe "fragments" do + test "double replacement works" do + post = + Post + |> Ash.Changeset.new(%{title: "match", score: 4}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "non_match", score: 2}) + |> Api.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(fragment("? = ?", title, ^post.title)) + |> Api.read!() + + assert [] = + Post + |> Ash.Query.filter(fragment("? 
= ?", title, "nope")) + |> Api.read!() + end + end + + describe "filtering on relationships that themselves have filters" do + test "it doesn't raise an error" do + Comment + |> Ash.Query.filter(not is_nil(popular_ratings.id)) + |> Api.read!() + end + + test "it doesn't raise an error when nested" do + Post + |> Ash.Query.filter(not is_nil(comments.popular_ratings.id)) + |> Api.read!() + end + end +end diff --git a/test/load_test.exs b/test/load_test.exs new file mode 100644 index 0000000..0387f8c --- /dev/null +++ b/test/load_test.exs @@ -0,0 +1,245 @@ +defmodule AshSqlite.Test.LoadTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Comment, Post} + + require Ash.Query + + test "has_many relationships can be loaded" do + assert %Post{comments: %Ash.NotLoaded{type: :relationship}} = + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + results = + Post + |> Ash.Query.load(:comments) + |> Api.read!() + + assert [%Post{comments: [%{title: "match"}]}] = results + end + + test "belongs_to relationships can be loaded" do + assert %Comment{post: %Ash.NotLoaded{type: :relationship}} = + comment = + Comment + |> Ash.Changeset.new(%{}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "match"}) + |> Ash.Changeset.manage_relationship(:comments, [comment], type: :append_and_remove) + |> Api.create!() + + results = + Comment + |> Ash.Query.load(:post) + |> Api.read!() + + assert [%Comment{post: %{title: "match"}}] = results + end + + test "many_to_many loads work" do + source_post = + Post + |> Ash.Changeset.new(%{title: "source"}) + |> Api.create!() + + destination_post = + Post + |> Ash.Changeset.new(%{title: "destination"}) + |> Api.create!() + + destination_post2 = + Post + |> Ash.Changeset.new(%{title: "destination"}) + |> Api.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post, destination_post2], + type: :append_and_remove + ) + |> Api.update!() + + results = + source_post + |> Api.load!(:linked_posts) + + assert %{linked_posts: [%{title: "destination"}, %{title: "destination"}]} = results + end + + test "many_to_many loads work when nested" do + source_post = + Post + |> Ash.Changeset.new(%{title: "source"}) + |> Api.create!() + + destination_post = + Post + |> Ash.Changeset.new(%{title: "destination"}) + |> Api.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post], + type: :append_and_remove + ) + |> Api.update!() + + destination_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [source_post], type: :append_and_remove) + |> Api.update!() + + results = + source_post + |> Api.load!(linked_posts: :linked_posts) + + assert %{linked_posts: [%{title: "destination", linked_posts: [%{title: "source"}]}]} = + results + end + + describe "lateral join loads" do + test "parent references are resolved" do + post1 = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + post2 = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + post2_id = post2.id + + post3 = + Post + |> Ash.Changeset.new(%{title: "no match"}) + |> Api.create!() + + assert [%{posts_with_matching_title: [%{id: ^post2_id}]}] = + Post + |> Ash.Query.load(:posts_with_matching_title) + |> Ash.Query.filter(id == ^post1.id) + |> 
Api.read!() + + assert [%{posts_with_matching_title: []}] = + Post + |> Ash.Query.load(:posts_with_matching_title) + |> Ash.Query.filter(id == ^post3.id) + |> Api.read!() + end + + test "parent references work when joining for filters" do + %{id: post1_id} = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + post2 = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "no match"}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "no match"}) + |> Api.create!() + + assert [%{id: ^post1_id}] = + Post + |> Ash.Query.filter(posts_with_matching_title.id == ^post2.id) + |> Api.read!() + end + + test "lateral join loads (loads with limits or offsets) are supported" do + assert %Post{comments: %Ash.NotLoaded{type: :relationship}} = + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "abc"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "def"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + comments_query = + Comment + |> Ash.Query.limit(1) + |> Ash.Query.sort(:title) + + results = + Post + |> Ash.Query.load(comments: comments_query) + |> Api.read!() + + assert [%Post{comments: [%{title: "abc"}]}] = results + + comments_query = + Comment + |> Ash.Query.limit(1) + |> Ash.Query.sort(title: :desc) + + results = + Post + |> Ash.Query.load(comments: comments_query) + |> Api.read!() + + assert [%Post{comments: [%{title: "def"}]}] = results + + comments_query = + Comment + |> Ash.Query.limit(2) + |> Ash.Query.sort(title: :desc) + + results = + Post + |> Ash.Query.load(comments: comments_query) + |> Api.read!() + + assert [%Post{comments: [%{title: "def"}, %{title: "abc"}]}] = results + end + + test "loading many to many relationships on records works without loading its join relationship when using code interface" do + source_post = + Post + |> Ash.Changeset.new(%{title: "source"}) + |> Api.create!() + + destination_post = + Post + |> Ash.Changeset.new(%{title: "abc"}) + |> Api.create!() + + destination_post2 = + Post + |> Ash.Changeset.new(%{title: "def"}) + |> Api.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post, destination_post2], + type: :append_and_remove + ) + |> Api.update!() + + assert %{linked_posts: [_, _]} = Post.get_by_id!(source_post.id, load: [:linked_posts]) + end + end +end diff --git a/test/lock_test.exs b/test/lock_test.exs new file mode 100644 index 0000000..0513b33 --- /dev/null +++ b/test/lock_test.exs @@ -0,0 +1,57 @@ +defmodule AshSqlite.Test.LockTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + require Ash.Query + + setup do + Application.put_env(:ash, :disable_async?, true) + + on_exit(fn -> + Application.put_env(:ash, :disable_async?, false) + AshSqlite.TestNoSandboxRepo.delete_all(Post) + end) + end + + test "lock conflicts raise appropriate errors" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "locked"}) + |> Ash.Changeset.set_context(%{data_layer: %{repo: AshSqlite.TestNoSandboxRepo}}) + |> Api.create!() + + task1 = + Task.async(fn -> + AshSqlite.TestNoSandboxRepo.transaction(fn -> + Post + |> Ash.Query.lock("FOR UPDATE NOWAIT") + |> Ash.Query.set_context(%{data_layer: %{repo: AshSqlite.TestNoSandboxRepo}}) + |> Ash.Query.filter(id == ^post.id) + |> 
Api.read!() + + :timer.sleep(1000) + :ok + end) + end) + + task2 = + Task.async(fn -> + try do + AshSqlite.TestNoSandboxRepo.transaction(fn -> + :timer.sleep(100) + + Post + |> Ash.Query.lock("FOR UPDATE NOWAIT") + |> Ash.Query.set_context(%{data_layer: %{repo: AshSqlite.TestNoSandboxRepo}}) + |> Ash.Query.filter(id == ^post.id) + |> Api.read!() + end) + rescue + e -> + {:error, e} + end + end) + + assert [{:ok, :ok}, {:error, %Ash.Error.Invalid{errors: [%Ash.Error.Invalid.Unavailable{}]}}] = + Task.await_many([task1, task2], :infinity) + end +end diff --git a/test/manual_relationships_test.exs b/test/manual_relationships_test.exs new file mode 100644 index 0000000..12d831b --- /dev/null +++ b/test/manual_relationships_test.exs @@ -0,0 +1,116 @@ +defmodule AshSqlite.Test.ManualRelationshipsTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Comment, Post} + + require Ash.Query + + describe "manual first" do + test "relationships can be filtered on with no data" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + assert [] = + Post |> Ash.Query.filter(comments_containing_title.title == "title") |> Api.read!() + end + + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + assert [_] = + Post + |> Ash.Query.filter(comments_containing_title.title == "title2") + |> Api.read!() + end + end + + describe "manual last" do + test "relationships can be filtered on with no data" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + assert [] = + Comment + |> Ash.Query.filter(post.comments_containing_title.title == "title2") + |> Api.read!() + end + + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + assert [_, _] = + Comment + |> Ash.Query.filter(post.comments_containing_title.title == "title2") + |> Api.read!() + end + end + + describe "manual middle" do + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + assert [_, _] = + Comment + |> Ash.Query.filter(post.comments_containing_title.post.title == "title") + |> 
Api.read!() + end + end +end diff --git a/test/migration_generator_test.exs b/test/migration_generator_test.exs new file mode 100644 index 0000000..10beb7a --- /dev/null +++ b/test/migration_generator_test.exs @@ -0,0 +1,1283 @@ +defmodule AshSqlite.MigrationGeneratorTest do + use AshSqlite.RepoCase, async: false + @moduletag :migration + + import ExUnit.CaptureLog + + defmacrop defposts(mod \\ Post, do: body) do + quote do + Code.compiler_options(ignore_module_conflict: true) + + defmodule unquote(mod) do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "posts" + repo(AshSqlite.TestRepo) + + custom_indexes do + # need one without any opts + index(["id"]) + index(["id"], unique: true, name: "test_unique_index") + end + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + unquote(body) + end + + Code.compiler_options(ignore_module_conflict: false) + end + end + + defmacrop defapi(resources) do + quote do + Code.compiler_options(ignore_module_conflict: true) + + defmodule Registry do + use Ash.Registry + + entries do + for resource <- unquote(resources) do + entry(resource) + end + end + end + + defmodule Api do + use Ash.Api + + resources do + registry(Registry) + end + end + + Code.compiler_options(ignore_module_conflict: false) + end + end + + describe "creating initial snapshots" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + sqlite do + migration_types(second_title: {:varchar, 16}) + migration_defaults(title_with_default: "\"fred\"") + end + + identities do + identity(:title, [:title]) + identity(:thing, [:title, :second_title]) + identity(:thing_with_source, [:title, :title_with_source]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:second_title, :string) + attribute(:title_with_source, :string, source: :t_w_s) + attribute(:title_with_default, :string) + attribute(:email, Test.Support.Types.Email) + end + end + + defapi([Post]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "the migration sets up resources correctly" do + # the snapshot exists and contains valid json + assert File.read!(Path.wildcard("test_snapshots_path/test_repo/posts/*.json")) + |> Jason.decode!(keys: :atoms!) 
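+ # The snapshot file is only sanity-checked as valid JSON here; the assertions below inspect the generated migration itself.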
+ + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + file_contents = File.read!(file) + + # the migration creates the table + assert file_contents =~ "create table(:posts, primary_key: false) do" + + # the migration sets up the custom_indexes + assert file_contents =~ + ~S{create index(:posts, ["id"], name: "test_unique_index", unique: true)} + + assert file_contents =~ ~S{create index(:posts, ["id"]} + + # the migration adds the id, with its default + assert file_contents =~ + ~S[add :id, :uuid, null: false, default: fragment("uuid_generate_v4()"), primary_key: true] + + # the migration adds the id, with its default + assert file_contents =~ + ~S[add :title_with_default, :text, default: "fred"] + + # the migration adds other attributes + assert file_contents =~ ~S[add :title, :text] + + # the migration unwraps newtypes + assert file_contents =~ ~S[add :email, :citext] + + # the migration adds custom attributes + assert file_contents =~ ~S[add :second_title, :varchar, size: 16] + + # the migration creates unique_indexes based on the identities of the resource + assert file_contents =~ ~S{create unique_index(:posts, [:title], name: "posts_title_index")} + + # the migration creates unique_indexes based on the identities of the resource + assert file_contents =~ + ~S{create unique_index(:posts, [:title, :second_title], name: "posts_thing_index")} + + # the migration creates unique_indexes using the `source` on the attributes of the identity on the resource + assert file_contents =~ + ~S{create unique_index(:posts, [:title, :t_w_s], name: "posts_thing_with_source_index")} + end + end + + describe "creating initial snapshots for resources with a schema" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + sqlite do + migration_types(second_title: {:varchar, 16}) + schema("example") + end + + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:second_title, :string) + end + end + + defapi([Post]) + + Mix.shell(Mix.Shell.Process) + + {:ok, _} = + Ecto.Adapters.SQL.query( + AshSqlite.TestRepo, + """ + CREATE SCHEMA IF NOT EXISTS example; + """ + ) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "the migration sets up resources correctly" do + # the snapshot exists and contains valid json + assert File.read!(Path.wildcard("test_snapshots_path/test_repo/posts/*.json")) + |> Jason.decode!(keys: :atoms!) 
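+ # Same checks as the no-schema case above, but the table and index calls should carry prefix: "example".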
+ + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + file_contents = File.read!(file) + + # the migration creates the table + assert file_contents =~ "create table(:posts, primary_key: false, prefix: \"example\") do" + + # the migration sets up the custom_indexes + assert file_contents =~ + ~S{create index(:posts, ["id"], name: "test_unique_index", unique: true, prefix: "example")} + + assert file_contents =~ ~S{create index(:posts, ["id"]} + + # the migration adds the id, with its default + assert file_contents =~ + ~S[add :id, :uuid, null: false, default: fragment("uuid_generate_v4()"), primary_key: true] + + # the migration adds other attributes + assert file_contents =~ ~S[add :title, :text] + + # the migration adds custom attributes + assert file_contents =~ ~S[add :second_title, :varchar, size: 16] + + # the migration creates unique_indexes based on the identities of the resource + assert file_contents =~ + ~S{create unique_index(:posts, [:title], name: "posts_title_index", prefix: "example")} + end + end + + describe "custom_indexes with `concurrently: true`" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + sqlite do + custom_indexes do + # need one without any opts + index([:title], concurrently: true) + end + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defapi([Post]) + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + end + + test "it creates multiple migration files" do + assert [_, custom_index_migration] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(custom_index_migration) + + assert file =~ ~S[@disable_ddl_transaction true] + + assert file =~ ~S + end + end + + describe "creating follow up migrations with a schema" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + sqlite do + schema("example") + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defapi([Post]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when renaming a field, it asks if you are renaming it, and renames it if you are" do + defposts do + sqlite do + schema("example") + end + + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + end + end + + defapi([Post]) + + send(self(), {:mix_shell_input, :yes?, true}) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ ~S[rename table(:posts, prefix: "example"), :title, to: :name] + end + end + + describe "creating follow up migrations" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defapi([Post]) + + Mix.shell(Mix.Shell.Process) + + 
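+ # Initial generation; the test below redefines the resource and generates a follow up migration.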
AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when renaming an index, it is properly renamed" do + defposts do + sqlite do + identity_index_names(title: "titles_r_unique_dawg") + end + + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[ALTER INDEX posts_title_index RENAME TO titles_r_unique_dawg] + end + + test "when adding a field, it adds the field" do + defposts do + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:name, :string, allow_nil?: false) + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :name, :text, null: false] + end + + test "when renaming a field, it asks if you are renaming it, and renames it if you are" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + end + end + + defapi([Post]) + + send(self(), {:mix_shell_input, :yes?, true}) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ ~S[rename table(:posts), :title, to: :name] + end + + test "when renaming a field, it asks if you are renaming it, and adds it if you aren't" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + end + end + + defapi([Post]) + + send(self(), {:mix_shell_input, :yes?, false}) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :name, :text, null: false] + end + + test "when renaming a field, it asks which field you are renaming it to, and renames it if you are" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + attribute(:subject, :string, allow_nil?: false) + end + end + + defapi([Post]) + + send(self(), {:mix_shell_input, :yes?, true}) + send(self(), {:mix_shell_input, :prompt, "subject"}) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + # Up migration + assert File.read!(file2) =~ ~S[rename table(:posts), :title, to: :subject] + + # Down migration + assert File.read!(file2) =~ ~S[rename table(:posts), 
:subject, to: :title] + end + + test "when renaming a field, it asks which field you are renaming it to, and adds it if you arent" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + attribute(:subject, :string, allow_nil?: false) + end + end + + defapi([Post]) + + send(self(), {:mix_shell_input, :yes?, false}) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :subject, :text, null: false] + end + + test "when multiple schemas apply to the same table, all attributes are added" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + end + + defapi([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :foobar, :text] + + assert File.read!(file2) =~ + ~S[add :foobar, :text] + end + + test "when multiple schemas apply to the same table, all identities are added" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + + identities do + identity(:unique_title, [:title]) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + identities do + identity(:unique_name, [:name]) + end + end + + defapi([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file1_content = File.read!(file1) + + assert file1_content =~ + "create unique_index(:posts, [:title], name: \"posts_title_index\")" + + file2_content = File.read!(file2) + + assert file2_content =~ + "drop_if_exists unique_index(:posts, [:title], name: \"posts_title_index\")" + + assert file2_content =~ + "create unique_index(:posts, [:name], name: \"posts_unique_name_index\")" + + assert file2_content =~ + "create unique_index(:posts, [:title], name: \"posts_unique_title_index\")" + end + + test "when an attribute exists only on some of the resources that use the same table, it isn't marked as null: false" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:example, :string, allow_nil?: false) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + end + end + + defapi([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :example, :text] <> "\n" + + refute File.read!(file2) =~ ~S[null: false] + end + end + + describe "auto incrementing integer, when generated" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + 
File.rm_rf!("test_migration_path") + end) + + defposts do + attributes do + attribute(:id, :integer, generated?: true, allow_nil?: false, primary_key?: true) + attribute(:views, :integer) + end + end + + defapi([Post]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when an integer is generated and default nil, it is a bigserial" do + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[add :id, :bigserial, null: false, primary_key: true] + + assert File.read!(file) =~ + ~S[add :views, :bigint] + end + end + + describe "--check option" do + setup do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defapi([Post]) + + [api: Api] + end + + test "returns code(1) if snapshots and resources don't fit", %{api: api} do + assert catch_exit( + AshSqlite.MigrationGenerator.generate(api, + snapshot_path: "test_snapshot_path", + migration_path: "test_migration_path", + check: true + ) + ) == {:shutdown, 1} + + refute File.exists?(Path.wildcard("test_migration_path2/**/*_migrate_resources*.exs")) + refute File.exists?(Path.wildcard("test_snapshots_path2/test_repo/posts/*.json")) + end + end + + describe "references" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + end + + test "references are inferred automatically" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post) + end + end + + defapi([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:posts, column: :id, name: "posts_post_id_fkey", type: :uuid, prefix: "public")] + end + + test "references are inferred automatically if the attribute has a different type" do + defposts do + attributes do + attribute(:id, :string, primary_key?: true, allow_nil?: false) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + attribute(:id, :string, primary_key?: true, allow_nil?: false) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post, attribute_type: :string) + end + end + + defapi([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:posts, column: :id, name: "posts_post_id_fkey", type: :text, prefix: "public")] + end + + test "when modified, the foreign key is dropped before modification" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post) + end + end + + defapi([Post, Post2]) + + 
AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + defposts Post2 do + sqlite do + references do + reference(:post, name: "special_post_fkey", on_delete: :delete, on_update: :update) + end + end + + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post) + end + end + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert file = + "test_migration_path/**/*_migrate_resources*.exs" + |> Path.wildcard() + |> Enum.sort() + |> Enum.at(1) + |> File.read!() + + assert file =~ + ~S[references(:posts, column: :id, name: "special_post_fkey", type: :uuid, prefix: "public", on_delete: :delete_all, on_update: :update_all)] + + assert file =~ ~S[drop constraint(:posts, "posts_post_id_fkey")] + + assert [_, down_code] = String.split(file, "def down do") + + assert [_, after_drop] = + String.split(down_code, "drop constraint(:posts, \"special_post_fkey\")") + + assert after_drop =~ ~S[references(:posts] + end + end + + describe "check constraints" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + end + + test "when added, the constraint is created" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:price, :integer) + end + + sqlite do + check_constraints do + check_constraint(:price, "price_must_be_positive", check: "price > 0") + end + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert file = + "test_migration_path/**/*_migrate_resources*.exs" + |> Path.wildcard() + |> Enum.sort() + |> Enum.at(0) + |> File.read!() + + assert file =~ + ~S[create constraint(:posts, :price_must_be_positive, check: "price > 0")] + + defposts do + attributes do + uuid_primary_key(:id) + attribute(:price, :integer) + end + + sqlite do + check_constraints do + check_constraint(:price, "price_must_be_positive", check: "price > 1") + end + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert file = + "test_migration_path/**/*_migrate_resources*.exs" + |> Path.wildcard() + |> Enum.sort() + |> Enum.at(1) + |> File.read!() + + assert [_, down] = String.split(file, "def down do") + + assert [_, remaining] = + String.split(down, "drop_if_exists constraint(:posts, :price_must_be_positive)") + + assert remaining =~ + ~S[create constraint(:posts, :price_must_be_positive, check: "price > 0")] + end + + test "when removed, the constraint is dropped before modification" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:price, :integer) + end + + sqlite do + check_constraints do + check_constraint(:price, "price_must_be_positive", check: "price > 0") + end + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + defposts do + attributes do + uuid_primary_key(:id) + attribute(:price, :integer) + end + end + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: 
"test_migration_path", + quiet: true, + format: false + ) + + assert file = + "test_migration_path/**/*_migrate_resources*.exs" + |> Path.wildcard() + |> Enum.sort() + |> Enum.at(1) + + assert File.read!(file) =~ + ~S[drop_if_exists constraint(:posts, :price_must_be_positive)] + end + end + + describe "polymorphic resources" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defmodule Comment do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + polymorphic?(true) + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id) + attribute(:resource_id, :uuid) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + end + + defmodule Post do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "posts" + repo(AshSqlite.TestRepo) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + has_many(:comments, Comment, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "post_comments"}} + ) + + belongs_to(:best_comment, Comment, + destination_attribute: :id, + relationship_context: %{data_layer: %{table: "post_comments"}} + ) + end + end + + defapi([Post, Comment]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + [api: Api] + end + + test "it uses the relationship's table context if it is set" do + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:post_comments, column: :id, name: "posts_best_comment_id_fkey", type: :uuid, prefix: "public")] + end + end + + describe "default values" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + end + + test "when default value is specified that implements EctoMigrationDefault" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:start_date, :date, default: ~D[2022-04-19]) + attribute(:start_time, :time, default: ~T[08:30:45]) + attribute(:timestamp, :utc_datetime, default: ~U[2022-02-02 08:30:30Z]) + attribute(:timestamp_naive, :naive_datetime, default: ~N[2022-02-02 08:30:30]) + attribute(:number, :integer, default: 5) + attribute(:fraction, :float, default: 0.25) + attribute(:decimal, :decimal, default: Decimal.new("123.4567890987654321987")) + attribute(:name, :string, default: "Fred") + attribute(:tag, :atom, default: :value) + attribute(:enabled, :boolean, default: false) + end + end + + defapi([Post]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file1] = Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(file1) + + assert file =~ + ~S[add :start_date, :date, default: fragment("'2022-04-19'")] + + assert file =~ + ~S[add :start_time, :time, default: fragment("'08:30:45'")] + + assert file =~ + ~S[add :timestamp, :utc_datetime, default: fragment("'2022-02-02 08:30:30Z'")] + + assert file =~ + ~S[add :timestamp_naive, :naive_datetime, default: fragment("'2022-02-02 08:30:30'")] + + assert file =~ + ~S[add :number, :bigint, default: 5] + + assert file =~ + ~S[add :fraction, :float, default: 0.25] + + assert file =~ + ~S[add :decimal, :decimal, default: 
"123.4567890987654321987"] + + assert file =~ + ~S[add :name, :text, default: "Fred"] + + assert file =~ + ~S[add :tag, :text, default: "value"] + + assert file =~ + ~S[add :enabled, :boolean, default: false] + end + + test "when default value is specified that does not implement EctoMigrationDefault" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:product_code, :term, default: {"xyz"}) + end + end + + defapi([Post]) + + log = + capture_log(fn -> + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + end) + + assert log =~ "`{\"xyz\"}`" + + assert [file1] = Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(file1) + + assert file =~ + ~S[add :product_code, :binary] + end + end + + describe "follow up with references" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defmodule Comment do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "comments" + repo AshSqlite.TestRepo + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + belongs_to(:post, Post) + end + end + + defapi([Post, Comment]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when changing the primary key, it changes properly" do + defposts do + attributes do + attribute(:id, :uuid, primary_key?: false, default: &Ecto.UUID.generate/0) + uuid_primary_key(:guid) + attribute(:title, :string) + end + end + + defmodule Comment do + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "comments" + repo AshSqlite.TestRepo + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + belongs_to(:post, Post) + end + end + + defapi([Post, Comment]) + + AshSqlite.MigrationGenerator.generate(Api, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(file2) + + assert [before_index_drop, after_index_drop] = + String.split(file, ~S[drop constraint("posts", "posts_pkey")], parts: 2) + + assert before_index_drop =~ ~S[drop constraint(:comments, "comments_post_id_fkey")] + + assert after_index_drop =~ ~S[modify :id, :uuid, null: true, primary_key: false] + + assert after_index_drop =~ + ~S[modify :post_id, references(:posts, column: :id, name: "comments_post_id_fkey", type: :uuid, prefix: "public")] + end + end +end diff --git a/test/polymorphism_test.exs b/test/polymorphism_test.exs new file mode 100644 index 0000000..15ae06e --- /dev/null +++ b/test/polymorphism_test.exs @@ -0,0 +1,29 @@ +defmodule AshSqlite.PolymorphismTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post, Rating} + + require Ash.Query + + test "you can create related data" do + Post + |> Ash.Changeset.for_create(:create, rating: %{score: 10}) + |> Api.create!() + + assert [%{score: 10}] = + Rating + |> Ash.Query.set_context(%{data_layer: %{table: "post_ratings"}}) + |> Api.read!() + end + + test "you can read related data" do + Post + |> Ash.Changeset.for_create(:create, rating: 
%{score: 10}) + |> Api.create!() + + assert [%{score: 10}] = + Post + |> Ash.Query.load(:ratings) + |> Api.read_one!() + |> Map.get(:ratings) + end +end diff --git a/test/primary_key_test.exs b/test/primary_key_test.exs new file mode 100644 index 0000000..53dacb4 --- /dev/null +++ b/test/primary_key_test.exs @@ -0,0 +1,51 @@ +defmodule AshSqlite.Test.PrimaryKeyTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, IntegerPost, Post, PostView} + + require Ash.Query + + test "creates record with integer primary key" do + assert %IntegerPost{} = IntegerPost |> Ash.Changeset.new(%{title: "title"}) |> Api.create!() + end + + test "creates record with uuid primary key" do + assert %Post{} = Post |> Ash.Changeset.new(%{title: "title"}) |> Api.create!() + end + + describe "resources without a primary key" do + test "records can be created" do + post = + Post + |> Ash.Changeset.for_action(:create, %{title: "not very interesting"}) + |> Api.create!() + + assert {:ok, view} = + PostView + |> Ash.Changeset.for_action(:create, %{browser: :firefox, post_id: post.id}) + |> Api.create() + + assert view.browser == :firefox + assert view.post_id == post.id + assert DateTime.diff(DateTime.utc_now(), view.time, :microsecond) < 1_000_000 + end + + test "records can be queried" do + post = + Post + |> Ash.Changeset.for_action(:create, %{title: "not very interesting"}) + |> Api.create!() + + expected = + PostView + |> Ash.Changeset.for_action(:create, %{browser: :firefox, post_id: post.id}) + |> Api.create!() + + assert {:ok, [actual]} = Api.read(PostView) + + assert actual.time == expected.time + assert actual.browser == expected.browser + assert actual.post_id == expected.post_id + end + end +end diff --git a/test/select_test.exs b/test/select_test.exs new file mode 100644 index 0000000..a2fbca2 --- /dev/null +++ b/test/select_test.exs @@ -0,0 +1,15 @@ +defmodule AshSqlite.SelectTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "values not selected in the query are not present in the response" do + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + assert [%{title: nil}] = Api.read!(Ash.Query.select(Post, :id)) + end +end diff --git a/test/sort_test.exs b/test/sort_test.exs new file mode 100644 index 0000000..103b938 --- /dev/null +++ b/test/sort_test.exs @@ -0,0 +1,175 @@ +defmodule AshSqlite.SortTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Comment, Post, PostLink} + + require Ash.Query + + test "multi-column sorts work" do + Post + |> Ash.Changeset.new(%{title: "aaa", score: 0}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "aaa", score: 1}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "bbb", score: 0}) + |> Api.create!() + + assert [ + %{title: "aaa", score: 0}, + %{title: "aaa", score: 1}, + %{title: "bbb"} + ] = + Api.read!( + Post + |> Ash.Query.sort(title: :asc, score: :asc) + ) + end + + test "multi-column sorts work on inclusion" do + post = + Post + |> Ash.Changeset.new(%{title: "aaa", score: 0}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "aaa", score: 1}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "bbb", score: 0}) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "aaa", likes: 1}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "bbb", likes: 1}) 
+ |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + Comment + |> Ash.Changeset.new(%{title: "aaa", likes: 2}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Api.create!() + + posts = + Post + |> Ash.Query.load( + comments: + Comment + |> Ash.Query.sort([:title, :likes]) + |> Ash.Query.select([:title, :likes]) + |> Ash.Query.limit(1) + ) + |> Ash.Query.sort([:title, :score]) + |> Api.read!() + + assert [ + %{title: "aaa", comments: [%{title: "aaa"}]}, + %{title: "aaa"}, + %{title: "bbb"} + ] = posts + end + + test "multicolumn sort works with a select statement" do + Post + |> Ash.Changeset.new(%{title: "aaa", score: 0}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "aaa", score: 1}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "bbb", score: 0}) + |> Api.create!() + + assert [ + %{title: "aaa", score: 0}, + %{title: "aaa", score: 1}, + %{title: "bbb"} + ] = + Api.read!( + Post + |> Ash.Query.sort(title: :asc, score: :asc) + |> Ash.Query.select([:title, :score]) + ) + end + + test "sorting when joining to a many to many relationship sorts properly" do + post1 = + Post + |> Ash.Changeset.new(%{title: "aaa", score: 0}) + |> Api.create!() + + post2 = + Post + |> Ash.Changeset.new(%{title: "bbb", score: 1}) + |> Api.create!() + + post3 = + Post + |> Ash.Changeset.new(%{title: "ccc", score: 0}) + |> Api.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post1, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post3, type: :append) + |> Api.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post2, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post2, type: :append) + |> Api.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post3, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post1, type: :append) + |> Api.create!() + + assert [ + %{title: "aaa"}, + %{title: "bbb"}, + %{title: "ccc"} + ] = + Api.read!( + Post + |> Ash.Query.sort(title: :asc) + |> Ash.Query.filter(linked_posts.title in ["aaa", "bbb", "ccc"]) + ) + + assert [ + %{title: "ccc"}, + %{title: "bbb"}, + %{title: "aaa"} + ] = + Api.read!( + Post + |> Ash.Query.sort(title: :desc) + |> Ash.Query.filter(linked_posts.title in ["aaa", "bbb", "ccc"] or title == "aaa") + ) + + assert [ + %{title: "ccc"}, + %{title: "bbb"}, + %{title: "aaa"} + ] = + Api.read!( + Post + |> Ash.Query.sort(title: :desc) + |> Ash.Query.filter( + linked_posts.title in ["aaa", "bbb", "ccc"] or + post_links.source_post_id == ^post2.id + ) + ) + end +end diff --git a/test/support/api.ex b/test/support/api.ex new file mode 100644 index 0000000..75ffabe --- /dev/null +++ b/test/support/api.ex @@ -0,0 +1,8 @@ +defmodule AshSqlite.Test.Api do + @moduledoc false + use Ash.Api + + resources do + registry(AshSqlite.Test.Registry) + end +end diff --git a/test/support/concat.ex b/test/support/concat.ex new file mode 100644 index 0000000..a83e4ba --- /dev/null +++ b/test/support/concat.ex @@ -0,0 +1,35 @@ +defmodule AshSqlite.Test.Concat do + @moduledoc false + use Ash.Calculation + require Ash.Query + + def init(opts) do + if opts[:keys] && is_list(opts[:keys]) && Enum.all?(opts[:keys], &is_atom/1) do + {:ok, opts} + else + {:error, "Expected a `keys` option for which keys to concat"} + end + end + + def expression(opts, %{separator: separator}) 
do + Enum.reduce(opts[:keys], nil, fn key, expr -> + if expr do + if separator do + Ash.Query.expr(^expr <> ^separator <> ref(^key)) + else + Ash.Query.expr(^expr <> ref(^key)) + end + else + Ash.Query.expr(ref(^key)) + end + end) + end + + def calculate(records, opts, %{separator: separator}) do + Enum.map(records, fn record -> + Enum.map_join(opts[:keys], separator, fn key -> + to_string(Map.get(record, key)) + end) + end) + end +end diff --git a/test/support/registry.ex b/test/support/registry.ex new file mode 100644 index 0000000..f9ef255 --- /dev/null +++ b/test/support/registry.ex @@ -0,0 +1,19 @@ +defmodule AshSqlite.Test.Registry do + @moduledoc false + use Ash.Registry + + entries do + entry(AshSqlite.Test.Post) + entry(AshSqlite.Test.Comment) + entry(AshSqlite.Test.IntegerPost) + entry(AshSqlite.Test.Rating) + entry(AshSqlite.Test.PostLink) + entry(AshSqlite.Test.PostView) + entry(AshSqlite.Test.Author) + entry(AshSqlite.Test.Profile) + entry(AshSqlite.Test.User) + entry(AshSqlite.Test.Account) + entry(AshSqlite.Test.Organization) + entry(AshSqlite.Test.Manager) + end +end diff --git a/test/support/relationships/comments_containing_title.ex b/test/support/relationships/comments_containing_title.ex new file mode 100644 index 0000000..e8439be --- /dev/null +++ b/test/support/relationships/comments_containing_title.ex @@ -0,0 +1,48 @@ +defmodule AshSqlite.Test.Post.CommentsContainingTitle do + @moduledoc false + + use Ash.Resource.ManualRelationship + use AshSqlite.ManualRelationship + require Ash.Query + require Ecto.Query + + def load(posts, _opts, %{query: query, actor: actor, authorize?: authorize?}) do + post_ids = Enum.map(posts, & &1.id) + + {:ok, + query + |> Ash.Query.filter(post_id in ^post_ids) + |> Ash.Query.filter(contains(title, post.title)) + |> AshSqlite.Test.Api.read!(actor: actor, authorize?: authorize?) + |> Enum.group_by(& &1.post_id)} + end + + def ash_sqlite_join(query, _opts, current_binding, as_binding, :inner, destination_query) do + {:ok, + Ecto.Query.from(_ in query, + join: dest in ^destination_query, + as: ^as_binding, + on: dest.post_id == as(^current_binding).id, + on: fragment("strpos(?, ?) > 0", dest.title, as(^current_binding).title) + )} + end + + def ash_sqlite_join(query, _opts, current_binding, as_binding, :left, destination_query) do + {:ok, + Ecto.Query.from(_ in query, + left_join: dest in ^destination_query, + as: ^as_binding, + on: dest.post_id == as(^current_binding).id, + on: fragment("strpos(?, ?) > 0", dest.title, as(^current_binding).title) + )} + end + + def ash_sqlite_subquery(_opts, current_binding, as_binding, destination_query) do + {:ok, + Ecto.Query.from(_ in destination_query, + where: parent_as(^current_binding).id == as(^as_binding).post_id, + where: + fragment("strpos(?, ?) 
> 0", as(^as_binding).title, parent_as(^current_binding).title) + )} + end +end diff --git a/test/support/repo_case.ex b/test/support/repo_case.ex new file mode 100644 index 0000000..a405788 --- /dev/null +++ b/test/support/repo_case.ex @@ -0,0 +1,28 @@ +defmodule AshSqlite.RepoCase do + @moduledoc false + use ExUnit.CaseTemplate + + alias Ecto.Adapters.SQL.Sandbox + + using do + quote do + alias AshSqlite.TestRepo + + import Ecto + import Ecto.Query + import AshSqlite.RepoCase + + # and any other stuff + end + end + + setup tags do + :ok = Sandbox.checkout(AshSqlite.TestRepo) + + unless tags[:async] do + Sandbox.mode(AshSqlite.TestRepo, {:shared, self()}) + end + + :ok + end +end diff --git a/test/support/resources/account.ex b/test/support/resources/account.ex new file mode 100644 index 0000000..79bcea2 --- /dev/null +++ b/test/support/resources/account.ex @@ -0,0 +1,30 @@ +defmodule AshSqlite.Test.Account do + @moduledoc false + use Ash.Resource, data_layer: AshSqlite.DataLayer + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:is_active, :boolean) + end + + calculations do + calculate( + :active, + :boolean, + expr(is_active) + ) + end + + sqlite do + table "accounts" + repo(AshSqlite.TestRepo) + end + + relationships do + belongs_to(:user, AshSqlite.Test.User) + end +end diff --git a/test/support/resources/author.ex b/test/support/resources/author.ex new file mode 100644 index 0000000..9b20aad --- /dev/null +++ b/test/support/resources/author.ex @@ -0,0 +1,80 @@ +defmodule AshSqlite.Test.Author do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table("authors") + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:first_name, :string) + attribute(:last_name, :string) + attribute(:bio, AshSqlite.Test.Bio) + attribute(:badges, {:array, :atom}) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + relationships do + has_one(:profile, AshSqlite.Test.Profile) + has_many(:posts, AshSqlite.Test.Post) + end + + calculations do + calculate(:title, :string, expr(bio[:title])) + calculate(:full_name, :string, expr(first_name <> " " <> last_name)) + calculate(:full_name_with_nils, :string, expr(string_join([first_name, last_name], " "))) + calculate(:full_name_with_nils_no_joiner, :string, expr(string_join([first_name, last_name]))) + calculate(:split_full_name, {:array, :string}, expr(string_split(full_name))) + + calculate( + :split_full_name_trim, + {:array, :string}, + expr(string_split(full_name, " ", trim?: true)) + ) + + calculate(:first_name_from_split, :string, expr(at(split_full_name_trim, 0))) + + calculate(:first_name_or_bob, :string, expr(first_name || "bob")) + calculate(:first_name_and_bob, :string, expr(first_name && "bob")) + + calculate( + :conditional_full_name, + :string, + expr( + if( + is_nil(first_name) or is_nil(last_name), + "(none)", + first_name <> " " <> last_name + ) + ) + ) + + calculate( + :nested_conditional, + :string, + expr( + if( + is_nil(first_name), + "No First Name", + if( + is_nil(last_name), + "No Last Name", + first_name <> " " <> last_name + ) + ) + ) + ) + + calculate :param_full_name, + :string, + {AshSqlite.Test.Concat, keys: [:first_name, :last_name]} do + argument(:separator, :string, default: " ", constraints: [allow_empty?: true, trim?: false]) + end + end +end diff --git a/test/support/resources/bio.ex b/test/support/resources/bio.ex new file mode 100644 index 
0000000..27d889f --- /dev/null +++ b/test/support/resources/bio.ex @@ -0,0 +1,19 @@ +defmodule AshSqlite.Test.Bio do + @moduledoc false + use Ash.Resource, data_layer: :embedded + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + attribute(:title, :string) + attribute(:bio, :string) + attribute(:years_of_experience, :integer) + + attribute :list_of_strings, {:array, :string} do + allow_nil?(true) + default(nil) + end + end +end diff --git a/test/support/resources/comment.ex b/test/support/resources/comment.ex new file mode 100644 index 0000000..b5dc7a3 --- /dev/null +++ b/test/support/resources/comment.ex @@ -0,0 +1,59 @@ +defmodule AshSqlite.Test.Comment do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer, + authorizers: [ + Ash.Policy.Authorizer + ] + + policies do + bypass action_type(:read) do + # Check that the comment is in the same org (via post) as actor + authorize_if(relates_to_actor_via([:post, :organization, :users])) + end + end + + sqlite do + table "comments" + repo(AshSqlite.TestRepo) + + references do + reference(:post, on_delete: :delete, on_update: :update, name: "special_name_fkey") + end + end + + actions do + defaults([:read, :update, :destroy]) + + create :create do + primary?(true) + argument(:rating, :map) + + change(manage_relationship(:rating, :ratings, on_missing: :ignore, on_match: :create)) + end + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:likes, :integer) + attribute(:arbitrary_timestamp, :utc_datetime_usec) + create_timestamp(:created_at, writable?: true) + end + + relationships do + belongs_to(:post, AshSqlite.Test.Post) + belongs_to(:author, AshSqlite.Test.Author) + + has_many(:ratings, AshSqlite.Test.Rating, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "comment_ratings"}} + ) + + has_many(:popular_ratings, AshSqlite.Test.Rating, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "comment_ratings"}}, + filter: expr(score > 5) + ) + end +end diff --git a/test/support/resources/integer_post.ex b/test/support/resources/integer_post.ex new file mode 100644 index 0000000..60c3f4a --- /dev/null +++ b/test/support/resources/integer_post.ex @@ -0,0 +1,19 @@ +defmodule AshSqlite.Test.IntegerPost do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "integer_posts" + repo AshSqlite.TestRepo + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + integer_primary_key(:id) + attribute(:title, :string) + end +end diff --git a/test/support/resources/manager.ex b/test/support/resources/manager.ex new file mode 100644 index 0000000..3d3c5fd --- /dev/null +++ b/test/support/resources/manager.ex @@ -0,0 +1,39 @@ +defmodule AshSqlite.Test.Manager do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table("managers") + repo(AshSqlite.TestRepo) + end + + actions do + defaults([:read, :update, :destroy]) + + create :create do + primary?(true) + argument(:organization_id, :uuid, allow_nil?: false) + + change(manage_relationship(:organization_id, :organization, type: :append_and_remove)) + end + end + + identities do + identity(:uniq_code, :code) + end + + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + attribute(:code, :string, allow_nil?: false) + attribute(:must_be_present, :string, allow_nil?: false) + attribute(:role, :string) + end + + relationships do + 
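+ # attribute_writable?(true) below also exposes :organization_id as a writable attribute on this resource.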
belongs_to :organization, AshSqlite.Test.Organization do + attribute_writable?(true) + end + end +end diff --git a/test/support/resources/organization.ex b/test/support/resources/organization.ex new file mode 100644 index 0000000..5ba2624 --- /dev/null +++ b/test/support/resources/organization.ex @@ -0,0 +1,25 @@ +defmodule AshSqlite.Test.Organization do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table("orgs") + repo(AshSqlite.TestRepo) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:name, :string) + end + + relationships do + has_many(:users, AshSqlite.Test.User) + has_many(:posts, AshSqlite.Test.Post) + has_many(:managers, AshSqlite.Test.Manager) + end +end diff --git a/test/support/resources/post.ex b/test/support/resources/post.ex new file mode 100644 index 0000000..87cf9f6 --- /dev/null +++ b/test/support/resources/post.ex @@ -0,0 +1,236 @@ +defmodule AshSqlite.Test.Post do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer, + authorizers: [ + Ash.Policy.Authorizer + ] + + policies do + bypass action_type(:read) do + # Check that the post is in the same org as actor + authorize_if(relates_to_actor_via([:organization, :users])) + end + end + + sqlite do + table("posts") + repo(AshSqlite.TestRepo) + base_filter_sql("type = 'sponsored'") + + check_constraints do + check_constraint(:price, "price_must_be_positive", + message: "yo, bad price", + check: "price > 0" + ) + end + + custom_indexes do + index([:uniq_custom_one, :uniq_custom_two], + unique: true, + concurrently: true, + message: "dude what the heck" + ) + end + end + + resource do + base_filter(expr(type == type(:sponsored, ^Ash.Type.Atom))) + end + + actions do + defaults([:update, :destroy]) + + read :read do + primary?(true) + end + + read :paginated do + pagination(offset?: true, required?: true, countable: true) + end + + create :create do + primary?(true) + argument(:rating, :map) + + change( + manage_relationship(:rating, :ratings, + on_missing: :ignore, + on_no_match: :create, + on_match: :create + ) + ) + end + + update :increment_score do + argument(:amount, :integer, default: 1) + change(atomic_update(:score, expr((score || 0) + ^arg(:amount)))) + end + end + + identities do + identity(:uniq_one_and_two, [:uniq_one, :uniq_two]) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:title, :string) + attribute(:score, :integer) + attribute(:public, :boolean) + attribute(:category, :string) + attribute(:type, :atom, default: :sponsored, private?: true, writable?: false) + attribute(:price, :integer) + attribute(:decimal, :decimal, default: Decimal.new(0)) + attribute(:status, AshSqlite.Test.Types.Status) + attribute(:status_enum, AshSqlite.Test.Types.StatusEnum) + attribute(:status_enum_no_cast, AshSqlite.Test.Types.StatusEnumNoCast, source: :status_enum) + attribute(:point, AshSqlite.Test.Point) + attribute(:stuff, :map) + attribute(:uniq_one, :string) + attribute(:uniq_two, :string) + attribute(:uniq_custom_one, :string) + attribute(:uniq_custom_two, :string) + create_timestamp(:created_at) + update_timestamp(:updated_at) + end + + code_interface do + define_for(AshSqlite.Test.Api) + define(:get_by_id, action: :read, get_by: [:id]) + define(:increment_score, args: [{:optional, :amount}]) + end + + relationships do + belongs_to :organization, AshSqlite.Test.Organization do + attribute_writable?(true) + end + + belongs_to(:author, 
AshSqlite.Test.Author) + + has_many :posts_with_matching_title, __MODULE__ do + no_attributes?(true) + filter(expr(parent(title) == title and parent(id) != id)) + end + + has_many(:comments, AshSqlite.Test.Comment, destination_attribute: :post_id) + + has_many :comments_matching_post_title, AshSqlite.Test.Comment do + filter(expr(title == parent_expr(title))) + end + + has_many :popular_comments, AshSqlite.Test.Comment do + destination_attribute(:post_id) + filter(expr(likes > 10)) + end + + has_many :comments_containing_title, AshSqlite.Test.Comment do + manual(AshSqlite.Test.Post.CommentsContainingTitle) + end + + has_many(:ratings, AshSqlite.Test.Rating, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "post_ratings"}} + ) + + has_many(:post_links, AshSqlite.Test.PostLink, + destination_attribute: :source_post_id, + filter: [state: :active] + ) + + many_to_many(:linked_posts, __MODULE__, + through: AshSqlite.Test.PostLink, + join_relationship: :post_links, + source_attribute_on_join_resource: :source_post_id, + destination_attribute_on_join_resource: :destination_post_id + ) + + has_many(:views, AshSqlite.Test.PostView) + end + + validations do + validate(attribute_does_not_equal(:title, "not allowed")) + end + + calculations do + calculate(:score_after_winning, :integer, expr((score || 0) + 1)) + calculate(:negative_score, :integer, expr(-score)) + calculate(:category_label, :string, expr("(" <> category <> ")")) + calculate(:score_with_score, :string, expr(score <> score)) + calculate(:foo_bar_from_stuff, :string, expr(stuff[:foo][:bar])) + + calculate( + :score_map, + :map, + expr(%{ + negative_score: %{foo: negative_score, bar: negative_score} + }) + ) + + calculate( + :calc_returning_json, + AshSqlite.Test.Money, + expr( + fragment(""" + '{"amount":100, "currency": "usd"}'::json + """) + ) + ) + + calculate( + :was_created_in_the_last_month, + :boolean, + expr( + # This is written in a silly way on purpose, to test a regression + if( + fragment("(? <= (? 
- '1 month'::interval))", now(), created_at), + true, + false + ) + ) + ) + + calculate( + :price_string, + :string, + CalculatePostPriceString + ) + + calculate( + :price_string_with_currency_sign, + :string, + CalculatePostPriceStringWithSymbol + ) + end +end + +defmodule CalculatePostPriceString do + @moduledoc false + use Ash.Calculation + + @impl true + def select(_, _, _), do: [:price] + + @impl true + def calculate(records, _, _) do + Enum.map(records, fn %{price: price} -> + dollars = div(price, 100) + cents = rem(price, 100) + "#{dollars}.#{cents}" + end) + end +end + +defmodule CalculatePostPriceStringWithSymbol do + @moduledoc false + use Ash.Calculation + + @impl true + def load(_, _, _), do: [:price_string] + + @impl true + def calculate(records, _, _) do + Enum.map(records, fn %{price_string: price_string} -> + "#{price_string}$" + end) + end +end diff --git a/test/support/resources/post_link.ex b/test/support/resources/post_link.ex new file mode 100644 index 0000000..a91b4cd --- /dev/null +++ b/test/support/resources/post_link.ex @@ -0,0 +1,37 @@ +defmodule AshSqlite.Test.PostLink do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table "post_links" + repo AshSqlite.TestRepo + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + identities do + identity(:unique_link, [:source_post_id, :destination_post_id]) + end + + attributes do + attribute :state, :atom do + constraints(one_of: [:active, :archived]) + default(:active) + end + end + + relationships do + belongs_to :source_post, AshSqlite.Test.Post do + allow_nil?(false) + primary_key?(true) + end + + belongs_to :destination_post, AshSqlite.Test.Post do + allow_nil?(false) + primary_key?(true) + end + end +end diff --git a/test/support/resources/post_views.ex b/test/support/resources/post_views.ex new file mode 100644 index 0000000..45599f7 --- /dev/null +++ b/test/support/resources/post_views.ex @@ -0,0 +1,33 @@ +defmodule AshSqlite.Test.PostView do + @moduledoc false + use Ash.Resource, data_layer: AshSqlite.DataLayer + + actions do + defaults([:create, :read]) + end + + attributes do + create_timestamp(:time) + attribute(:browser, :atom, constraints: [one_of: [:firefox, :chrome, :edge]]) + end + + relationships do + belongs_to :post, AshSqlite.Test.Post do + allow_nil?(false) + attribute_writable?(true) + end + end + + resource do + require_primary_key?(false) + end + + sqlite do + table "post_views" + repo AshSqlite.TestRepo + + references do + reference :post, ignore?: true + end + end +end diff --git a/test/support/resources/profile.ex b/test/support/resources/profile.ex new file mode 100644 index 0000000..5051046 --- /dev/null +++ b/test/support/resources/profile.ex @@ -0,0 +1,24 @@ +defmodule AshSqlite.Test.Profile do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + table("profile") + schema("profiles") + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:description, :string) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + relationships do + belongs_to(:author, AshSqlite.Test.Author) + end +end diff --git a/test/support/resources/rating.ex b/test/support/resources/rating.ex new file mode 100644 index 0000000..fa6f8e4 --- /dev/null +++ b/test/support/resources/rating.ex @@ -0,0 +1,20 @@ +defmodule AshSqlite.Test.Rating do + @moduledoc false + use Ash.Resource, + data_layer: AshSqlite.DataLayer + + sqlite do + polymorphic?(true) + 
repo AshSqlite.TestRepo + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:score, :integer) + attribute(:resource_id, :uuid) + end +end diff --git a/test/support/resources/user.ex b/test/support/resources/user.ex new file mode 100644 index 0000000..26b98cb --- /dev/null +++ b/test/support/resources/user.ex @@ -0,0 +1,23 @@ +defmodule AshSqlite.Test.User do + @moduledoc false + use Ash.Resource, data_layer: AshSqlite.DataLayer + + actions do + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:is_active, :boolean) + end + + sqlite do + table "users" + repo(AshSqlite.TestRepo) + end + + relationships do + belongs_to(:organization, AshSqlite.Test.Organization) + has_many(:accounts, AshSqlite.Test.Account) + end +end diff --git a/test/support/test_app.ex b/test/support/test_app.ex new file mode 100644 index 0000000..e074614 --- /dev/null +++ b/test/support/test_app.ex @@ -0,0 +1,13 @@ +defmodule AshSqlite.TestApp do + @moduledoc false + def start(_type, _args) do + children = [ + AshSqlite.TestRepo + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: AshSqlite.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/test/support/test_custom_extension.ex b/test/support/test_custom_extension.ex new file mode 100644 index 0000000..a854a4e --- /dev/null +++ b/test/support/test_custom_extension.ex @@ -0,0 +1,38 @@ +defmodule AshSqlite.TestCustomExtension do + @moduledoc false + + use AshSqlite.CustomExtension, name: "demo-functions", latest_version: 1 + + @impl true + def install(0) do + """ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_demo_functions() + RETURNS boolean AS $$ SELECT TRUE $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + """ + end + + @impl true + def install(1) do + """ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_demo_functions() + RETURNS boolean AS $$ SELECT FALSE $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + """ + end + + @impl true + def uninstall(_version) do + """ + execute(\"\"\" + DROP FUNCTION IF EXISTS ash_demo_functions() + \"\"\") + """ + end +end diff --git a/test/support/test_no_sandbox_repo.ex b/test/support/test_no_sandbox_repo.ex new file mode 100644 index 0000000..008d0db --- /dev/null +++ b/test/support/test_no_sandbox_repo.ex @@ -0,0 +1,13 @@ +defmodule AshSqlite.TestNoSandboxRepo do + @moduledoc false + use AshSqlite.Repo, + otp_app: :ash_sqlite + + def on_transaction_begin(data) do + send(self(), data) + end + + def installed_extensions do + ["ash-functions", AshSqlite.TestCustomExtension] + end +end diff --git a/test/support/test_repo.ex b/test/support/test_repo.ex new file mode 100644 index 0000000..51bf9ab --- /dev/null +++ b/test/support/test_repo.ex @@ -0,0 +1,13 @@ +defmodule AshSqlite.TestRepo do + @moduledoc false + use AshSqlite.Repo, + otp_app: :ash_sqlite + + def on_transaction_begin(data) do + send(self(), data) + end + + def installed_extensions do + ["ash-functions", AshSqlite.TestCustomExtension] + end +end diff --git a/test/support/types/email.ex b/test/support/types/email.ex new file mode 100644 index 0000000..f9fa483 --- /dev/null +++ b/test/support/types/email.ex @@ -0,0 +1,8 @@ +defmodule Test.Support.Types.Email do + @moduledoc false + use Ash.Type.NewType, + subtype_of: :string, + constraints: [ + casing: :lower + ] +end diff --git a/test/support/types/money.ex b/test/support/types/money.ex new 
file mode 100644 index 0000000..b486eeb --- /dev/null +++ b/test/support/types/money.ex @@ -0,0 +1,16 @@ +defmodule AshSqlite.Test.Money do + @moduledoc false + use Ash.Resource, + data_layer: :embedded + + attributes do + attribute :amount, :integer do + allow_nil?(false) + constraints(min: 0) + end + + attribute :currency, :atom do + constraints(one_of: [:eur, :usd]) + end + end +end diff --git a/test/support/types/point.ex b/test/support/types/point.ex new file mode 100644 index 0000000..50baa89 --- /dev/null +++ b/test/support/types/point.ex @@ -0,0 +1,34 @@ +defmodule AshSqlite.Test.Point do + @moduledoc false + use Ash.Type + + def storage_type(_), do: {:array, :float} + + def cast_input(nil, _), do: {:ok, nil} + + def cast_input({a, b, c}, _) when is_float(a) and is_float(b) and is_float(c) do + {:ok, {a, b, c}} + end + + def cast_input(_, _), do: :error + + def cast_stored(nil, _), do: {:ok, nil} + + def cast_stored([a, b, c], _) when is_float(a) and is_float(b) and is_float(c) do + {:ok, {a, b, c}} + end + + def cast_stored(_, _) do + :error + end + + def dump_to_native(nil, _), do: {:ok, nil} + + def dump_to_native({a, b, c}, _) when is_float(a) and is_float(b) and is_float(c) do + {:ok, [a, b, c]} + end + + def dump_to_native(_, _) do + :error + end +end diff --git a/test/support/types/status.ex b/test/support/types/status.ex new file mode 100644 index 0000000..38f422f --- /dev/null +++ b/test/support/types/status.ex @@ -0,0 +1,6 @@ +defmodule AshSqlite.Test.Types.Status do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :string +end diff --git a/test/support/types/status_enum.ex b/test/support/types/status_enum.ex new file mode 100644 index 0000000..e95a7c8 --- /dev/null +++ b/test/support/types/status_enum.ex @@ -0,0 +1,6 @@ +defmodule AshSqlite.Test.Types.StatusEnum do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :status +end diff --git a/test/support/types/status_enum_no_cast.ex b/test/support/types/status_enum_no_cast.ex new file mode 100644 index 0000000..2cd9974 --- /dev/null +++ b/test/support/types/status_enum_no_cast.ex @@ -0,0 +1,8 @@ +defmodule AshSqlite.Test.Types.StatusEnumNoCast do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :status + + def cast_in_query?, do: false +end diff --git a/test/test_helper.exs b/test/test_helper.exs new file mode 100644 index 0000000..6282775 --- /dev/null +++ b/test/test_helper.exs @@ -0,0 +1,5 @@ +ExUnit.start() +ExUnit.configure(stacktrace_depth: 100) + +AshSqlite.TestRepo.start_link() +AshSqlite.TestNoSandboxRepo.start_link() diff --git a/test/transaction_test.exs b/test/transaction_test.exs new file mode 100644 index 0000000..bf8e45f --- /dev/null +++ b/test/transaction_test.exs @@ -0,0 +1,97 @@ +defmodule AshSqlite.Test.TransactionTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "after_transaction hooks are invoked on failure" do + assert_raise Ash.Error.Unknown, ~r/something bad happened/, fn -> + Post + |> Ash.Changeset.for_create(:create) + |> Ash.Changeset.after_action(fn _changeset, _result -> + raise "something bad happened" + end) + |> send_after_transaction_result() + |> Api.create() + end + + assert_receive {:error, + %RuntimeError{ + message: "something bad happened" + }} + end + + test "after_transaction hooks are invoked on failure, even in a nested context" do + assert_raise Ash.Error.Unknown, ~r/something bad happened 
inside/, fn -> + Post + |> Ash.Changeset.for_create(:create) + |> Ash.Changeset.after_action(fn _changeset, result -> + Post + |> Ash.Changeset.for_create(:create) + |> Ash.Changeset.after_action(fn _changeset, _result -> + raise "something bad happened inside" + end) + |> send_after_transaction_result() + |> Api.create!() + + {:ok, result} + end) + |> send_after_transaction_result() + |> Api.create() + end + + assert_receive {:error, + %RuntimeError{ + message: "something bad happened inside" + }} + + assert_receive {:error, %Ash.Error.Unknown{}} + end + + test "after_transaction hooks are invoked on success" do + Post + |> Ash.Changeset.for_create(:create) + |> send_after_transaction_result() + |> Api.create() + + assert_receive {:ok, %Post{}} + end + + test "after_transaction hooks are invoked on success and can reverse a failure" do + assert {:ok, %Post{}} = + Post + |> Ash.Changeset.for_create(:create) + |> Ash.Changeset.after_action(fn _changeset, result -> + Post + |> Ash.Changeset.for_create(:create) + |> Ash.Changeset.after_action(fn _changeset, _result -> + raise "something bad happened inside" + end) + |> send_after_transaction_result() + |> Api.create!() + + {:ok, result} + end) + |> Ash.Changeset.after_transaction(fn _changeset, {:error, _} -> + Post + |> Ash.Changeset.for_create(:create) + |> Api.create() + end) + |> send_after_transaction_result() + |> Api.create() + + assert_receive {:error, + %RuntimeError{ + message: "something bad happened inside" + }} + + assert_receive {:ok, %Post{}} + end + + defp send_after_transaction_result(changeset) do + Ash.Changeset.after_transaction(changeset, fn _changeset, result -> + send(self(), result) + result + end) + end +end diff --git a/test/type_test.exs b/test/type_test.exs new file mode 100644 index 0000000..0263835 --- /dev/null +++ b/test/type_test.exs @@ -0,0 +1,38 @@ +defmodule AshSqlite.Test.TypeTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "complex custom types can be used" do + post = + Post + |> Ash.Changeset.new(%{title: "title", point: {1.0, 2.0, 3.0}}) + |> Api.create!() + + assert post.point == {1.0, 2.0, 3.0} + end + + test "complex custom types can be accessed with fragments" do + Post + |> Ash.Changeset.new(%{title: "title", point: {1.0, 2.0, 3.0}}) + |> Api.create!() + + Post + |> Ash.Changeset.new(%{title: "title", point: {2.0, 1.0, 3.0}}) + |> Api.create!() + + assert [%{point: {2.0, 1.0, 3.0}}] = + Post + |> Ash.Query.filter(fragment("(?)[1] > (?)[2]", point, point)) + |> Api.read!() + end + + test "uuids can be used as strings in fragments" do + uuid = Ash.UUID.generate() + + Post + |> Ash.Query.filter(fragment("? 
= ?", id, type(^uuid, :uuid))) + |> Api.read!() + end +end diff --git a/test/unique_identity_test.exs b/test/unique_identity_test.exs new file mode 100644 index 0000000..23ffc6b --- /dev/null +++ b/test/unique_identity_test.exs @@ -0,0 +1,36 @@ +defmodule AshSqlite.Test.UniqueIdentityTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "unique constraint errors are properly caught" do + post = + Post + |> Ash.Changeset.new(%{title: "title"}) + |> Api.create!() + + assert_raise Ash.Error.Invalid, + ~r/Invalid value provided for id: has already been taken/, + fn -> + Post + |> Ash.Changeset.new(%{id: post.id}) + |> Api.create!() + end + end + + test "a unique constraint can be used to upsert when the resource has a base filter" do + post = + Post + |> Ash.Changeset.new(%{title: "title", uniq_one: "fred", uniq_two: "astair", price: 10}) + |> Api.create!() + + new_post = + Post + |> Ash.Changeset.new(%{title: "title2", uniq_one: "fred", uniq_two: "astair"}) + |> Api.create!(upsert?: true, upsert_identity: :uniq_one_and_two) + + assert new_post.id == post.id + assert new_post.price == 10 + end +end diff --git a/test/upsert_test.exs b/test/upsert_test.exs new file mode 100644 index 0000000..daf94f3 --- /dev/null +++ b/test/upsert_test.exs @@ -0,0 +1,60 @@ +defmodule AshSqlite.Test.UpsertTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Api, Post} + + require Ash.Query + + test "upserting results in the same created_at timestamp, but a new updated_at timestamp" do + id = Ash.UUID.generate() + + new_post = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id, + title: "title2" + }) + |> Api.create!(upsert?: true) + + assert new_post.id == id + assert new_post.created_at == new_post.updated_at + + updated_post = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id, + title: "title2" + }) + |> Api.create!(upsert?: true) + + assert updated_post.id == id + assert updated_post.created_at == new_post.created_at + assert updated_post.created_at != updated_post.updated_at + end + + test "upserting a field with a default sets to the new value" do + id = Ash.UUID.generate() + + new_post = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id, + title: "title2" + }) + |> Api.create!(upsert?: true) + + assert new_post.id == id + assert new_post.created_at == new_post.updated_at + + updated_post = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id, + title: "title2", + decimal: Decimal.new(5) + }) + |> Api.create!(upsert?: true) + + assert updated_post.id == id + assert Decimal.equal?(updated_post.decimal, Decimal.new(5)) + end +end diff --git a/test_snapshot_path/extensions.json b/test_snapshot_path/extensions.json new file mode 100644 index 0000000..08e5809 --- /dev/null +++ b/test_snapshot_path/extensions.json @@ -0,0 +1,10 @@ +{ + "ash_functions_version": 1, + "installed": [ + "ash-functions", + "uuid-ossp", + "pg_trgm", + "citext", + "demo-functions_v1" + ] +} \ No newline at end of file