first commit

This commit is contained in:
Zach Daniel 2022-10-24 09:04:45 -06:00
commit c68a8fa9e9
9 changed files with 742 additions and 0 deletions

4
.formatter.exs Normal file

@@ -0,0 +1,4 @@
# Used by "mix format"
[
inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
]

26
.gitignore vendored Normal file

@@ -0,0 +1,26 @@
# The directory Mix will write compiled artifacts to.
/_build/
# If you run "mix test --cover", coverage assets end up here.
/cover/
# The directory Mix downloads your dependencies sources to.
/deps/
# Where third-party dependencies like ExDoc output generated docs.
/doc/
# Ignore .fetch files in case you like to edit your project deps locally.
/.fetch
# If the VM crashes, it generates a dump; let's ignore it too.
erl_crash.dump
# Also ignore archive artifacts (built via "mix archive.build").
*.ez
# Ignore package tarball (built via "mix hex.build").
ash_blog-*.tar
# Temporary files, for example, from tests.
/tmp/

21
README.md Normal file

@@ -0,0 +1,21 @@
# AshBlog
**TODO: Add description**
## Installation
If [available in Hex](https://hex.pm/docs/publish), the package can be installed
by adding `ash_blog` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:ash_blog, "~> 0.1.0"}
]
end
```
Documentation can be generated with [ExDoc](https://github.com/elixir-lang/ex_doc)
and published on [HexDocs](https://hexdocs.pm). Once published, the docs can
be found at <https://hexdocs.pm/ash_blog>.
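
For example, a minimal setup (the `~> 0.27` requirement is illustrative):

```elixir
# In mix.exs: a dev-only dependency for generating docs
{:ex_doc, "~> 0.27", only: :dev, runtime: false}
```

Run `mix docs` afterwards; the generated site is written to `doc/`, which the `.gitignore` above already excludes.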

18
lib/ash_blog.ex Normal file

@@ -0,0 +1,18 @@
defmodule AshBlog do
@moduledoc """
Documentation for `AshBlog`.
"""
@doc """
Hello world.
## Examples
iex> AshBlog.hello()
:world
"""
def hello do
:world
end
end

618
lib/ash_blog/data_layer.ex Normal file

@@ -0,0 +1,618 @@
defmodule AshBlog.DataLayer do
@behaviour Ash.DataLayer
@blog %Spark.Dsl.Section{
name: :blog,
describe: """
A section for configuring the blog data layer
""",
examples: [
"""
blog do
  folder "static/blog"
end
"""
],
links: [],
schema: [
folder: [
type: :string,
default: "static/blog",
doc: """
A path relative to the priv directory where the files should be placed.
"""
]
]
}
@moduledoc """
A blog data layer backed by markdown files.
<!--- ash-hq-hide-start--> <!--- -->
## DSL Documentation
### Index
#{Spark.Dsl.Extension.doc_index([@blog])}
### Docs
#{Spark.Dsl.Extension.doc([@blog])}
<!--- ash-hq-hide-stop--> <!--- -->
"""
use Spark.Dsl.Extension,
sections: [@blog],
transformers: [Ash.DataLayer.Transformers.RequirePreCheckWith]
alias Ash.Actions.Sort
defmodule Query do
@moduledoc false
defstruct [
:resource,
:filter,
:limit,
:sort,
:tenant,
:api,
calculations: [],
aggregates: [],
relationships: %{},
offset: 0
]
end
@doc false
@impl true
def can?(_, :async_engine), do: true
def can?(_, :composite_primary_key), do: true
def can?(_, :expression_calculation), do: true
def can?(_, :expression_calculation_sort), do: true
def can?(_, :multitenancy), do: true
def can?(_, :upsert), do: true
def can?(_, :aggregate_filter), do: true
def can?(_, :aggregate_sort), do: true
def can?(_, {:aggregate_relationship, _}), do: true
def can?(_, {:filter_relationship, _}), do: true
def can?(_, {:aggregate, :count}), do: true
def can?(_, :create), do: true
def can?(_, :read), do: true
def can?(_, :update), do: true
def can?(_, :destroy), do: true
def can?(_, :sort), do: true
def can?(_, :filter), do: true
def can?(_, :limit), do: true
def can?(_, :offset), do: true
def can?(_, :boolean_filter), do: true
def can?(_, {:filter_expr, _}), do: true
def can?(_, :nested_expressions), do: true
def can?(_, {:query_aggregate, :count}), do: true
def can?(_, {:sort, _}), do: true
def can?(_, :transact), do: true
def can?(_, _), do: false
@doc false
@impl true
def resource_to_query(resource, api) do
%Query{
resource: resource,
api: api
}
end
@doc false
@impl true
def limit(query, limit, _), do: {:ok, %{query | limit: limit}}
@doc false
@impl true
def offset(query, offset, _), do: {:ok, %{query | offset: offset}}
@doc false
@impl true
def add_calculation(query, calculation, _, _),
do: {:ok, %{query | calculations: [calculation | query.calculations]}}
@doc false
@impl true
def add_aggregate(query, aggregate, _),
do: {:ok, %{query | aggregates: [aggregate | query.aggregates]}}
@doc false
@impl true
def set_tenant(_resource, query, tenant) do
{:ok, %{query | tenant: tenant}}
end
@doc false
@impl true
def filter(query, filter, _resource) do
if query.filter do
{:ok, %{query | filter: Ash.Filter.add_to_filter!(query.filter, filter)}}
else
{:ok, %{query | filter: filter}}
end
end
@doc false
@impl true
def sort(query, sort, _resource) do
{:ok, %{query | sort: sort}}
end
@doc false
@impl true
def run_aggregate_query(%{api: api} = query, aggregates, resource) do
case run_query(query, resource) do
{:ok, results} ->
Enum.reduce_while(aggregates, {:ok, %{}}, fn
%{kind: :count, name: name, query: query}, {:ok, acc} ->
results
|> filter_matches(Map.get(query || %{}, :filter), api)
|> case do
{:ok, matches} ->
{:cont, {:ok, Map.put(acc, name, Enum.count(matches))}}
{:error, error} ->
{:halt, {:error, error}}
end
_, _ ->
{:halt, {:error, "unsupported aggregate"}}
end)
{:error, error} ->
{:error, error}
end
|> case do
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
other ->
other
end
end
@doc false
@impl true
def run_query(
%Query{
resource: resource,
filter: filter,
offset: offset,
limit: limit,
sort: sort,
tenant: tenant,
calculations: calculations,
aggregates: aggregates,
api: api
},
_resource
) do
with {:ok, records} <- get_records(resource, tenant),
{:ok, records} <- do_add_aggregates(records, api, resource, aggregates),
{:ok, records} <-
filter_matches(records, filter, api),
{:ok, records} <-
do_add_calculations(records, resource, calculations) do
offset_records =
records
|> Sort.runtime_sort(sort)
|> Enum.drop(offset || 0)
if limit do
{:ok, Enum.take(offset_records, limit)}
else
{:ok, offset_records}
end
else
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
end
end
defp do_add_calculations(records, _resource, []), do: {:ok, records}
defp do_add_calculations(records, resource, calculations) do
Enum.reduce_while(records, {:ok, []}, fn record, {:ok, records} ->
calculations
|> Enum.reduce_while({:ok, record}, fn calculation, {:ok, record} ->
expression = calculation.module.expression(calculation.opts, calculation.context)
case Ash.Filter.hydrate_refs(expression, %{
resource: resource,
public?: false
}) do
{:ok, expression} ->
case Ash.Filter.Runtime.do_match(record, expression) do
{:ok, value} ->
if calculation.load do
{:cont, {:ok, Map.put(record, calculation.load, value)}}
else
{:cont,
{:ok,
Map.update!(record, :calculations, &Map.put(&1, calculation.name, value))}}
end
:unknown ->
if calculation.load do
{:cont, {:ok, Map.put(record, calculation.load, nil)}}
else
{:cont,
{:ok, Map.update!(record, :calculations, &Map.put(&1, calculation.name, nil))}}
end
{:error, error} ->
{:halt, {:error, error}}
end
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, record} ->
{:cont, {:ok, [record | records]}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, records} ->
{:ok, Enum.reverse(records)}
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
end
end
defp do_add_aggregates(records, _api, _resource, []), do: {:ok, records}
defp do_add_aggregates(records, api, _resource, aggregates) do
# TODO: support crossing APIs by getting the destination API and setting the destination query context.
Enum.reduce_while(records, {:ok, []}, fn record, {:ok, records} ->
aggregates
|> Enum.reduce_while({:ok, record}, fn %{
kind: :count,
relationship_path: relationship_path,
query: query,
authorization_filter: authorization_filter,
name: name,
load: load
},
{:ok, record} ->
query =
if authorization_filter do
Ash.Query.do_filter(query, authorization_filter)
else
query
end
with {:ok, loaded_record} <- api.load(record, relationship_path),
related <- Ash.Filter.Runtime.get_related(loaded_record, relationship_path),
{:ok, filtered} <-
filter_matches(related, query.filter, api) do
{:cont, {:ok, Map.put(record, load || name, Enum.count(filtered))}}
else
other ->
{:halt, other}
end
end)
|> case do
{:ok, record} ->
{:cont, {:ok, [record | records]}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, records} ->
{:ok, Enum.reverse(records)}
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
end
end
defp get_records(resource, tenant) do
with {:ok, table} <- wrap_or_create_table(resource, tenant),
{:ok, record_tuples} <- ETS.Set.to_list(table),
records <- Enum.map(record_tuples, &elem(&1, 1)) do
cast_records(records, resource)
end
end
@doc false
def cast_records(records, resource) do
records
|> Enum.reduce_while({:ok, []}, fn record, {:ok, casted} ->
case cast_record(record, resource) do
{:ok, casted_record} ->
{:cont, {:ok, [casted_record | casted]}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, records} ->
{:ok, Enum.reverse(records)}
{:error, error} ->
{:error, error}
end
end
@doc false
def cast_record(record, resource) do
resource
|> Ash.Resource.Info.attributes()
|> Enum.reduce_while({:ok, %{}}, fn attribute, {:ok, attrs} ->
case Map.get(record, attribute.name) do
nil ->
{:cont, {:ok, Map.put(attrs, attribute.name, nil)}}
value ->
case Ash.Type.cast_stored(attribute.type, value, attribute.constraints) do
{:ok, value} ->
{:cont, {:ok, Map.put(attrs, attribute.name, value)}}
:error ->
{:halt,
{:error, "Failed to load #{inspect(value)} as type #{inspect(attribute.type)}"}}
{:error, error} ->
{:halt, {:error, error}}
end
end
end)
|> case do
{:ok, attrs} ->
{:ok,
%{
struct(resource, attrs)
| __meta__: %Ecto.Schema.Metadata{state: :loaded, schema: resource}
}}
{:error, error} ->
{:error, error}
end
end
defp filter_matches(records, nil, _api), do: {:ok, records}
defp filter_matches(records, filter, api) do
Ash.Filter.Runtime.filter_matches(api, records, filter)
end
@doc false
@impl true
def upsert(resource, changeset, keys) do
keys = keys || Ash.Resource.Info.primary_key(resource)
if Enum.any?(keys, &is_nil(Ash.Changeset.get_attribute(changeset, &1))) do
create(resource, changeset)
else
key_filters =
Enum.map(keys, fn key ->
{key, Ash.Changeset.get_attribute(changeset, key)}
end)
query = Ash.Query.do_filter(resource, and: [key_filters])
resource
|> resource_to_query(changeset.api)
|> Map.put(:filter, query.filter)
|> Map.put(:tenant, changeset.tenant)
|> run_query(resource)
|> case do
{:ok, []} ->
create(resource, changeset)
{:ok, [result]} ->
to_set = Ash.Changeset.set_on_upsert(changeset, keys)
changeset =
changeset
|> Map.put(:attributes, %{})
|> Map.put(:data, result)
|> Ash.Changeset.force_change_attributes(to_set)
update(resource, changeset)
{:ok, _} ->
{:error, "Multiple records matching keys"}
end
end
end
@doc false
@impl true
def create(resource, changeset) do
pkey =
resource
|> Ash.Resource.Info.primary_key()
|> Enum.into(%{}, fn attr ->
{attr, Ash.Changeset.get_attribute(changeset, attr)}
end)
with {:ok, table} <- wrap_or_create_table(resource, changeset.tenant),
{:ok, record} <- Ash.Changeset.apply_attributes(changeset),
record <- unload_relationships(resource, record),
{:ok, record} <-
put_or_insert_new(table, {pkey, record}, resource) do
{:ok, %{record | __meta__: %Ecto.Schema.Metadata{state: :loaded, schema: resource}}}
else
{:error, error} -> {:error, Ash.Error.to_ash_error(error)}
end
end
defp put_or_insert_new(table, {pkey, record}, resource) do
attributes = resource |> Ash.Resource.Info.attributes()
case dump_to_native(record, attributes) do
{:ok, casted} ->
case ETS.Set.put(table, {pkey, casted}) do
{:ok, set} ->
{_key, record} = ETS.Set.get!(set, pkey)
cast_record(record, resource)
other ->
other
end
other ->
other
end
end
@doc false
def dump_to_native(record, attributes) do
Enum.reduce_while(attributes, {:ok, %{}}, fn attribute, {:ok, attrs} ->
case Map.get(record, attribute.name) do
nil ->
{:cont, {:ok, Map.put(attrs, attribute.name, nil)}}
value ->
case Ash.Type.dump_to_native(
attribute.type,
value,
attribute.constraints
) do
{:ok, casted_value} ->
{:cont, {:ok, Map.put(attrs, attribute.name, casted_value)}}
:error ->
{:halt,
{:error,
"Failed to dump #{inspect(Map.get(record, attribute.name))} as type #{inspect(attribute.type)}"}}
{:error, error} ->
{:halt, {:error, error}}
end
end
end)
end
@doc false
@impl true
def destroy(resource, %{data: record} = changeset) do
do_destroy(resource, record, changeset.tenant)
end
defp do_destroy(resource, record, tenant) do
pkey = Map.take(record, Ash.Resource.Info.primary_key(resource))
with {:ok, table} <- wrap_or_create_table(resource, tenant),
{:ok, _} <- ETS.Set.delete(table, pkey) do
:ok
else
{:error, error} -> {:error, Ash.Error.to_ash_error(error)}
end
end
@doc false
@impl true
def update(resource, changeset) do
pkey = pkey_map(resource, changeset.data)
with {:ok, table} <- wrap_or_create_table(resource, changeset.tenant),
{:ok, record} <- Ash.Changeset.apply_attributes(changeset),
{:ok, record} <-
do_update(table, {pkey, record}, resource),
{:ok, record} <- cast_record(record, resource) do
new_pkey = pkey_map(resource, record)
if new_pkey != pkey do
case destroy(resource, changeset) do
:ok ->
{:ok, %{record | __meta__: %Ecto.Schema.Metadata{state: :loaded, schema: resource}}}
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
end
else
{:ok, %{record | __meta__: %Ecto.Schema.Metadata{state: :loaded, schema: resource}}}
end
else
{:error, error} ->
{:error, Ash.Error.to_ash_error(error)}
end
end
@doc false
def pkey_map(resource, data) do
resource
|> Ash.Resource.Info.primary_key()
|> Enum.into(%{}, fn attr ->
{attr, Map.get(data, attr)}
end)
end
defp do_update(table, {pkey, record}, resource) do
attributes = resource |> Ash.Resource.Info.attributes()
case dump_to_native(record, attributes) do
{:ok, casted} ->
case ETS.Set.get(table, pkey) do
{:ok, {_key, record}} when is_map(record) ->
case ETS.Set.put(table, {pkey, Map.merge(record, casted)}) do
{:ok, set} ->
{_key, record} = ETS.Set.get!(set, pkey)
{:ok, record}
error ->
error
end
{:ok, _} ->
{:error, "Record not found matching: #{inspect(pkey)}"}
other ->
other
end
{:error, error} ->
{:error, error}
end
end
@impl true
def transaction(resource, fun, timeout \\ :infinity) do
folder = folder(resource)
# Serialize transactions with a cluster-wide lock, scoped per blog folder.
:global.trans(
{{:blog, folder}, System.unique_integer()},
fn ->
try do
Process.put({:blog_in_transaction, folder}, true)
{:res, fun.()}
catch
{{:blog_rollback, ^folder}, value} ->
{:error, value}
end
end,
[node() | :erlang.nodes()],
timeout
)
|> case do
{:res, result} -> {:ok, result}
{:error, error} -> {:error, error}
:aborted -> {:error, "transaction failed"}
end
end
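# Assumed helper, not defined in this commit: reads the configured folder
# from the `blog` DSL section above. Spark.Dsl.Extension.get_opt/4 is the
# standard accessor for section options; the default mirrors the schema default.
defp folder(resource) do
  Spark.Dsl.Extension.get_opt(resource, [:blog], :folder, "static/blog")
end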
@impl true
def rollback(resource, error) do
throw({{:blog_rollback, folder(resource)}, error})
end
@impl true
def in_transaction?(resource) do
Process.get({:blog_in_transaction, folder(resource)}, false) == true
end
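# Minimal sketch of the `wrap_or_create_table/2` helper that the functions
# above assume, adapted from the approach Ash.DataLayer.Ets takes. Deriving
# the table name from the resource and tenant is an assumption, not part of
# this commit.
defp wrap_or_create_table(resource, tenant) do
  name = Module.concat(resource, :"#{tenant || "global"}")

  case ETS.Set.wrap_existing(name) do
    {:ok, table} ->
      {:ok, table}

    {:error, :table_not_found} ->
      ETS.Set.new(name: name, protection: :public, ordered: true)

    {:error, error} ->
      {:error, error}
  end
end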
end
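
For reference, a hypothetical resource using this data layer could look like the sketch below; the module name, attributes, and folder value are illustrative, not part of this commit:

```elixir
defmodule MyApp.Post do
  use Ash.Resource,
    data_layer: AshBlog.DataLayer

  blog do
    folder "static/blog"
  end

  attributes do
    uuid_primary_key :id
    attribute :title, :string
  end
end
```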

30
mix.exs Normal file

@@ -0,0 +1,30 @@
defmodule AshBlog.MixProject do
use Mix.Project
def project do
[
app: :ash_blog,
version: "0.1.0",
elixir: "~> 1.14",
start_permanent: Mix.env() == :prod,
deps: deps()
]
end
# Run "mix help compile.app" to learn about applications.
def application do
[
extra_applications: [:logger]
]
end
# Run "mix help deps" to learn about dependencies.
defp deps do
[
{:spark, "~> 0.1.29"},
{:ash, "~> 2.0"}
# {:dep_from_hexpm, "~> 0.3.0"},
# {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}
]
end
end

16
mix.lock Normal file

@@ -0,0 +1,16 @@
%{
"ash": {:hex, :ash, "2.2.0", "4fdc0fef5afb3f5045b1ca4e1ccb139b9f703cbc7c21dc645e32ac9582b11f63", [:mix], [{:comparable, "~> 1.0", [hex: :comparable, repo: "hexpm", optional: false]}, {:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:earmark, "~> 1.4", [hex: :earmark, repo: "hexpm", optional: true]}, {:ecto, "~> 3.7", [hex: :ecto, repo: "hexpm", optional: false]}, {:ets, "~> 0.8.0", [hex: :ets, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:picosat_elixir, "~> 0.2", [hex: :picosat_elixir, repo: "hexpm", optional: false]}, {:spark, "~> 0.1 and >= 0.1.28", [hex: :spark, repo: "hexpm", optional: false]}, {:stream_data, "~> 0.5.0", [hex: :stream_data, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "48eca587e7076fe4f8547e919c0712f081ce85e66c316f6f51dd2535ad046013"},
"comparable": {:hex, :comparable, "1.0.0", "bb669e91cedd14ae9937053e5bcbc3c52bb2f22422611f43b6e38367d94a495f", [:mix], [{:typable, "~> 0.1", [hex: :typable, repo: "hexpm", optional: false]}], "hexpm", "277c11eeb1cd726e7cd41c6c199e7e52fa16ee6830b45ad4cdc62e51f62eb60c"},
"decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"},
"ecto": {:hex, :ecto, "3.9.1", "67173b1687afeb68ce805ee7420b4261649d5e2deed8fe5550df23bab0bc4396", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c80bb3d736648df790f7f92f81b36c922d9dd3203ca65be4ff01d067f54eb304"},
"elixir_make": {:hex, :elixir_make, "0.6.3", "bc07d53221216838d79e03a8019d0839786703129599e9619f4ab74c8c096eac", [:mix], [], "hexpm", "f5cbd651c5678bcaabdbb7857658ee106b12509cd976c2c2fca99688e1daf716"},
"ets": {:hex, :ets, "0.8.1", "8ff9bcda5682b98493f8878fc9dbd990e48d566cba8cce59f7c2a78130da29ea", [:mix], [], "hexpm", "6be41b50adb5bc5c43626f25ea2d0af1f4a242fb3fad8d53f0c67c20b78915cc"},
"jason": {:hex, :jason, "1.4.0", "e855647bc964a44e2f67df589ccf49105ae039d4179db7f6271dfd3843dc27e6", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "79a3791085b2a0f743ca04cec0f7be26443738779d09302e01318f97bdb82121"},
"nimble_options": {:hex, :nimble_options, "0.4.0", "c89babbab52221a24b8d1ff9e7d838be70f0d871be823165c94dd3418eea728f", [:mix], [], "hexpm", "e6701c1af326a11eea9634a3b1c62b475339ace9456c1a23ec3bc9a847bca02d"},
"picosat_elixir": {:hex, :picosat_elixir, "0.2.2", "1cacfdb4fb0c3ead5e5e9b1e98ac822a777f07eab35e29c3f8fc7086de2bfb36", [:make, :mix], [{:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "9d0cc569552cca417abea8270a54b71153a63be4b951ff249e94642f1c0f35d1"},
"sourceror": {:hex, :sourceror, "0.11.2", "549ce48be666421ac60cfb7f59c8752e0d393baa0b14d06271d3f6a8c1b027ab", [:mix], [], "hexpm", "9ab659118896a36be6eec68ff7b0674cba372fc8e210b1e9dc8cf2b55bb70dfb"},
"spark": {:hex, :spark, "0.1.29", "36f29894fdf8b30aa866a677134654db72807cf02a998aee948a0c5e98a48018", [:mix], [{:nimble_options, "~> 0.4.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:sourceror, "~> 0.1", [hex: :sourceror, repo: "hexpm", optional: false]}], "hexpm", "97ed044974cd47e9286d9fa0fd033620bee6b3569bee27e79d1b9bdb4605371e"},
"stream_data": {:hex, :stream_data, "0.5.0", "b27641e58941685c75b353577dc602c9d2c12292dd84babf506c2033cd97893e", [:mix], [], "hexpm", "012bd2eec069ada4db3411f9115ccafa38540a3c78c4c0349f151fc761b9e271"},
"telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"},
"typable": {:hex, :typable, "0.3.0", "0431e121d124cd26f312123e313d2689b9a5322b15add65d424c07779eaa3ca1", [:mix], [], "hexpm", "880a0797752da1a4c508ac48f94711e04c86156f498065a83d160eef945858f8"},
}

8
test/ash_blog_test.exs Normal file

@@ -0,0 +1,8 @@
defmodule AshBlogTest do
use ExUnit.Case
doctest AshBlog
test "greets the world" do
assert AshBlog.hello() == :world
end
end

1
test/test_helper.exs Normal file

@@ -0,0 +1 @@
ExUnit.start()