Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@ export * from "./payment-splitter";
export * from "./plutus-nft";
export * from "./swap";
export * from "./vesting";
export * from "./programmable-tokens";
313 changes: 313 additions & 0 deletions src/programmable-tokens/aiken-workspace-standard/README.md

Large diffs are not rendered by default.

27 changes: 27 additions & 0 deletions src/programmable-tokens/aiken-workspace-standard/aiken.lock
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# This file was generated by Aiken
# You typically do not need to edit this file

[[requirements]]
name = "aiken-lang/stdlib"
version = "v3.0.0"
source = "github"

[[requirements]]
name = "aiken-lang/fuzz"
version = "main"
source = "github"

[[packages]]
name = "aiken-lang/stdlib"
version = "v3.0.0"
requirements = []
source = "github"

[[packages]]
name = "aiken-lang/fuzz"
version = "main"
requirements = []
source = "github"

[etags]
"aiken-lang/fuzz@main" = [{ secs_since_epoch = 1775479957, nanos_since_epoch = 627298000 }, "9843473958e51725a9274b487d2d4aac0395ec1a2e30f090724fa737226bc127"]
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "iohk/programmable-tokens"
version = "0.3.0"
compiler = "v1.1.17"
compiler = "v1.1.21"
plutus = "v3"
license = "Apache-2.0"
description = "Aiken implementation of CIP-0143 programmable tokens (migrated from Plutarch)"
Expand All @@ -15,4 +15,9 @@ name = "aiken-lang/stdlib"
version = "v3.0.0"
source = "github"

[[dependencies]]
name = "aiken-lang/fuzz"
version = "main"
source = "github"

[config]
5 changes: 5 additions & 0 deletions src/programmable-tokens/aiken-workspace-standard/build.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build the Aiken workspace and copy the resulting blueprint (plutus.json)
# into the off-chain Java project's resources.
#
# Shebang uses `env` so the script works wherever bash is installed
# (the previous hard-coded /usr/local/bin/bash only exists on some macOS
# Homebrew setups and breaks on standard Linux).

# -e: abort on the first failing command; -u: error on unset variables;
# -o pipefail: a pipeline fails if any stage fails; -x: echo commands.
set -euxo pipefail

aiken build
cp plutus.json ../programmable-tokens-offchain-java/src/main/resources
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
/// No-op variant of `assert_no_ada_policy`: returns the value unchanged.
/// This is the default environment implementation; the checking variant
/// (compiled with `--env with_assertions`) additionally verifies that the
/// given pairs carry no entry under the ada policy id.
pub fn assert_no_ada_policy(value: a) -> a {
  value
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
use aiken/collection/pairs
use cardano/assets.{PolicyId, ada_policy_id}

/// Enforces that the given value has no ada.
///
/// Assertion-enabled environment variant (selected via `--env
/// with_assertions`): emits a trace, then fails unless the ada policy id is
/// absent from the given pairs; otherwise returns the value unchanged.
pub fn assert_no_ada_policy(value: Pairs<PolicyId, v>) -> Pairs<PolicyId, v> {
  trace @"assert_no_ada_policy"
  // `expect None` aborts the script if any entry is keyed by ada_policy_id.
  expect None = pairs.get_first(value, ada_policy_id)
  value
}
156 changes: 156 additions & 0 deletions src/programmable-tokens/aiken-workspace-standard/lib/assets.ak
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
use aiken/builtin.{less_than_bytearray}
use aiken/collection/dict
use cardano/assets.{PolicyId, Value}
use cardano/transaction.{Output}
use env
use list
use tokens.{Tokens}

/// A multi-asset map keyed by policy id, each policy mapping to its tokens.
pub type Assets =
  Pairs<PolicyId, Tokens>

/// Folding strategy over input-like elements (see `collect`): given an
/// element, a `select` continuation (fold this output's assets into the sum)
/// and a `discard` continuation (keep the sum unchanged), produces the
/// updated sum.
pub type SumStrategy<i> =
  fn(i, fn(Output) -> Assets, fn() -> Assets) -> Assets

/// Convenient helper extracting the asset pairs of a `Value`. Usually comes
/// at no cost, since the compiler tends to inline it.
pub fn from_value(self: Value) -> Assets {
  dict.to_pairs(assets.to_dict(self))
}

/// Split a dictionary at the given key, continuing with the elements before
/// (in reverse key order), the value found at the key (or an empty dict when
/// absent), and the elements after (in key order).
pub fn split_at(
  self: Value,
  at: PolicyId,
  return: fn(Assets, Tokens, Assets) -> result,
) -> result {
  let entries = from_value(self)
  do_split_at(entries, at, [], return)
}

/// Recursive worker for `split_at`, threading the already-visited entries
/// (in reverse order) through `before`.
fn do_split_at(
  self: Assets,
  at: PolicyId,
  before: Assets,
  return: fn(Assets, Tokens, Assets) -> result,
) -> result {
  when self is {
    [] ->
      // Exhausted the list without finding the key.
      return(before, dict.empty, [])
    [head, ..tail] -> {
      let key = head.1st
      if key == at {
        // Found the searched key: hand back its value and what follows.
        return(before, head.2nd, tail)
      } else if less_than_bytearray(key, at) {
        // Still strictly before the searched key: accumulate and keep going.
        do_split_at(tail, at, [head, ..before], return)
      } else {
        // Keys are sorted, so everything from here on lies after the key;
        // no need to continue searching.
        return(before, dict.empty, self)
      }
    }
  }
}

/// Lookup the first asset policy from an output, or fails loudly if the output
/// holds no tokens (beyond Ada).
///
/// NOTE(review): relies on the value's pairs being sorted with the ada policy
/// first — `list.tail` drops it, and `list.head` then yields the first
/// non-ada entry. Presumably the local `list` module's head/tail fail hard on
/// empty/singleton input rather than returning `Option` (the `.1st` access is
/// unconditional) — confirm against that module.
pub fn peek_first(self: Output) -> PolicyId {
  list.head(list.tail(from_value(self.value))).1st
}

/// A faster version of assets.merge that preserves empty maps. This allows to
/// bypass the null check on value since quantities can only ever increase.
/// Importantly, it is also *necessary* to ensure that policies are retained in
/// the map for later validations even if they result in no outputs (e.g. a burn
/// fully compensates a spend).
///
/// It also focuses on assets and completely ignores Ada.
///
/// The function recurses over a list of input-like objects, and lets the
/// caller select whether to add an output or not.
///
/// ## Example
///
/// ```aiken
/// collect(
///   self.inputs,
///   [],
///   fn(input, select, discard) {
///     let output = input.output
///     if output.address.payment_credential == needle {
///       select(output)
///     } else {
///       discard()
///     }
///   },
/// )
/// ```
///
/// /!\ PRE-CONDITION /!\
/// The given zero assets is expected to not contain any Ada. This is enforced
/// when assertions are enabled (`--env with_assertions`).
///
/// This is generally true when the initial zero is an empty list or coming
/// from the mint value.
pub fn collect(elems: List<i>, zero: Assets, strategy: SumStrategy<i>) -> Assets {
  do_collect(elems, strategy, env.assert_no_ada_policy(zero))
}

/// Recursive worker for `collect`: folds `elems` into `sum`, letting the
/// strategy pick, per element, whether an output contributes to the sum.
fn do_collect(elems: List<i>, strategy: SumStrategy<i>, sum: Assets) -> Assets {
  when elems is {
    [] -> sum
    [head, ..tail] -> {
      // Continuation used when the strategy selects an output.
      let on_select =
        fn(output) {
          // Drop ADA, guaranteed to be present in outputs.
          let extracted = list.tail(from_value(output.value))
          // NOTE: left-optimised union
          // The `union` consumes the left argument into the right argument,
          // so it is generally better to provide the smallest argument as the
          // left value. The `sum` will generally grow as large as outputs,
          // but it is very often smaller initially (often empty); hence we
          // force it as first argument here.
          union(sum, extracted)
        }
      // Continuation used when the strategy discards the element.
      let on_discard = fn() { sum }
      do_collect(tail, strategy, strategy(head, on_select, on_discard))
    }
  }
}

/// Merge two Assets by summing token quantities; used to combine the
/// validated input prog value with the validated mint prog value.
pub fn union(left: Assets, right: Assets) -> Assets {
  when left is {
    // NOTE: Preserving null assets
    // Entries must never be discarded even when they sum to an empty dict:
    // the 'left' assets may carry negative quantities coming from burns, and
    // a burn fully covered by a spend must still leave its policy key in the
    // map so that the validations tied to that policy occur as expected.
    [head, ..rest] -> union(rest, do_insert(right, head.1st, head.2nd))
    [] -> right
  }
}

/// Insert (or merge) a single policy entry into a key-sorted Assets list,
/// preserving the ordering invariant.
fn do_insert(self: Assets, k1: PolicyId, v1: Tokens) -> Assets {
  when self is {
    // Key larger than everything seen: append at the end.
    [] -> [Pair(k1, v1)]
    [Pair(k2, v2), ..rest] ->
      if k1 == k2 {
        // Same policy: merge the two token maps.
        [Pair(k1, tokens.union(v1, v2)), ..rest]
      } else if less_than_bytearray(k1, k2) {
        // Keys are sorted: insert just before the first larger key.
        [Pair(k1, v1), ..self]
      } else {
        // Keep scanning; re-attach the current head on the way back.
        [Pair(k2, v2), ..do_insert(rest, k1, v1)]
      }
  }
}
Loading
Loading