Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,401 @@
# @ruvector/attention-unified-wasm - 18+ Attention Mechanisms in WASM
[![npm version](https://img.shields.io/npm/v/ruvector-attention-unified-wasm.svg)](https://www.npmjs.com/package/ruvector-attention-unified-wasm)
[![License: MIT OR Apache-2.0](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/ruvnet/ruvector)
[![Bundle Size](https://img.shields.io/badge/bundle%20size-120KB%20gzip-green.svg)](https://www.npmjs.com/package/ruvector-attention-unified-wasm)
[![WebAssembly](https://img.shields.io/badge/WebAssembly-654FF0?logo=webassembly&logoColor=white)](https://webassembly.org/)
**Unified WebAssembly library** with 18+ attention mechanisms spanning Neural, DAG, Graph, and State Space Model categories. Single import for all your attention needs in browser and edge environments.
## Key Features
- **7 Neural Attention**: Scaled dot-product, multi-head, hyperbolic, linear, flash, local-global, MoE
- **7 DAG Attention**: Topological, causal cone, critical path, MinCut-gated, hierarchical Lorentz, parallel branch, temporal BTSP
- **3 Graph Attention**: GAT, GCN, GraphSAGE
- **1 State Space**: Mamba SSM with hybrid attention
- **Unified API**: Single selector for all mechanisms
- **WASM-Optimized**: Runs in browsers, Node.js, and edge runtimes
## Installation
```bash
npm install ruvector-attention-unified-wasm
# or
yarn add ruvector-attention-unified-wasm
# or
pnpm add ruvector-attention-unified-wasm
```
## Quick Start
```typescript
import init, {
UnifiedAttention,
availableMechanisms,
scaledDotAttention,
WasmMultiHeadAttention,
MambaSSMAttention,
MambaConfig
} from 'ruvector-attention-unified-wasm';
await init();
// List all available mechanisms
const mechanisms = availableMechanisms();
console.log(mechanisms);
// { neural: [...], dag: [...], graph: [...], ssm: [...] }
// Use unified selector
const attention = new UnifiedAttention("multi_head");
console.log(`Category: ${attention.category}`); // "neural"
console.log(`Supports sequences: ${attention.supportsSequences()}`);
// Direct attention computation
const query = new Float32Array([1.0, 0.5, 0.3, 0.1]);
const keys = [new Float32Array([0.9, 0.4, 0.2, 0.1])];
const values = [new Float32Array([1.0, 1.0, 1.0, 1.0])];
const output = scaledDotAttention(query, keys, values);
```
## Attention Categories
### Neural Attention (7 mechanisms)
Standard transformer-style attention mechanisms for sequence processing.
```typescript
import {
scaledDotAttention,
WasmMultiHeadAttention,
WasmHyperbolicAttention,
WasmLinearAttention,
WasmFlashAttention,
WasmLocalGlobalAttention,
WasmMoEAttention
} from 'ruvector-attention-unified-wasm';
// Scaled Dot-Product Attention
const output = scaledDotAttention(query, keys, values, scale);
// Multi-Head Attention
const mha = new WasmMultiHeadAttention(256, 8); // 256 dim, 8 heads
const attended = mha.compute(query, keys, values);
console.log(`Heads: ${mha.numHeads}, Head dim: ${mha.headDim}`);
// Hyperbolic Attention (for hierarchical data)
const hyperbolic = new WasmHyperbolicAttention(64, -1.0); // curvature = -1
const hypOut = hyperbolic.compute(query, keys, values);
// Linear Attention (O(n) complexity)
const linear = new WasmLinearAttention(64, 32); // 32 random features
const linOut = linear.compute(query, keys, values);
// Flash Attention (memory-efficient)
const flash = new WasmFlashAttention(64, 32); // block size 32
const flashOut = flash.compute(query, keys, values);
// Local-Global Attention (sparse)
const localGlobal = new WasmLocalGlobalAttention(64, 128, 4); // window=128, 4 global
const lgOut = localGlobal.compute(query, keys, values);
// Mixture of Experts Attention
const moe = new WasmMoEAttention(64, 8, 2); // 8 experts, top-2
const moeOut = moe.compute(query, keys, values);
```
### DAG Attention (7 mechanisms)
Specialized attention for Directed Acyclic Graphs, query plans, and workflow optimization.
```typescript
import {
WasmQueryDag,
WasmTopologicalAttention,
WasmCausalConeAttention,
WasmCriticalPathAttention,
WasmMinCutGatedAttention,
WasmHierarchicalLorentzAttention,
WasmParallelBranchAttention,
WasmTemporalBTSPAttention
} from 'ruvector-attention-unified-wasm';
// Create a query DAG
const dag = new WasmQueryDag();
const scan = dag.addNode("scan", 10.0);
const filter = dag.addNode("filter", 5.0);
const join = dag.addNode("join", 20.0);
const aggregate = dag.addNode("aggregate", 15.0);
dag.addEdge(scan, filter);
dag.addEdge(filter, join);
dag.addEdge(scan, join);
dag.addEdge(join, aggregate);
// Topological Attention (position-aware)
const topo = new WasmTopologicalAttention(0.9); // decay factor
const topoScores = topo.forward(dag);
// Causal Cone Attention (lightcone-based)
const causal = new WasmCausalConeAttention(0.8, 0.6); // future discount, ancestor weight
const causalScores = causal.forward(dag);
// Critical Path Attention
const critical = new WasmCriticalPathAttention(2.0, 0.5); // path weight, branch penalty
const criticalScores = critical.forward(dag);
// MinCut-Gated Attention (flow-based)
const mincut = new WasmMinCutGatedAttention(0.5); // gate threshold
const mincutScores = mincut.forward(dag);
// Hierarchical Lorentz Attention (hyperbolic DAG)
const lorentz = new WasmHierarchicalLorentzAttention(-1.0, 0.1); // curvature, temperature
const lorentzScores = lorentz.forward(dag);
// Parallel Branch Attention
const parallel = new WasmParallelBranchAttention(4, 0.2); // max branches, sync penalty
const parallelScores = parallel.forward(dag);
// Temporal BTSP Attention
const btsp = new WasmTemporalBTSPAttention(0.95, 0.1); // decay, baseline
const btspScores = btsp.forward(dag);
```
### Graph Attention (3 mechanisms)
Attention mechanisms for graph-structured data.
```typescript
import {
WasmGNNLayer,
GraphAttentionFactory,
graphHierarchicalForward,
graphDifferentiableSearch,
WasmSearchConfig
} from 'ruvector-attention-unified-wasm';
// Create GNN layer with attention
const gnn = new WasmGNNLayer(
64, // input dimension
128, // hidden dimension
4, // attention heads
0.1 // dropout
);
// Forward pass for a node
const nodeEmbed = new Float32Array(64);
const neighborEmbeds = [
new Float32Array(64),
new Float32Array(64)
];
const edgeWeights = new Float32Array([0.8, 0.6]);
const updated = gnn.forward(nodeEmbed, neighborEmbeds, edgeWeights);
console.log(`Output dim: ${gnn.outputDim}`);
// Get available graph attention types
const types = GraphAttentionFactory.availableTypes(); // ["GAT", "GCN", "GraphSAGE"]
// Differentiable search
const config = new WasmSearchConfig(5, 0.1); // top-5, temperature
const candidates = [query, ...keys];
const searchResults = graphDifferentiableSearch(query, candidates, config);
// Hierarchical forward through multiple layers
const gnn2 = new WasmGNNLayer(128, 128, 4, 0.1);
const gnn3 = new WasmGNNLayer(128, 128, 4, 0.1);
const layers = [gnn, gnn2, gnn3];
const layerEmbeddings = [neighborEmbeds, neighborEmbeds, neighborEmbeds]; // candidate embeddings per layer
const final = graphHierarchicalForward(query, layerEmbeddings, layers);
```
### Mamba SSM (State Space Model)
Selective State Space Model for efficient sequence processing with O(n) complexity.
```typescript
import {
MambaConfig,
MambaSSMAttention,
HybridMambaAttention
} from 'ruvector-attention-unified-wasm';
// Configure Mamba
const config = new MambaConfig(256) // d_model = 256
.withStateDim(16) // state space dimension
.withExpandFactor(2) // expansion factor
.withConvKernelSize(4); // conv kernel
console.log(`Dim: ${config.dim}, State: ${config.state_dim}`);
// Create Mamba SSM Attention
const mamba = new MambaSSMAttention(config);
console.log(`Inner dim: ${mamba.innerDim}`);
// Or use defaults
const mambaDefault = MambaSSMAttention.withDefaults(128);
// Forward pass (seq_len, dim) flattened to 1D
const seqLen = 32;
const input = new Float32Array(seqLen * 256);
const output = mamba.forward(input, seqLen);
// Get pseudo-attention scores for visualization
const scores = mamba.getAttentionScores(input, seqLen);
// Hybrid Mamba + Local Attention
const hybrid = new HybridMambaAttention(config, 64); // local window = 64
const hybridOut = hybrid.forward(input, seqLen);
console.log(`Local window: ${hybrid.localWindow}`);
```
## Unified Selector API
```typescript
import { UnifiedAttention } from 'ruvector-attention-unified-wasm';
// Create selector for any mechanism
const attention = new UnifiedAttention("mamba");
// Query capabilities
console.log(`Mechanism: ${attention.mechanism}`); // "mamba"
console.log(`Category: ${attention.category}`); // "ssm"
console.log(`Supports sequences: ${attention.supportsSequences()}`); // true
console.log(`Supports graphs: ${attention.supportsGraphs()}`); // false
console.log(`Supports hyperbolic: ${attention.supportsHyperbolic()}`); // false
// Valid mechanisms:
// Neural: scaled_dot_product, multi_head, hyperbolic, linear, flash, local_global, moe
// DAG: topological, causal_cone, critical_path, mincut_gated, hierarchical_lorentz, parallel_branch, temporal_btsp
// Graph: gat, gcn, graphsage
// SSM: mamba
```
## Utility Functions
```typescript
import { softmax, temperatureSoftmax, cosineSimilarity, getStats } from 'ruvector-attention-unified-wasm';
// Softmax normalization
const logits = new Float32Array([1.0, 2.0, 3.0]);
const probs = softmax(logits);
// Temperature-scaled softmax
const sharper = temperatureSoftmax(logits, 0.5); // More peaked
const flatter = temperatureSoftmax(logits, 2.0); // More uniform
// Cosine similarity
const a = new Float32Array([1, 0, 0]);
const b = new Float32Array([0.7, 0.7, 0]);
const sim = cosineSimilarity(a, b);
// Library statistics
const stats = getStats();
console.log(`Total mechanisms: ${stats.total_mechanisms}`); // 18
console.log(`Neural: ${stats.neural_count}`); // 7
console.log(`DAG: ${stats.dag_count}`); // 7
console.log(`Graph: ${stats.graph_count}`); // 3
console.log(`SSM: ${stats.ssm_count}`); // 1
```
## Tensor Compression
```typescript
import { WasmTensorCompress } from 'ruvector-attention-unified-wasm';
const compressor = new WasmTensorCompress();
const embedding = new Float32Array(256);
// Compress based on access frequency
const compressed = compressor.compress(embedding, 0.5); // 50% access frequency
const decompressed = compressor.decompress(compressed);
// Or specify compression level directly
const pq8 = compressor.compressWithLevel(embedding, "pq8"); // 8-bit product quantization
// Compression levels: "none", "half", "pq8", "pq4", "binary"
const ratio = compressor.getCompressionRatio(0.5);
```
## Performance Benchmarks
| Mechanism | Complexity | Latency (256-dim) |
|-----------|------------|-------------------|
| Scaled Dot-Product | O(n^2) | ~50us |
| Multi-Head (8 heads) | O(n^2) | ~200us |
| Linear | O(n) | ~30us |
| Flash | O(n^2) | ~100us (memory-efficient) |
| Mamba SSM | O(n) | ~80us |
| Topological DAG | O(V+E) | ~40us |
| GAT | O(E*h) | ~150us |
## API Reference Summary
### Neural Attention
| Class | Description |
|-------|-------------|
| `WasmMultiHeadAttention` | Parallel attention heads |
| `WasmHyperbolicAttention` | Hyperbolic space attention |
| `WasmLinearAttention` | O(n) performer-style |
| `WasmFlashAttention` | Memory-efficient blocked |
| `WasmLocalGlobalAttention` | Sparse with global tokens |
| `WasmMoEAttention` | Mixture of experts |
### DAG Attention
| Class | Description |
|-------|-------------|
| `WasmTopologicalAttention` | Position in topological order |
| `WasmCausalConeAttention` | Lightcone causality |
| `WasmCriticalPathAttention` | Critical path weighting |
| `WasmMinCutGatedAttention` | Flow-based gating |
| `WasmHierarchicalLorentzAttention` | Multi-scale hyperbolic |
| `WasmParallelBranchAttention` | Parallel DAG branches |
| `WasmTemporalBTSPAttention` | Temporal eligibility traces |
### Graph Attention
| Class | Description |
|-------|-------------|
| `WasmGNNLayer` | Multi-head graph attention |
| `GraphAttentionFactory` | Factory for graph attention types |
### State Space
| Class | Description |
|-------|-------------|
| `MambaSSMAttention` | Selective state space model |
| `HybridMambaAttention` | Mamba + local attention |
| `MambaConfig` | Mamba configuration |
## Use Cases
- **Transformers**: Standard and efficient attention variants
- **Query Optimization**: DAG-aware attention for SQL planners
- **Knowledge Graphs**: Graph attention for entity reasoning
- **Long Sequences**: O(n) attention with Mamba SSM
- **Hierarchical Data**: Hyperbolic attention for trees
- **Sparse Attention**: Local-global for long documents
## Bundle Size
- **WASM binary**: ~331KB (uncompressed)
- **Gzip compressed**: ~120KB
- **JavaScript glue**: ~12KB
## Related Packages
- [ruvector-learning-wasm](https://www.npmjs.com/package/ruvector-learning-wasm) - MicroLoRA adaptation
- [ruvector-nervous-system-wasm](https://www.npmjs.com/package/ruvector-nervous-system-wasm) - Bio-inspired neural
- [ruvector-economy-wasm](https://www.npmjs.com/package/ruvector-economy-wasm) - CRDT credit economy
## License
MIT OR Apache-2.0
## Links
- [GitHub Repository](https://github.com/ruvnet/ruvector)
- [Full Documentation](https://ruv.io)
- [Bug Reports](https://github.com/ruvnet/ruvector/issues)
---
**Keywords**: attention mechanism, transformer, multi-head attention, DAG attention, graph neural network, GAT, GCN, GraphSAGE, Mamba, SSM, state space model, WebAssembly, WASM, hyperbolic attention, linear attention, flash attention, query optimization, neural network, deep learning, browser ML

View File

@@ -0,0 +1,43 @@
{
"name": "@ruvector/attention-unified-wasm",
"type": "module",
"collaborators": [
"RuVector Team"
],
"author": "RuVector Team <ruvnet@users.noreply.github.com>",
"description": "Unified WebAssembly bindings for 18+ attention mechanisms: Neural, DAG, Graph, and Mamba SSM",
"version": "0.1.29",
"license": "MIT OR Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"bugs": {
"url": "https://github.com/ruvnet/ruvector/issues"
},
"files": [
"ruvector_attention_unified_wasm_bg.wasm",
"ruvector_attention_unified_wasm.js",
"ruvector_attention_unified_wasm.d.ts",
"ruvector_attention_unified_wasm_bg.wasm.d.ts",
"README.md"
],
"main": "ruvector_attention_unified_wasm.js",
"homepage": "https://ruv.io",
"types": "ruvector_attention_unified_wasm.d.ts",
"sideEffects": [
"./snippets/*"
],
"keywords": [
"attention",
"wasm",
"neural",
"dag",
"mamba",
"ruvector",
"webassembly",
"transformer",
"graph-attention",
"state-space-models"
]
}

View File

@@ -0,0 +1,790 @@
/* tslint:disable */
/* eslint-disable */
/**
 * Static metadata helpers for the DAG attention mechanisms in this module.
 * Not constructible from JS (constructor is private); use the static methods.
 * `free()` / `Symbol.dispose` release the WASM-side object.
 */
export class DagAttentionFactory {
private constructor();
free(): void;
[Symbol.dispose](): void;
/**
* Get available DAG attention types
*/
static availableTypes(): any;
/**
* Get description for a DAG attention type
*/
static getDescription(attention_type: string): string;
}
/**
 * Static metadata helpers for the graph attention mechanisms (GAT, GCN, GraphSAGE —
 * see `GraphAttentionType`). Not constructible from JS (constructor is private).
 */
export class GraphAttentionFactory {
private constructor();
free(): void;
[Symbol.dispose](): void;
/**
* Get recommended use cases for a graph attention type
*/
static getUseCases(attention_type: string): any;
/**
* Get available graph attention types
*/
static availableTypes(): any;
/**
* Get description for a graph attention type
*/
static getDescription(attention_type: string): string;
}
/**
 * Graph attention mechanism types
 *
 * Presumably mirrors the type strings returned by
 * `GraphAttentionFactory.availableTypes()` — verify against the factory.
 */
export enum GraphAttentionType {
/**
* Graph Attention Networks (Velickovic et al., 2018)
*/
GAT = 0,
/**
* Graph Convolutional Networks (Kipf & Welling, 2017)
*/
GCN = 1,
/**
* GraphSAGE (Hamilton et al., 2017)
*/
GraphSAGE = 2,
}
/**
 * Hybrid layer combining a Mamba SSM (configured via `MambaConfig`) with
 * windowed local attention of size `local_window`.
 */
export class HybridMambaAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new hybrid Mamba-Attention layer
*/
constructor(config: MambaConfig, local_window: number);
/**
* Forward pass
*
* `input` is the sequence (seq_len, dim) flattened to 1D, as with
* `MambaSSMAttention.forward` — output has the same flattened layout.
*/
forward(input: Float32Array, seq_len: number): Float32Array;
/**
* Get local window size
*/
readonly localWindow: number;
}
/**
 * Configuration for Mamba SSM layers.
 *
 * Builder-style: `withStateDim` / `withExpandFactor` / `withConvKernelSize`
 * each return a `MambaConfig`, so calls can be chained after the constructor.
 * Fields are also directly readable/writable (snake_case, matching the
 * WASM-side struct).
 */
export class MambaConfig {
free(): void;
[Symbol.dispose](): void;
/**
* Set state space dimension
*/
withStateDim(state_dim: number): MambaConfig;
/**
* Set expansion factor
*/
withExpandFactor(factor: number): MambaConfig;
/**
* Set convolution kernel size
*/
withConvKernelSize(size: number): MambaConfig;
/**
* Create a new Mamba configuration
*/
constructor(dim: number);
/**
* Model dimension (d_model)
*/
dim: number;
/**
* State space dimension (n)
*/
state_dim: number;
/**
* Expansion factor for inner dimension
*/
expand_factor: number;
/**
* Convolution kernel size
*/
conv_kernel_size: number;
/**
* Delta (discretization step) range minimum
*/
dt_min: number;
/**
* Delta range maximum
*/
dt_max: number;
/**
* Whether to use learnable D skip connection
*/
use_d_skip: boolean;
}
/**
 * Selective State Space Model (Mamba) layer with an attention-like API.
 * Construct with an explicit `MambaConfig` or via `withDefaults(dim)`.
 */
export class MambaSSMAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create with default configuration
*/
static withDefaults(dim: number): MambaSSMAttention;
/**
* Compute attention-like scores (for visualization/analysis)
*
* Returns pseudo-attention scores showing which positions influence output
*/
getAttentionScores(input: Float32Array, seq_len: number): Float32Array;
/**
* Create a new Mamba SSM attention layer
*/
constructor(config: MambaConfig);
/**
* Forward pass through Mamba SSM
*
* # Arguments
* * `input` - Input sequence (seq_len, dim) flattened to 1D
* * `seq_len` - Sequence length
*
* # Returns
* Output sequence (seq_len, dim) flattened to 1D
*/
forward(input: Float32Array, seq_len: number): Float32Array;
/**
* Get the configuration
*/
readonly config: MambaConfig;
/**
* Get the inner dimension
*/
readonly innerDim: number;
}
/**
 * Capability-querying selector over all mechanisms in this package.
 * Construct with a mechanism string (e.g. "multi_head", "mamba" — the
 * package README lists the full set per category); then query `category`
 * and the `supports*()` capability flags.
 */
export class UnifiedAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Check if this mechanism supports graph/DAG structures
*/
supportsGraphs(): boolean;
/**
* Check if this mechanism supports sequence processing
*/
supportsSequences(): boolean;
/**
* Check if this mechanism supports hyperbolic geometry
*/
supportsHyperbolic(): boolean;
/**
* Create a new unified attention selector
*/
constructor(mechanism: string);
/**
* Get the category of the selected mechanism
*/
readonly category: string;
/**
* Get the currently selected mechanism type
*/
readonly mechanism: string;
}
/**
 * DAG attention scoring nodes by causal-cone (lightcone) membership.
 * `forward` returns one attention score per node of the given DAG.
 */
export class WasmCausalConeAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new causal cone attention instance
*
* # Arguments
* * `future_discount` - Discount for future nodes
* * `ancestor_weight` - Weight for ancestor influence
*/
constructor(future_discount: number, ancestor_weight: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * DAG attention weighting nodes by critical-path membership, with a
 * penalty applied to branching nodes.
 */
export class WasmCriticalPathAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new critical path attention instance
*
* # Arguments
* * `path_weight` - Weight for critical path membership
* * `branch_penalty` - Penalty for branching nodes
*/
constructor(path_weight: number, branch_penalty: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Memory-efficient blocked (tiled) attention.
 * `keys`/`values` are JS arrays of Float32Array vectors (typed `any` by the
 * binding layer), matching the other `compute()`-style classes here.
 */
export class WasmFlashAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new flash attention instance
*
* # Arguments
* * `dim` - Embedding dimension
* * `block_size` - Block size for tiled computation
*/
constructor(dim: number, block_size: number);
/**
* Compute flash attention
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
}
/**
 * Single graph-neural-network layer with multi-head attention over a node's
 * neighborhood. Also usable as an element of `graphHierarchicalForward`.
 */
export class WasmGNNLayer {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new GNN layer with attention
*
* # Arguments
* * `input_dim` - Dimension of input node embeddings
* * `hidden_dim` - Dimension of hidden representations
* * `heads` - Number of attention heads
* * `dropout` - Dropout rate (0.0 to 1.0)
*/
constructor(input_dim: number, hidden_dim: number, heads: number, dropout: number);
/**
* Forward pass through the GNN layer
*
* # Arguments
* * `node_embedding` - Current node's embedding (Float32Array)
* * `neighbor_embeddings` - Embeddings of neighbor nodes (array of Float32Arrays)
* * `edge_weights` - Weights of edges to neighbors (Float32Array)
*
* # Returns
* Updated node embedding (Float32Array)
*/
forward(node_embedding: Float32Array, neighbor_embeddings: any, edge_weights: Float32Array): Float32Array;
/**
* Get the output dimension
*/
readonly outputDim: number;
}
/**
 * DAG attention in hyperbolic (Lorentz-model) space for hierarchical
 * structures; softmax sharpness is controlled by `temperature`.
 */
export class WasmHierarchicalLorentzAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new hierarchical Lorentz attention instance
*
* # Arguments
* * `curvature` - Hyperbolic curvature parameter
* * `temperature` - Temperature for softmax
*/
constructor(curvature: number, temperature: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Attention computed in hyperbolic space (negative `curvature`),
 * intended for hierarchical/tree-like data.
 */
export class WasmHyperbolicAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new hyperbolic attention instance
*
* # Arguments
* * `dim` - Embedding dimension
* * `curvature` - Hyperbolic curvature parameter (negative for hyperbolic space)
*/
constructor(dim: number, curvature: number);
/**
* Compute hyperbolic attention
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
/**
* Get the curvature parameter
*/
readonly curvature: number;
}
/**
 * Linear-complexity attention using a random-feature kernel approximation
 * (`num_features` random features), performer-style.
 */
export class WasmLinearAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new linear attention instance
*
* # Arguments
* * `dim` - Embedding dimension
* * `num_features` - Number of random features for kernel approximation
*/
constructor(dim: number, num_features: number);
/**
* Compute linear attention
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
}
/**
 * Sparse attention combining a local window of size `local_window` with
 * `global_tokens` globally-attending tokens.
 */
export class WasmLocalGlobalAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new local-global attention instance
*
* # Arguments
* * `dim` - Embedding dimension
* * `local_window` - Size of local attention window
* * `global_tokens` - Number of global attention tokens
*/
constructor(dim: number, local_window: number, global_tokens: number);
/**
* Compute local-global attention
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
}
/**
 * Flow-based DAG attention: node scores are gated by a min-cut criterion
 * with the given `gate_threshold`.
 */
export class WasmMinCutGatedAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new MinCut-gated attention instance
*
* # Arguments
* * `gate_threshold` - Threshold for gating (0.0-1.0)
*/
constructor(gate_threshold: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Mixture-of-Experts attention: routes each query to the `top_k` of
 * `num_experts` expert attention mechanisms.
 */
export class WasmMoEAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new MoE attention instance
*
* # Arguments
* * `dim` - Embedding dimension
* * `num_experts` - Number of expert attention mechanisms
* * `top_k` - Number of experts to activate per query
*/
constructor(dim: number, num_experts: number, top_k: number);
/**
* Compute MoE attention
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
}
/**
 * Standard multi-head attention. `dim` must be divisible by `num_heads`;
 * `headDim` exposes the resulting per-head dimension.
 */
export class WasmMultiHeadAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new multi-head attention instance
*
* # Arguments
* * `dim` - Embedding dimension (must be divisible by num_heads)
* * `num_heads` - Number of parallel attention heads
*/
constructor(dim: number, num_heads: number);
/**
* Compute multi-head attention
*
* # Arguments
* * `query` - Query vector
* * `keys` - Array of key vectors
* * `values` - Array of value vectors
*/
compute(query: Float32Array, keys: any, values: any): Float32Array;
/**
* Get the embedding dimension
*/
readonly dim: number;
/**
* Get the dimension per head
*/
readonly headDim: number;
/**
* Get the number of attention heads
*/
readonly numHeads: number;
}
/**
 * DAG attention over parallel branches, penalizing synchronization points
 * between branches via `sync_penalty`.
 */
export class WasmParallelBranchAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new parallel branch attention instance
*
* # Arguments
* * `max_branches` - Maximum number of branches to consider
* * `sync_penalty` - Penalty for synchronization between branches
*/
constructor(max_branches: number, sync_penalty: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Mutable directed acyclic graph of query-plan operators.
 * Build with `addNode` (returns node IDs) and `addEdge`, then pass to the
 * `Wasm*Attention.forward(dag)` scorers. Node order in the returned score
 * arrays presumably follows insertion/node-ID order — verify against the
 * implementation.
 */
export class WasmQueryDag {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new empty DAG
*/
constructor();
/**
* Serialize to JSON
*/
toJson(): string;
/**
* Add an edge between nodes
*
* # Arguments
* * `from` - Source node ID
* * `to` - Target node ID
*
* # Returns
* True if edge was added successfully
*/
addEdge(from: number, to: number): boolean;
/**
* Add a node with operator type and cost
*
* # Arguments
* * `op_type` - Operator type: "scan", "filter", "join", "aggregate", "project", "sort"
* * `cost` - Estimated execution cost
*
* # Returns
* Node ID
*/
addNode(op_type: string, cost: number): number;
/**
* Get the number of edges
*/
readonly edgeCount: number;
/**
* Get the number of nodes
*/
readonly nodeCount: number;
}
/**
 * Configuration for `graphDifferentiableSearch`: top-`k` result count and
 * softmax `temperature`. Both fields are mutable after construction.
 */
export class WasmSearchConfig {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new search configuration
*/
constructor(k: number, temperature: number);
/**
* Number of top results to return
*/
k: number;
/**
* Temperature for softmax
*/
temperature: number;
}
/**
 * Temporal DAG attention with BTSP-style eligibility traces: traces decay
 * by `eligibility_decay`, and nodes without history get `baseline_attention`.
 */
export class WasmTemporalBTSPAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new temporal BTSP attention instance
*
* # Arguments
* * `eligibility_decay` - Decay rate for eligibility traces (0.0-1.0)
* * `baseline_attention` - Baseline attention for nodes without history
*/
constructor(eligibility_decay: number, baseline_attention: number);
/**
* Compute attention scores for the DAG
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Embedding compressor with frequency-adaptive precision.
 * Either let `compress(embedding, access_freq)` pick a level from the access
 * frequency, or force one with `compressWithLevel`. `decompress` accepts the
 * opaque compressed value returned by either method.
 */
export class WasmTensorCompress {
free(): void;
[Symbol.dispose](): void;
/**
* Decompress a compressed tensor
*/
decompress(compressed: any): Float32Array;
/**
* Compress with explicit compression level
*
* # Arguments
* * `embedding` - The input embedding vector
* * `level` - Compression level: "none", "half", "pq8", "pq4", "binary"
*/
compressWithLevel(embedding: Float32Array, level: string): any;
/**
* Get compression ratio estimate for a given access frequency
*/
getCompressionRatio(access_freq: number): number;
/**
* Create a new tensor compressor
*/
constructor();
/**
* Compress an embedding based on access frequency
*
* # Arguments
* * `embedding` - The input embedding vector
* * `access_freq` - Access frequency in range [0.0, 1.0]
*   - f > 0.8: Full precision (hot data)
*   - f > 0.4: Half precision (warm data)
*   - f > 0.1: 8-bit PQ (cool data)
*   - f > 0.01: 4-bit PQ (cold data)
*   - f <= 0.01: Binary (archive)
*/
compress(embedding: Float32Array, access_freq: number): any;
}
/**
 * DAG attention based on each node's position in topological order,
 * attenuated by `decay_factor`.
 */
export class WasmTopologicalAttention {
free(): void;
[Symbol.dispose](): void;
/**
* Create a new topological attention instance
*
* # Arguments
* * `decay_factor` - Decay factor for position-based attention (0.0-1.0)
*/
constructor(decay_factor: number);
/**
* Compute attention scores for the DAG
*
* # Returns
* Attention scores for each node
*/
forward(dag: WasmQueryDag): Float32Array;
}
/**
 * Get information about all available attention mechanisms
 *
 * Returns a plain object; the package README shows it keyed by category
 * ({ neural, dag, graph, ssm }) — presumably each value is an array of
 * mechanism names.
 */
export function availableMechanisms(): any;
/**
 * Compute cosine similarity between two vectors
 */
export function cosineSimilarity(a: Float32Array, b: Float32Array): number;
/**
 * Get summary statistics about the unified attention library
 *
 * Per the README, the object carries `total_mechanisms` and per-category
 * counts (`neural_count`, `dag_count`, `graph_count`, `ssm_count`).
 */
export function getStats(): any;
/**
 * Differentiable search using soft attention mechanism
 *
 * # Arguments
 * * `query` - The query vector
 * * `candidate_embeddings` - List of candidate embedding vectors
 * * `config` - Search configuration
 *
 * # Returns
 * Object with indices and weights for top-k candidates
 */
export function graphDifferentiableSearch(query: Float32Array, candidate_embeddings: any, config: WasmSearchConfig): any;
/**
 * Hierarchical forward pass through multiple GNN layers
 *
 * # Arguments
 * * `query` - The query vector
 * * `layer_embeddings` - Embeddings organized by layer
 * * `gnn_layers` - Array of GNN layers
 *
 * # Returns
 * Final embedding after hierarchical processing
 */
export function graphHierarchicalForward(query: Float32Array, layer_embeddings: any, gnn_layers: WasmGNNLayer[]): Float32Array;
/**
 * Initialize the WASM module with panic hook for better error messages
 *
 * NOTE(review): declared `void` here, yet the README's Quick Start does
 * `await init()` on the default import — confirm whether callers should use
 * this named export or the async module-initialization default export.
 */
export function init(): void;
/**
 * Compute scaled dot-product attention
 *
 * Standard transformer attention: softmax(QK^T / sqrt(d)) * V
 *
 * # Arguments
 * * `query` - Query vector (Float32Array)
 * * `keys` - Array of key vectors (JsValue - array of Float32Arrays)
 * * `values` - Array of value vectors (JsValue - array of Float32Arrays)
 * * `scale` - Optional scaling factor (defaults to 1/sqrt(dim))
 *
 * # Returns
 * Attention-weighted output vector
 */
export function scaledDotAttention(query: Float32Array, keys: any, values: any, scale?: number | null): Float32Array;
/**
 * Softmax normalization
 */
export function softmax(values: Float32Array): Float32Array;
/**
 * Temperature-scaled softmax
 */
export function temperatureSoftmax(values: Float32Array, temperature: number): Float32Array;
/**
 * Get the version of the unified attention WASM crate
 */
export function version(): string;
/** Acceptable inputs for module initialization (URL/Response/bytes/compiled module). */
export type InitInput = RequestInfo | URL | Response | BufferSource | WebAssembly.Module;
/**
 * Raw exports of the instantiated WebAssembly module as surfaced by the
 * wasm-bindgen generator. These are low-level ABI entry points taking and
 * returning plain numbers (pointers, lengths, handles); application code
 * should call the typed wrappers declared above rather than these directly.
 * The `__wbg_*_free` members are per-class destructors, `__wbg_get_*` /
 * `__wbg_set_*` are field accessors, and the `__wbindgen_*` members are
 * internal allocator/stack glue.
 */
export interface InitOutput {
readonly memory: WebAssembly.Memory;
readonly __wbg_dagattentionfactory_free: (a: number, b: number) => void;
readonly __wbg_get_mambaconfig_conv_kernel_size: (a: number) => number;
readonly __wbg_get_mambaconfig_dim: (a: number) => number;
readonly __wbg_get_mambaconfig_dt_max: (a: number) => number;
readonly __wbg_get_mambaconfig_dt_min: (a: number) => number;
readonly __wbg_get_mambaconfig_expand_factor: (a: number) => number;
readonly __wbg_get_mambaconfig_state_dim: (a: number) => number;
readonly __wbg_get_mambaconfig_use_d_skip: (a: number) => number;
readonly __wbg_get_wasmsearchconfig_temperature: (a: number) => number;
readonly __wbg_hybridmambaattention_free: (a: number, b: number) => void;
readonly __wbg_mambaconfig_free: (a: number, b: number) => void;
readonly __wbg_mambassmattention_free: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_conv_kernel_size: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_dim: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_dt_max: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_dt_min: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_expand_factor: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_state_dim: (a: number, b: number) => void;
readonly __wbg_set_mambaconfig_use_d_skip: (a: number, b: number) => void;
readonly __wbg_set_wasmsearchconfig_temperature: (a: number, b: number) => void;
readonly __wbg_unifiedattention_free: (a: number, b: number) => void;
readonly __wbg_wasmcausalconeattention_free: (a: number, b: number) => void;
readonly __wbg_wasmflashattention_free: (a: number, b: number) => void;
readonly __wbg_wasmgnnlayer_free: (a: number, b: number) => void;
readonly __wbg_wasmhyperbolicattention_free: (a: number, b: number) => void;
readonly __wbg_wasmlinearattention_free: (a: number, b: number) => void;
readonly __wbg_wasmmincutgatedattention_free: (a: number, b: number) => void;
readonly __wbg_wasmmoeattention_free: (a: number, b: number) => void;
readonly __wbg_wasmmultiheadattention_free: (a: number, b: number) => void;
readonly __wbg_wasmquerydag_free: (a: number, b: number) => void;
readonly __wbg_wasmtensorcompress_free: (a: number, b: number) => void;
readonly availableMechanisms: () => number;
readonly cosineSimilarity: (a: number, b: number, c: number, d: number, e: number) => void;
readonly dagattentionfactory_availableTypes: () => number;
readonly dagattentionfactory_getDescription: (a: number, b: number, c: number) => void;
readonly getStats: () => number;
readonly graphDifferentiableSearch: (a: number, b: number, c: number, d: number, e: number) => void;
readonly graphHierarchicalForward: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly graphattentionfactory_availableTypes: () => number;
readonly graphattentionfactory_getDescription: (a: number, b: number, c: number) => void;
readonly graphattentionfactory_getUseCases: (a: number, b: number) => number;
readonly hybridmambaattention_forward: (a: number, b: number, c: number, d: number, e: number) => void;
readonly hybridmambaattention_localWindow: (a: number) => number;
readonly hybridmambaattention_new: (a: number, b: number) => number;
readonly mambaconfig_new: (a: number) => number;
readonly mambaconfig_withConvKernelSize: (a: number, b: number) => number;
readonly mambaconfig_withExpandFactor: (a: number, b: number) => number;
readonly mambaconfig_withStateDim: (a: number, b: number) => number;
readonly mambassmattention_config: (a: number) => number;
readonly mambassmattention_forward: (a: number, b: number, c: number, d: number, e: number) => void;
readonly mambassmattention_getAttentionScores: (a: number, b: number, c: number, d: number, e: number) => void;
readonly mambassmattention_innerDim: (a: number) => number;
readonly mambassmattention_new: (a: number) => number;
readonly mambassmattention_withDefaults: (a: number) => number;
readonly scaledDotAttention: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly softmax: (a: number, b: number, c: number) => void;
readonly temperatureSoftmax: (a: number, b: number, c: number, d: number) => void;
readonly unifiedattention_category: (a: number, b: number) => void;
readonly unifiedattention_mechanism: (a: number, b: number) => void;
readonly unifiedattention_new: (a: number, b: number, c: number) => void;
readonly unifiedattention_supportsGraphs: (a: number) => number;
readonly unifiedattention_supportsHyperbolic: (a: number) => number;
readonly unifiedattention_supportsSequences: (a: number) => number;
readonly version: (a: number) => void;
readonly wasmcausalconeattention_forward: (a: number, b: number, c: number) => void;
readonly wasmcriticalpathattention_forward: (a: number, b: number, c: number) => void;
readonly wasmflashattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmflashattention_new: (a: number, b: number) => number;
readonly wasmgnnlayer_forward: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
readonly wasmgnnlayer_new: (a: number, b: number, c: number, d: number, e: number) => void;
readonly wasmgnnlayer_outputDim: (a: number) => number;
readonly wasmhierarchicallorentzattention_forward: (a: number, b: number, c: number) => void;
readonly wasmhyperbolicattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmhyperbolicattention_curvature: (a: number) => number;
readonly wasmhyperbolicattention_new: (a: number, b: number) => number;
readonly wasmlinearattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmlinearattention_new: (a: number, b: number) => number;
readonly wasmlocalglobalattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmlocalglobalattention_new: (a: number, b: number, c: number) => number;
readonly wasmmincutgatedattention_forward: (a: number, b: number, c: number) => void;
readonly wasmmoeattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmmoeattention_new: (a: number, b: number, c: number) => number;
readonly wasmmultiheadattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmmultiheadattention_dim: (a: number) => number;
readonly wasmmultiheadattention_headDim: (a: number) => number;
readonly wasmmultiheadattention_new: (a: number, b: number, c: number) => void;
readonly wasmmultiheadattention_numHeads: (a: number) => number;
readonly wasmparallelbranchattention_forward: (a: number, b: number, c: number) => void;
readonly wasmquerydag_addEdge: (a: number, b: number, c: number) => number;
readonly wasmquerydag_addNode: (a: number, b: number, c: number, d: number) => number;
readonly wasmquerydag_edgeCount: (a: number) => number;
readonly wasmquerydag_new: () => number;
readonly wasmquerydag_nodeCount: (a: number) => number;
readonly wasmquerydag_toJson: (a: number, b: number) => void;
readonly wasmtemporalbtspattention_forward: (a: number, b: number, c: number) => void;
readonly wasmtensorcompress_compress: (a: number, b: number, c: number, d: number, e: number) => void;
readonly wasmtensorcompress_compressWithLevel: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
readonly wasmtensorcompress_decompress: (a: number, b: number, c: number) => void;
readonly wasmtensorcompress_getCompressionRatio: (a: number, b: number) => number;
readonly wasmtensorcompress_new: () => number;
readonly wasmtopologicalattention_forward: (a: number, b: number, c: number) => void;
readonly init: () => void;
readonly wasmmincutgatedattention_new: (a: number) => number;
readonly wasmtopologicalattention_new: (a: number) => number;
readonly __wbg_set_wasmsearchconfig_k: (a: number, b: number) => void;
readonly wasmcausalconeattention_new: (a: number, b: number) => number;
readonly wasmcriticalpathattention_new: (a: number, b: number) => number;
readonly wasmhierarchicallorentzattention_new: (a: number, b: number) => number;
readonly wasmparallelbranchattention_new: (a: number, b: number) => number;
readonly wasmsearchconfig_new: (a: number, b: number) => number;
readonly wasmtemporalbtspattention_new: (a: number, b: number) => number;
readonly __wbg_get_wasmsearchconfig_k: (a: number) => number;
readonly __wbg_graphattentionfactory_free: (a: number, b: number) => void;
readonly __wbg_wasmcriticalpathattention_free: (a: number, b: number) => void;
readonly __wbg_wasmhierarchicallorentzattention_free: (a: number, b: number) => void;
readonly __wbg_wasmlocalglobalattention_free: (a: number, b: number) => void;
readonly __wbg_wasmparallelbranchattention_free: (a: number, b: number) => void;
readonly __wbg_wasmsearchconfig_free: (a: number, b: number) => void;
readonly __wbg_wasmtemporalbtspattention_free: (a: number, b: number) => void;
readonly __wbg_wasmtopologicalattention_free: (a: number, b: number) => void;
readonly __wbindgen_export: (a: number, b: number) => number;
readonly __wbindgen_export2: (a: number, b: number, c: number, d: number) => number;
readonly __wbindgen_export3: (a: number) => void;
readonly __wbindgen_export4: (a: number, b: number, c: number) => void;
readonly __wbindgen_add_to_stack_pointer: (a: number) => number;
readonly __wbindgen_start: () => void;
}
/**
 * Accepted inputs for `initSync`: raw bytes or an already-compiled
 * WebAssembly.Module (no fetchable locations, since loading is synchronous).
 */
export type SyncInitInput = BufferSource | WebAssembly.Module;
/**
 * Instantiates the given `module` synchronously. It can either be bytes or
 * a precompiled `WebAssembly.Module`.
 *
 * @param {{ module: SyncInitInput }} module - Passing `SyncInitInput` directly is deprecated;
 * prefer the `{ module: ... }` object form.
 *
 * @returns {InitOutput} the raw wasm exports
 */
export function initSync(module: { module: SyncInitInput } | SyncInitInput): InitOutput;
/**
 * Asynchronously loads and instantiates the wasm module (default export).
 * If `module_or_path` is a {RequestInfo} or {URL}, it makes a request;
 * for everything else it calls `WebAssembly.instantiate` directly.
 *
 * @param {{ module_or_path: InitInput | Promise<InitInput> }} module_or_path - Passing `InitInput` directly is deprecated;
 * prefer the `{ module_or_path: ... }` object form.
 *
 * @returns {Promise<InitOutput>} resolving to the raw wasm exports
 */
export default function __wbg_init (module_or_path?: { module_or_path: InitInput | Promise<InitInput> } | InitInput | Promise<InitInput>): Promise<InitOutput>;

// ---------------------------------------------------------------------------
// NOTE(review): git-web-viewer residue was embedded here ("File diff
// suppressed because it is too large", "View File", "@@ -0,0 +1,129 @@").
// It was not valid TypeScript and has been replaced with this comment.
// The declarations below belong to a SEPARATE generated file — the raw
// `*_bg.wasm.d.ts` export surface mirroring the InitOutput interface above —
// and should live in their own file.
// ---------------------------------------------------------------------------
/* tslint:disable */
/* eslint-disable */
// Raw wasm-bindgen export declarations for the compiled WebAssembly module.
// Each `const` mirrors one member of the `InitOutput` interface: low-level
// ABI entry points taking/returning plain numbers (pointers, lengths,
// handles). `__wbg_*_free` are per-class destructors, `__wbg_get_*` /
// `__wbg_set_*` are field accessors, and `__wbindgen_*` are internal
// allocator/stack glue. Generated code — do not edit by hand.
export const memory: WebAssembly.Memory;
export const __wbg_dagattentionfactory_free: (a: number, b: number) => void;
export const __wbg_get_mambaconfig_conv_kernel_size: (a: number) => number;
export const __wbg_get_mambaconfig_dim: (a: number) => number;
export const __wbg_get_mambaconfig_dt_max: (a: number) => number;
export const __wbg_get_mambaconfig_dt_min: (a: number) => number;
export const __wbg_get_mambaconfig_expand_factor: (a: number) => number;
export const __wbg_get_mambaconfig_state_dim: (a: number) => number;
export const __wbg_get_mambaconfig_use_d_skip: (a: number) => number;
export const __wbg_get_wasmsearchconfig_temperature: (a: number) => number;
export const __wbg_hybridmambaattention_free: (a: number, b: number) => void;
export const __wbg_mambaconfig_free: (a: number, b: number) => void;
export const __wbg_mambassmattention_free: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_conv_kernel_size: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_dim: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_dt_max: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_dt_min: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_expand_factor: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_state_dim: (a: number, b: number) => void;
export const __wbg_set_mambaconfig_use_d_skip: (a: number, b: number) => void;
export const __wbg_set_wasmsearchconfig_temperature: (a: number, b: number) => void;
export const __wbg_unifiedattention_free: (a: number, b: number) => void;
export const __wbg_wasmcausalconeattention_free: (a: number, b: number) => void;
export const __wbg_wasmflashattention_free: (a: number, b: number) => void;
export const __wbg_wasmgnnlayer_free: (a: number, b: number) => void;
export const __wbg_wasmhyperbolicattention_free: (a: number, b: number) => void;
export const __wbg_wasmlinearattention_free: (a: number, b: number) => void;
export const __wbg_wasmmincutgatedattention_free: (a: number, b: number) => void;
export const __wbg_wasmmoeattention_free: (a: number, b: number) => void;
export const __wbg_wasmmultiheadattention_free: (a: number, b: number) => void;
export const __wbg_wasmquerydag_free: (a: number, b: number) => void;
export const __wbg_wasmtensorcompress_free: (a: number, b: number) => void;
export const availableMechanisms: () => number;
export const cosineSimilarity: (a: number, b: number, c: number, d: number, e: number) => void;
export const dagattentionfactory_availableTypes: () => number;
export const dagattentionfactory_getDescription: (a: number, b: number, c: number) => void;
export const getStats: () => number;
export const graphDifferentiableSearch: (a: number, b: number, c: number, d: number, e: number) => void;
export const graphHierarchicalForward: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const graphattentionfactory_availableTypes: () => number;
export const graphattentionfactory_getDescription: (a: number, b: number, c: number) => void;
export const graphattentionfactory_getUseCases: (a: number, b: number) => number;
export const hybridmambaattention_forward: (a: number, b: number, c: number, d: number, e: number) => void;
export const hybridmambaattention_localWindow: (a: number) => number;
export const hybridmambaattention_new: (a: number, b: number) => number;
export const mambaconfig_new: (a: number) => number;
export const mambaconfig_withConvKernelSize: (a: number, b: number) => number;
export const mambaconfig_withExpandFactor: (a: number, b: number) => number;
export const mambaconfig_withStateDim: (a: number, b: number) => number;
export const mambassmattention_config: (a: number) => number;
export const mambassmattention_forward: (a: number, b: number, c: number, d: number, e: number) => void;
export const mambassmattention_getAttentionScores: (a: number, b: number, c: number, d: number, e: number) => void;
export const mambassmattention_innerDim: (a: number) => number;
export const mambassmattention_new: (a: number) => number;
export const mambassmattention_withDefaults: (a: number) => number;
export const scaledDotAttention: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const softmax: (a: number, b: number, c: number) => void;
export const temperatureSoftmax: (a: number, b: number, c: number, d: number) => void;
export const unifiedattention_category: (a: number, b: number) => void;
export const unifiedattention_mechanism: (a: number, b: number) => void;
export const unifiedattention_new: (a: number, b: number, c: number) => void;
export const unifiedattention_supportsGraphs: (a: number) => number;
export const unifiedattention_supportsHyperbolic: (a: number) => number;
export const unifiedattention_supportsSequences: (a: number) => number;
export const version: (a: number) => void;
export const wasmcausalconeattention_forward: (a: number, b: number, c: number) => void;
export const wasmcriticalpathattention_forward: (a: number, b: number, c: number) => void;
export const wasmflashattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmflashattention_new: (a: number, b: number) => number;
export const wasmgnnlayer_forward: (a: number, b: number, c: number, d: number, e: number, f: number, g: number) => void;
export const wasmgnnlayer_new: (a: number, b: number, c: number, d: number, e: number) => void;
export const wasmgnnlayer_outputDim: (a: number) => number;
export const wasmhierarchicallorentzattention_forward: (a: number, b: number, c: number) => void;
export const wasmhyperbolicattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmhyperbolicattention_curvature: (a: number) => number;
export const wasmhyperbolicattention_new: (a: number, b: number) => number;
export const wasmlinearattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmlinearattention_new: (a: number, b: number) => number;
export const wasmlocalglobalattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmlocalglobalattention_new: (a: number, b: number, c: number) => number;
export const wasmmincutgatedattention_forward: (a: number, b: number, c: number) => void;
export const wasmmoeattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmmoeattention_new: (a: number, b: number, c: number) => number;
export const wasmmultiheadattention_compute: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmmultiheadattention_dim: (a: number) => number;
export const wasmmultiheadattention_headDim: (a: number) => number;
export const wasmmultiheadattention_new: (a: number, b: number, c: number) => void;
export const wasmmultiheadattention_numHeads: (a: number) => number;
export const wasmparallelbranchattention_forward: (a: number, b: number, c: number) => void;
export const wasmquerydag_addEdge: (a: number, b: number, c: number) => number;
export const wasmquerydag_addNode: (a: number, b: number, c: number, d: number) => number;
export const wasmquerydag_edgeCount: (a: number) => number;
export const wasmquerydag_new: () => number;
export const wasmquerydag_nodeCount: (a: number) => number;
export const wasmquerydag_toJson: (a: number, b: number) => void;
export const wasmtemporalbtspattention_forward: (a: number, b: number, c: number) => void;
export const wasmtensorcompress_compress: (a: number, b: number, c: number, d: number, e: number) => void;
export const wasmtensorcompress_compressWithLevel: (a: number, b: number, c: number, d: number, e: number, f: number) => void;
export const wasmtensorcompress_decompress: (a: number, b: number, c: number) => void;
export const wasmtensorcompress_getCompressionRatio: (a: number, b: number) => number;
export const wasmtensorcompress_new: () => number;
export const wasmtopologicalattention_forward: (a: number, b: number, c: number) => void;
export const init: () => void;
export const wasmmincutgatedattention_new: (a: number) => number;
export const wasmtopologicalattention_new: (a: number) => number;
export const __wbg_set_wasmsearchconfig_k: (a: number, b: number) => void;
export const wasmcausalconeattention_new: (a: number, b: number) => number;
export const wasmcriticalpathattention_new: (a: number, b: number) => number;
export const wasmhierarchicallorentzattention_new: (a: number, b: number) => number;
export const wasmparallelbranchattention_new: (a: number, b: number) => number;
export const wasmsearchconfig_new: (a: number, b: number) => number;
export const wasmtemporalbtspattention_new: (a: number, b: number) => number;
export const __wbg_get_wasmsearchconfig_k: (a: number) => number;
export const __wbg_graphattentionfactory_free: (a: number, b: number) => void;
export const __wbg_wasmcriticalpathattention_free: (a: number, b: number) => void;
export const __wbg_wasmhierarchicallorentzattention_free: (a: number, b: number) => void;
export const __wbg_wasmlocalglobalattention_free: (a: number, b: number) => void;
export const __wbg_wasmparallelbranchattention_free: (a: number, b: number) => void;
export const __wbg_wasmsearchconfig_free: (a: number, b: number) => void;
export const __wbg_wasmtemporalbtspattention_free: (a: number, b: number) => void;
export const __wbg_wasmtopologicalattention_free: (a: number, b: number) => void;
export const __wbindgen_export: (a: number, b: number) => number;
export const __wbindgen_export2: (a: number, b: number, c: number, d: number) => number;
export const __wbindgen_export3: (a: number) => void;
export const __wbindgen_export4: (a: number, b: number, c: number) => void;
export const __wbindgen_add_to_stack_pointer: (a: number) => number;
export const __wbindgen_start: () => void;