Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,110 @@
# CI for the ruvector-gnn Node.js bindings: cross-compiles the native addon
# for every supported platform/arch, then smoke-tests the Linux x64 artifact.
name: Build and Test

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  build:
    strategy:
      # Let the remaining targets finish even if one fails.
      fail-fast: false
      matrix:
        settings:
          - host: macos-latest
            target: x86_64-apple-darwin
            build: npm run build
          - host: macos-latest
            target: aarch64-apple-darwin
            build: npm run build -- --target aarch64-apple-darwin
          - host: ubuntu-latest
            target: x86_64-unknown-linux-gnu
            build: npm run build
          - host: ubuntu-latest
            target: x86_64-unknown-linux-musl
            build: npm run build -- --target x86_64-unknown-linux-musl
          - host: ubuntu-latest
            target: aarch64-unknown-linux-gnu
            build: npm run build -- --target aarch64-unknown-linux-gnu
          - host: ubuntu-latest
            target: aarch64-unknown-linux-musl
            build: npm run build -- --target aarch64-unknown-linux-musl
          - host: windows-latest
            target: x86_64-pc-windows-msvc
            build: npm run build
    name: Build ${{ matrix.settings.target }}
    runs-on: ${{ matrix.settings.host }}
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 18
          cache: npm
          # NOTE(review): npm caching normally keys on a lockfile; if a
          # package-lock.json is committed, point this at it instead.
          cache-dependency-path: crates/ruvector-gnn-node/package.json
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          targets: ${{ matrix.settings.target }}
      - name: Cache cargo
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ matrix.settings.target }}-${{ hashFiles('**/Cargo.lock') }}
      - name: Install dependencies
        working-directory: crates/ruvector-gnn-node
        run: npm install
      - name: Build
        working-directory: crates/ruvector-gnn-node
        run: ${{ matrix.settings.build }}
      # Only the host-native Linux build can load the addon it just produced.
      - name: Test (non-cross compile only)
        if: matrix.settings.host == 'ubuntu-latest' && matrix.settings.target == 'x86_64-unknown-linux-gnu'
        working-directory: crates/ruvector-gnn-node
        run: npm test
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: bindings-${{ matrix.settings.target }}
          path: crates/ruvector-gnn-node/*.node
          if-no-files-found: error

  test:
    name: Test Node.js bindings
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 18
      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: bindings-x86_64-unknown-linux-gnu
          path: crates/ruvector-gnn-node
      - name: Install dependencies
        working-directory: crates/ruvector-gnn-node
        # --ignore-scripts skips any install-time rebuild; the prebuilt
        # .node artifact was just downloaded above.
        run: npm install --ignore-scripts
      - name: Run tests
        working-directory: crates/ruvector-gnn-node
        run: npm test

View File

@@ -0,0 +1,13 @@
# Rust build output
target/
# Library/bindings crate: lockfile is not committed
Cargo.lock
.cargo/
# Compiled NAPI native addons
*.node
# IDE / editor files
*.iml
.idea/
.vscode/
# macOS metadata
.DS_Store
# Vim swap files
*.swp
*.swo
*~
# Emacs lock and backup files
.#*
\#*#

View File

@@ -0,0 +1,26 @@
# Node.js bindings crate for ruvector-gnn, built with NAPI-RS.
[package]
name = "ruvector-gnn-node"
version.workspace = true
edition.workspace = true
rust-version.workspace = true
license.workspace = true
authors.workspace = true
repository.workspace = true
readme = "README.md"
description = "Node.js bindings for Ruvector GNN via NAPI-RS"

[lib]
# cdylib: produce the shared library Node.js loads as a .node addon.
crate-type = ["cdylib"]

[dependencies]
napi = { workspace = true }
napi-derive = { workspace = true }
ruvector-gnn = { version = "2.0", path = "../ruvector-gnn", default-features = false }
serde_json = { workspace = true }

[build-dependencies]
# Generates the NAPI linker glue in build.rs.
napi-build = "2"

# NOTE(review): Cargo ignores profile sections declared in workspace members
# (only the workspace root's profiles apply) -- confirm lto/strip are
# mirrored in the workspace root Cargo.toml.
[profile.release]
lto = true
strip = true

View File

@@ -0,0 +1,252 @@
# @ruvector/gnn - Graph Neural Network Node.js Bindings
High-performance Graph Neural Network (GNN) capabilities for Ruvector, powered by Rust and NAPI-RS.
[![npm version](https://img.shields.io/npm/v/@ruvector/gnn.svg)](https://www.npmjs.com/package/@ruvector/gnn)
[![CI](https://github.com/ruvnet/ruvector/actions/workflows/build-gnn.yml/badge.svg)](https://github.com/ruvnet/ruvector/actions/workflows/build-gnn.yml)
## Features
- **GNN Layers**: Multi-head attention, layer normalization, GRU cells
- **Tensor Compression**: Adaptive compression with 5 levels (None, Half, PQ8, PQ4, Binary)
- **Differentiable Search**: Soft attention-based search with temperature scaling
- **Hierarchical Processing**: Multi-layer GNN forward pass
- **Zero-copy**: Efficient data transfer between JavaScript and Rust
- **TypeScript Support**: Full type definitions included
## Installation
```bash
npm install @ruvector/gnn
```
## Quick Start
### Creating a GNN Layer
```javascript
const { RuvectorLayer } = require('@ruvector/gnn');
// Create a GNN layer with:
// - Input dimension: 128
// - Hidden dimension: 256
// - Attention heads: 4
// - Dropout rate: 0.1
const layer = new RuvectorLayer(128, 256, 4, 0.1);
// Forward pass
const nodeEmbedding = Float32Array.from({ length: 128 }, () => Math.random());
const neighborEmbeddings = [
  Float32Array.from({ length: 128 }, () => Math.random()),
  Float32Array.from({ length: 128 }, () => Math.random()),
];
const edgeWeights = new Float32Array([0.7, 0.3]);
const output = layer.forward(nodeEmbedding, neighborEmbeddings, edgeWeights);
console.log('Output dimension:', output.length); // 256
```
### Tensor Compression
```javascript
const { TensorCompress, getCompressionLevel } = require('@ruvector/gnn');
const compressor = new TensorCompress();
const embedding = new Array(128).fill(0).map(() => Math.random());
// Adaptive compression based on access frequency
const accessFreq = 0.5; // 50% access rate
console.log('Selected level:', getCompressionLevel(accessFreq)); // "half"
const compressed = compressor.compress(embedding, accessFreq);
const decompressed = compressor.decompress(compressed);
console.log('Original size:', embedding.length);
console.log('Compression ratio:', compressed.length / JSON.stringify(embedding).length);
// Explicit compression level
const level = {
level_type: 'pq8',
subvectors: 8,
centroids: 16
};
const compressedPQ = compressor.compressWithLevel(embedding, level);
```
### Differentiable Search
```javascript
const { differentiableSearch } = require('@ruvector/gnn');
const query = new Float32Array([1.0, 0.0, 0.0]);
const candidates = [
  new Float32Array([1.0, 0.0, 0.0]), // Perfect match
  new Float32Array([0.9, 0.1, 0.0]), // Close match
  new Float32Array([0.0, 1.0, 0.0]), // Orthogonal
];
const result = differentiableSearch(query, candidates, 2, 1.0);
console.log('Top-2 indices:', result.indices); // [0, 1]
console.log('Soft weights:', result.weights); // [0.x, 0.y]
```
### Hierarchical Forward Pass
```javascript
const { hierarchicalForward, RuvectorLayer } = require('@ruvector/gnn');
const query = [1.0, 0.0];
// Layer embeddings (organized by HNSW layers)
const layerEmbeddings = [
[[1.0, 0.0], [0.0, 1.0]], // Layer 0 embeddings
];
// Create and serialize GNN layers
const layer1 = new RuvectorLayer(2, 2, 1, 0.0);
const layers = [layer1.toJson()];
// Hierarchical processing
const result = hierarchicalForward(query, layerEmbeddings, layers);
console.log('Final embedding:', result);
```
## API Reference
### RuvectorLayer
#### Constructor
```typescript
new RuvectorLayer(
inputDim: number,
hiddenDim: number,
heads: number,
dropout: number
): RuvectorLayer
```
#### Methods
- `forward(nodeEmbedding: number[], neighborEmbeddings: number[][], edgeWeights: number[]): number[]`
- `toJson(): string` - Serialize layer to JSON
- `fromJson(json: string): RuvectorLayer` - Deserialize layer from JSON
### TensorCompress
#### Constructor
```typescript
new TensorCompress(): TensorCompress
```
#### Methods
- `compress(embedding: number[], accessFreq: number): string` - Adaptive compression
- `compressWithLevel(embedding: number[], level: CompressionLevelConfig): string` - Explicit level
- `decompress(compressedJson: string): number[]` - Decompress tensor
#### CompressionLevelConfig
```typescript
interface CompressionLevelConfig {
level_type: 'none' | 'half' | 'pq8' | 'pq4' | 'binary';
scale?: number; // For 'half'
subvectors?: number; // For 'pq8', 'pq4'
centroids?: number; // For 'pq8'
outlier_threshold?: number; // For 'pq4'
threshold?: number; // For 'binary'
}
```
### Search Functions
#### differentiableSearch
```typescript
function differentiableSearch(
query: number[],
candidateEmbeddings: number[][],
k: number,
temperature: number
): { indices: number[], weights: number[] }
```
#### hierarchicalForward
```typescript
function hierarchicalForward(
query: number[],
layerEmbeddings: number[][][],
gnnLayersJson: string[]
): number[]
```
### Utility Functions
#### getCompressionLevel
```typescript
function getCompressionLevel(accessFreq: number): string
```
Returns the compression level that would be selected for the given access frequency:
- `accessFreq > 0.8`: "none" (hot data)
- `accessFreq > 0.4`: "half" (warm data)
- `accessFreq > 0.1`: "pq8" (cool data)
- `accessFreq > 0.01`: "pq4" (cold data)
- `accessFreq <= 0.01`: "binary" (archive)
## Compression Levels
### None
Full precision, no compression. Best for frequently accessed data.
### Half Precision
~50% space savings with minimal quality loss. Good for warm data.
### PQ8 (8-bit Product Quantization)
~8x compression using 8-bit codes. Suitable for cool data.
### PQ4 (4-bit Product Quantization)
~16x compression with outlier handling. For cold data.
### Binary
~32x compression, values become +1/-1. For archival data.
## Performance
- **Zero-copy operations** where possible
- **SIMD optimizations** for vector operations
- **Parallel processing** with Rayon
- **Native performance** with Rust backend
## Building from Source
```bash
# Install dependencies
npm install
# Build debug
npm run build:debug
# Build release
npm run build
# Run tests
npm test
```
## License
MIT - See LICENSE file for details
## Contributing
Contributions are welcome! Please see the main Ruvector repository for guidelines.
## Links
- [GitHub Repository](https://github.com/ruvnet/ruvector)
- [Documentation](https://docs.ruvector.io)
- [Issues](https://github.com/ruvnet/ruvector/issues)

View File

@@ -0,0 +1,5 @@
extern crate napi_build;
fn main() {
napi_build::setup();
}

View File

@@ -0,0 +1,132 @@
// Example: Basic usage of Ruvector GNN Node.js bindings
//
// The native entry points declare typed-array parameters (Float32Array in
// the NAPI bindings), so every embedding below is built as a Float32Array
// rather than a plain number[] — plain arrays are rejected by the addon.
const {
  RuvectorLayer,
  TensorCompress,
  differentiableSearch,
  hierarchicalForward,
  getCompressionLevel,
  init
} = require('../index.js');

console.log(init());
console.log('');

// ==================== Example 1: GNN Layer ====================
console.log('=== Example 1: GNN Layer ===');
const layer = new RuvectorLayer(4, 8, 2, 0.1);
console.log('Created GNN layer (input_dim: 4, hidden_dim: 8, heads: 2, dropout: 0.1)');
const nodeEmbedding = new Float32Array([1.0, 2.0, 3.0, 4.0]);
const neighborEmbeddings = [
  new Float32Array([0.5, 1.0, 1.5, 2.0]),
  new Float32Array([2.0, 3.0, 4.0, 5.0]),
];
const edgeWeights = new Float32Array([0.3, 0.7]);
const output = layer.forward(nodeEmbedding, neighborEmbeddings, edgeWeights);
console.log('Input embedding:', nodeEmbedding);
console.log('Output embedding (length):', output.length);
// Array.from keeps the formatted strings; Float32Array#map would coerce
// them straight back to numbers.
console.log('Output embedding (first 4 values):', Array.from(output.slice(0, 4), x => x.toFixed(4)));
console.log('');

// ==================== Example 2: Tensor Compression ====================
console.log('=== Example 2: Tensor Compression ===');
const compressor = new TensorCompress();
const embedding = Float32Array.from({ length: 64 }, (_, i) => Math.sin(i * 0.1));
// Sweep hot -> archival access frequencies and report size/quality trade-offs.
const frequencies = [0.9, 0.5, 0.2, 0.05, 0.001];
frequencies.forEach(freq => {
  const level = getCompressionLevel(freq);
  const compressed = compressor.compress(embedding, freq);
  const decompressed = compressor.decompress(compressed);
  // Compare against the JSON size of a plain array (typed arrays stringify
  // as objects, which would skew the baseline).
  const originalSize = JSON.stringify(Array.from(embedding)).length;
  const compressedSize = compressed.length;
  const ratio = (compressedSize / originalSize * 100).toFixed(1);
  console.log(`Frequency: ${freq.toFixed(3)} | Level: ${level.padEnd(6)} | Size: ${ratio}% | Error: ${calculateMSE(embedding, decompressed).toFixed(6)}`);
});
console.log('');

// ==================== Example 3: Differentiable Search ====================
console.log('=== Example 3: Differentiable Search ===');
const query = new Float32Array([1.0, 0.0, 0.0]);
const candidates = [
  new Float32Array([1.0, 0.0, 0.0]), // Perfect match
  new Float32Array([0.9, 0.1, 0.0]), // Close match
  new Float32Array([0.7, 0.3, 0.0]), // Medium match
  new Float32Array([0.0, 1.0, 0.0]), // Orthogonal
  new Float32Array([0.0, 0.0, 1.0]), // Orthogonal
];
console.log('Query:', query);
console.log('Number of candidates:', candidates.length);
const result = differentiableSearch(query, candidates, 3, 1.0);
console.log('Top-3 indices:', result.indices);
console.log('Soft weights:', result.weights.map(w => w.toFixed(4)));
console.log('Weights sum:', result.weights.reduce((a, b) => a + b, 0).toFixed(4));
console.log('');

// ==================== Example 4: Hierarchical Forward ====================
console.log('=== Example 4: Hierarchical Forward ===');
const query2 = new Float32Array([1.0, 0.0]);
const layerEmbeddings = [
  [
    new Float32Array([1.0, 0.0]),
    new Float32Array([0.0, 1.0]),
    new Float32Array([0.7, 0.7]),
  ],
];
const layer1 = new RuvectorLayer(2, 2, 1, 0.0);
const layers = [layer1.toJson()];
const finalEmbedding = hierarchicalForward(query2, layerEmbeddings, layers);
console.log('Query:', query2);
console.log('Final embedding:', Array.from(finalEmbedding, x => x.toFixed(4)));
console.log('');

// ==================== Example 5: Layer Serialization ====================
console.log('=== Example 5: Layer Serialization ===');
const originalLayer = new RuvectorLayer(8, 16, 4, 0.2);
const serialized = originalLayer.toJson();
const deserialized = RuvectorLayer.fromJson(serialized);
console.log('Original layer created (8 -> 16, heads: 4, dropout: 0.2)');
console.log('Serialized size:', serialized.length, 'bytes');
console.log('Successfully deserialized');
// The round-tripped layer must be functionally identical to the original.
const testInput = Float32Array.from({ length: 8 }, () => Math.random());
const testNeighbors = [Float32Array.from({ length: 8 }, () => Math.random())];
const testWeights = new Float32Array([1.0]);
const output1 = originalLayer.forward(testInput, testNeighbors, testWeights);
const output2 = deserialized.forward(testInput, testNeighbors, testWeights);
console.log('Original output matches deserialized:', arraysEqual(output1, output2, 1e-6));
console.log('');

// ==================== Helper Functions ====================

// Mean squared error between two equal-length vectors; Infinity on mismatch.
function calculateMSE(a, b) {
  if (a.length !== b.length) return Infinity;
  const sum = a.reduce((acc, val, i) => acc + Math.pow(val - b[i], 2), 0);
  return sum / a.length;
}

// Element-wise comparison within an absolute tolerance.
function arraysEqual(a, b, epsilon = 1e-10) {
  if (a.length !== b.length) return false;
  return a.every((val, i) => Math.abs(val - b[i]) < epsilon);
}

console.log('All examples completed successfully!');

View File

@@ -0,0 +1,34 @@
{
"name": "@ruvector/gnn-darwin-arm64",
"version": "0.1.25",
"os": [
"darwin"
],
"cpu": [
"arm64"
],
"main": "ruvector-gnn.darwin-arm64.node",
"files": [
"ruvector-gnn.darwin-arm64.node"
],
"description": "Graph Neural Network capabilities for Ruvector - darwin-arm64 platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
}
}

View File

@@ -0,0 +1,34 @@
{
"name": "@ruvector/gnn-darwin-x64",
"version": "0.1.25",
"os": [
"darwin"
],
"cpu": [
"x64"
],
"main": "ruvector-gnn.darwin-x64.node",
"files": [
"ruvector-gnn.darwin-x64.node"
],
"description": "Graph Neural Network capabilities for Ruvector - darwin-x64 platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
}
}

View File

@@ -0,0 +1,38 @@
{
"name": "@ruvector/gnn-linux-arm64-gnu",
"version": "0.1.25",
"os": [
"linux"
],
"cpu": [
"arm64"
],
"main": "ruvector-gnn.linux-arm64-gnu.node",
"files": [
"ruvector-gnn.linux-arm64-gnu.node"
],
"description": "Graph Neural Network capabilities for Ruvector - linux-arm64-gnu platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"machine-learning",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"libc": [
"glibc"
]
}

View File

@@ -0,0 +1,37 @@
{
"name": "@ruvector/gnn-linux-arm64-musl",
"version": "0.1.25",
"os": [
"linux"
],
"cpu": [
"arm64"
],
"main": "ruvector-gnn.linux-arm64-musl.node",
"files": [
"ruvector-gnn.linux-arm64-musl.node"
],
"description": "Graph Neural Network capabilities for Ruvector - linux-arm64-musl platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"libc": [
"musl"
]
}

View File

@@ -0,0 +1,37 @@
{
"name": "@ruvector/gnn-linux-x64-gnu",
"version": "0.1.25",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "ruvector-gnn.linux-x64-gnu.node",
"files": [
"ruvector-gnn.linux-x64-gnu.node"
],
"description": "Graph Neural Network capabilities for Ruvector - linux-x64-gnu platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"libc": [
"glibc"
]
}

View File

@@ -0,0 +1,37 @@
{
"name": "@ruvector/gnn-linux-x64-musl",
"version": "0.1.25",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "ruvector-gnn.linux-x64-musl.node",
"files": [
"ruvector-gnn.linux-x64-musl.node"
],
"description": "Graph Neural Network capabilities for Ruvector - linux-x64-musl platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"libc": [
"musl"
]
}

View File

@@ -0,0 +1,34 @@
{
"name": "@ruvector/gnn-win32-x64-msvc",
"version": "0.1.25",
"os": [
"win32"
],
"cpu": [
"x64"
],
"main": "ruvector-gnn.win32-x64-msvc.node",
"files": [
"ruvector-gnn.win32-x64-msvc.node"
],
"description": "Graph Neural Network capabilities for Ruvector - win32-x64-msvc platform",
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
}
}

View File

@@ -0,0 +1,64 @@
{
"name": "@ruvector/gnn",
"version": "0.1.25",
"description": "Graph Neural Network capabilities for Ruvector - Node.js bindings",
"main": "index.js",
"types": "index.d.ts",
"napi": {
"name": "ruvector-gnn",
"triples": {
"defaults": false,
"additional": [
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"x86_64-apple-darwin",
"aarch64-apple-darwin",
"x86_64-pc-windows-msvc"
]
}
},
"scripts": {
"artifacts": "napi artifacts",
"build": "napi build --platform --release",
"build:debug": "napi build --platform",
"prepublishOnly": "napi prepublish -t npm",
"test": "node --test test/*.test.js",
"version": "napi version"
},
"keywords": [
"ruvector",
"gnn",
"graph-neural-network",
"machine-learning",
"vector-database",
"hnsw",
"napi-rs"
],
"author": "Ruvector Team",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/ruvnet/ruvector"
},
"devDependencies": {
"@napi-rs/cli": "^2.16.0"
},
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"optionalDependencies": {
"@ruvector/gnn-linux-x64-gnu": "0.1.25",
"@ruvector/gnn-linux-x64-musl": "0.1.25",
"@ruvector/gnn-linux-arm64-gnu": "0.1.25",
"@ruvector/gnn-linux-arm64-musl": "0.1.25",
"@ruvector/gnn-darwin-x64": "0.1.25",
"@ruvector/gnn-darwin-arm64": "0.1.25",
"@ruvector/gnn-win32-x64-msvc": "0.1.25"
}
}

View File

@@ -0,0 +1,421 @@
//! Node.js bindings for Ruvector GNN via NAPI-RS
//!
//! This module provides JavaScript bindings for the Ruvector GNN library,
//! enabling graph neural network operations, tensor compression, and
//! differentiable search in Node.js applications.
#![deny(clippy::all)]
use napi::bindgen_prelude::*;
use napi_derive::napi;
use ruvector_gnn::{
compress::{
CompressedTensor as RustCompressedTensor, CompressionLevel as RustCompressionLevel,
TensorCompress as RustTensorCompress,
},
layer::RuvectorLayer as RustRuvectorLayer,
search::{
differentiable_search as rust_differentiable_search,
hierarchical_forward as rust_hierarchical_forward,
},
};
// ==================== RuvectorLayer Bindings ====================

/// Graph Neural Network layer for HNSW topology
#[napi]
pub struct RuvectorLayer {
    // Wrapped core-crate layer; all real computation happens there.
    inner: RustRuvectorLayer,
}

#[napi]
impl RuvectorLayer {
    /// Create a new Ruvector GNN layer
    ///
    /// # Arguments
    /// * `input_dim` - Dimension of input node embeddings
    /// * `hidden_dim` - Dimension of hidden representations
    /// * `heads` - Number of attention heads
    /// * `dropout` - Dropout rate (0.0 to 1.0)
    ///
    /// # Errors
    /// Any construction failure from the core crate is surfaced to JS as
    /// an `InvalidArg` error.
    ///
    /// # Example
    /// ```javascript
    /// const layer = new RuvectorLayer(128, 256, 4, 0.1);
    /// ```
    #[napi(constructor)]
    pub fn new(input_dim: u32, hidden_dim: u32, heads: u32, dropout: f64) -> Result<Self> {
        // Convert JS-friendly numeric types (u32/f64) to the core crate's
        // native types (usize/f32).
        let inner = RustRuvectorLayer::new(
            input_dim as usize,
            hidden_dim as usize,
            heads as usize,
            dropout as f32,
        )
        .map_err(|e| Error::new(Status::InvalidArg, e.to_string()))?;
        Ok(Self { inner })
    }

    /// Forward pass through the GNN layer
    ///
    /// # Arguments
    /// * `node_embedding` - Current node's embedding (Float32Array)
    /// * `neighbor_embeddings` - Embeddings of neighbor nodes (Array of Float32Array)
    /// * `edge_weights` - Weights of edges to neighbors (Float32Array)
    ///
    /// # Returns
    /// Updated node embedding as Float32Array
    ///
    /// # Example
    /// ```javascript
    /// const node = new Float32Array([1.0, 2.0, 3.0, 4.0]);
    /// const neighbors = [new Float32Array([0.5, 1.0, 1.5, 2.0]), new Float32Array([2.0, 3.0, 4.0, 5.0])];
    /// const weights = new Float32Array([0.3, 0.7]);
    /// const output = layer.forward(node, neighbors, weights);
    /// ```
    #[napi]
    pub fn forward(
        &self,
        node_embedding: Float32Array,
        neighbor_embeddings: Vec<Float32Array>,
        edge_weights: Float32Array,
    ) -> Result<Float32Array> {
        let node_slice = node_embedding.as_ref();
        // Copy each neighbor typed array into an owned Vec<f32>: the core
        // API takes &[Vec<f32>], so a borrow of the JS buffers won't do.
        let neighbors_vec: Vec<Vec<f32>> = neighbor_embeddings
            .into_iter()
            .map(|arr| arr.to_vec())
            .collect();
        let weights_slice = edge_weights.as_ref();
        let result = self
            .inner
            .forward(node_slice, &neighbors_vec, weights_slice);
        Ok(Float32Array::new(result))
    }

    /// Serialize the layer to JSON
    #[napi]
    pub fn to_json(&self) -> Result<String> {
        serde_json::to_string(&self.inner).map_err(|e| {
            Error::new(
                Status::GenericFailure,
                format!("Serialization error: {}", e),
            )
        })
    }

    /// Deserialize the layer from JSON
    ///
    /// Exposed to JS as a static factory: `RuvectorLayer.fromJson(json)`.
    #[napi(factory)]
    pub fn from_json(json: String) -> Result<Self> {
        let inner: RustRuvectorLayer = serde_json::from_str(&json).map_err(|e| {
            Error::new(
                Status::GenericFailure,
                format!("Deserialization error: {}", e),
            )
        })?;
        Ok(Self { inner })
    }
}
// ==================== TensorCompress Bindings ====================

/// JS-facing configuration object describing a tensor-compression level.
#[napi(object)]
pub struct CompressionLevelConfig {
    /// Compression kind: one of "none", "half", "pq8", "pq4", "binary".
    pub level_type: String,
    /// Scale factor (used by "half").
    pub scale: Option<f64>,
    /// Number of subvectors (used by "pq8" and "pq4").
    pub subvectors: Option<u32>,
    /// Number of centroids (used by "pq8").
    pub centroids: Option<u32>,
    /// Outlier threshold (used by "pq4").
    pub outlier_threshold: Option<f64>,
    /// Binarization threshold (used by "binary").
    pub threshold: Option<f64>,
}

impl CompressionLevelConfig {
    /// Translate this config into the core crate's enum, applying the
    /// same defaults the bindings have always used for omitted fields.
    fn to_rust(&self) -> Result<RustCompressionLevel> {
        match self.level_type.as_str() {
            "none" => Ok(RustCompressionLevel::None),
            "half" => {
                let scale = self.scale.unwrap_or(1.0) as f32;
                Ok(RustCompressionLevel::Half { scale })
            }
            "pq8" => {
                let subvectors = self.subvectors.unwrap_or(8) as u8;
                let centroids = self.centroids.unwrap_or(16) as u8;
                Ok(RustCompressionLevel::PQ8 {
                    subvectors,
                    centroids,
                })
            }
            "pq4" => {
                let subvectors = self.subvectors.unwrap_or(8) as u8;
                let outlier_threshold = self.outlier_threshold.unwrap_or(3.0) as f32;
                Ok(RustCompressionLevel::PQ4 {
                    subvectors,
                    outlier_threshold,
                })
            }
            "binary" => {
                let threshold = self.threshold.unwrap_or(0.0) as f32;
                Ok(RustCompressionLevel::Binary { threshold })
            }
            _ => Err(Error::new(
                Status::InvalidArg,
                format!("Invalid compression level: {}", self.level_type),
            )),
        }
    }
}
/// Tensor compressor with adaptive level selection
#[napi]
pub struct TensorCompress {
inner: RustTensorCompress,
}
#[napi]
impl TensorCompress {
/// Create a new tensor compressor
///
/// # Example
/// ```javascript
/// const compressor = new TensorCompress();
/// ```
#[napi(constructor)]
pub fn new() -> Self {
Self {
inner: RustTensorCompress::new(),
}
}
/// Compress an embedding based on access frequency
///
/// # Arguments
/// * `embedding` - The input embedding vector (Float32Array)
/// * `access_freq` - Access frequency in range [0.0, 1.0]
///
/// # Returns
/// Compressed tensor as JSON string
///
/// # Example
/// ```javascript
/// const embedding = new Float32Array([1.0, 2.0, 3.0, 4.0]);
/// const compressed = compressor.compress(embedding, 0.5);
/// ```
#[napi]
pub fn compress(&self, embedding: Float32Array, access_freq: f64) -> Result<String> {
let embedding_slice = embedding.as_ref();
let compressed = self
.inner
.compress(embedding_slice, access_freq as f32)
.map_err(|e| Error::new(Status::GenericFailure, format!("Compression error: {}", e)))?;
serde_json::to_string(&compressed).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Serialization error: {}", e),
)
})
}
/// Compress with explicit compression level
///
/// # Arguments
/// * `embedding` - The input embedding vector (Float32Array)
/// * `level` - Compression level configuration
///
/// # Returns
/// Compressed tensor as JSON string
///
/// # Example
/// ```javascript
/// const embedding = new Float32Array([1.0, 2.0, 3.0, 4.0]);
/// const level = { level_type: "half", scale: 1.0 };
/// const compressed = compressor.compressWithLevel(embedding, level);
/// ```
#[napi]
pub fn compress_with_level(
&self,
embedding: Float32Array,
level: CompressionLevelConfig,
) -> Result<String> {
let embedding_slice = embedding.as_ref();
let rust_level = level.to_rust()?;
let compressed = self
.inner
.compress_with_level(embedding_slice, &rust_level)
.map_err(|e| Error::new(Status::GenericFailure, format!("Compression error: {}", e)))?;
serde_json::to_string(&compressed).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Serialization error: {}", e),
)
})
}
/// Decompress a compressed tensor
///
/// # Arguments
/// * `compressed_json` - Compressed tensor as JSON string
///
/// # Returns
/// Decompressed embedding vector as Float32Array
///
/// # Example
/// ```javascript
/// const decompressed = compressor.decompress(compressed);
/// ```
#[napi]
pub fn decompress(&self, compressed_json: String) -> Result<Float32Array> {
let compressed: RustCompressedTensor =
serde_json::from_str(&compressed_json).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Deserialization error: {}", e),
)
})?;
let result = self.inner.decompress(&compressed).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Decompression error: {}", e),
)
})?;
Ok(Float32Array::new(result))
}
}
// ==================== Search Functions ====================

/// Result from differentiable search
///
/// Arrives in JS as a plain object with two parallel arrays:
/// `indices[i]` is weighted by `weights[i]`.
#[napi(object)]
pub struct SearchResult {
    /// Indices of top-k candidates
    pub indices: Vec<u32>,
    /// Soft weights for top-k candidates
    pub weights: Vec<f64>,
}
/// Differentiable search using soft attention mechanism
///
/// # Arguments
/// * `query` - The query vector (Float32Array)
/// * `candidate_embeddings` - List of candidate embedding vectors (Array of Float32Array)
/// * `k` - Number of top results to return
/// * `temperature` - Temperature for softmax (lower = sharper, higher = smoother)
///
/// # Returns
/// Search result with indices and soft weights
///
/// # Example
/// ```javascript
/// const query = new Float32Array([1.0, 0.0, 0.0]);
/// const candidates = [new Float32Array([1.0, 0.0, 0.0]), new Float32Array([0.9, 0.1, 0.0]), new Float32Array([0.0, 1.0, 0.0])];
/// const result = differentiableSearch(query, candidates, 2, 1.0);
/// console.log(result.indices); // [0, 1]
/// console.log(result.weights); // [0.x, 0.y]
/// ```
#[napi]
pub fn differentiable_search(
    query: Float32Array,
    candidate_embeddings: Vec<Float32Array>,
    k: u32,
    temperature: f64,
) -> Result<SearchResult> {
    // Own the candidate data so the core crate can borrow plain slices.
    let pool: Vec<Vec<f32>> = candidate_embeddings
        .into_iter()
        .map(|candidate| candidate.to_vec())
        .collect();
    let (raw_indices, raw_weights) =
        rust_differentiable_search(query.as_ref(), &pool, k as usize, temperature as f32);
    // Re-type for JS: usize -> u32 indices, f32 -> f64 weights.
    let indices = raw_indices.into_iter().map(|i| i as u32).collect();
    let weights = raw_weights.into_iter().map(|w| w as f64).collect();
    Ok(SearchResult { indices, weights })
}
/// Hierarchical forward pass through GNN layers
///
/// # Arguments
/// * `query` - The query vector (Float32Array)
/// * `layer_embeddings` - Embeddings organized by layer (Array of Array of Float32Array)
/// * `gnn_layers_json` - JSON array of serialized GNN layers
///
/// # Returns
/// Final embedding after hierarchical processing as Float32Array
///
/// # Example
/// ```javascript
/// const query = new Float32Array([1.0, 0.0]);
/// const layerEmbeddings = [[new Float32Array([1.0, 0.0]), new Float32Array([0.0, 1.0])]];
/// const layer1 = new RuvectorLayer(2, 2, 1, 0.0);
/// const layers = [layer1.toJson()];
/// const result = hierarchicalForward(query, layerEmbeddings, layers);
/// ```
#[napi]
pub fn hierarchical_forward(
    query: Float32Array,
    layer_embeddings: Vec<Vec<Float32Array>>,
    gnn_layers_json: Vec<String>,
) -> Result<Float32Array> {
    // Materialize the nested typed arrays as owned Vec<f32> tiers.
    let tiers: Vec<Vec<Vec<f32>>> = layer_embeddings
        .into_iter()
        .map(|tier| tier.into_iter().map(|embedding| embedding.to_vec()).collect())
        .collect();

    // Rehydrate each serialized layer, aborting on the first malformed entry.
    let mut layers: Vec<RustRuvectorLayer> = Vec::with_capacity(gnn_layers_json.len());
    for json in &gnn_layers_json {
        let layer = serde_json::from_str(json).map_err(|e| {
            Error::new(
                Status::GenericFailure,
                format!("Layer deserialization error: {}", e),
            )
        })?;
        layers.push(layer);
    }

    let out = rust_hierarchical_forward(query.as_ref(), &tiers, &layers);
    Ok(Float32Array::new(out))
}
// ==================== Helper Functions ====================

/// Get the compression level that would be selected for a given access frequency
///
/// # Arguments
/// * `access_freq` - Access frequency in range [0.0, 1.0]
///
/// # Returns
/// String describing the compression level: "none", "half", "pq8", "pq4", or "binary"
///
/// # Example
/// ```javascript
/// const level = getCompressionLevel(0.9); // "none" (hot data)
/// const level2 = getCompressionLevel(0.5); // "half" (warm data)
/// ```
#[napi]
pub fn get_compression_level(access_freq: f64) -> String {
    // Thresholds run from hot data (full precision) down to archival
    // (binary); anything not above a threshold falls through to the next.
    let level = match access_freq {
        f if f > 0.8 => "none",
        f if f > 0.4 => "half",
        f if f > 0.1 => "pq8",
        f if f > 0.01 => "pq4",
        _ => "binary",
    };
    level.to_string()
}
/// Module initialization
///
/// Returns a fixed banner string so JS callers can confirm the native
/// addon loaded.
#[napi]
pub fn init() -> String {
    String::from("Ruvector GNN Node.js bindings initialized")
}

View File

@@ -0,0 +1,204 @@
// Basic tests for Ruvector GNN Node.js bindings
const { test } = require('node:test');
const assert = require('node:assert');
const {
RuvectorLayer,
TensorCompress,
differentiableSearch,
hierarchicalForward,
getCompressionLevel,
init
} = require('../index.js');
test('initialization', () => {
const result = init();
assert.strictEqual(typeof result, 'string');
assert.ok(result.includes('initialized'));
});
// Constructor smoke test: (inputDim, outputDim, heads, dropout).
test('RuvectorLayer creation', () => {
  const layer = new RuvectorLayer(4, 8, 2, 0.1);
  assert.ok(layer instanceof RuvectorLayer);
});

// Forward pass with two weighted neighbors: output length equals outputDim (8).
test('RuvectorLayer forward pass', () => {
  const layer = new RuvectorLayer(4, 8, 2, 0.1);
  const node = Float32Array.from([1.0, 2.0, 3.0, 4.0]);
  const neighbors = [
    Float32Array.from([0.5, 1.0, 1.5, 2.0]),
    Float32Array.from([2.0, 3.0, 4.0, 5.0]),
  ];
  const weights = Float32Array.from([0.3, 0.7]);
  const output = layer.forward(node, neighbors, weights);
  assert.strictEqual(output.length, 8);
  assert.ok(output instanceof Float32Array);
});

// An isolated node (no neighbors) must still produce a full-size output.
test('RuvectorLayer forward with no neighbors', () => {
  const layer = new RuvectorLayer(4, 8, 2, 0.1);
  const output = layer.forward(
    new Float32Array([1.0, 2.0, 3.0, 4.0]),
    [],
    new Float32Array([])
  );
  assert.strictEqual(output.length, 8);
});

// toJson must yield a non-empty JSON string.
test('RuvectorLayer serialization', () => {
  const json = new RuvectorLayer(4, 8, 2, 0.1).toJson();
  assert.strictEqual(typeof json, 'string');
  assert.ok(json.length > 0);
});

// A JSON round-trip must preserve the layer's parameters: the restored
// layer produces numerically identical forward outputs.
test('RuvectorLayer deserialization', () => {
  const original = new RuvectorLayer(4, 8, 2, 0.1);
  const restored = RuvectorLayer.fromJson(original.toJson());
  assert.ok(restored instanceof RuvectorLayer);
  // Test that they produce same output
  const node = new Float32Array([1.0, 2.0, 3.0, 4.0]);
  const neighbors = [new Float32Array([0.5, 1.0, 1.5, 2.0])];
  const weights = new Float32Array([1.0]);
  const out1 = original.forward(node, neighbors, weights);
  const out2 = restored.forward(node, neighbors, weights);
  assert.strictEqual(out1.length, out2.length);
  out1.forEach((value, i) => {
    assert.ok(Math.abs(value - out2[i]) < 1e-6);
  });
});
// Constructor smoke test.
test('TensorCompress creation', () => {
  const compressor = new TensorCompress();
  assert.ok(compressor instanceof TensorCompress);
});

// Adaptive compression (access frequency 0.5) returns a non-empty encoded string.
test('TensorCompress adaptive compression', () => {
  const compressor = new TensorCompress();
  const encoded = compressor.compress(new Float32Array([1.0, 2.0, 3.0, 4.0]), 0.5);
  assert.strictEqual(typeof encoded, 'string');
  assert.ok(encoded.length > 0);
});

// At access frequency 1.0 (the "none" level) the round-trip must be lossless.
test('TensorCompress round-trip', () => {
  const compressor = new TensorCompress();
  const embedding = new Float32Array([1.0, 2.0, 3.0, 4.0]);
  const encoded = compressor.compress(embedding, 1.0); // No compression
  const restored = compressor.decompress(encoded);
  assert.strictEqual(restored.length, embedding.length);
  assert.ok(restored instanceof Float32Array);
  restored.forEach((value, i) => {
    assert.ok(Math.abs(value - embedding[i]) < 1e-6);
  });
});

// Compression with an explicitly supplied level descriptor (half precision).
test('TensorCompress with explicit level', () => {
  const compressor = new TensorCompress();
  const embedding = Float32Array.from({ length: 64 }, (_, i) => i * 0.1);
  const level = {
    level_type: 'half',
    scale: 1.0
  };
  const encoded = compressor.compressWithLevel(embedding, level);
  const restored = compressor.decompress(encoded);
  assert.strictEqual(restored.length, embedding.length);
});
// Threshold mapping: >0.8 → none, >0.4 → half, >0.1 → pq8, >0.01 → pq4,
// anything colder → binary.
test('getCompressionLevel', () => {
  const expectations = [
    [0.9, 'none'],
    [0.5, 'half'],
    [0.2, 'pq8'],
    [0.05, 'pq4'],
    [0.001, 'binary'],
  ];
  for (const [freq, level] of expectations) {
    assert.strictEqual(getCompressionLevel(freq), level);
  }
});
// Soft top-k search: the exact match must rank first, and the returned
// weights must be valid probabilities in [0, 1].
test('differentiableSearch', () => {
  const query = new Float32Array([1.0, 0.0, 0.0]);
  const candidates = [
    new Float32Array([1.0, 0.0, 0.0]),
    new Float32Array([0.9, 0.1, 0.0]),
    new Float32Array([0.0, 1.0, 0.0]),
  ];
  const { indices, weights } = differentiableSearch(query, candidates, 2, 1.0);
  assert.ok(Array.isArray(indices));
  assert.ok(Array.isArray(weights));
  assert.strictEqual(indices.length, 2);
  assert.strictEqual(weights.length, 2);
  // First result should be perfect match
  assert.strictEqual(indices[0], 0);
  // Weights should be valid probabilities
  for (const w of weights) {
    assert.ok(w >= 0 && w <= 1);
  }
});

// Searching an empty candidate set yields empty results rather than throwing.
test('differentiableSearch with empty candidates', () => {
  const query = new Float32Array([1.0, 0.0, 0.0]);
  const result = differentiableSearch(query, [], 2, 1.0);
  assert.strictEqual(result.indices.length, 0);
  assert.strictEqual(result.weights.length, 0);
});
// Single-level hierarchical forward: the result keeps the layer's output
// dimension (2) and comes back as a Float32Array.
test('hierarchicalForward', () => {
  const query = new Float32Array([1.0, 0.0]);
  const layerEmbeddings = [
    [new Float32Array([1.0, 0.0]), new Float32Array([0.0, 1.0])],
  ];
  const serializedLayers = [new RuvectorLayer(2, 2, 1, 0.0).toJson()];
  const result = hierarchicalForward(query, layerEmbeddings, serializedLayers);
  assert.ok(result instanceof Float32Array);
  assert.strictEqual(result.length, 2);
});
// Dropout must lie inside [0.0, 1.0]; values outside are rejected.
test('invalid dropout rate throws error', () => {
  const badDropouts = [1.5, -0.1]; // dropout > 1.0 and dropout < 0.0
  for (const dropout of badDropouts) {
    assert.throws(() => new RuvectorLayer(4, 8, 2, dropout));
  }
});

// Zero-length embeddings are rejected rather than silently encoded.
test('compression with empty embedding throws error', () => {
  const compressor = new TensorCompress();
  assert.throws(() => {
    compressor.compress(new Float32Array([]), 0.5);
  });
});

// Stronger compression must yield a smaller encoded payload.
test('compression levels produce different sizes', () => {
  const compressor = new TensorCompress();
  const embedding = Float32Array.from({ length: 64 }, (_, i) => Math.sin(i * 0.1));
  const none = compressor.compress(embedding, 1.0); // No compression
  const half = compressor.compress(embedding, 0.5); // Half precision
  const binary = compressor.compress(embedding, 0.001); // Binary
  // Binary should be smallest
  assert.ok(binary.length < half.length);
  // None should be largest (or close to half)
  assert.ok(none.length >= half.length * 0.8);
});