What is this article for?
- To build an AI-powered Angular application.
- To weigh the trade-offs between different stacks.

Why should you be able to do this?
- To develop AI features in your own Angular applications.
- To integrate AI features into a client-side application.
- To weigh the trade-offs between SDKs, LLMs, libraries, and utilities.
- To use Angular's APIs and tooling as effectively as possible.
Architecture

The stack has three layers:
- Rust/Candle layer: a small MLP (multilayer perceptron) model.
- WASM compilation: wasm-pack with the web target.
- Angular layer: loads the WASM module and renders the UI with the CDK.

Build commands:
- wasm-pack build --target web
- wasm-pack build --target web --release (optimized production build)
ng new ng-ml-wasm --no-create-application
cd ng-ml-wasm
ng g application ml-app
ng g library wasm-ml
cargo new --lib ml-rust-lib --vcs none
wasm-pack build --target web

For wasm-pack to produce a web-loadable package, the Rust crate's Cargo.toml must declare crate-type = ["cdylib"] and depend on wasm-bindgen as well as candle-core and candle-nn. A minimal smoke-test export for the new crate is sketched below.
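Before wiring in candle, it is worth verifying the toolchain end to end. Here is a minimal sketch of src/lib.rs for the freshly created ml-rust-lib crate; the ping export and its return string are purely illustrative:

use wasm_bindgen::prelude::*;

// Illustrative stub export: build with `wasm-pack build --target web`
// and call ping() from JS to confirm the Rust -> WASM pipeline works.
#[wasm_bindgen]
pub fn ping() -> String {
    "ml-rust-lib ready".to_string()
}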
Candle NN Definition
// Note: VarBuilder and the layer types live in candle_nn, not candle_core.
use candle_core::{Result, Tensor};
use candle_nn::{Linear, Module, VarBuilder};
#[derive(Clone)]
struct MLP {
    ln1: Linear,
    ln2: Linear,
    ln3: Linear,
}

impl MLP {
    fn new(vs: VarBuilder) -> Result<Self> {
        // 2 -> 4 -> 2 -> 2: two hidden layers, two output logits for binary classification
        let ln1 = candle_nn::linear(2, 4, vs.pp("ln1"))?;
        let ln2 = candle_nn::linear(4, 2, vs.pp("ln2"))?;
        let ln3 = candle_nn::linear(2, 2, vs.pp("ln3"))?;
        Ok(Self { ln1, ln2, ln3 })
    }

    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        // ReLU between layers; raw logits are returned (softmax is applied in the loss)
        let xs = self.ln1.forward(xs)?.relu()?;
        let xs = self.ln2.forward(&xs)?.relu()?;
        self.ln3.forward(&xs)
    }
}
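As a quick sanity check of the definition, here is a minimal usage sketch: it builds the MLP from a fresh VarMap (randomly initialized weights) and runs one forward pass on a dummy input.

use candle_core::{DType, Device, Result, Tensor};
use candle_nn::{VarBuilder, VarMap};

fn smoke_test() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new(); // owns the (randomly initialized) weights
    let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let model = MLP::new(vs)?;
    let input = Tensor::from_vec(vec![13f32, 22.0], (1, 2), &dev)?; // batch of one sample
    let logits = model.forward(&input)?; // shape (1, 2): one raw logit per class
    println!("{logits}");
    Ok(())
}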
MLP Dataset and Training
use anyhow::Result;
use candle_core::{D, DType, Device, Tensor};
use candle_nn::{loss, ops, AdamW, Linear, Module, Optimizer, VarBuilder};

const VOTE_DIM: usize = 2; // input features per sample
const RESULTS: usize = 1;  // RESULTS + 1 = 2 output logits for binary classification
const EPOCHS: usize = 20;
const LAYER1_OUT_SIZE: usize = 4;
const LAYER2_OUT_SIZE: usize = 2;
const LEARNING_RATE: f64 = 0.05;
#[derive(Clone)]
struct Dataset {
    train_votes: Tensor,
    train_results: Tensor,
    test_votes: Tensor,
    test_results: Tensor,
}

#[derive(Clone)]
struct MLP {
    ln1: Linear,
    ln2: Linear,
    ln3: Linear,
}

impl MLP {
    fn new(vs: VarBuilder) -> Result<Self> {
        let ln1 = candle_nn::linear(VOTE_DIM, LAYER1_OUT_SIZE, vs.pp("ln1"))?;
        let ln2 = candle_nn::linear(LAYER1_OUT_SIZE, LAYER2_OUT_SIZE, vs.pp("ln2"))?;
        let ln3 = candle_nn::linear(LAYER2_OUT_SIZE, RESULTS + 1, vs.pp("ln3"))?;
        Ok(Self { ln1, ln2, ln3 })
    }

    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.ln1.forward(xs)?.relu()?;
        let xs = self.ln2.forward(&xs)?.relu()?;
        self.ln3.forward(&xs)
    }
}
fn train(m: Dataset, dev: &Device) -> Result<MLP> {
    let varmap = candle_nn::VarMap::new();
    let vs = VarBuilder::from_varmap(&varmap, DType::F32, dev);
    let model = MLP::new(vs.clone())?;
    let mut adamw = AdamW::new(varmap.all_vars(), candle_nn::ParamsAdamW {
        lr: LEARNING_RATE,
        ..Default::default()
    })?;
    // candle's Tensor has no nll method; apply log_softmax and the loss::nll helper instead
    let logits = model.forward(&m.train_votes)?;
    let log_sm = ops::log_softmax(&logits, D::Minus1)?;
    let initial_loss = loss::nll(&log_sm, &m.train_results)?.to_scalar::<f32>()?;
    println!("Initial loss: {initial_loss}");
    for epoch in 1..=EPOCHS {
        let logits = model.forward(&m.train_votes)?;
        let log_sm = ops::log_softmax(&logits, D::Minus1)?;
        let loss = loss::nll(&log_sm, &m.train_results)?;
        adamw.backward_step(&loss)?;
        println!("Epoch: {epoch:3} Train loss: {:8.5}", loss.to_scalar::<f32>()?);
    }
    let logits = model.forward(&m.test_votes)?;
    let log_sm = ops::log_softmax(&logits, D::Minus1)?;
    let test_loss = loss::nll(&log_sm, &m.test_results)?.to_scalar::<f32>()?;
    println!("Test loss: {test_loss:8.5}");
    Ok(model)
}
fn main() -> Result<()> {
    let dev = Device::Cpu; // use Device::new_cuda(0)? for GPU; WASM must stay on Cpu
    // Sample data (adapt as needed): 8 training rows and 3 test rows of VOTE_DIM features
    let train_votes_vec: Vec<f32> = vec![15., 10., 10., 15., 5., 12., 30., 20., 16., 12., 13., 25., 6., 14., 31., 21.];
    let train_votes = Tensor::from_vec(train_votes_vec, (8, VOTE_DIM), &dev)?;
    let train_results_vec: Vec<u32> = vec![1, 0, 0, 1, 1, 0, 0, 1];
    let train_results = Tensor::from_vec(train_results_vec, 8, &dev)?;
    let test_votes_vec: Vec<f32> = vec![13., 9., 8., 14., 3., 10.];
    let test_votes = Tensor::from_vec(test_votes_vec, (3, VOTE_DIM), &dev)?;
    let test_results_vec: Vec<u32> = vec![1, 0, 0];
    let test_results = Tensor::from_vec(test_results_vec, 3, &dev)?;
    let dataset = Dataset { train_votes, train_results, test_votes, test_results };
    let trained_model = train(dataset, &dev)?;
    // Inference example: argmax over the class dimension yields the predicted label
    let real_world_votes: Vec<f32> = vec![13., 22.];
    let input = Tensor::from_vec(real_world_votes, (1, VOTE_DIM), &dev)?;
    let output = trained_model.forward(&input)?;
    let prediction = output.argmax(D::Minus1)?.get(0)?.to_scalar::<u32>()?;
    println!("Prediction: {prediction}");
    Ok(())
}
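To reuse the trained weights in the browser, they need to be exported from the training run. A minimal sketch, assuming train() is refactored to also hand back its VarMap; the mlp.safetensors file name is illustrative:

// Persist the trained parameters (safetensors format) for later reuse in WASM.
fn save_weights(varmap: &candle_nn::VarMap) -> Result<()> {
    varmap.save("mlp.safetensors")?; // counterpart: VarMap::load / VarBuilder::from_buffered_safetensors
    Ok(())
}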
WASM
use candle_core::{D, DType, Device, Tensor};
use candle_nn::{VarBuilder, VarMap};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub fn run_inference(input_data: &[f32]) -> Result<String, JsValue> {
    let dev = Device::Cpu;
    // Demo only: a freshly built (untrained) model; see the loading sketch below for real weights.
    let varmap = VarMap::new();
    let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let model = MLP::new(vs).map_err(|e| e.to_string())?;
    let input = Tensor::from_slice(input_data, (1, VOTE_DIM), &dev).map_err(|e| e.to_string())?;
    let output = model.forward(&input).map_err(|e| e.to_string())?;
    let pred = output.argmax(D::Minus1)
        .and_then(|t| t.get(0))
        .and_then(|t| t.to_scalar::<u32>())
        .map_err(|e| e.to_string())?;
    Ok(pred.to_string())
}
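A randomly initialized model only demonstrates the pipeline. Here is a hedged sketch of loading real weights instead, assuming the training step saved mlp.safetensors (as sketched earlier) and a candle version that provides VarBuilder::from_buffered_safetensors:

use candle_core::{DType, Device};
use candle_nn::VarBuilder;

fn load_trained(dev: &Device) -> candle_core::Result<MLP> {
    // Embed the weights into the WASM binary at compile time; no network fetch needed.
    let bytes = include_bytes!("mlp.safetensors").to_vec();
    let vs = VarBuilder::from_buffered_safetensors(bytes, DType::F32, dev)?;
    MLP::new(vs)
}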
Angular
- public-api.ts (pkg is the wasm-pack output copied into the library):
import init from './lib/ml-rust-lib/pkg';
export { run_inference } from './lib/ml-rust-lib/pkg';
export { init as initMlRust };
CDK
- CDK: ng add @angular/cdk
- app.component.ts (standalone):
import { Component, OnInit, signal } from '@angular/core';
import { CdkDrag, CdkDragDrop, CdkDropList, moveItemInArray } from '@angular/cdk/drag-drop';
import { run_inference, initMlRust } from 'wasm-ml';

@Component({
  selector: 'app-root',
  standalone: true,
  imports: [CdkDrag, CdkDropList], // CdkDropList is required for the cdkDropList directive
  template: `
    <div cdkDropList (cdkDropListDropped)="drop($event)">
      @for (item of inputs(); track item) {
        <div cdkDrag>{{ item }}</div>
      }
    </div>
    <button (click)="predict()">Predict</button>
    <p>Result: {{ result() }}</p>
  `,
})
export class AppComponent implements OnInit {
  inputs = signal<number[]>([13, 22]); // draggable input features
  result = signal<string>('');

  async ngOnInit() {
    await initMlRust(); // instantiate the WASM module before first use
  }

  drop(event: CdkDragDrop<number[]>) {
    // Reorder into a fresh array so the signal actually notifies consumers
    this.inputs.update((items) => {
      const reordered = [...items];
      moveItemInArray(reordered, event.previousIndex, event.currentIndex);
      return reordered;
    });
  }

  predict() {
    // run_inference is synchronous in the wasm-bindgen glue and expects a Float32Array
    this.result.set(run_inference(new Float32Array(this.inputs())));
  }
}