Official client libraries for SM-AI-MODELS with built-in retry logic, streaming support, and type safety.
Python SDK
Installation
```bash
pip install sm-ai-models
```
Quick Start
```python
from sm_ai_models import SMAIClient

client = SMAIClient(
    api_key="YOUR_API_KEY",  # or set SM_AI_API_KEY env var
    tts_host="http://YOUR_HOST:9999",
    asr_host="http://YOUR_HOST:8088"
)

# Text-to-Speech
audio = client.tts.generate("مرحباً بكم", voice="Yara")
audio.save("greeting.mp3")

# Speech Recognition
result = client.asr.transcribe("recording.wav")
print(result.text)
print(result.confidence)
```
Full TTS API
```python
from sm_ai_models import SMAIClient

client = SMAIClient(api_key="YOUR_API_KEY")

# Basic generation
audio = client.tts.generate(
    text="مرحباً بكم في يونيكود سولوشنز",
    voice="Yara",         # "Yara", "Nouf", "Yara_en"
    format="mp3",         # "mp3", "wav", "opus", "flac", "pcm"
    speed=1.0,            # 0.25 - 4.0
    sample_rate=22050     # 8000, 16000, 22050, 24000
)

# Save to file
audio.save("output.mp3")

# Get raw bytes
raw_bytes = audio.content

# Get duration
print(f"Duration: {audio.duration_seconds}s")
print(f"Size: {audio.size_bytes} bytes")

# Streaming generation
for chunk in client.tts.stream("مرحباً بكم في يونيكود سولوشنز", voice="Yara"):
    process_audio(chunk)

# Stream with latency measurement
stream = client.tts.stream(
    "مرحباً بكم",
    voice="Yara",
    format="pcm",
    sample_rate=16000
)
for chunk in stream:
    if stream.is_first_chunk:
        print(f"TTFC: {stream.time_to_first_chunk_ms}ms")
    play_audio(chunk)
print(f"Total: {stream.total_time_ms}ms")
```
Full ASR API
```python
# Transcribe from file
result = client.asr.transcribe("recording.wav")
print(result.text)              # "مرحباً بكم في يونيكود سولوشنز"
print(result.confidence)        # 0.95
print(result.duration_seconds)  # 3.2
print(result.language)          # "ar"

# Transcribe from bytes
with open("audio.wav", "rb") as f:
    result = client.asr.transcribe(f.read(), format="wav")

# Transcribe from URL
result = client.asr.transcribe_url("https://example.com/audio.wav")

# Word-level timestamps (gRPC only)
result = client.asr.transcribe("recording.wav", word_timestamps=True)
for word in result.words:
    print(f"{word.word} [{word.start_time:.2f}s - {word.end_time:.2f}s] ({word.confidence:.2f})")

# Real-time streaming ASR
for partial in client.asr.stream_microphone():
    if partial.is_final:
        print(f"\n[Final] {partial.text}")
    else:
        print(f"[Partial] {partial.text}", end='\r')
```
Configuration
```python
from sm_ai_models import SMAIClient, Config

client = SMAIClient(
    api_key="YOUR_API_KEY",
    config=Config(
        tts_host="http://YOUR_HOST:9999",
        asr_host="http://YOUR_HOST:8088",
        timeout=30,       # Request timeout in seconds
        max_retries=3,    # Auto-retry on 5xx errors
        retry_delay=1.0,  # Initial retry delay (exponential backoff)
        verify_ssl=True,  # Verify TLS certificates
    )
)

# Health checks
status = client.health()
print(status.tts)  # "healthy" or "unhealthy"
print(status.asr)  # "healthy" or "unhealthy"
```
Error Handling
```python
from sm_ai_models import SMAIClient
from sm_ai_models.exceptions import (
    SMAIError,
    AuthenticationError,
    RateLimitError,
    ValidationError,
    ServerError,
    ConnectionError
)

client = SMAIClient(api_key="YOUR_API_KEY")

try:
    audio = client.tts.generate("مرحباً", voice="InvalidVoice")
except AuthenticationError:
    print("Invalid API key")
except RateLimitError as e:
    print(f"Rate limited. Retry after {e.retry_after}s")
except ValidationError as e:
    print(f"Invalid input: {e.message}")
    print(f"Error code: {e.code}")  # e.g. "invalid_voice"
except ServerError:
    print("Server error — will auto-retry if configured")
except ConnectionError:
    print("Cannot connect to SM-AI-MODELS service")
except SMAIError as e:
    print(f"Unexpected error: {e}")
```
Async Support
```python
import asyncio

from sm_ai_models import AsyncSMAIClient

async def main():
    client = AsyncSMAIClient(api_key="YOUR_API_KEY")

    # Async TTS
    audio = await client.tts.generate("مرحباً بكم", voice="Yara")
    audio.save("output.mp3")

    # Async ASR
    result = await client.asr.transcribe("recording.wav")
    print(result.text)

    # Async streaming
    async for chunk in client.tts.stream("مرحباً بكم", voice="Yara"):
        process_audio(chunk)

    await client.close()

asyncio.run(main())
```
Node.js SDK
Installation
```bash
npm install sm-ai-models
# or
yarn add sm-ai-models
```
Quick Start
```javascript
import { SMAIClient } from 'sm-ai-models';

const client = new SMAIClient({
  apiKey: 'YOUR_API_KEY',  // or set SM_AI_API_KEY env var
  ttsHost: 'http://YOUR_HOST:9999',
  asrHost: 'http://YOUR_HOST:8088',
});

// Text-to-Speech
const audio = await client.tts.generate('مرحباً بكم', { voice: 'Yara' });
await audio.save('greeting.mp3');

// Speech Recognition
const result = await client.asr.transcribe('recording.wav');
console.log(result.text);
```
Full TTS API
```javascript
import { SMAIClient } from 'sm-ai-models';
import fs from 'fs/promises';

const client = new SMAIClient({ apiKey: 'YOUR_API_KEY' });

// Generate audio
const audio = await client.tts.generate('مرحباً بكم في يونيكود سولوشنز', {
  voice: 'Yara',      // 'Yara' | 'Nouf' | 'Yara_en'
  format: 'mp3',      // 'mp3' | 'wav' | 'opus' | 'flac' | 'pcm'
  speed: 1.0,         // 0.25 - 4.0
  sampleRate: 22050,  // 8000, 16000, 22050, 24000
});

// Save to file
await audio.save('output.mp3');

// Get buffer
const buffer = audio.toBuffer();
await fs.writeFile('output.mp3', buffer);

// Streaming
const stream = client.tts.stream('مرحباً بكم في يونيكود سولوشنز', {
  voice: 'Yara',
  format: 'pcm',
});

stream.on('data', (chunk) => processAudio(chunk));
stream.on('firstChunk', (ttfc) => console.log(`TTFC: ${ttfc}ms`));
stream.on('end', (stats) => console.log(`Total: ${stats.totalMs}ms`));
stream.on('error', (err) => console.error(err));
```
Full ASR API
```javascript
// Transcribe from file
// (each result gets a distinct name — redeclaring `const result` in one
// scope would be a SyntaxError)
const fileResult = await client.asr.transcribe('recording.wav');
console.log(fileResult.text);        // "مرحباً بكم في يونيكود سولوشنز"
console.log(fileResult.confidence);  // 0.95
console.log(fileResult.language);    // "ar"

// Transcribe from buffer
const audioBuffer = await fs.readFile('recording.wav');
const bufferResult = await client.asr.transcribe(audioBuffer, { format: 'wav' });

// Word timestamps (gRPC)
const timedResult = await client.asr.transcribe('recording.wav', { wordTimestamps: true });
for (const word of timedResult.words) {
  console.log(`${word.word} [${word.startTime}s - ${word.endTime}s]`);
}
```
TypeScript Types
```typescript
import { SMAIClient, TTSOptions, ASRResult, Voice, AudioFormat } from 'sm-ai-models';

const options: TTSOptions = {
  voice: 'Yara' as Voice,
  format: 'mp3' as AudioFormat,
  speed: 1.0,
  sampleRate: 22050,
  stream: false,
};

const result: ASRResult = await client.asr.transcribe('audio.wav');
// result.text: string
// result.confidence: number
// result.durationSeconds: number
// result.language: 'ar' | 'en'
// result.words?: WordInfo[]
```
Error Handling
```javascript
import { SMAIClient, SMAIError, RateLimitError, ValidationError } from 'sm-ai-models';

try {
  const audio = await client.tts.generate('مرحباً', { voice: 'Invalid' });
} catch (err) {
  if (err instanceof RateLimitError) {
    console.log(`Retry after ${err.retryAfter}s`);
  } else if (err instanceof ValidationError) {
    console.log(`Invalid: ${err.message} (code: ${err.code})`);
  } else if (err instanceof SMAIError) {
    console.log(`Request ID: ${err.requestId}`);
  }
}
```
HTTP (No SDK)
If you prefer not to use the official SDKs, you can use any HTTP client. See:
- Python Integration (raw requests)
- Node.js Integration (raw fetch)
- API Reference — Interactive playground
Community SDKs
| Language | Package | Maintainer | Status |
|---|---|---|---|
| Go | sm-ai-models-go | Community | Beta |
| C# / .NET | SMAIModels.Net | Community | Beta |
| Java | sm-ai-models-java | Community | Planned |
Community SDKs are not officially supported. Use at your own discretion.
Last modified on
