Learn to integrate AI models into web applications. Master REST APIs, Flask, FastAPI, TensorFlow.js, and deployment strategies for production-ready AI apps.
Integrating AI models into web applications allows you to create intelligent, interactive experiences. You can use pre-trained models, APIs, or deploy your own custom models.
# Ways to integrate AI in web apps:
1. REST APIs (OpenAI, Hugging Face, Google Cloud AI)
2. Client-side ML (TensorFlow.js, ONNX.js)
3. Server-side inference (Flask, FastAPI)
4. Edge deployment (TensorFlow Lite, ONNX Runtime)
5. Cloud services (AWS SageMaker, Azure ML, Google Vertex AI)
Different approaches to AI integration
OpenAI provides powerful APIs for GPT models, DALL-E, and Whisper. Learn to integrate ChatGPT into your web application.
# Install OpenAI SDK
pip install openai
Install OpenAI Python library
from openai import OpenAI

# Initialize client
client = OpenAI(api_key='your-api-key-here')

# Chat completion
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing in simple terms."}
    ],
    temperature=0.7,
    max_tokens=150
)

print(response.choices[0].message.content)
Basic ChatGPT integration
# Streaming responses for better UX
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a short story"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
Stream responses for real-time output
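In a web app you would typically relay this stream to the browser rather than print it. Below is a minimal sketch using Flask (introduced in the next section) and server-sent events, reusing the client object from above; the /stream route name and hard-coded prompt are illustrative:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/stream')
def stream_story():
    def generate():
        # Re-create the streaming completion from the previous example
        stream = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "Write a short story"}],
            stream=True,
        )
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                # Server-sent events format: each message is "data: ...\n\n"
                yield f"data: {delta}\n\n"
    return Response(generate(), mimetype='text/event-stream')
Relay a streamed completion to the browser

The browser can then consume the stream with an EventSource and append each chunk to the page as it arrives.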
Create a REST API using Flask to serve your machine learning models. This allows your web app to make predictions via HTTP requests.
# Install Flask
pip install flask flask-cors
Install Flask and CORS support
from flask import Flask, request, jsonify
from flask_cors import CORS
import joblib
import numpy as np

app = Flask(__name__)
CORS(app)  # Enable CORS for web apps

# Load pre-trained model
model = joblib.load('model.pkl')

@app.route('/predict', methods=['POST'])
def predict():
    try:
        # Get data from request
        data = request.get_json()
        features = np.array(data['features']).reshape(1, -1)

        # Make prediction
        prediction = model.predict(features)
        probability = model.predict_proba(features)

        return jsonify({
            'prediction': int(prediction[0]),
            'probability': probability[0].tolist(),
            'status': 'success'
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 400

@app.route('/health', methods=['GET'])
def health():
    return jsonify({'status': 'healthy'})

if __name__ == '__main__':
    app.run(debug=True, port=5000)
Complete Flask API for ML model
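To call this endpoint from code, send a JSON body with a features array. Here is a quick test with requests, assuming a model trained on four numeric features; the feature values below are illustrative:

import requests

response = requests.post(
    'http://localhost:5000/predict',
    json={'features': [5.1, 3.5, 1.4, 0.2]}  # illustrative feature values
)
print(response.json())
# e.g. {'prediction': 0, 'probability': [...], 'status': 'success'}
Call the Flask API from Python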
FastAPI is a modern, fast framework for building APIs. It is well suited to ML services and generates interactive API documentation automatically.
# Install FastAPI and Uvicorn
pip install fastapi uvicorn python-multipart
Install FastAPI dependencies
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import tensorflow as tf
import numpy as np
from PIL import Image
import io

app = FastAPI(title="Image Classification API")

# Enable CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load model
model = tf.keras.models.load_model('image_classifier.h5')
class_names = ['cat', 'dog', 'bird']

@app.post("/classify")
async def classify_image(file: UploadFile = File(...)):
    # Read and preprocess image
    contents = await file.read()
    image = Image.open(io.BytesIO(contents)).convert('RGB')  # drop alpha/grayscale channels
    image = image.resize((224, 224))
    image_array = np.array(image) / 255.0
    image_array = np.expand_dims(image_array, axis=0)

    # Make prediction
    predictions = model.predict(image_array)
    predicted_class = class_names[np.argmax(predictions[0])]
    confidence = float(np.max(predictions[0]))

    return {
        "class": predicted_class,
        "confidence": confidence,
        "all_predictions": {
            class_names[i]: float(predictions[0][i])
            for i in range(len(class_names))
        }
    }

# Run with: uvicorn main:app --reload
FastAPI image classification endpoint
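You can exercise the endpoint with a multipart file upload. A minimal test using requests, assuming a local test.jpg (the filename is illustrative):

import requests

with open('test.jpg', 'rb') as f:
    response = requests.post(
        'http://localhost:8000/classify',
        files={'file': ('test.jpg', f, 'image/jpeg')}  # field name must match the endpoint parameter
    )
print(response.json())
Test the FastAPI endpoint with a file upload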
TensorFlow.js allows you to run ML models directly in the browser, enabling real-time predictions without server calls.
<!-- Include TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/mobilenet"></script>

<input type="file" id="imageUpload" accept="image/*">
<img id="preview" style="max-width: 400px;">
<div id="predictions"></div>

<script>
let model;

// Load pre-trained MobileNet model
async function loadModel() {
  model = await mobilenet.load();
  console.log('Model loaded');
}

// Classify image
async function classifyImage(img) {
  const predictions = await model.classify(img);

  // Display predictions
  const predDiv = document.getElementById('predictions');
  predDiv.innerHTML = '<h3>Predictions:</h3>';
  predictions.forEach(pred => {
    predDiv.innerHTML += `
      <p>${pred.className}: ${(pred.probability * 100).toFixed(2)}%</p>
    `;
  });
}

// Handle file upload
document.getElementById('imageUpload').addEventListener('change', (e) => {
  const file = e.target.files[0];
  const reader = new FileReader();
  reader.onload = (event) => {
    const img = document.getElementById('preview');
    img.onload = () => classifyImage(img);  // attach handler before setting src
    img.src = event.target.result;
  };
  reader.readAsDataURL(file);
});

loadModel();
</script>
Image classification in the browser with TensorFlow.js
Build a React component that integrates with AI APIs for a modern, interactive user experience.
import React, { useState } from 'react';
import axios from 'axios';

function ChatBot() {
  const [messages, setMessages] = useState([]);
  const [input, setInput] = useState('');
  const [loading, setLoading] = useState(false);

  const sendMessage = async () => {
    if (!input.trim()) return;

    // Add user message
    const userMessage = { role: 'user', content: input };
    setMessages([...messages, userMessage]);
    setInput('');
    setLoading(true);

    try {
      // Call your backend API
      const response = await axios.post('http://localhost:5000/chat', {
        message: input,
        history: messages
      });

      // Add AI response
      const aiMessage = {
        role: 'assistant',
        content: response.data.response
      };
      setMessages(prev => [...prev, aiMessage]);
    } catch (error) {
      console.error('Error:', error);
    } finally {
      setLoading(false);
    }
  };

  return (
    <div className="chat-container">
      <div className="messages">
        {messages.map((msg, idx) => (
          <div key={idx} className={`message ${msg.role}`}>
            {msg.content}
          </div>
        ))}
        {loading && <div className="loading">Thinking...</div>}
      </div>
      <div className="input-area">
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          onKeyDown={(e) => e.key === 'Enter' && sendMessage()}
          placeholder="Type a message..."
        />
        <button onClick={sendMessage}>Send</button>
      </div>
    </div>
  );
}

export default ChatBot;
React chatbot component
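The component posts to a /chat endpoint on your own server, which is not shown above. Here is a minimal sketch of a matching Flask backend using the OpenAI client from earlier; the request shape mirrors what the component sends (message plus history):

from flask import Flask, request, jsonify
from flask_cors import CORS
from openai import OpenAI

app = Flask(__name__)
CORS(app)
client = OpenAI(api_key='your-api-key-here')

@app.route('/chat', methods=['POST'])
def chat():
    data = request.get_json()
    # Rebuild the conversation: prior turns plus the new user message
    messages = data.get('history', []) + [
        {'role': 'user', 'content': data['message']}
    ]
    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages
    )
    return jsonify({'response': response.choices[0].message.content})

if __name__ == '__main__':
    app.run(port=5000)
Sketch of a Flask backend for the chatbot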
Hugging Face provides free inference APIs for thousands of pre-trained models. Perfect for quick prototyping.
import requests

API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
headers = {"Authorization": "Bearer YOUR_HF_TOKEN"}

def summarize_text(text):
    payload = {"inputs": text}
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

# Example usage
article = """
Your long article text here...
"""

summary = summarize_text(article)
print(summary[0]['summary_text'])
Text summarization with Hugging Face API
# Sentiment analysis
API_URL = "https://api-inference.huggingface.co/models/distilbert-base-uncased-finetuned-sst-2-english"

def analyze_sentiment(text):
    payload = {"inputs": text}
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

result = analyze_sentiment("I love this product!")
print(result)  # [{'label': 'POSITIVE', 'score': 0.9998}]
Sentiment analysis with Hugging Face
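On the free inference tier, a model that is not yet loaded typically responds with HTTP 503 and an estimated_time field. A small retry wrapper for either function above; the retry count and fallback wait are arbitrary choices:

import time

def query_with_retry(api_url, payload, max_retries=5):
    for _ in range(max_retries):
        response = requests.post(api_url, headers=headers, json=payload)
        if response.status_code == 503:
            # Model is still loading; wait the suggested time and retry
            wait = response.json().get('estimated_time', 10)
            time.sleep(wait)
            continue
        response.raise_for_status()
        return response.json()
    raise RuntimeError('Model did not become available in time')

# Example usage
result = query_with_retry(API_URL, {"inputs": "I love this product!"})
Retry while a Hugging Face model loads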
Learn best practices for deploying ML models to production environments with proper monitoring and scaling.
# Docker deployment
# Dockerfile
FROM python:3.9-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]Dockerfile for ML API
# requirements.txt
fastapi==0.104.1
uvicorn==0.24.0
tensorflow==2.15.0
numpy==1.24.3
pillow==10.1.0
python-multipart==0.0.6
Python dependencies for deployment
# Build and run Docker container
docker build -t ml-api .
docker run -p 8000:8000 ml-api
# Or use docker-compose.yml
version: '3.8'
services:
  ml-api:
    build: .
    ports:
      - "8000:8000"
    environment:
      - MODEL_PATH=/models/model.h5
    volumes:
      - ./models:/models
Deploy with Docker
OpenAI API - GPT models
Flask/FastAPI - Custom APIs
TensorFlow.js - Browser ML
Hugging Face - Pre-trained models