Architecture
Backend Services

Backend

Mind Measure Backend Architecture

Overview

The Mind Measure backend is built on AWS serverless architecture, providing scalable, secure, and cost-effective services for mental health data processing and management.

Architecture Components

AWS Lambda Functions

Core Analysis Functions

analyze-audio

// Purpose: Process voice patterns and emotional tone analysis
// Runtime: Node.js 18.x
// Memory: 1024 MB
// Timeout: 30 seconds
 
// Shape of the request body POSTed to /analyze-audio.
interface AudioAnalysisRequest {
  sessionId: string; // Assessment session the audio belongs to
  audioData: {
    conversation_duration: number; // NOTE(review): presumably seconds — confirm unit
    speech_rate: number;
    voice_quality: string;
    emotional_tone: string;
    mood_score_1_10: number; // Self-reported mood on a 1-10 scale
    transcript_length: number;
  };
}
 
// Shape of the successful response from /analyze-audio.
interface AudioAnalysisResponse {
  sessionId: string;
  analysis: {
    vocal_stress_indicators: number;
    speech_pattern_analysis: object; // NOTE(review): untyped here — confirm shape against the Lambda implementation
    emotional_markers: string[];
    confidence_score: number;
  };
  processing_time: number; // NOTE(review): presumably milliseconds — confirm
}

analyze-visual

// Purpose: Facial emotion recognition via AWS Rekognition
// Runtime: Node.js 18.x
// Memory: 2048 MB
// Timeout: 60 seconds
 
// Request body POSTed to /analyze-visual.
interface VisualAnalysisRequest {
  sessionId: string;
  imageData: string; // Base64 encoded image
  visualSummary: {
    samples_captured: number; // Frames sampled client-side during the session
    face_detection_rate: number;
    avg_brightness: number;
    quality_score: number;
    engagement_level: string;
  };
}
 
// Successful response from /analyze-visual.
interface VisualAnalysisResponse {
  sessionId: string;
  // Near-raw output from AWS Rekognition face detection.
  rekognition_results: {
    emotions: Array<{
      emotion: string;
      confidence: number;
    }>;
    face_details: object;
    quality_assessment: object;
  };
  // Derived summary consumed downstream by the fusion step.
  analysis_summary: {
    primary_emotion: string;
    emotional_stability: number;
    engagement_score: number;
  };
}

analyze-text

// Purpose: Natural language processing of conversation transcripts
// Runtime: Node.js 18.x
// Memory: 512 MB
// Timeout: 15 seconds
 
// Request body POSTed to /analyze-text.
interface TextAnalysisRequest {
  sessionId: string;
  conversationText: string; // Full conversation transcript to analyze
}
 
// Successful response from /analyze-text.
interface TextAnalysisResponse {
  sessionId: string;
  analysis: {
    sentiment_analysis: {
      overall_sentiment: 'positive' | 'negative' | 'neutral';
      confidence: number;
      emotional_indicators: string[];
    };
    linguistic_patterns: {
      complexity_score: number;
      coherence_score: number;
      emotional_language_usage: number;
    };
    key_themes: string[];
    risk_indicators: string[]; // NOTE(review): presumably feeds the fusion risk assessment — confirm
  };
}

calculate-mind-measure

// Purpose: Multi-modal fusion algorithm for wellness scoring
// Runtime: Node.js 18.x
// Memory: 1024 MB
// Timeout: 45 seconds
 
// Request body for /calculate-mind-measure. Only the session id is needed;
// the per-modality inputs are read from the session row (see getFusionInputs).
interface FusionRequest {
  sessionId: string;
}
 
// Successful response from /calculate-mind-measure.
interface FusionResponse {
  sessionId: string;
  fusion_score: {
    final_score: number; // 0-100
    confidence: number;
    // Per-modality scores that were fused into final_score.
    component_scores: {
      audio_score: number;
      visual_score: number;
      text_score: number;
    };
    risk_assessment: {
      level: 'low' | 'medium' | 'high';
      indicators: string[];
      recommendations: string[];
    };
  };
}

Lambda Function Implementation

Function Structure

// Standard Lambda function structure
import { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';
import { verifyJWTToken } from './auth';
import { connectToDatabase } from './database';
 
/**
 * Standard Lambda entry point: authenticates the caller, validates input,
 * runs the analysis, and maps errors to HTTP responses.
 */
export const handler = async (
  event: APIGatewayProxyEvent
): Promise<APIGatewayProxyResult> => {
  try {
    // 1. Verify authentication. Reject explicitly when the header is absent
    //    instead of passing `undefined` into the verifier (a strict-mode type
    //    error in the original, and a misleading 500 at runtime).
    const token = event.headers.Authorization?.replace('Bearer ', '');
    if (!token) {
      return {
        statusCode: 401,
        body: JSON.stringify({ error: 'Missing bearer token' })
      };
    }
    const user = await verifyJWTToken(token);
    
    // 2. Parse request body (missing body is treated as an empty object)
    const body = JSON.parse(event.body || '{}');
    
    // 3. Validate input
    if (!body.sessionId) {
      return {
        statusCode: 400,
        body: JSON.stringify({ error: 'sessionId is required' })
      };
    }
    
    // 4. Process request
    const result = await processAnalysis(body);
    
    // 5. Return response
    return {
      statusCode: 200,
      headers: {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*'
      },
      body: JSON.stringify(result)
    };
    
  } catch (error) {
    console.error('Lambda function error:', error);
    // `error` is `unknown` under strict mode — narrow before reading .message.
    return {
      statusCode: 500,
      body: JSON.stringify({ 
        error: 'Internal server error',
        message: error instanceof Error ? error.message : 'Unknown error'
      })
    };
  }
};

Error Handling & Resilience

// Robust error handling with fallback
/**
 * Execute `primaryFunction`, falling back to `fallbackFunction` when the
 * primary throws or rejects. The primary's failure is logged but not
 * rethrown; only a failure of the fallback itself propagates to the caller.
 *
 * @param primaryFunction  preferred implementation
 * @param fallbackFunction degraded implementation used on primary failure
 * @param functionName     human-readable label used in log lines
 */
export async function processWithFallback<T>(
  primaryFunction: () => Promise<T>,
  fallbackFunction: () => Promise<T>,
  functionName: string
): Promise<T> {
  try {
    console.log(`🚀 Executing ${functionName}`);
    const primaryResult = await primaryFunction();
    console.log(`✅ ${functionName} completed successfully`);
    return primaryResult;
  } catch (primaryError) {
    console.warn(`⚠️ ${functionName} failed, using fallback:`, primaryError);
    return fallbackFunction();
  }
}
 
// Usage in analysis pipeline
// allSettled (rather than all) lets one modality fail without aborting the
// others; each call also carries its own fallback, so a settled rejection
// here should be rare (only when both primary and fallback throw).
const analysisResults = await Promise.allSettled([
  processWithFallback(
    () => analyzeAudio(sessionId, audioData),
    () => generateFallbackAudioAnalysis(audioData),
    'Audio Analysis'
  ),
  processWithFallback(
    () => analyzeVisual(sessionId, imageData),
    () => generateFallbackVisualAnalysis(),
    'Visual Analysis'
  ),
  processWithFallback(
    () => analyzeText(sessionId, textData),
    () => generateFallbackTextAnalysis(textData),
    'Text Analysis'
  )
]);

API Gateway Configuration

Endpoint Structure

Base URL: https://4xg1jsjh7k.execute-api.eu-west-2.amazonaws.com/dev
 
Endpoints:
  POST /analyze-audio:
    Description: Process audio analysis
    Authentication: Required (JWT)
    Rate Limit: 10 requests/minute
    
  POST /analyze-visual:
    Description: Process visual analysis
    Authentication: Required (JWT)
    Rate Limit: 5 requests/minute
    
  POST /analyze-text:
    Description: Process text analysis
    Authentication: Required (JWT)
    Rate Limit: 20 requests/minute
    
  POST /calculate-mind-measure:
    Description: Calculate fusion score
    Authentication: Required (JWT)
    Rate Limit: 10 requests/minute

CORS Configuration

Note: the wildcard Access-Control-Allow-Origin is suitable for development only; production deployments should restrict the allowed origin to the known web-app domains, particularly because these endpoints accept Authorization headers.

{
  "Access-Control-Allow-Origin": "*",
  "Access-Control-Allow-Headers": "Content-Type,Authorization",
  "Access-Control-Allow-Methods": "POST,OPTIONS",
  "Access-Control-Max-Age": "86400"
}

Database Integration

Aurora Serverless v2 Connection

// Database connection pool
// Created once per Lambda container and reused across warm invocations.
const pool = new Pool({
  host: process.env.RDS_HOST,
  port: parseInt(process.env.RDS_PORT || '5432'),
  database: process.env.RDS_DATABASE,
  user: process.env.RDS_USERNAME,
  password: process.env.RDS_PASSWORD,
  // NOTE(review): rejectUnauthorized:false disables TLS certificate
  // verification — acceptable only inside a trusted VPC; confirm for prod.
  ssl: { rejectUnauthorized: false },
  max: 20,                        // Upper bound on pooled connections
  idleTimeoutMillis: 30000,       // Close idle clients after 30s
  connectionTimeoutMillis: 10000, // Fail fast if no connection within 10s
});
 
// Database operations
export class DatabaseService {
  // Whitelist of analysis modalities. `analysisType` is interpolated into a
  // column name below, so it must never reach the query unvalidated — that
  // interpolation was a SQL-injection vector in the original.
  private static readonly ANALYSIS_TYPES: ReadonlySet<string> = new Set([
    'text',
    'audio',
    'visual'
  ]);

  /**
   * Store one modality's analysis JSON on the session row.
   * @param sessionId    assessment_sessions.id to update
   * @param analysisType 'text' | 'audio' | 'visual' (selects the column)
   * @param data         analysis payload; persisted as JSON text
   * @throws Error when analysisType is not a known modality.
   */
  async insertAnalysisResult(sessionId: string, analysisType: string, data: any) {
    if (!DatabaseService.ANALYSIS_TYPES.has(analysisType)) {
      throw new Error(`Invalid analysis type: ${analysisType}`);
    }
    const query = `
      UPDATE assessment_sessions 
      SET ${analysisType}_data = $2, updated_at = NOW()
      WHERE id = $1
    `;
    await pool.query(query, [sessionId, JSON.stringify(data)]);
  }

  /** Fetch the three per-modality payloads needed by the fusion step. */
  async getFusionInputs(sessionId: string) {
    const query = `
      SELECT text_data, audio_data, visual_data
      FROM assessment_sessions
      WHERE id = $1
    `;
    const result = await pool.query(query, [sessionId]);
    return result.rows[0]; // undefined when the session does not exist
  }

  /** Persist the fused wellness score for a session. */
  async storeFusionResult(sessionId: string, userId: string, fusionData: any) {
    const query = `
      INSERT INTO fusion_outputs (
        session_id, user_id, score, final_score, 
        analysis, topics, created_at
      ) VALUES ($1, $2, $3, $4, $5, $6, NOW())
    `;
    await pool.query(query, [
      sessionId, userId, fusionData.score, fusionData.final_score,
      JSON.stringify(fusionData.analysis), JSON.stringify(fusionData.topics)
    ]);
  }
}

Vercel API Functions

Database Operations API

// /api/database/select.ts
import type { NextApiRequest, NextApiResponse } from 'next';
import { DatabaseService } from '@/services/database/AWSService';
 
/**
 * POST-only database select proxy. Validates the request shape, delegates to
 * DatabaseService.select, and returns a {data, count, error} envelope.
 */
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }
 
  try {
    const { table, columns, filters, orderBy, limit } = req.body;

    // Reject structurally invalid requests before touching the database.
    if (typeof table !== 'string' || table.length === 0) {
      return res.status(400).json({ data: null, count: 0, error: 'table is required' });
    }
    
    const dbService = new DatabaseService();
    const result = await dbService.select(table, {
      columns,
      filters,
      orderBy,
      limit
    });
    
    res.status(200).json({
      data: result.data,
      count: result.count,
      error: null
    });
    
  } catch (error) {
    console.error('Database select error:', error);
    // `error` is `unknown` under strict mode — narrow before reading .message.
    res.status(500).json({
      data: null,
      count: 0,
      error: error instanceof Error ? error.message : 'Internal server error'
    });
  }
}

Storage Operations API

// /api/storage/upload.ts
import type { NextApiRequest, NextApiResponse } from 'next';
import formidable from 'formidable';
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';
 
// Shared S3 client; credentials come from the deployment environment.
// NOTE(review): the non-null `!` assumes both env vars are always set — a
// missing key fails at request time rather than at startup; confirm that is
// acceptable.
const s3Client = new S3Client({
  region: process.env.AWS_REGION,
  credentials: {
    accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
  },
});
 
export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }
 
  try {
    const form = formidable({
      maxFileSize: 10 * 1024 * 1024, // 10MB limit
    });
 
    const [fields, files] = await form.parse(req);
    const file = Array.isArray(files.file) ? files.file[0] : files.file;
    const universityId = Array.isArray(fields.university_id) 
      ? fields.university_id[0] 
      : fields.university_id;
 
    if (!file || !universityId) {
      return res.status(400).json({ error: 'File and university_id required' });
    }
 
    const bucketName = `mindmeasure-${universityId}-assets`;
    const key = `uploads/${Date.now()}-${file.originalFilename}`;
 
    const command = new PutObjectCommand({
      Bucket: bucketName,
      Key: key,
      Body: require('fs').createReadStream(file.filepath),
      ContentType: file.mimetype || 'application/octet-stream',
    });
 
    await s3Client.send(command);
 
    const fileUrl = `https://${bucketName}.s3.${process.env.AWS_REGION}.amazonaws.com/${key}`;
 
    res.status(200).json({
      success: true,
      url: fileUrl,
      key: key
    });
 
  } catch (error) {
    console.error('File upload error:', error);
    res.status(500).json({
      success: false,
      error: 'File upload failed'
    });
  }
}
 
export const config = {
  api: {
    bodyParser: false,
  },
};

Authentication Integration

JWT Token Verification

// JWT token verification for Lambda functions
import jwt from 'jsonwebtoken';
import jwksClient from 'jwks-rsa';
 
// JWKS client pointed at the Cognito user pool's published signing keys.
const client = jwksClient({
  jwksUri: `https://cognito-idp.${process.env.AWS_REGION}.amazonaws.com/${process.env.COGNITO_USER_POOL_ID}/.well-known/jwks.json`
});
 
// Resolve the RSA public key for the token's `kid` from the Cognito JWKS.
function getKey(header: any, callback: any) {
  client.getSigningKey(header.kid, (err, key) => {
    // Propagate lookup failures. The original swallowed `err` and called back
    // with an undefined key, which surfaces as a confusing downstream
    // "secret or public key must be provided" jwt error.
    if (err || !key) {
      callback(err ?? new Error('Signing key not found'));
      return;
    }
    callback(null, key.getPublicKey());
  });
}
 
// Verify a Cognito-issued JWT. Resolves with the decoded claims; rejects on
// any signature, audience, issuer, or expiry failure.
export async function verifyJWTToken(token: string): Promise<any> {
  return new Promise((resolve, reject) => {
    jwt.verify(token, getKey, {
      audience: process.env.COGNITO_CLIENT_ID, // App client the token must be issued for
      issuer: `https://cognito-idp.${process.env.AWS_REGION}.amazonaws.com/${process.env.COGNITO_USER_POOL_ID}`,
      algorithms: ['RS256'] // Pin the algorithm to prevent downgrade tricks
    }, (err, decoded) => {
      if (err) {
        reject(err);
      } else {
        resolve(decoded);
      }
    });
  });
}

Cognito Integration

// AWS Cognito user management
import {
  CognitoIdentityProviderClient,
  AdminGetUserCommand,
  AdminUpdateUserAttributesCommand
} from '@aws-sdk/client-cognito-identity-provider';
 
const cognitoClient = new CognitoIdentityProviderClient({
  region: process.env.AWS_REGION
});
 
// AWS Cognito user management — thin wrapper over the admin user APIs,
// always targeting the pool configured via COGNITO_USER_POOL_ID.
export class CognitoService {
  /** Look up a single user by username in the configured user pool. */
  async getUser(username: string) {
    return cognitoClient.send(
      new AdminGetUserCommand({
        UserPoolId: process.env.COGNITO_USER_POOL_ID,
        Username: username
      })
    );
  }

  /** Overwrite the given attributes on an existing user. */
  async updateUserAttributes(username: string, attributes: any[]) {
    return cognitoClient.send(
      new AdminUpdateUserAttributesCommand({
        UserPoolId: process.env.COGNITO_USER_POOL_ID,
        Username: username,
        UserAttributes: attributes
      })
    );
  }
}

AI/ML Integration

AWS Rekognition Integration

// Visual analysis using AWS Rekognition
import {
  RekognitionClient,
  DetectFacesCommand,
  DetectModerationLabelsCommand
} from '@aws-sdk/client-rekognition';
 
// Shared Rekognition client (region from the deploy environment).
const rekognitionClient = new RekognitionClient({
  region: process.env.AWS_REGION
});
 
export class VisualAnalysisService {
  /**
   * Run Rekognition DetectFaces on a base64-encoded image and project the
   * result down to the emotion / quality / pose fields the pipeline consumes.
   */
  async analyzeFacialEmotions(imageBase64: string) {
    const imageBuffer = Buffer.from(imageBase64, 'base64');
    
    const command = new DetectFacesCommand({
      Image: { Bytes: imageBuffer },
      Attributes: ['ALL'] // request all facial attributes, incl. Emotions
    });
    
    const result = await rekognitionClient.send(command);
    
    return {
      // One entry per detected face; [] when Rekognition found none.
      faces: result.FaceDetails?.map(face => ({
        emotions: face.Emotions?.map(emotion => ({
          type: emotion.Type,
          confidence: emotion.Confidence
        })),
        quality: {
          brightness: face.Quality?.Brightness,
          sharpness: face.Quality?.Sharpness
        },
        pose: {
          roll: face.Pose?.Roll,
          yaw: face.Pose?.Yaw,
          pitch: face.Pose?.Pitch
        }
      })) || []
    };
  }
}

ElevenLabs Integration

// ElevenLabs conversation processing
export class ConversationService {
  async processConversationData(conversationData: any) {
    return {
      transcript: conversationData.transcript || '',
      duration: conversationData.duration || 0,
      emotional_markers: this.extractEmotionalMarkers(conversationData),
      speech_patterns: this.analyzeSpeechPatterns(conversationData),
      engagement_level: this.calculateEngagement(conversationData)
    };
  }
  
  private extractEmotionalMarkers(data: any): string[] {
    // Extract emotional indicators from conversation
    const markers = [];
    
    if (data.tone_analysis) {
      markers.push(...data.tone_analysis.emotions);
    }
    
    if (data.speech_rate && data.speech_rate < 0.8) {
      markers.push('slow_speech');
    }
    
    if (data.voice_quality && data.voice_quality.includes('strain')) {
      markers.push('vocal_strain');
    }
    
    return markers;
  }
}

Monitoring & Logging

CloudWatch Integration

// CloudWatch metrics and logging
import {
  CloudWatchClient,
  PutMetricDataCommand
} from '@aws-sdk/client-cloudwatch';
 
// Shared CloudWatch client (region from the deploy environment).
const cloudWatchClient = new CloudWatchClient({
  region: process.env.AWS_REGION
});
 
export class MonitoringService {
  /**
   * Publish a single custom metric to the MindMeasure/Lambda namespace.
   * @param metricName CloudWatch metric name
   * @param value      numeric value to record
   * @param unit       CloudWatch unit string (defaults to 'Count')
   */
  async recordMetric(metricName: string, value: number, unit: string = 'Count') {
    const command = new PutMetricDataCommand({
      Namespace: 'MindMeasure/Lambda',
      MetricData: [{
        MetricName: metricName,
        Value: value,
        Unit: unit,
        Timestamp: new Date()
      }]
    });
    
    await cloudWatchClient.send(command);
  }
  
  /** Record duration, invocation count, and success/error for one run. */
  async recordAnalysisMetrics(functionName: string, duration: number, success: boolean) {
    await Promise.all([
      this.recordMetric(`${functionName}.Duration`, duration, 'Milliseconds'),
      this.recordMetric(`${functionName}.Invocations`, 1),
      this.recordMetric(`${functionName}.${success ? 'Success' : 'Errors'}`, 1)
    ]);
  }
}
 
// Usage in Lambda functions
const monitoring = new MonitoringService();
 
export const handler = async (event: any) => {
  const startTime = Date.now();
  let success = false;
  
  try {
    // Function logic here
    const result = await processAnalysis(event);
    success = true;
    return result;
  } catch (error) {
    console.error('Function error:', error);
    throw error; // rethrow so Lambda records the invocation as failed
  } finally {
    // NOTE(review): awaiting the metric write in `finally` adds CloudWatch
    // round-trip latency to every response — consider fire-and-forget if
    // tail latency matters.
    const duration = Date.now() - startTime;
    await monitoring.recordAnalysisMetrics('analyze-audio', duration, success);
  }
};

Performance Optimization

Connection Pooling

// Optimized database connection pooling — one shared pg Pool per Lambda
// container, created lazily and reused across warm invocations.
class ConnectionPool {
  private static instance: Pool;
  
  /** Lazily create (then reuse) the process-wide connection pool. */
  static getInstance(): Pool {
    if (!this.instance) {
      this.instance = new Pool({
        host: process.env.RDS_HOST,
        port: parseInt(process.env.RDS_PORT || '5432'),
        database: process.env.RDS_DATABASE,
        user: process.env.RDS_USERNAME,
        password: process.env.RDS_PASSWORD,
        // NOTE(review): disables TLS certificate verification; acceptable
        // only inside a trusted VPC — confirm before production use.
        ssl: { rejectUnauthorized: false },
        max: 20, // Maximum connections
        min: 2,  // Minimum connections
        idleTimeoutMillis: 30000,
        connectionTimeoutMillis: 10000,
        // `acquireTimeoutMillis` removed: it is not a node-postgres
        // PoolConfig option (it belongs to knex/generic-pool) and fails
        // excess-property checking; connectionTimeoutMillis covers the need.
      });
    }
    
    return this.instance;
  }
}

Caching Strategy

// Simple per-container TTL cache shared across warm Lambda invocations.
class CacheService {
  private static cache = new Map<string, { data: any; expiry: number }>();
  
  /** Store `data` under `key` for `ttlSeconds` (default five minutes). */
  static set(key: string, data: any, ttlSeconds: number = 300) {
    this.cache.set(key, { data, expiry: Date.now() + ttlSeconds * 1000 });
  }
  
  /** Return the cached value, or null when the key is absent or expired. */
  static get(key: string): any | null {
    const entry = this.cache.get(key);
    if (!entry) {
      return null;
    }
    const expired = Date.now() > entry.expiry;
    if (expired) {
      // Evict lazily on read so stale entries don't linger in the Map.
      this.cache.delete(key);
      return null;
    }
    return entry.data;
  }
}

Deployment Configuration

Serverless Framework Configuration

# serverless.yml
# Deploys the four analysis Lambdas behind API Gateway (stage defaults to dev).
service: mindmeasure-lambda-functions
 
provider:
  name: aws
  runtime: nodejs18.x
  region: eu-west-2
  stage: ${opt:stage, 'dev'}
  
  environment:
    # All values are injected from the deploy environment; none are committed.
    RDS_HOST: ${env:RDS_HOST}
    RDS_DATABASE: ${env:RDS_DATABASE}
    RDS_USERNAME: ${env:RDS_USERNAME}
    RDS_PASSWORD: ${env:RDS_PASSWORD}
    COGNITO_USER_POOL_ID: ${env:COGNITO_USER_POOL_ID}
    COGNITO_CLIENT_ID: ${env:COGNITO_CLIENT_ID}
 
functions:
  analyzeAudio:
    handler: src/analyze-audio.handler
    timeout: 30
    memorySize: 1024
    events:
      - http:
          path: /analyze-audio
          method: post
          cors: true
          authorizer:
            type: COGNITO_USER_POOLS
            authorizerId: !Ref ApiGatewayAuthorizer
 
  # NOTE(review): the three functions below declare no `authorizer`, yet the
  # endpoint documentation above states JWT auth is required on every route —
  # confirm and attach ApiGatewayAuthorizer to each of them.
  analyzeVisual:
    handler: src/analyze-visual.handler
    timeout: 60
    memorySize: 2048
    events:
      - http:
          path: /analyze-visual
          method: post
          cors: true
 
  analyzeText:
    handler: src/analyze-text.handler
    timeout: 15
    memorySize: 512
    events:
      - http:
          path: /analyze-text
          method: post
          cors: true
 
  calculateMindMeasure:
    handler: src/calculate-mind-measure.handler
    timeout: 45
    memorySize: 1024
    events:
      - http:
          path: /calculate-mind-measure
          method: post
          cors: true
 
resources:
  Resources:
    # Shared Cognito authorizer referenced by the HTTP events above.
    ApiGatewayAuthorizer:
      Type: AWS::ApiGateway::Authorizer
      Properties:
        Name: CognitoUserPoolAuthorizer
        Type: COGNITO_USER_POOLS
        ProviderARNs:
          - !Sub arn:aws:cognito-idp:${AWS::Region}:${AWS::AccountId}:userpool/${env:COGNITO_USER_POOL_ID}
        RestApiId: !Ref ApiGatewayRestApi

Environment Configuration

Environment Variables

# Production Environment Variables (example values — real credentials and resource identifiers must come from a secrets manager and never be committed to documentation or source control)
RDS_HOST=mindmeasure-aurora.cluster-cz8c8wq4k3ak.eu-west-2.rds.amazonaws.com
RDS_PORT=5432
RDS_DATABASE=mindmeasure
RDS_USERNAME=mindmeasure_admin
RDS_PASSWORD=<secure-password>
 
COGNITO_USER_POOL_ID=eu-west-2_ClAG4fQXR
COGNITO_CLIENT_ID=7vu03ppv6alkpphs1ksopll8us
 
AWS_REGION=eu-west-2
AWS_ACCESS_KEY_ID=<access-key>
AWS_SECRET_ACCESS_KEY=<secret-key>
 
# Lambda-specific
LAMBDA_BASE_URL=https://4xg1jsjh7k.execute-api.eu-west-2.amazonaws.com/dev

Testing & Quality Assurance

Unit Testing

// Lambda function unit tests
import { handler } from '../src/analyze-audio';
 
describe('analyze-audio Lambda function', () => {
  // Happy path: a well-formed payload plus auth header yields 200 + analysis.
  test('should process valid audio data', async () => {
    const event = {
      body: JSON.stringify({
        sessionId: 'test-session-123',
        audioData: {
          conversation_duration: 120,
          speech_rate: 1.2,
          voice_quality: 'clear',
          emotional_tone: 'neutral',
          mood_score_1_10: 7,
          transcript_length: 150
        }
      }),
      headers: {
        Authorization: 'Bearer valid-jwt-token'
      }
    };
 
    const result = await handler(event as any);
    
    expect(result.statusCode).toBe(200);
    const body = JSON.parse(result.body);
    expect(body.sessionId).toBe('test-session-123');
    expect(body.analysis).toBeDefined();
  });
 
  // Validation path: a missing sessionId must short-circuit with a 400.
  test('should return 400 for missing sessionId', async () => {
    const event = {
      body: JSON.stringify({ audioData: {} }),
      headers: { Authorization: 'Bearer valid-jwt-token' }
    };
 
    const result = await handler(event as any);
    
    expect(result.statusCode).toBe(400);
    const body = JSON.parse(result.body);
    expect(body.error).toBe('sessionId is required');
  });
});

Integration Testing

// End-to-end integration tests
// Exercises the full pipeline: session creation → three modality analyses in
// parallel → fusion scoring — then checks the fused score is in range.
describe('Assessment Pipeline Integration', () => {
  test('should complete full analysis pipeline', async () => {
    // 1. Create assessment session
    const session = await createTestSession();
    
    // 2. Run parallel analysis
    const [audioResult, visualResult, textResult] = await Promise.all([
      callLambdaFunction('analyze-audio', { sessionId: session.id, audioData: mockAudioData }),
      callLambdaFunction('analyze-visual', { sessionId: session.id, imageData: mockImageData }),
      callLambdaFunction('analyze-text', { sessionId: session.id, conversationText: mockTextData })
    ]);
    
    // 3. Calculate fusion score
    const fusionResult = await callLambdaFunction('calculate-mind-measure', { sessionId: session.id });
    
    // 4. Verify results
    expect(audioResult.statusCode).toBe(200);
    expect(visualResult.statusCode).toBe(200);
    expect(textResult.statusCode).toBe(200);
    expect(fusionResult.statusCode).toBe(200);
    
    // The fused wellness score is documented as 0-100.
    const fusionData = JSON.parse(fusionResult.body);
    expect(fusionData.fusion_score.final_score).toBeGreaterThan(0);
    expect(fusionData.fusion_score.final_score).toBeLessThanOrEqual(100);
  });
});

This backend architecture provides a robust, scalable, and maintainable foundation for the Mind Measure platform, leveraging AWS serverless technologies for optimal performance and cost-effectiveness.

Last Updated: October 28, 2025
Version: 2.0 (AWS Migration)
Next Review: November 28, 2025