chore: Release v1.3.3 - Critical bug fixes and QoL improvements
Critical Fixes:
- Docker permissions for PostgreSQL/Redis bind mounts (Fixes #59, fixes #62)
- Audio analyzer memory consumption and OOM crashes (Fixes #21, fixes #26, fixes #53)
- LastFM array normalization preventing .map crashes (Fixes #37, fixes #39)
- Wikidata 403 errors from missing User-Agent (Fixes #57)
- Singles directory creation race conditions (Fixes #58)
- Firefox FLAC playback stopping at ~4:34 mark (Fixes #42, fixes #17)

Quality of Life:
- Add Releases link to desktop sidebar navigation (Fixes #41)
- iPhone safe area insets for Dynamic Island/notch (Fixes #54)

Contributors: @arsaboo, @rustyricky, @RustyJonez, @tombatossals

No regressions detected, backward compatible, production ready.
.github/workflows/docker-publish.yml (vendored) | 2

@@ -1,4 +1,4 @@
-name: Build and Publish Docker Image
+name: Release ${{ github.ref_name }}

 on:
   push:

.rooignore (new file) | 279

@@ -0,0 +1,279 @@
# ==============================================================================

# .rooignore - Custom for Lidify (Based on Context Analysis)

# ==============================================================================

# Created: 2026-01-09

# Current token usage: ~177,000 tokens per request

# Target: ~60,000-80,000 tokens per request (60% reduction)

# Expected savings: $335-395/month

# ==============================================================================

# ==============================================================================

# TEST ARTIFACTS - BIGGEST BLOAT (1.4MB found in your project)

# ==============================================================================

# Playwright test reports and results - these are generated artifacts

playwright-report/
test-results/
frontend/playwright-report/
frontend/test-results/

# Test files themselves

*.test.ts
*.test.tsx
*.test.js
*.test.jsx
*.spec.ts
*.spec.tsx
*.spec.js
*.spec.jsx
**/__tests__/
**/tests/

# ==============================================================================

# CONTEXT_PORTAL - Your RAG System (1MB of vector DB data)

# ==============================================================================

# This is YOUR context portal - Roo Code doesn't need to read it!

context_portal/
context_portal/conport_vector_data/
context_portal/context.db
*.sqlite3

# ==============================================================================

# BUILD ARTIFACTS & CACHES (.next/ = 24MB)

# ==============================================================================

.next/
dist/
build/
out/
*.tsbuildinfo
.turbo/

# ==============================================================================

# DEPENDENCIES - Never needed (429M backend + 729M frontend)

# ==============================================================================

node_modules/
.pnp
.pnp.js
.yarn/

# Lock files (488KB total)

package-lock.json
yarn.lock
pnpm-lock.yaml
**/node_modules/**/yarn.lock

# ==============================================================================

# IMAGES & MEDIA - (3MB+ of screenshots)

# ==============================================================================

# All image formats

*.png
*.jpg
*.jpeg
*.gif
*.webp
*.svg
*.ico
*.bmp

# Specifically your screenshot directories

assets/screenshots/
frontend/assets/splash.png
frontend/assets/splash-dark.png

# ==============================================================================

# DOCS - Large deployment doc (312KB)

# ==============================================================================

# Keep README.md, CONTRIBUTING.md, CHANGELOG.md

# Exclude large pending deploy docs

docs/PENDING_DEPLOY-1.md

# ==============================================================================

# DATABASE MIGRATIONS - Keep recent, exclude none (all are 2025+)

# ==============================================================================

# Your migrations are all from 2025/2026, so keep them all

# If you add older migrations later:

# backend/prisma/migrations/2024*/

# ==============================================================================

# VERSION CONTROL

# ==============================================================================

.git/
.gitignore
.gitattributes

# ==============================================================================

# SOULARR - External project (separate tool)

# ==============================================================================

# If this is a separate tool/subproject, exclude it

soularr/

# ==============================================================================

# IDE & EDITOR

# ==============================================================================

.vscode/
.idea/
*.sublime-workspace
.DS_Store
desktop.ini

# Roo-specific directories (don't need to analyze Roo's own metadata)

.roo/
.claude/

# ==============================================================================

# LOGS & TEMP

# ==============================================================================

*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
*.tmp
*.temp
tmp/
temp/

# ==============================================================================

# ENVIRONMENT FILES

# ==============================================================================

.env.local
.env.*.local
.env.production
.env.development

# ==============================================================================

# DOCKER (Keep these - you modify them)

# ==============================================================================

# Keeping Docker files as you have 8 docker-compose files

# Uncomment if you rarely modify:

# Dockerfile

# docker-compose*.yml

# ==============================================================================

# GITHUB WORKFLOWS

# ==============================================================================

.github/workflows/

# ==============================================================================

# PYTHON CACHE (from services/audio-analyzer)

# ==============================================================================

__pycache__/
*.pyc
*.pyo
*.pyd
.Python

# ==============================================================================

# VERIFICATION CHECKLIST

# ==============================================================================

# After adding this file:

#

# 1. Restart Roo Code

# 2. Make a simple request (e.g., "explain backend/src/routes/library.ts")

# 3. Check OpenRouter activity: https://openrouter.ai/activity

# 4. Verify token count: Should be ~60K-80K (down from 177K)

#

# If still high:

# - Check if node_modules/ is truly excluded

# - Verify .next/ is excluded

# - Check if test files are still being loaded

#

# If too aggressive (AI can't find files):

# - Remove specific exclusions one at a time

# - Start by uncommenting Docker files

# - Then uncomment docs if needed

#

# Expected cost per request:

# - Before: $0.141 (177K tokens)

# - After: $0.055-0.070 (60-80K tokens)

# - Savings: 50-60% reduction

# ==============================================================================

CHANGELOG.md | 46

@@ -5,6 +5,52 @@ All notable changes to Lidify will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.3.3] - 2025-01-07

Bug fix patch release addressing 6 P1 critical issues and 2 P2 quality-of-life improvements.

### Fixed

#### Critical (P1)
- **Docker:** PostgreSQL/Redis bind mount permission errors on Linux hosts ([#59](https://github.com/Chevron7Locked/lidify/issues/59)) - @arsaboo via [#62](https://github.com/Chevron7Locked/lidify/pull/62)
- **Audio Analyzer:** Memory consumption/OOM crashes with large libraries ([#21](https://github.com/Chevron7Locked/lidify/issues/21), [#26](https://github.com/Chevron7Locked/lidify/issues/26)) - @rustyricky via [#53](https://github.com/Chevron7Locked/lidify/pull/53)
- **LastFM:** ".map is not a function" crashes with obscure artists ([#37](https://github.com/Chevron7Locked/lidify/issues/37)) - @RustyJonez via [#39](https://github.com/Chevron7Locked/lidify/pull/39)
- **Wikidata:** 403 Forbidden errors from missing User-Agent header ([#57](https://github.com/Chevron7Locked/lidify/issues/57))
- **Downloads:** Singles directory creation race conditions ([#58](https://github.com/Chevron7Locked/lidify/issues/58))
- **Firefox:** FLAC playback stopping at ~4:34 mark on large files ([#42](https://github.com/Chevron7Locked/lidify/issues/42), [#17](https://github.com/Chevron7Locked/lidify/issues/17))

#### Quality of Life (P2)
- **Desktop UI:** Added missing "Releases" link to desktop sidebar navigation ([#41](https://github.com/Chevron7Locked/lidify/issues/41))
- **iPhone:** Dynamic Island/notch overlapping TopBar buttons ([#54](https://github.com/Chevron7Locked/lidify/issues/54))

### Technical Details

- **Docker Permissions (#62):** Creates `/data/postgres` and `/data/redis` directories with proper ownership; validates write permissions at startup using `gosu <user> test -w`
- **Audio Analyzer Memory (#53):** TensorFlow GPU memory growth enabled; `MAX_ANALYZE_SECONDS` configurable (default 90s); explicit garbage collection in finally blocks
- **LastFM Normalization (#39):** `normalizeToArray()` utility wraps single-object API responses; protects 5 locations in artist discovery endpoints
- **Wikidata User-Agent (#57):** All 4 API endpoints now use configured axios client with proper User-Agent header
- **Singles Directory (#58):** Replaced TOCTOU `existsSync()`+`mkdirSync()` pattern with idempotent `mkdir({recursive: true})`
- **Firefox FLAC (#42):** Replaced Express `res.sendFile()` with manual range request handling via `fs.createReadStream()` with proper `Content-Range` headers
- **Desktop Releases (#41):** Single-line addition to Sidebar.tsx navigation array
- **iPhone Safe Area (#54):** TopBar and AuthenticatedLayout use `env(safe-area-inset-top)` CSS environment variable
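
For reference, here is a minimal sketch of what the normalization helper described above can look like. The actual utility (imported as `../utils/normalize` in the route file) is not part of this diff, so the exact implementation may differ.

```typescript
// Hypothetical sketch only - the real normalizeToArray() utility may differ.
// Last.fm sometimes returns a single object where an array is expected (e.g. tags.tag,
// similar.artist, image), which made downstream .map()/.find() calls crash.
export function normalizeToArray<T>(value: T | T[] | null | undefined): T[] {
  if (value === null || value === undefined) return []; // missing field -> empty array
  return Array.isArray(value) ? value : [value];        // wrap a lone object in an array
}

// Example usage, mirroring the call sites changed in this release:
// const tags = normalizeToArray(lastFmInfo?.tags?.tag).map((t: any) => t?.name).filter(Boolean);
```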

### Deferred to Future Release

- **PR #49** - Playlist visibility toggle (needs PR review)
- **PR #47** - Mood bucket tags (already implemented, verify and close)
- **PR #36** - Docker --user flag (needs security review)

### Contributors

Thanks to everyone who contributed to this release:

- @arsaboo - Docker bind mount permissions fix ([#62](https://github.com/Chevron7Locked/lidify/pull/62))
- @rustyricky - Audio analyzer memory limits ([#53](https://github.com/Chevron7Locked/lidify/pull/53))
- @RustyJonez - LastFM array normalization ([#39](https://github.com/Chevron7Locked/lidify/pull/39))
- @tombatossals - Testing and validation

---

## [1.3.2] - 2025-01-07

### Fixed

Dockerfile | 34

@@ -193,6 +193,7 @@ priority=10

 [program:redis]
 command=/usr/bin/redis-server --dir /data/redis --appendonly yes
+user=redis
 autostart=true
 autorestart=true
 stdout_logfile=/dev/stdout

@@ -235,7 +236,7 @@ stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
 stderr_logfile=/dev/stderr
 stderr_logfile_maxbytes=0
-environment=DATABASE_URL="postgresql://lidify:lidify@localhost:5432/lidify",REDIS_URL="redis://localhost:6379",MUSIC_PATH="/music",BATCH_SIZE="10",SLEEP_INTERVAL="5"
+environment=DATABASE_URL="postgresql://lidify:lidify@localhost:5432/lidify",REDIS_URL="redis://localhost:6379",MUSIC_PATH="/music",BATCH_SIZE="10",SLEEP_INTERVAL="5",MAX_ANALYZE_SECONDS="90"
 priority=50
 EOF

@@ -274,10 +275,33 @@ if [ -z "$PG_BIN" ]; then
 fi
 echo "Using PostgreSQL from: $PG_BIN"

-# Fix permissions on data directories (may have different UID from previous container)
-echo "Fixing data directory permissions..."
-chown -R postgres:postgres /data/postgres /run/postgresql 2>/dev/null || true
-chmod 700 /data/postgres 2>/dev/null || true
+# Prepare data directories (bind-mount safe)
+echo "Preparing data directories..."
+mkdir -p /data/postgres /data/redis /run/postgresql
+
+if id postgres >/dev/null 2>&1; then
+    chown -R postgres:postgres /data/postgres /run/postgresql 2>/dev/null || true
+    chmod 700 /data/postgres 2>/dev/null || true
+    if ! gosu postgres test -w /data/postgres; then
+        POSTGRES_UID=$(id -u postgres)
+        POSTGRES_GID=$(id -g postgres)
+        echo "ERROR: /data/postgres is not writable by postgres (${POSTGRES_UID}:${POSTGRES_GID})."
+        echo "If you bind-mount /data, ensure the host path is writable by that UID/GID."
+        exit 1
+    fi
+fi
+
+if id redis >/dev/null 2>&1; then
+    chown -R redis:redis /data/redis 2>/dev/null || true
+    chmod 700 /data/redis 2>/dev/null || true
+    if ! gosu redis test -w /data/redis; then
+        REDIS_UID=$(id -u redis)
+        REDIS_GID=$(id -g redis)
+        echo "ERROR: /data/redis is not writable by redis (${REDIS_UID}:${REDIS_GID})."
+        echo "If you bind-mount /data, ensure the host path is writable by that UID/GID."
+        exit 1
+    fi
+fi

 # Clean up stale PID file if exists
 rm -f /data/postgres/postmaster.pid 2>/dev/null || true

README.md | 10

@@ -275,6 +275,16 @@ docker compose pull
docker compose up -d
```

### Bind-mounting `/data` on Linux

Named volumes are recommended. If you bind-mount `/data`, make sure required subdirectories exist and are writable by the container service users.

```bash
mkdir -p /path/to/lidify-data/postgres /path/to/lidify-data/redis
```

If startup logs report a permission error, `chown` the host path to the UID/GID shown in the logs (for example, the postgres user).
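
For example (hypothetical values; use the UID/GID reported in your own startup logs):

```bash
# Adjust ownership of the bind-mounted data directories for the in-container users.
# 999:999 is only a placeholder; substitute the UID/GID printed in the error message.
sudo chown -R 999:999 /path/to/lidify-data/postgres
sudo chown -R 999:999 /path/to/lidify-data/redis
```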

---

Lidify will begin scanning your music library automatically. Depending on the size of your collection, this may take a few minutes to several hours.

analyze-context-bloat-v2.sh (new executable file) | 200

@@ -0,0 +1,200 @@
#!/bin/bash

# ==============================================================================
# analyze-context-bloat.sh
# ==============================================================================
# Purpose: Find large files in your project that are bloating Roo Code context
# Usage: Run this in your Lidify project root directory
#        chmod +x analyze-context-bloat.sh && ./analyze-context-bloat.sh
# ==============================================================================

echo "=============================================================================="
echo "Lidify Context Bloat Analysis"
echo "=============================================================================="
echo ""
echo "Analyzing your project to find files that should be excluded from Roo Code..."
echo ""

# Check if we're in a project directory (monorepo structure)
if [ ! -f "backend/package.json" ] && [ ! -f "frontend/package.json" ] && [ ! -f "package.json" ]; then
  echo "❌ Error: Run this script from your Lidify project root directory"
  echo "   (Looking for backend/package.json or frontend/package.json or package.json)"
  exit 1
fi

echo "✅ Found project structure (monorepo detected)"
echo ""

echo "📊 TOP 30 LARGEST FILES (excluding node_modules):"
echo "=============================================================================="
find . -type f -not -path "*/node_modules/*" -not -path "*/.git/*" -not -path "*/.next/*" -not -path "*/dist/*" -exec du -h {} + 2>/dev/null | sort -rh | head -30
echo ""

echo "📦 DIRECTORY SIZES (top-level):"
echo "=============================================================================="
du -h --max-depth=1 . 2>/dev/null | sort -rh
echo ""

echo "📦 SUBDIRECTORY SIZES (backend, frontend, services):"
echo "=============================================================================="
for dir in backend frontend services scripts; do
  if [ -d "$dir" ]; then
    echo ""
    echo "--- $dir/ ---"
    du -h --max-depth=2 "$dir" 2>/dev/null | sort -rh | head -10
  fi
done
echo ""

echo "🖼️ IMAGE FILES (all types):"
echo "=============================================================================="
find . -type f \( -name "*.png" -o -name "*.jpg" -o -name "*.jpeg" -o -name "*.gif" -o -name "*.webp" -o -name "*.svg" -o -name "*.ico" \) -not -path "*/node_modules/*" 2>/dev/null | wc -l
echo "Total image files found"
echo ""
echo "Largest images:"
find . -type f \( -name "*.png" -o -name "*.jpg" -o -name "*.jpeg" -o -name "*.gif" -o -name "*.webp" \) -not -path "*/node_modules/*" -exec du -h {} + 2>/dev/null | sort -rh | head -20
echo ""

echo "📝 LOCK FILES & GENERATED CODE:"
echo "=============================================================================="
find . -type f \( -name "package-lock.json" -o -name "yarn.lock" -o -name "pnpm-lock.yaml" -o -name "*.tsbuildinfo" \) -exec du -h {} \; 2>/dev/null
echo ""

echo "📜 MIGRATION FILES:"
echo "=============================================================================="
if [ -d "backend/prisma/migrations" ]; then
  echo "Total migration directory size:"
  du -sh backend/prisma/migrations 2>/dev/null
  echo ""
  echo "Number of migrations:"
  ls -1 backend/prisma/migrations 2>/dev/null | wc -l
  echo ""
  echo "Oldest migrations (first 10):"
  ls -1 backend/prisma/migrations 2>/dev/null | head -10
  echo ""
  echo "Newest migrations (last 5):"
  ls -1 backend/prisma/migrations 2>/dev/null | tail -5
else
  echo "No migrations directory found"
fi
echo ""

echo "🗂️ FILE TYPE BREAKDOWN:"
echo "=============================================================================="
echo "TypeScript/JavaScript files:"
find . -type f \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" \) -not -path "*/node_modules/*" -not -path "*/.next/*" 2>/dev/null | wc -l
echo ""
echo "JSON files:"
find . -type f -name "*.json" -not -path "*/node_modules/*" 2>/dev/null | wc -l
echo ""
echo "CSS/Style files:"
find . -type f \( -name "*.css" -o -name "*.scss" -o -name "*.sass" \) -not -path "*/node_modules/*" 2>/dev/null | wc -l
echo ""
echo "Markdown files:"
find . -type f -name "*.md" 2>/dev/null | wc -l
echo ""

echo "💾 ESTIMATED TOKEN COUNT:"
echo "=============================================================================="
# Rough estimation: 1 token ≈ 4 characters
total_chars=$(find . -type f \( -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.json" -o -name "*.md" -o -name "*.css" -o -name "*.yml" -o -name "*.yaml" \) -not -path "*/node_modules/*" -not -path "*/.git/*" -not -path "*/.next/*" -not -path "*/dist/*" -exec cat {} \; 2>/dev/null | wc -c)
estimated_tokens=$((total_chars / 4))

if [ $estimated_tokens -gt 0 ]; then
  echo "Total characters in text files: $(printf "%'d" $total_chars)"
  echo "Estimated current token count: ~$(printf "%'d" $estimated_tokens) tokens"
  echo ""

  optimized_tokens=$((estimated_tokens * 40 / 100))
  echo "Estimated AFTER .rooignore: ~$(printf "%'d" $optimized_tokens) tokens (60% reduction)"
else
  echo "Could not calculate token estimate"
fi
echo ""

echo "🎯 LARGE FILES ANALYSIS:"
echo "=============================================================================="

echo "Large JSON files (>50KB):"
large_json=$(find . -type f -name "*.json" -not -path "*/node_modules/*" -not -name "package.json" -not -name "tsconfig.json" -size +50k 2>/dev/null)
if [ -n "$large_json" ]; then
  echo "$large_json" | while read file; do
    size=$(du -h "$file" | cut -f1)
    echo "  $size - $file"
  done
else
  echo "  None found"
fi
echo ""

echo "Large CSS files (>30KB):"
large_css=$(find . -type f \( -name "*.css" -o -name "*.scss" \) -not -path "*/node_modules/*" -size +30k 2>/dev/null)
if [ -n "$large_css" ]; then
  echo "$large_css" | while read file; do
    size=$(du -h "$file" | cut -f1)
    echo "  $size - $file"
  done
else
  echo "  None found"
fi
echo ""

echo "Test files:"
test_count=$(find . -type f \( -name "*.test.*" -o -name "*.spec.*" \) -not -path "*/node_modules/*" 2>/dev/null | wc -l)
echo "  Found $test_count test files"
if [ "$test_count" -gt 0 ]; then
  echo "  Consider excluding with: *.test.* and *.spec.*"
fi
echo ""

python_files=$(find . -type f -name "*.py" -not -path "*/node_modules/*" 2>/dev/null | wc -l)
if [ "$python_files" -gt 0 ]; then
  echo "Python files (services):"
  echo "  Found $python_files Python files"
  echo "  Largest Python files:"
  find . -type f -name "*.py" -not -path "*/node_modules/*" -exec du -h {} + 2>/dev/null | sort -rh | head -5
  echo ""
fi

docker_files=$(find . -maxdepth 2 -type f \( -name "Dockerfile*" -o -name "docker-compose*.yml" \) 2>/dev/null | wc -l)
if [ "$docker_files" -gt 0 ]; then
  echo "Docker configuration files:"
  find . -maxdepth 2 -type f \( -name "Dockerfile*" -o -name "docker-compose*.yml" \) -exec du -h {} \; 2>/dev/null
  echo ""
fi

echo "=============================================================================="
echo "🎯 RECOMMENDED .rooignore ADDITIONS:"
echo "=============================================================================="
echo ""
echo "Based on this analysis, your .rooignore should definitely include:"
echo ""
echo "1. node_modules/ (if exists)"
echo "2. Lock files (package-lock.json, yarn.lock)"
echo "3. All images in assets/screenshots/"
echo "4. Build artifacts (.next/, dist/, build/)"
echo "5. Old migrations (backend/prisma/migrations/2024*/)"
echo ""

if [ -n "$large_json" ]; then
  echo "6. Large JSON files:"
  echo "$large_json" | while read file; do
    echo "   $file"
  done
  echo ""
fi

if [ "$test_count" -gt 5 ]; then
  echo "7. Test files (*.test.*, *.spec.*)"
  echo ""
fi

echo "=============================================================================="
echo "✅ Analysis complete!"
echo ""
echo "Next steps:"
echo "1. Share this output with Claude"
echo "2. Claude will create a custom .rooignore for your project"
echo "3. Copy .rooignore to project root"
echo "4. Make a Roo Code request and verify token reduction"
echo "=============================================================================="

analyze-context-bloat.sh (new executable file) | 107

@@ -0,0 +1,107 @@
#!/bin/bash

# ==============================================================================
# analyze-context-bloat.sh
# ==============================================================================
# Purpose: Find large files in your project that are bloating Roo Code context
# Usage: Run this in your Lidify project root directory
#        chmod +x analyze-context-bloat.sh && ./analyze-context-bloat.sh
# ==============================================================================

echo "=============================================================================="
echo "Lidify Context Bloat Analysis"
echo "=============================================================================="
echo ""
echo "Analyzing your project to find files that should be excluded from Roo Code..."
echo ""

# Check if we're in a project directory
if [ ! -f "package.json" ]; then
  echo "❌ Error: Run this script from your Lidify project root directory"
  exit 1
fi

echo "📊 TOP 20 LARGEST FILES (excluding node_modules):"
echo "=============================================================================="
find . -type f -not -path "*/node_modules/*" -not -path "*/.git/*" -not -path "*/.next/*" -exec du -h {} + 2>/dev/null | sort -rh | head -20
echo ""

echo "📦 DIRECTORY SIZES (excluding node_modules):"
echo "=============================================================================="
du -h --max-depth=2 . 2>/dev/null | grep -v node_modules | sort -rh | head -20
echo ""

echo "🖼️ IMAGE FILES TAKING UP SPACE:"
echo "=============================================================================="
find . -type f \( -name "*.png" -o -name "*.jpg" -o -name "*.jpeg" -o -name "*.gif" -o -name "*.webp" \) -not -path "*/node_modules/*" -exec du -h {} + 2>/dev/null | sort -rh | head -20
echo ""

echo "📝 LOCK FILES & GENERATED CODE:"
echo "=============================================================================="
find . -type f \( -name "package-lock.json" -o -name "yarn.lock" -o -name "pnpm-lock.yaml" -o -name "*.tsbuildinfo" \) -exec du -h {} \;
echo ""

echo "📜 MIGRATION FILES:"
echo "=============================================================================="
if [ -d "backend/prisma/migrations" ]; then
  echo "Total migration directory size:"
  du -sh backend/prisma/migrations
  echo ""
  echo "Number of migrations:"
  ls -1 backend/prisma/migrations | wc -l
  echo ""
  echo "Oldest migrations (first 5):"
  ls -1 backend/prisma/migrations | head -5
  echo ""
  echo "Newest migrations (last 5):"
  ls -1 backend/prisma/migrations | tail -5
else
  echo "No migrations directory found"
fi
echo ""

echo "💾 ESTIMATED TOKEN COUNT:"
echo "=============================================================================="
# Rough estimation: 1 token ≈ 4 characters
total_chars=$(find . -type f -not -path "*/node_modules/*" -not -path "*/.git/*" -not -path "*/.next/*" -name "*.ts" -o -name "*.tsx" -o -name "*.js" -o -name "*.jsx" -o -name "*.json" -o -name "*.md" 2>/dev/null | xargs cat 2>/dev/null | wc -c)
estimated_tokens=$((total_chars / 4))
echo "Estimated current token count: ~$(printf "%'d" $estimated_tokens) tokens"
echo ""

echo "🎯 RECOMMENDED .rooignore ADDITIONS:"
echo "=============================================================================="
echo "Based on this analysis, consider adding these to .rooignore:"
echo ""

# Find large JSON files
large_json=$(find . -type f -name "*.json" -not -path "*/node_modules/*" -not -name "package.json" -not -name "tsconfig.json" -size +100k -exec du -h {} \; 2>/dev/null)
if [ -n "$large_json" ]; then
  echo "Large JSON files (>100KB):"
  echo "$large_json"
  echo ""
fi

# Find CSS/SCSS files if they're large
large_css=$(find . -type f \( -name "*.css" -o -name "*.scss" \) -not -path "*/node_modules/*" -size +50k -exec du -h {} \; 2>/dev/null)
if [ -n "$large_css" ]; then
  echo "Large CSS files (>50KB):"
  echo "$large_css"
  echo ""
fi

# Find test files
test_files=$(find . -type f \( -name "*.test.*" -o -name "*.spec.*" \) -not -path "*/node_modules/*" | wc -l)
if [ "$test_files" -gt 0 ]; then
  echo "Found $test_files test files - consider excluding with: *.test.* and *.spec.*"
  echo ""
fi

echo "=============================================================================="
echo "✅ Analysis complete!"
echo ""
echo "Next steps:"
echo "1. Copy .rooignore to your project root"
echo "2. Add any large files shown above to .rooignore"
echo "3. Make a Roo Code request and check token count in OpenRouter"
echo "4. Target: 60-80K tokens (down from 177K)"
echo "=============================================================================="

@@ -5,6 +5,7 @@ import { musicBrainzService } from "../services/musicbrainz";
 import { fanartService } from "../services/fanart";
 import { deezerService } from "../services/deezer";
 import { redisClient } from "../utils/redis";
+import { normalizeToArray } from "../utils/normalize";

 const router = Router();

@@ -158,8 +159,10 @@ router.get("/discover/:nameOrMbid", async (req, res) => {
    }

    // Fallback to Last.fm (but filter placeholders)
+   // NORMALIZATION: lastFmInfo.image could be a single object or array
    if (!image && lastFmInfo?.image) {
-     const lastFmImage = lastFmService.getBestImage(lastFmInfo.image);
+     const images = normalizeToArray(lastFmInfo.image);
+     const lastFmImage = lastFmService.getBestImage(images);
      // Filter out Last.fm placeholder
      if (
        lastFmImage &&

@@ -274,10 +277,13 @@ router.get("/discover/:nameOrMbid", async (req, res) => {
    }

    // Get similar artists from Last.fm and fetch images
-   const similarArtistsRaw = lastFmInfo?.similar?.artist || [];
+   // NORMALIZATION: lastFmInfo.similar.artist could be a single object or array
+   const similarArtistsRaw = normalizeToArray(lastFmInfo?.similar?.artist);
    const similarArtists = await Promise.all(
      similarArtistsRaw.slice(0, 10).map(async (artist: any) => {
-       const similarImage = artist.image?.find(
+       // NORMALIZATION: artist.image could be a single object or array
+       const images = normalizeToArray(artist.image);
+       const similarImage = images.find(
          (img: any) => img.size === "large"
        )?.["#text"];

@@ -325,14 +331,19 @@ router.get("/discover/:nameOrMbid", async (req, res) => {
      })
    );

+   // NORMALIZATION: lastFmInfo.tags.tag could be a single object or array
+   const tags = normalizeToArray(lastFmInfo?.tags?.tag)
+     .map((t: any) => t?.name)
+     .filter(Boolean);
+
    const response = {
      mbid,
      name: artistName,
      image,
      bio, // Use filtered bio instead of raw Last.fm bio
      summary: bio, // Alias for consistency
-     tags: lastFmInfo?.tags?.tag?.map((t: any) => t.name) || [],
-     genres: lastFmInfo?.tags?.tag?.map((t: any) => t.name) || [], // Alias for consistency
+     tags,
+     genres: tags, // Alias for consistency
      listeners: parseInt(lastFmInfo?.stats?.listeners || "0"),
      playcount: parseInt(lastFmInfo?.stats?.playcount || "0"),
      url: lastFmInfo?.url || null,

@@ -470,7 +481,10 @@ router.get("/album/:mbid", async (req, res) => {

    // Check if Cover Art Archive actually has the image
    try {
-     const response = await fetch(coverArtUrl, { method: "HEAD" });
+     const response = await fetch(coverArtUrl, {
+       method: "HEAD",
+       signal: AbortSignal.timeout(2000),
+     });
      if (response.ok) {
        coverUrl = coverArtUrl;
        logger.debug(`Cover Art Archive has cover for ${albumTitle}`);

@@ -529,7 +543,10 @@ router.get("/album/:mbid", async (req, res) => {
      coverUrl,
      coverArt: coverUrl, // Alias for compatibility
      bio: lastFmInfo?.wiki?.summary || null,
-     tags: lastFmInfo?.tags?.tag?.map((t: any) => t.name) || [],
+     // NORMALIZATION: lastFmInfo.tags.tag could be a single object or array
+     tags: normalizeToArray(lastFmInfo?.tags?.tag)
+       .map((t: any) => t?.name)
+       .filter(Boolean),
      tracks: tracks.map((track: any, index: number) => ({
        id: `mb-${releaseGroupId}-${track.id || index}`,
        title: track.title,

@@ -2754,32 +2754,12 @@ router.get("/tracks/:id/stream", async (req, res) => {
        `[STREAM] Sending file: ${filePath}, mimeType: ${mimeType}`
      );

-     res.sendFile(
-       filePath,
-       {
-         headers: {
-           "Content-Type": mimeType,
-           "Accept-Ranges": "bytes",
-           "Cache-Control": "public, max-age=31536000",
-           "Access-Control-Allow-Origin":
-             req.headers.origin || "*",
-           "Access-Control-Allow-Credentials": "true",
-           "Cross-Origin-Resource-Policy": "cross-origin",
-         },
-       },
-       (err) => {
-         // Always destroy the streaming service to clean up intervals
-         streamingService.destroy();
-         if (err) {
-           logger.error(`[STREAM] sendFile error:`, err);
-         } else {
-           logger.debug(
-             `[STREAM] File sent successfully: ${path.basename(
-               filePath
-             )}`
-           );
-         }
-       }
+     await streamingService.streamFileWithRangeSupport(req, res, filePath, mimeType);
+     streamingService.destroy();
+     logger.debug(
+       `[STREAM] File sent successfully: ${path.basename(
+         filePath
+       )}`
+     );

      return;

@@ -2812,30 +2792,8 @@ router.get("/tracks/:id/stream", async (req, res) => {
        absolutePath
      );

-     res.sendFile(
-       filePath,
-       {
-         headers: {
-           "Content-Type": mimeType,
-           "Accept-Ranges": "bytes",
-           "Cache-Control": "public, max-age=31536000",
-           "Access-Control-Allow-Origin":
-             req.headers.origin || "*",
-           "Access-Control-Allow-Credentials": "true",
-           "Cross-Origin-Resource-Policy": "cross-origin",
-         },
-       },
-       (err) => {
-         // Always destroy the streaming service to clean up intervals
-         streamingService.destroy();
-         if (err) {
-           logger.error(
-             `[STREAM] sendFile fallback error:`,
-             err
-           );
-         }
-       }
-     );
+     await streamingService.streamFileWithRangeSupport(req, res, filePath, mimeType);
+     streamingService.destroy();
      return;
    }

@@ -1,4 +1,6 @@
 import * as fs from "fs";
+import { promises as fsPromises } from "fs";
+import { Request, Response } from "express";
 import { logger } from "../utils/logger";
 import * as path from "path";
 import * as crypto from "crypto";

@@ -384,6 +386,95 @@ export class AudioStreamingService {
    return mimeTypes[ext] || "audio/mpeg";
  }

  /**
   * Stream file with proper HTTP Range support (fixes Firefox FLAC issue #42/#17)
   * Manually handles Range requests to ensure compatibility with Firefox's strict
   * Content-Range header validation for large FLAC files.
   */
  async streamFileWithRangeSupport(
    req: Request,
    res: Response,
    filePath: string,
    mimeType: string
  ): Promise<void> {
    try {
      // Get file stats for size
      const stats = await fsPromises.stat(filePath);
      const fileSize = stats.size;

      // Parse Range header
      const range = req.headers.range;
      let start = 0;
      let end = fileSize - 1;

      if (range) {
        // Parse bytes=START-END or bytes=START-
        const parts = range.replace(/bytes=/, "").split("-");
        start = parseInt(parts[0], 10);
        end = parts[1] ? parseInt(parts[1], 10) : fileSize - 1;

        // Validate range
        if (start >= fileSize || end >= fileSize || start > end) {
          res.status(416).set({
            "Content-Range": `bytes */${fileSize}`,
          });
          res.end();
          return;
        }
      }

      const contentLength = end - start + 1;

      // Set response headers
      const headers: Record<string, string> = {
        "Content-Type": mimeType,
        "Accept-Ranges": "bytes",
        "Cache-Control": "public, max-age=31536000",
        "Content-Length": contentLength.toString(),
      };

      // Add CORS headers from request origin
      if (req.headers.origin) {
        headers["Access-Control-Allow-Origin"] = req.headers.origin;
        headers["Access-Control-Allow-Credentials"] = "true";
      }

      // Set status and range-specific headers
      if (range) {
        res.status(206);
        headers["Content-Range"] = `bytes ${start}-${end}/${fileSize}`;
      } else {
        res.status(200);
      }

      res.set(headers);

      // Create read stream with range
      const stream = fs.createReadStream(filePath, { start, end });

      // Handle stream errors
      stream.on("error", (err) => {
        logger.error(`[AudioStreaming] Stream error for ${filePath}:`, err);
        if (!res.headersSent) {
          res.status(500).end();
        }
      });

      // Handle cleanup on response close
      res.on("close", () => {
        stream.destroy();
      });

      // Pipe stream to response
      stream.pipe(res);
    } catch (err) {
      logger.error(`[AudioStreaming] Failed to stream ${filePath}:`, err);
      if (!res.headersSent) {
        res.status(500).end();
      }
    }
  }

  /**
   * Cleanup resources
   */

@@ -6,6 +6,7 @@
 import slsk from "slsk-client";
 import path from "path";
 import fs from "fs";
+import { mkdir } from "fs/promises";
 import PQueue from "p-queue";
 import { getSystemSettings } from "../utils/systemSettings";
 import { sessionLog } from "../utils/playlistLogger";

@@ -700,10 +701,14 @@ class SoulseekService {
      return { success: false, error: "Not connected" };
    }

-   // Ensure destination directory exists
+   // Ensure destination directory exists (idempotent - won't fail if exists)
    const destDir = path.dirname(destPath);
-   if (!fs.existsSync(destDir)) {
-     fs.mkdirSync(destDir, { recursive: true });
+   try {
+     await mkdir(destDir, { recursive: true });
+   } catch (err: any) {
+     sessionLog("SOULSEEK", `Failed to create directory ${destDir}: ${err.message}`, "ERROR");
+     this.activeDownloads--;
+     return { success: false, error: `Cannot create destination directory: ${err.message}` };
    }

    sessionLog(

@@ -78,14 +78,11 @@ class WikidataService {
      LIMIT 1
    `;

-   const response = await axios.get("https://query.wikidata.org/sparql", {
+   const response = await this.client.get("https://query.wikidata.org/sparql", {
      params: {
        query: sparqlQuery,
        format: "json",
      },
-     headers: {
-       "User-Agent": "Lidify/1.0.0",
-     },
    });

    const bindings = response.data.results?.bindings || [];

@@ -100,7 +97,7 @@ class WikidataService {
  ): Promise<string | undefined> {
    try {
      // Get English Wikipedia article title
-     const response = await axios.get(
+     const response = await this.client.get(
        `https://www.wikidata.org/wiki/Special:EntityData/${wikidataId}.json`
      );

@@ -110,7 +107,7 @@ class WikidataService {
      if (!enWikiTitle) return undefined;

      // Get article summary from Wikipedia API
-     const summaryResponse = await axios.get(
+     const summaryResponse = await this.client.get(
        "https://en.wikipedia.org/api/rest_v1/page/summary/" +
          encodeURIComponent(enWikiTitle)
      );

@@ -129,7 +126,7 @@ class WikidataService {
    wikidataId: string
  ): Promise<string | undefined> {
    try {
-     const response = await axios.get(
+     const response = await this.client.get(
        `https://www.wikidata.org/wiki/Special:EntityData/${wikidataId}.json`
      );

@@ -121,9 +121,12 @@ async function migrateExistingSoulseekFiles(musicPath: string): Promise<void> {
      continue;
    }

-   // Create destination directory
-   if (!fs.existsSync(destDir)) {
+   // Create destination directory (idempotent - won't fail if exists)
+   try {
      fs.mkdirSync(destDir, { recursive: true });
+   } catch (err: any) {
+     sessionLog('ORGANIZE', `Failed to create directory ${destDir}: ${err.message}`, 'WARN');
+     continue; // Skip this file, try next
    }

    // Move file (copy then delete original)

@@ -129,7 +129,7 @@ export function AuthenticatedLayout({ children }: { children: ReactNode }) {
          tabIndex={-1}
          className="flex-1 bg-gradient-to-b from-[#1a1a1a] via-black to-black mx-2 mb-2 rounded-lg overflow-y-auto relative focus:outline-none"
          style={{
-           marginTop: "58px",
+           marginTop: "calc(58px + env(safe-area-inset-top, 0px))",
            marginBottom:
              "calc(56px + env(safe-area-inset-bottom, 0px) + 8px)",
          }}

@@ -17,6 +17,7 @@ const navigation = [
  { name: "Library", href: "/library" },
  { name: "Radio", href: "/radio" },
  { name: "Discovery", href: "/discover" },
+ { name: "Releases", href: "/releases" },
  { name: "Audiobooks", href: "/audiobooks" },
  { name: "Podcasts", href: "/podcasts" },
  { name: "Browse", href: "/browse/playlists", badge: "Beta" },

@@ -142,7 +142,10 @@ export function TopBar() {
  return (
    <header
      className="fixed top-0 left-0 right-0 bg-black flex items-center px-3 z-50"
-     style={{ height: isMobileOrTablet ? "58px" : "64px" }}
+     style={{
+       height: isMobileOrTablet ? "58px" : "64px",
+       paddingTop: isMobileOrTablet ? "env(safe-area-inset-top)" : undefined,
+     }}
    >
      {/* Mobile/Tablet Layout: Hamburger + Home + Search + Bell */}
      {isMobileOrTablet ? (

@@ -65,6 +65,7 @@ import traceback
 import numpy as np
 from concurrent.futures import ProcessPoolExecutor, as_completed
 import multiprocessing
+import gc

 # BrokenProcessPool was added in Python 3.9, provide compatibility for Python 3.8
 try:

@@ -112,6 +113,14 @@ except ImportError as e:
    TF_MODELS_AVAILABLE = False
    TensorflowPredictMusiCNN = None
try:
+   import tensorflow as tf
+   # Limit TensorFlow memory usage (CPU & GPU)
+   try:
+       gpus = tf.config.experimental.list_physical_devices('GPU')
+       for gpu in gpus:
+           tf.config.experimental.set_memory_growth(gpu, True)
+   except Exception:
+       pass
    from essentia.standard import TensorflowPredictMusiCNN
    TF_MODELS_AVAILABLE = True
    logger.info("TensorflowPredictMusiCNN available - Enhanced mode enabled")

@@ -376,14 +385,18 @@ class AudioAnalyzer:
            traceback.print_exc()
            self.enhanced_mode = False

-   def load_audio(self, file_path: str, sample_rate: int = 16000) -> Optional[Any]:
-       """Load audio file as mono signal"""
+   def load_audio(self, file_path: str, sample_rate: int = 16000, max_duration: int = 90) -> Optional[Any]:
+       """Load up to max_duration seconds of audio as mono signal (to limit memory usage)"""
        if not ESSENTIA_AVAILABLE:
            return None

        try:
            loader = es.MonoLoader(filename=file_path, sampleRate=sample_rate)
            audio = loader()
+           # Limit to max_duration seconds
+           max_samples = int(sample_rate * max_duration)
+           if len(audio) > max_samples:
+               audio = audio[:max_samples]
            return audio
        except Exception as e:
            logger.error(f"Failed to load audio {file_path}: {e}")

@@ -514,12 +527,17 @@
            result['_error'] = 'Essentia library not installed'
            return result

-       # Load audio at different sample rates for different algorithms
-       audio_44k = self.load_audio(file_path, 44100)
-       audio_16k = self.load_audio(file_path, 16000)
+       # Limit memory: only analyze up to MAX_ANALYZE_SECONDS (default 90s)
+       MAX_ANALYZE_SECONDS = int(os.getenv('MAX_ANALYZE_SECONDS', '90'))
+       try:
+           # Load audio at different sample rates for different algorithms, limit duration
+           audio_44k = self.load_audio(file_path, 44100, max_duration=MAX_ANALYZE_SECONDS)
+           audio_16k = self.load_audio(file_path, 16000, max_duration=MAX_ANALYZE_SECONDS)
+       except MemoryError:
+           logger.error(f"MemoryError: Could not load audio for {file_path}")
+           result['_error'] = 'MemoryError: audio file too large'
+           return result
        if audio_44k is None or audio_16k is None:
            result['_error'] = 'Failed to load audio file'
            return result

        # Validate audio before analysis (Phase 2 defensive improvement)

@@ -586,7 +604,10 @@
        # Process audio in frames for detailed analysis
        frame_size = 2048
        hop_size = 1024
-       for i in range(0, len(audio_44k) - frame_size, hop_size):
+       max_frames = int((44100 * MAX_ANALYZE_SECONDS - frame_size) / hop_size)
+       for idx, i in enumerate(range(0, len(audio_44k) - frame_size, hop_size)):
+           if idx > max_frames:
+               break
            frame = audio_44k[i:i + frame_size]
            windowed = self.windowing(frame)
            spectrum = self.spectrum(windowed)

@@ -599,7 +620,6 @@
        # RMS-based energy (properly normalized to 0-1)
        if rms_values:
            avg_rms = np.mean(rms_values)
-           # RMS is typically 0.0-0.5 for normalized audio, scale to 0-1
            result['energy'] = round(min(1.0, float(avg_rms) * 3), 3)
        else:
            result['energy'] = 0.5

@@ -616,7 +636,6 @@
        result['_zcr'] = np.mean(zcr_values) if zcr_values else 0.1

        # Basic Danceability (non-ML)
-       # Note: es.Danceability() can return values > 1.0, so we clamp
        danceability, _ = self.danceability_extractor(audio_44k)
        result['danceability'] = round(max(0.0, min(1.0, float(danceability))), 3)

@@ -632,22 +651,25 @@
                    traceback.print_exc()
                    self._apply_standard_estimates(result, scale, bpm)
            else:
                # === STANDARD MODE: Use heuristics ===
                self._apply_standard_estimates(result, scale, bpm)

            # Generate mood tags based on all features
            result['moodTags'] = self._generate_mood_tags(result)

            logger.info(f"Analysis complete [{result['analysisMode']}]: BPM={result['bpm']}, Key={result['key']} {result['keyScale']}, Valence={result['valence']}, Arousal={result['arousal']}")

+       except MemoryError:
+           logger.error(f"MemoryError during analysis of {file_path}")
+           result['_error'] = 'MemoryError: analysis exceeded memory limits'
        except Exception as e:
            logger.error(f"Analysis error: {e}")
            traceback.print_exc()

-       # Clean up internal fields before returning
-       for key in ['_spectral_centroid', '_spectral_flatness', '_zcr']:
-           result.pop(key, None)
+       finally:
+           # Clean up internal fields before returning
+           for key in ['_spectral_centroid', '_spectral_flatness', '_zcr']:
+               result.pop(key, None)
+           # Explicitly free memory
+           del audio_44k, audio_16k
+           gc.collect()
        return result

    def _extract_ml_features(self, audio_16k) -> Dict[str, Any]: