Add multiple features: auth, uploads, queue management, and filters

- Replace email with username for authentication
  - Update User model, schemas, and auth endpoints
  - Update frontend login and register views
  - Add migration to remove email column
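
A minimal sketch of what the username-based login could look like on the backend, assuming the project's async FastAPI/SQLAlchemy setup; the module paths and helper names (`app.db.get_db`, `app.models.User`, `verify_password`, `create_access_token`) are assumptions for illustration, not the exact code in this commit:

```python
# Hypothetical sketch: username replaces email as the login identifier.
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import BaseModel
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from app.db import get_db                                   # assumed module paths
from app.models import User
from app.security import verify_password, create_access_token

router = APIRouter()


class UserLogin(BaseModel):
    username: str  # previously `email: EmailStr`
    password: str


@router.post("/auth/login")
async def login(data: UserLogin, db: AsyncSession = Depends(get_db)):
    # Look the user up by username instead of email.
    result = await db.execute(select(User).where(User.username == data.username))
    user = result.scalar_one_or_none()
    if not user or not verify_password(data.password, user.hashed_password):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid username or password",
        )
    return {"access_token": create_access_token(user.id), "token_type": "bearer"}
```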

- Add multiple track upload support
  - New backend endpoint for bulk upload
  - Frontend multi-file selection with progress
  - Auto-extract metadata from ID3 tags
  - Visual upload progress for each file
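
A rough script-side sketch of exercising the new bulk endpoint (the real client is the web frontend); the base URL, router prefix, and token handling are assumptions, while the repeated `files` form field matches the `files: list[UploadFile] = File(...)` parameter in the diff below:

```python
# Hypothetical client-side sketch for the bulk upload endpoint.
import asyncio
from pathlib import Path

import httpx


async def upload_many(paths: list[Path], token: str) -> list[dict]:
    # Each file is sent under the repeated "files" form field.
    files = [("files", (p.name, p.read_bytes(), "audio/mpeg")) for p in paths]
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:  # assumed base URL
        resp = await client.post(
            "/tracks/upload-multiple",  # assumed router prefix
            files=files,
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()
        return resp.json()


if __name__ == "__main__":
    tracks = asyncio.run(upload_many(list(Path("music").glob("*.mp3")), token="..."))
    print(f"Uploaded {len(tracks)} tracks")
```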

- Prevent duplicate tracks in room queue
  - Backend validation for duplicates
  - Visual indication of tracks already in queue
  - Error handling with user feedback
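
The backend side of the duplicate check presumably looks something like the sketch below; the `QueueItem` model and helper shape are assumptions, since the queue code is not part of this excerpt:

```python
# Hypothetical sketch of the duplicate guard when adding one track to a room queue.
from fastapi import HTTPException, status
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession


async def ensure_not_in_queue(db: AsyncSession, room_id, track_id) -> None:
    # QueueItem is an assumed model name for a queued track in a room.
    result = await db.execute(
        select(QueueItem.id).where(
            QueueItem.room_id == room_id,
            QueueItem.track_id == track_id,
        )
    )
    if result.scalar_one_or_none() is not None:
        # Surfaced to the frontend, which shows the user-facing error message.
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Track is already in the queue",
        )
```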

- Add bulk track selection for rooms
  - Multi-select mode with checkboxes
  - Bulk add endpoint with duplicate filtering
  - Selection counter and controls
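
For the bulk path, duplicates are presumably filtered out rather than rejected; a sketch under the same assumed `QueueItem` model:

```python
# Hypothetical sketch: add several tracks to a room queue, silently skipping duplicates.
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession


async def bulk_add_to_queue(db: AsyncSession, room_id, track_ids: list) -> list:
    # One query for everything already queued in this room.
    result = await db.execute(
        select(QueueItem.track_id).where(QueueItem.room_id == room_id)
    )
    already_queued = set(result.scalars().all())

    added = []
    for track_id in track_ids:
        if track_id in already_queued:
            continue  # duplicate filtering
        item = QueueItem(room_id=room_id, track_id=track_id)
        db.add(item)
        added.append(item)
        already_queued.add(track_id)  # also dedupe within the request

    await db.flush()
    return added
```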

- Add track filters in room modal
  - Search by title and artist
  - Filter by "My tracks"
  - Filter by "Not in queue"
  - Live filtering with result counter
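
The filtering itself lives in the frontend, but the criteria are simple enough to state as a single predicate; sketched in Python here purely for illustration, with field names mirroring the backend `Track` model:

```python
# Illustration only: the filter criteria the room modal applies client-side.
def track_matches(track, *, search: str, only_mine: bool, only_not_queued: bool,
                  current_user_id, queued_track_ids: set) -> bool:
    if search:
        needle = search.lower()
        if needle not in track.title.lower() and needle not in track.artist.lower():
            return False
    if only_mine and track.uploaded_by != current_user_id:
        return False
    if only_not_queued and track.id in queued_track_ids:
        return False
    return True
```

The live result counter is then just the length of the list after applying this predicate.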

- Improve Makefile
  - Add build-backend and build-frontend commands
  - Add rebuild-backend and rebuild-frontend commands
  - Add rebuild-clean variants
  - Update migrations to run in Docker

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-19 19:22:35 +03:00
parent fdc854256c
commit 8a2ea5b4af
17 changed files with 848 additions and 143 deletions

@@ -32,6 +32,74 @@ async def get_tracks(
     return result.scalars().all()
 
 
+async def _process_single_track(
+    file: UploadFile,
+    title: str | None,
+    artist: str | None,
+    current_user: User,
+) -> tuple[Track | None, Exception | None]:
+    """Process a single track upload. Returns (track, error)."""
+    try:
+        # Check file type
+        if not file.content_type or not file.content_type.startswith("audio/"):
+            return None, Exception("File must be an audio file")
+
+        # Read file content
+        content = await file.read()
+        file_size = len(content)
+
+        # Check file size
+        max_size = settings.max_file_size_mb * 1024 * 1024
+        if file_size > max_size:
+            return None, Exception(f"File size exceeds {settings.max_file_size_mb}MB limit")
+
+        # Check storage limit
+        if not await can_upload_file(file_size):
+            return None, Exception("Storage limit exceeded")
+
+        # Get duration and metadata from MP3
+        try:
+            audio = MP3(BytesIO(content))
+            duration = int(audio.info.length * 1000)  # Convert to milliseconds
+
+            # Extract ID3 tags if title/artist not provided
+            if not title or not artist:
+                tags = audio.tags
+                if tags:
+                    # TIT2 = Title, TPE1 = Artist
+                    if not title and tags.get("TIT2"):
+                        title = str(tags.get("TIT2"))
+                    if not artist and tags.get("TPE1"):
+                        artist = str(tags.get("TPE1"))
+
+            # Fallback to filename if still no title
+            if not title:
+                title = file.filename.rsplit(".", 1)[0] if file.filename else "Unknown"
+            if not artist:
+                artist = "Unknown"
+        except Exception:
+            return None, Exception("Could not read audio file")
+
+        # Upload to S3
+        s3_key = f"tracks/{uuid.uuid4()}.mp3"
+        await upload_file(content, s3_key)
+
+        # Create track record
+        track = Track(
+            title=title,
+            artist=artist,
+            duration=duration,
+            s3_key=s3_key,
+            file_size=file_size,
+            uploaded_by=current_user.id,
+        )
+        return track, None
+    except Exception as e:
+        return None, e
+
+
 @router.post("/upload", response_model=TrackResponse)
 async def upload_track(
     file: UploadFile = File(...),
@@ -40,78 +108,46 @@ async def upload_track(
     db: AsyncSession = Depends(get_db),
     current_user: User = Depends(get_current_user),
 ):
-    # Check file type
-    if not file.content_type or not file.content_type.startswith("audio/"):
+    track, error = await _process_single_track(file, title, artist, current_user)
+    if error:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
-            detail="File must be an audio file",
+            detail=str(error),
         )
-
-    # Read file content
-    content = await file.read()
-    file_size = len(content)
-
-    # Check file size
-    max_size = settings.max_file_size_mb * 1024 * 1024
-    if file_size > max_size:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail=f"File size exceeds {settings.max_file_size_mb}MB limit",
-        )
-
-    # Check storage limit
-    if not await can_upload_file(file_size):
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="Storage limit exceeded",
-        )
-
-    # Get duration and metadata from MP3
-    try:
-        audio = MP3(BytesIO(content))
-        duration = int(audio.info.length * 1000)  # Convert to milliseconds
-
-        # Extract ID3 tags if title/artist not provided
-        if not title or not artist:
-            tags = audio.tags
-            if tags:
-                # TIT2 = Title, TPE1 = Artist
-                if not title and tags.get("TIT2"):
-                    title = str(tags.get("TIT2"))
-                if not artist and tags.get("TPE1"):
-                    artist = str(tags.get("TPE1"))
-
-        # Fallback to filename if still no title
-        if not title:
-            title = file.filename.rsplit(".", 1)[0] if file.filename else "Unknown"
-        if not artist:
-            artist = "Unknown"
-    except Exception:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="Could not read audio file",
-        )
-
-    # Upload to S3
-    s3_key = f"tracks/{uuid.uuid4()}.mp3"
-    await upload_file(content, s3_key)
-
-    # Create track record
-    track = Track(
-        title=title,
-        artist=artist,
-        duration=duration,
-        s3_key=s3_key,
-        file_size=file_size,
-        uploaded_by=current_user.id,
-    )
 
     db.add(track)
     await db.flush()
     return track
+
+
+@router.post("/upload-multiple", response_model=list[TrackResponse])
+async def upload_multiple_tracks(
+    files: list[UploadFile] = File(...),
+    db: AsyncSession = Depends(get_db),
+    current_user: User = Depends(get_current_user),
+):
+    """Upload multiple tracks at once. Each file's metadata is auto-detected from ID3 tags."""
+    if not files:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="No files provided",
+        )
+
+    # Process all files
+    results = []
+    for file in files:
+        track, error = await _process_single_track(file, None, None, current_user)
+        if track:
+            db.add(track)
+            results.append(track)
+
+    # Commit all at once
+    await db.flush()
+    return results
 
 
 @router.get("/{track_id}", response_model=TrackWithUrl)
 async def get_track(track_id: uuid.UUID, db: AsyncSession = Depends(get_db)):
     result = await db.execute(select(Track).where(Track.id == track_id))