show ffmpeg command in case of error

#6
by Aivo - opened
README.md CHANGED
@@ -1,35 +1,21 @@
1
  ---
2
- title: AI Video Composer - Natural Language FFMPEG
3
- short_description: Describe what you want, AI writes the FFMPEG command
4
  emoji: 🏞
5
  colorFrom: red
6
  colorTo: yellow
7
  sdk: gradio
8
- sdk_version: 6.2.0
9
  app_file: app.py
10
  pinned: false
11
  disable_embedding: true
12
  models:
13
- - moonshotai/Kimi-K2-Instruct
14
- tags:
15
- - ffmpeg
16
- - video-editing
17
- - natural-language
18
- - ai-video
19
- - no-code
20
- - video-processing
21
- - media-converter
22
- - slideshow-maker
23
- - video-merger
24
- - command-generator
25
- - waveform-visualization
26
- - audio-to-video
27
- - image-to-video
28
  ---
29
 
30
- # 🏞 AI Video Composer - FFMPEG in Plain English
31
 
32
- Describe what you want in plain English, like "create a slideshow from these images with background music" or "add a waveform visualization to this audio", and AI generates the exact FFMPEG command to make it happen.
33
 
34
  ## How It Works
35
 
@@ -58,7 +44,7 @@ Describe what you want in plain English, like "create a slideshow from these ima
58
 
59
  4. **Processing**:
60
  - The app analyzes your files and instructions
61
- - Generates an optimized FFmpeg command using Kimi-K2
62
  - Executes the command and returns the processed video
63
  - Displays the generated FFmpeg command for transparency
64
 
@@ -76,7 +62,7 @@ Describe what you want in plain English, like "create a slideshow from these ima
76
 
77
  - Built with Gradio for the user interface
78
  - Uses FFmpeg for media processing
79
- - Powered by Kimi-K2 for command generation
80
  - Implements robust error handling and command validation
81
  - Processes files in a temporary directory for safety
82
  - Supports both simple operations and complex media transformations
@@ -92,4 +78,4 @@ Describe what you want in plain English, like "create a slideshow from these ima
92
 
93
  If you have ideas for improvements or bug fixes, please open a PR:
94
 
95
- [![Open a Pull Request](https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-pr-lg-light.svg)](https://huggingface.co/spaces/huggingface-projects/video-composer-gpt4/discussions)
 
1
  ---
2
+ title: AI Video Composer
3
+ short_description: Create videos with FFMPEG + Qwen2.5-Coder
4
  emoji: 🏞
5
  colorFrom: red
6
  colorTo: yellow
7
  sdk: gradio
8
+ sdk_version: 5.6.0
9
  app_file: app.py
10
  pinned: false
11
  disable_embedding: true
12
  models:
13
+ - Qwen/Qwen2.5-Coder-32B-Instruct
14
  ---
15
 
16
+ # 🏞 AI Video Composer
17
 
18
+ AI Video Composer is an intelligent media processing application that uses natural language instructions to create videos from your media assets. It leverages the Qwen2.5-Coder language model to generate FFmpeg commands based on your requirements.
19
 
20
  ## How It Works
21
 
 
44
 
45
  4. **Processing**:
46
  - The app analyzes your files and instructions
47
+ - Generates an optimized FFmpeg command using Qwen2.5-Coder
48
  - Executes the command and returns the processed video
49
  - Displays the generated FFmpeg command for transparency
50
 
 
62
 
63
  - Built with Gradio for the user interface
64
  - Uses FFmpeg for media processing
65
+ - Powered by Qwen2.5-Coder for command generation
66
  - Implements robust error handling and command validation
67
  - Processes files in a temporary directory for safety
68
  - Supports both simple operations and complex media transformations
 
78
 
79
  If you have ideas for improvements or bug fixes, please open a PR:
80
 
81
+ [![Open a Pull Request](https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-pr-lg-light.svg)](https://huggingface.co/spaces/huggingface-projects/video-composer-gpt4/discussions)
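For a concrete sense of what the app produces, here is one plausible command for the README's waveform example (a hedged sketch using the repo's example asset names; the model's actual output varies from run to run):

```bash
# Render a centered waveform over a static background image (illustrative only).
ffmpeg -i ai_talk.wav -loop 1 -i bg-image.png \
  -filter_complex "[0:a]showwaves=s=1920x200:mode=line:colors=white[wave];[1:v]scale=1920:1080[bg];[bg][wave]overlay=(W-w)/2:(H-h)/2:shortest=1[v]" \
  -map "[v]" -map 0:a -c:v libx264 -pix_fmt yuv420p -c:a aac output.mp4
```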
app.py CHANGED
@@ -1,12 +1,4 @@
1
- import subprocess
2
- import sys
3
-
4
- # Install local mediagallery package at runtime (for HF Spaces)
5
- subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "./mediagallery"])
6
-
7
  import gradio as gr
8
- import spaces
9
- from gradio_mediagallery import MediaGallery
10
 
11
  from PIL import Image
12
  from moviepy.editor import VideoFileClip, AudioFileClip
@@ -20,25 +12,13 @@ import tempfile
20
  import shlex
21
  import shutil
22
 
23
- # Supported models configuration
24
- MODELS = {
25
- "moonshotai/Kimi-K2-Instruct": {
26
- "base_url": "https://router.huggingface.co/v1",
27
- "env_key": "HF_TOKEN",
28
- "model_name": "moonshotai/Kimi-K2-Instruct-0905:groq",
29
- },
30
- }
31
-
32
- # Initialize client with first available model
33
- client = OpenAI(
34
- base_url=next(iter(MODELS.values()))["base_url"],
35
- api_key=os.environ[next(iter(MODELS.values()))["env_key"]],
36
- )
37
 
38
  allowed_medias = [
39
  ".png",
40
  ".jpg",
41
- ".webp",
42
  ".jpeg",
43
  ".tiff",
44
  ".bmp",
@@ -63,37 +43,7 @@ allowed_medias = [
63
  ]
64
 
65
 
66
- class FileWrapper:
67
- """Wrapper to provide .name attribute for MediaGallery output tuples."""
68
-
69
- def __init__(self, path):
70
- self.name = path if isinstance(path, str) else str(path)
71
-
72
-
73
- def normalize_files(files):
74
- """Convert MediaGallery output or gr.File output to list of file-like objects."""
75
- if not files:
76
- return []
77
-
78
- result = []
79
- for item in files:
80
- if isinstance(item, tuple):
81
- # MediaGallery returns (path, caption) tuples
82
- path = item[0]
83
- result.append(FileWrapper(path))
84
- elif hasattr(item, "name"):
85
- # gr.File returns objects with .name attribute
86
- result.append(item)
87
- elif isinstance(item, str):
88
- # Direct file path
89
- result.append(FileWrapper(item))
90
- else:
91
- result.append(FileWrapper(str(item)))
92
- return result
93
-
94
-
95
  def get_files_infos(files):
96
- files = normalize_files(files)
97
  results = []
98
  for file in files:
99
  file_path = Path(file.name)
@@ -134,16 +84,7 @@ def get_files_infos(files):
134
  return results
135
 
136
 
137
- def get_completion(
138
- prompt,
139
- files_info,
140
- top_p,
141
- temperature,
142
- model_choice,
143
- conversation_history=None,
144
- previous_error=None,
145
- previous_command=None,
146
- ):
147
  # Create table header
148
  files_info_string = "| Type | Name | Dimensions | Duration | Audio Channels |\n"
149
  files_info_string += "|------|------|------------|-----------|--------|\n"
@@ -162,140 +103,47 @@ def get_completion(
162
 
163
  files_info_string += f"| {file_info['type']} | {file_info['name']} | {dimensions} | {duration} | {audio} |\n"
164
 
165
- # Build the user message with optional error feedback
166
- user_content = f"""## AVAILABLE ASSETS
167
 
168
  {files_info_string}
169
 
170
- ## TASK
171
- {prompt}
172
-
173
- ## REQUIREMENTS
174
- - Output format: MP4 video saved as "output.mp4"
175
- - Generate a single, complete FFmpeg command
176
- - Command must work with the exact filenames listed above
177
-
178
- Think briefly about the approach, then output the FFmpeg command in a ```bash code block."""
179
-
180
- # Add error feedback if this is a retry
181
- if previous_error and previous_command:
182
- user_content += f"""
183
-
184
- IMPORTANT: This is a retry attempt. The previous command failed with the following error:
185
-
186
- PREVIOUS COMMAND (FAILED):
187
- {previous_command}
188
-
189
- ERROR MESSAGE:
190
- {previous_error}
191
-
192
- Please analyze the error and generate a corrected command that addresses the specific issue.
193
-
194
- COMMON SLIDESHOW ERROR FIXES:
195
- - If you see "do not match the corresponding output link" → Images have different dimensions, use scale+pad approach
196
- - If you see "Padded dimensions cannot be smaller than input dimensions" → Fix pad calculation or use standard resolution (1920x1080 or 1080x1920)
197
- - If you see "Failed to configure input pad" → Check scale and pad syntax, ensure proper filter chain
198
- - If you see "Invalid argument" in filters → Simplify filter_complex syntax and check parentheses
199
-
200
- FORMAT DETECTION KEYWORDS:
201
- - "vertical", "portrait", "9:16", "TikTok", "Instagram Stories", "phone" → Use 1080x1920
202
- - "horizontal", "landscape", "16:9", "YouTube", "TV" → Use 1920x1080 (default)
203
- - "square", "1:1", "Instagram post" → Use 1080x1080"""
204
-
205
- user_content += "\n\nYOUR RESPONSE:"
206
-
207
- # Initialize conversation with system message and first user message
208
- if conversation_history is None:
209
- messages = [
210
- {
211
- "role": "system",
212
- "content": """You are an expert FFmpeg engineer. Generate precise, working FFmpeg commands.
213
-
214
- ## OUTPUT FORMAT
215
- 1. Brief analysis (2-3 sentences max)
216
- 2. Single FFmpeg command in a ```bash code block
217
- 3. Output file must be "output.mp4"
218
-
219
- ## CORE RULES
220
- - ONE command only, no chaining (no && or ;)
221
- - Use exact filenames from the asset list
222
- - Keep commands as simple as possible
223
- - Always use: -c:v libx264 -pix_fmt yuv420p -movflags +faststart
224
-
225
- ## SLIDESHOW PATTERN (for multiple images)
226
- When combining images with different dimensions:
227
- ```bash
228
- ffmpeg -loop 1 -t 3 -i img1.jpg -loop 1 -t 3 -i img2.jpg -filter_complex "[0]scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1[v0];[1]scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1[v1];[v0][v1]concat=n=2:v=1:a=0" -c:v libx264 -pix_fmt yuv420p output.mp4
229
- ```
230
- - Default: 1920x1080, 3 seconds per image
231
- - Vertical/portrait/TikTok: use 1080x1920
232
- - Always scale+pad to normalize dimensions
233
-
234
- ## AUDIO WAVEFORM
235
- For waveform visualization:
236
- ```bash
237
- ffmpeg -i audio.mp3 -i bg.png -filter_complex "[0:a]showwaves=s=1920x200:mode=line:colors=white[wave];[1]scale=1920:1080[bg];[bg][wave]overlay=(W-w)/2:(H-h)/2" -c:v libx264 -c:a aac output.mp4
238
- ```
239
-
240
- ## WITH BACKGROUND MUSIC
241
- Add audio to video/slideshow:
242
- ```bash
243
- ffmpeg ... -i music.mp3 -map "[vout]" -map N:a -shortest -c:a aac output.mp4
244
- ```
245
- Where N is the audio input index.""",
246
- },
247
- {
248
- "role": "user",
249
- "content": user_content,
250
- },
251
- ]
252
- else:
253
- # Use existing conversation history
254
- messages = conversation_history[:]
255
-
256
- # If there's a previous error, add it as a separate message exchange
257
- if previous_error and previous_command:
258
- # Add the failed command as assistant response
259
- messages.append(
260
- {
261
- "role": "assistant",
262
- "content": f"I'll execute this FFmpeg command:\n\n```bash\n{previous_command}\n```",
263
- }
264
- )
265
-
266
- # Add the error as user feedback
267
- messages.append(
268
- {
269
- "role": "user",
270
- "content": f"""The command failed with the following error:
271
-
272
- ERROR MESSAGE:
273
- {previous_error}
274
-
275
- Please analyze the error and generate a corrected command that addresses the specific issue.
276
-
277
- COMMON SLIDESHOW ERROR FIXES:
278
- - If you see "do not match the corresponding output link" → Images have different dimensions, use scale+pad approach
279
- - If you see "Padded dimensions cannot be smaller than input dimensions" → Fix pad calculation or use standard resolution (1920x1080 or 1080x1920)
280
- - If you see "Failed to configure input pad" → Check scale and pad syntax, ensure proper filter chain
281
- - If you see "Invalid argument" in filters → Simplify filter_complex syntax and check parentheses
282
-
283
- FORMAT DETECTION KEYWORDS:
284
- - "vertical", "portrait", "9:16", "TikTok", "Instagram Stories", "phone" → Use 1080x1920
285
- - "horizontal", "landscape", "16:9", "YouTube", "TV" → Use 1920x1080 (default)
286
- - "square", "1:1", "Instagram post" → Use 1080x1080
287
-
288
- Please provide a corrected FFmpeg command.""",
289
- }
290
- )
291
- else:
292
- # Add new user request to existing conversation
293
- messages.append(
294
- {
295
- "role": "user",
296
- "content": user_content,
297
- }
298
- )
299
  try:
300
  # Print the complete prompt
301
  print("\n=== COMPLETE PROMPT ===")
@@ -304,131 +152,39 @@ Please provide a corrected FFmpeg command.""",
304
  print(msg["content"])
305
  print("=====================\n")
306
 
307
- if model_choice not in MODELS:
308
- raise ValueError(f"Model {model_choice} is not supported")
309
-
310
- model_config = MODELS[model_choice]
311
- client.base_url = model_config["base_url"]
312
- client.api_key = os.environ[model_config["env_key"]]
313
- model = model_config.get("model_name", model_choice)
314
-
315
  completion = client.chat.completions.create(
316
- model=model,
317
  messages=messages,
318
  temperature=temperature,
319
  top_p=top_p,
320
  max_tokens=2048,
321
  )
322
  content = completion.choices[0].message.content
323
- print(f"\n=== RAW API RESPONSE ===\n{content}\n========================\n")
324
-
325
  # Extract command from code block if present
326
- import re
327
-
328
- command = None
329
-
330
- # Try multiple code block patterns
331
- code_patterns = [
332
- r"```(?:bash|sh|shell)?\n(.*?)\n```", # Standard code blocks
333
- r"```\n(.*?)\n```", # Plain code blocks
334
- r"`([^`]*ffmpeg[^`]*)`", # Inline code with ffmpeg
335
- ]
336
-
337
- for pattern in code_patterns:
338
- matches = re.findall(pattern, content, re.DOTALL | re.IGNORECASE)
339
- for match in matches:
340
- if "ffmpeg" in match.lower():
341
- command = match.strip()
342
- break
343
  if command:
344
- break
345
-
346
- # If no code block found, try to find ffmpeg lines directly
347
- if not command:
348
- ffmpeg_lines = [
349
- line.strip()
350
- for line in content.split("\n")
351
- if line.strip().lower().startswith("ffmpeg")
352
- ]
353
- if ffmpeg_lines:
354
- command = ffmpeg_lines[0]
355
-
356
- # Last resort: look for any line containing ffmpeg
357
- if not command:
358
- for line in content.split("\n"):
359
- line = line.strip()
360
- if "ffmpeg" in line.lower() and len(line) > 10:
361
- command = line
362
- break
363
-
364
- if not command:
365
- print(f"ERROR: No ffmpeg command found in response")
366
- command = content.replace("\n", " ").strip()
367
-
368
- print(f"=== EXTRACTED COMMAND ===\n{command}\n========================\n")
369
 
370
  # strip the "output.mp4" placeholder; the actual output file path is appended at execution time
371
  command = command.replace("output.mp4", "")
372
 
373
- # Add the assistant's response to conversation history
374
- messages.append({"role": "assistant", "content": content})
375
-
376
- return command, messages
377
  except Exception as e:
378
  raise Exception("API Error")
379
 
380
 
381
- @spaces.GPU(duration=120)
382
- def execute_ffmpeg_command(args, temp_dir, output_file_path):
383
- """Execute FFmpeg command with GPU acceleration"""
384
- final_command = args + ["-y", output_file_path]
385
- print(f"\n=== EXECUTING FFMPEG COMMAND ===\nffmpeg {' '.join(final_command[1:])}\n")
386
- subprocess.run(final_command, cwd=temp_dir)
387
- return output_file_path
388
-
389
-
390
- def compose_video(
391
- prompt: str,
392
- files: list = None,
393
- top_p: float = 0.7,
394
- temperature: float = 0.1,
395
- model_choice: str = "moonshotai/Kimi-K2-Instruct",
396
- ) -> str:
397
- """
398
- Compose videos from existing media assets using natural language instructions.
399
-
400
- This tool is NOT for AI video generation. Instead, it uses AI to generate FFmpeg
401
- commands that combine, edit, and transform your uploaded images, videos, and audio
402
- files based on natural language descriptions.
403
-
404
- Args:
405
- prompt (str): Natural language instructions for video composition (e.g., "Create a slideshow with background music")
406
- files (list, optional): List of media files (images, videos, audio) to use
407
- top_p (float): Top-p sampling parameter for AI model (0.0-1.0, default: 0.7)
408
- temperature (float): Temperature parameter for AI model creativity (0.0-5.0, default: 0.1)
409
- model_choice (str): AI model to use for command generation (default: "moonshotai/Kimi-K2-Instruct")
410
-
411
- Returns:
412
- str: Path to the generated video file
413
-
414
- Example:
415
- compose_video("Create a 10-second slideshow from the images with fade transitions", files=[img1, img2, img3])
416
- """
417
- return update(files or [], prompt, top_p, temperature, model_choice)
418
-
419
-
420
- def update(
421
- files,
422
- prompt,
423
- top_p=1,
424
- temperature=1,
425
- model_choice="moonshotai/Kimi-K2-Instruct",
426
- ):
427
  if prompt == "":
428
  raise gr.Error("Please enter a prompt.")
429
 
430
- # Normalize files from MediaGallery or gr.File format
431
- files = normalize_files(files)
432
  files_info = get_files_infos(files)
433
  # disable this if you're running the app locally or on your own server
434
  for file_info in files_info:
@@ -437,30 +193,16 @@ def update(
437
  raise gr.Error(
438
  "Please make sure all videos are less than 2 minute long."
439
  )
440
- if file_info["size"] > 100000000:
441
- raise gr.Error("Please make sure all files are less than 100MB in size.")
442
 
443
  attempts = 0
444
- command_attempts = []
445
- previous_error = None
446
- previous_command = None
447
- conversation_history = None
448
-
449
  while attempts < 2:
450
- print("ATTEMPT", attempts + 1)
451
  try:
452
- command_string, conversation_history = get_completion(
453
- prompt,
454
- files_info,
455
- top_p,
456
- temperature,
457
- model_choice,
458
- conversation_history,
459
- previous_error,
460
- previous_command,
461
- )
462
  print(
463
- f"""///PROMPT {prompt} \n\n/// START OF COMMAND ///:\n\n{command_string}\n\n/// END OF COMMAND ///\n\n"""
464
  )
465
 
466
  # split command string into list of arguments
@@ -475,135 +217,58 @@ def update(
475
  shutil.copy(file_path, Path(temp_dir) / sanitized_name)
476
 
477
  # test if ffmpeg command is valid dry run
478
- ffmpeg_dry_run = subprocess.run(
479
  args + ["-f", "null", "-"],
480
  stderr=subprocess.PIPE,
481
  text=True,
482
  cwd=temp_dir,
483
  )
484
-
485
- # Extract command for display
486
- command_for_display = f"ffmpeg {' '.join(args[1:])} -y output.mp4"
487
-
488
- if ffmpeg_dry_run.returncode == 0:
489
  print("Command is valid.")
490
- # Add successful command to attempts
491
- command_attempts.append(
492
- {
493
- "command": command_for_display,
494
- "status": "✅ Valid",
495
- "attempt": attempts + 1,
496
- }
497
- )
498
  else:
499
  print("Command is not valid. Error output:")
500
- print(ffmpeg_dry_run.stderr)
501
-
502
- # Add failed command to attempts with error
503
- command_attempts.append(
504
- {
505
- "command": command_for_display,
506
- "status": "❌ Invalid",
507
- "error": ffmpeg_dry_run.stderr,
508
- "attempt": attempts + 1,
509
- }
510
- )
511
-
512
- # Store error details for next retry
513
- previous_error = ffmpeg_dry_run.stderr
514
- previous_command = command_for_display
515
-
516
  raise Exception(
517
- f"FFMPEG command validation failed: {ffmpeg_dry_run.stderr}"
518
  )
519
 
520
  output_file_name = f"output_{uuid.uuid4()}.mp4"
521
  output_file_path = str((Path(temp_dir) / output_file_name).resolve())
522
- execute_ffmpeg_command(args, temp_dir, output_file_path)
523
-
524
- # Generate command display with all attempts
525
- command_display = generate_command_display(command_attempts)
526
- return output_file_path, gr.update(value=command_display)
527
-
 
528
  except Exception as e:
529
  attempts += 1
530
  if attempts >= 2:
531
  print("FROM UPDATE", e)
532
- # Show all attempted commands even on final failure
533
- command_display = generate_command_display(command_attempts)
534
- command_display += (
535
- f"\n\n### Final Error\n❌ All attempts failed. Last error: {str(e)}"
536
- )
537
- return None, gr.update(value=command_display)
538
-
539
-
540
- def generate_command_display(command_attempts):
541
- """Generate a markdown display of all command attempts"""
542
- if not command_attempts:
543
- return "### No commands generated"
544
 
545
- display = "### Generated Commands\n\n"
546
-
547
- for attempt in command_attempts:
548
- display += f"**Attempt {attempt['attempt']}** {attempt['status']}\n"
549
- display += f"```bash\n{attempt['command']}\n```\n"
550
-
551
- if attempt["status"] == "❌ Invalid" and "error" in attempt:
552
- display += f"<details>\n<summary>🔍 Error Details</summary>\n\n```\n{attempt['error']}\n```\n</details>\n\n"
553
- else:
554
- display += "\n"
555
-
556
- return display
557
-
558
-
559
- # Create MCP-compatible interface
560
- mcp_interface = gr.Interface(
561
- fn=compose_video,
562
- inputs=[
563
- gr.Textbox(
564
- value="Create a slideshow with background music",
565
- label="Video Composition Instructions",
566
- ),
567
- gr.File(file_count="multiple", label="Media Files", file_types=allowed_medias),
568
- gr.Slider(0.0, 1.0, value=0.7, label="Top-p"),
569
- gr.Slider(0.0, 5.0, value=0.1, label="Temperature"),
570
- gr.Radio(
571
- choices=list(MODELS.keys()), value=list(MODELS.keys())[0], label="Model"
572
- ),
573
- ],
574
- outputs=gr.Video(label="Generated Video"),
575
- title="AI Video Composer MCP Tool",
576
- description="Compose videos from media assets using natural language",
577
- )
578
 
579
  with gr.Blocks() as demo:
580
  gr.Markdown(
581
  """
582
- # 🏞 AI Video Composer: FFMPEG in Plain English
583
- Upload your media files, describe what you want, and [Kimi-K2](https://huggingface.co/moonshotai/Kimi-K2-Instruct) generates the FFMPEG command. Create slideshows from images, add background music, merge video clips, visualize audio waveforms, convert formats, adjust speed, and more.
584
  """,
585
  elem_id="header",
586
  )
587
  with gr.Row():
588
  with gr.Column():
589
- user_files = MediaGallery(
 
 
590
  file_types=allowed_medias,
591
- label="Media Assets",
592
- columns=3,
593
- interactive=True,
594
  )
595
  user_prompt = gr.Textbox(
596
- placeholder="eg: Remove the 3 first seconds of the video",
597
  label="Instructions",
598
- lines=3,
599
  )
600
  btn = gr.Button("Run")
601
  with gr.Accordion("Parameters", open=False):
602
- model_choice = gr.Radio(
603
- choices=list(MODELS.keys()),
604
- value=list(MODELS.keys())[0],
605
- label="Model",
606
- )
607
  top_p = gr.Slider(
608
  minimum=0,
609
  maximum=1.0,
@@ -628,7 +293,7 @@ with gr.Blocks() as demo:
628
 
629
  btn.click(
630
  fn=update,
631
- inputs=[user_files, user_prompt, top_p, temperature, model_choice],
632
  outputs=[generated_video, generated_command],
633
  )
634
  with gr.Row():
@@ -639,32 +304,37 @@ with gr.Blocks() as demo:
639
  "Use the image as the background with a waveform visualization for the audio positioned in center of the video.",
640
  0.7,
641
  0.1,
642
- list(MODELS.keys())[0],
643
- ],
644
- [
645
- ["./examples/ai_talk.wav", "./examples/bg-image.png"],
646
- "Use the image as the background with a waveform visualization for the audio positioned in center of the video. Make sure the waveform has a max height of 250 pixels.",
647
- 0.7,
648
- 0.1,
649
- list(MODELS.keys())[0],
650
  ],
651
  [
652
  [
 
653
  "./examples/cat1.jpeg",
654
  "./examples/cat2.jpeg",
655
  "./examples/cat3.jpeg",
656
  "./examples/cat4.jpeg",
657
  "./examples/cat5.jpeg",
658
  "./examples/cat6.jpeg",
 
659
  "./examples/heat-wave.mp3",
660
  ],
661
- "Create a 3x2 grid of the cat images with the audio as background music. Make the video duration match the audio duration.",
 
662
  0.7,
663
  0.1,
664
- list(MODELS.keys())[0],
665
  ],
666
  ],
667
- inputs=[user_files, user_prompt, top_p, temperature, model_choice],
668
  outputs=[generated_video, generated_command],
669
  fn=update,
670
  run_on_click=True,
@@ -680,9 +350,5 @@ with gr.Blocks() as demo:
680
  """,
681
  )
682
 
683
- # Launch MCP interface for tool access
684
- mcp_interface.queue(default_concurrency_limit=200)
685
-
686
- # Launch main demo
687
  demo.queue(default_concurrency_limit=200)
688
- demo.launch(show_api=False, ssr_mode=False, mcp_server=True)
1
  import gradio as gr
 
 
2
 
3
  from PIL import Image
4
  from moviepy.editor import VideoFileClip, AudioFileClip
 
12
  import shlex
13
  import shutil
14
 
15
+ HF_API_KEY = os.environ["HF_TOKEN"]
16
+
17
+ client = OpenAI(base_url="https://api-inference.huggingface.co/v1/", api_key=HF_API_KEY)
18
 
19
  allowed_medias = [
20
  ".png",
21
  ".jpg",
 
22
  ".jpeg",
23
  ".tiff",
24
  ".bmp",
 
43
  ]
44
 
45
 
46
  def get_files_infos(files):
 
47
  results = []
48
  for file in files:
49
  file_path = Path(file.name)
 
84
  return results
85
 
86
 
87
+ def get_completion(prompt, files_info, top_p, temperature):
88
  # Create table header
89
  files_info_string = "| Type | Name | Dimensions | Duration | Audio Channels |\n"
90
  files_info_string += "|------|------|------------|-----------|--------|\n"
 
103
 
104
  files_info_string += f"| {file_info['type']} | {file_info['name']} | {dimensions} | {duration} | {audio} |\n"
105
 
106
+ messages = [
107
+ {
108
+ "role": "system",
109
+ "content": """
110
+ You are a very experienced media engineer, controlling a UNIX terminal.
111
+ You are an FFMPEG expert with years of experience and multiple contributions to the FFMPEG project.
112
+
113
+ You are given:
114
+ (1) a set of video, audio and/or image assets. Including their name, duration, dimensions and file size
115
+ (2) the description of a new video you need to create from the list of assets
116
+
117
+ Your objective is to generate the SIMPLEST POSSIBLE single ffmpeg command to create the requested video.
118
+
119
+ Key requirements:
120
+ - Use the absolute minimum number of ffmpeg options needed
121
+ - Avoid complex filter chains or filter_complex if possible
122
+ - Prefer simple concatenation, scaling, and basic filters
123
+ - Output exactly ONE command that will be directly pasted into the terminal
124
+ - Never output multiple commands chained together
125
+ - Output the command in a single line (no line breaks or multiple lines)
126
+ - If the user asks for waveform visualization, make sure to set the mode to `line` and use the full width of the video. Also combine the audio into a single channel.
127
+ - For image sequences: Use -framerate and pattern matching (like 'img%d.jpg') when possible, falling back to individual image processing with -loop 1 and appropriate filters only when necessary.
128
+ - When showing file operations or commands, always use explicit paths and filenames without wildcards - avoid using asterisk (*) or glob patterns. Instead, use specific numbered sequences (like %d), explicit file lists, or show the full filename.
129
+
130
+ Remember: Simpler is better. Only use advanced ffmpeg features if absolutely necessary for the requested output.
131
+ """,
132
+ },
133
+ {
134
+ "role": "user",
135
+ "content": f"""Always output the media as video/mp4 and output file with "output.mp4". Provide only the shell command without any explanations.
136
+ The current assets and objective follow. Reply with the FFMPEG command:
137
+
138
+ AVAILABLE ASSETS LIST:
139
 
140
  {files_info_string}
141
 
142
+ OBJECTIVE: {prompt} and output at "output.mp4"
143
+ YOUR FFMPEG COMMAND:
144
+ """,
145
+ },
146
+ ]
147
  try:
148
  # Print the complete prompt
149
  print("\n=== COMPLETE PROMPT ===")
 
152
  print(msg["content"])
153
  print("=====================\n")
154
 
155
  completion = client.chat.completions.create(
156
+ model="Qwen/Qwen2.5-Coder-32B-Instruct",
157
  messages=messages,
158
  temperature=temperature,
159
  top_p=top_p,
160
  max_tokens=2048,
161
  )
162
  content = completion.choices[0].message.content
 
 
163
  # Extract command from code block if present
164
+ if "```" in content:
165
+ # Find content between ```sh or ```bash and the next ```
166
+ import re
167
+
168
+ command = re.search(r"```(?:sh|bash)?\n(.*?)\n```", content, re.DOTALL)
169
  if command:
170
+ command = command.group(1).strip()
171
+ else:
172
+ command = content.replace("\n", "")
173
+ else:
174
+ command = content.replace("\n", "")
175
 
176
  # strip the "output.mp4" placeholder; the actual output file path is appended at execution time
177
  command = command.replace("output.mp4", "")
178
 
179
+ return command
180
  except Exception as e:
181
  raise Exception("API Error")
182
 
183
 
184
+ def update(files, prompt, top_p=1, temperature=1):
185
  if prompt == "":
186
  raise gr.Error("Please enter a prompt.")
187
 
 
 
188
  files_info = get_files_infos(files)
189
  # disable this if you're running the app locally or on your own server
190
  for file_info in files_info:
 
193
  raise gr.Error(
194
  "Please make sure all videos are less than 2 minute long."
195
  )
196
+ if file_info["size"] > 10000000:
197
+ raise gr.Error("Please make sure all files are less than 10MB in size.")
198
 
199
  attempts = 0
 
200
  while attempts < 2:
201
+ print("ATTEMPT", attempts)
202
  try:
203
+ command_string = get_completion(prompt, files_info, top_p, temperature)
204
  print(
205
+ f"""///PROMTP {prompt} \n\n/// START OF COMMAND ///:\n\n{command_string}\n\n/// END OF COMMAND ///\n\n"""
206
  )
207
 
208
  # split command string into list of arguments
 
217
  shutil.copy(file_path, Path(temp_dir) / sanitized_name)
218
 
219
  # test if ffmpeg command is valid dry run
220
+ ffmpeg_dry_run = subprocess.run(
221
  args + ["-f", "null", "-"],
222
  stderr=subprocess.PIPE,
223
  text=True,
224
  cwd=temp_dir,
225
  )
226
+ if ffmpeg_dry_run.returncode == 0:
227
  print("Command is valid.")
228
  else:
229
  print("Command is not valid. Error output:")
230
+ print(ffmpeg_dry_run.stderr)
231
  raise Exception(
232
+ "FFMPEG generated command is not valid. Please try something else."
233
  )
234
 
235
  output_file_name = f"output_{uuid.uuid4()}.mp4"
236
  output_file_path = str((Path(temp_dir) / output_file_name).resolve())
237
+ final_command = args + ["-y", output_file_path]
238
+ print(
239
+ f"\n=== EXECUTING FFMPEG COMMAND ===\nffmpeg {' '.join(final_command[1:])}\n"
240
+ )
241
+ subprocess.run(final_command, cwd=temp_dir)
242
+ generated_command = f"### Generated Command\n```bash\nffmpeg {' '.join(args[1:])} -y output.mp4\n```"
243
+ return output_file_path, gr.update(value=generated_command)
244
  except Exception as e:
245
  attempts += 1
246
  if attempts >= 2:
247
  print("FROM UPDATE", e)
248
+ raise gr.Error(e)
249
 
250
 
251
  with gr.Blocks() as demo:
252
  gr.Markdown(
253
  """
254
+ # 🏞 AI Video Composer
255
+ Compose new videos from your assets using natural language. Add video, image and audio assets and let [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) generate a new video for you (using FFMPEG).
256
  """,
257
  elem_id="header",
258
  )
259
  with gr.Row():
260
  with gr.Column():
261
+ user_files = gr.File(
262
+ file_count="multiple",
263
+ label="Media files",
264
  file_types=allowed_medias,
265
  )
266
  user_prompt = gr.Textbox(
267
+ placeholder="I want to convert to a gif under 15mb",
268
  label="Instructions",
 
269
  )
270
  btn = gr.Button("Run")
271
  with gr.Accordion("Parameters", open=False):
272
  top_p = gr.Slider(
273
  minimum=0,
274
  maximum=1.0,
 
293
 
294
  btn.click(
295
  fn=update,
296
+ inputs=[user_files, user_prompt, top_p, temperature],
297
  outputs=[generated_video, generated_command],
298
  )
299
  with gr.Row():
 
304
  "Use the image as the background with a waveform visualization for the audio positioned in center of the video.",
305
  0.7,
306
  0.1,
307
  ],
308
  [
309
  [
310
+ "./examples/cat8.jpeg",
311
  "./examples/cat1.jpeg",
312
  "./examples/cat2.jpeg",
313
  "./examples/cat3.jpeg",
314
  "./examples/cat4.jpeg",
315
  "./examples/cat5.jpeg",
316
  "./examples/cat6.jpeg",
317
+ "./examples/cat7.jpeg",
318
  "./examples/heat-wave.mp3",
319
  ],
320
+ "Generate an MP4 slideshow where each photo appears for 2 seconds, using the provided audio as soundtrack.",
321
+ 0.7,
322
+ 0.1,
323
+ ],
324
+ [
325
+ ["./examples/waterfall-overlay.png", "./examples/waterfall.mp4"],
326
+ "Add the overlay to the video.",
327
+ 0.7,
328
+ 0.1,
329
+ ],
330
+ [
331
+ ["./examples/example.mp4"],
332
+ "Make this video 10 times faster",
333
  0.7,
334
  0.1,
 
335
  ],
336
  ],
337
+ inputs=[user_files, user_prompt, top_p, temperature],
338
  outputs=[generated_video, generated_command],
339
  fn=update,
340
  run_on_click=True,
 
350
  """,
351
  )
352
 
353
  demo.queue(default_concurrency_limit=200)
354
+ demo.launch(show_api=False, ssr_mode=False)
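To make the validation step in `update()` concrete, the dry run plus error reporting corresponds roughly to this shell pattern (a sketch with a placeholder command, not the app's literal code):

```bash
# The model-generated arguments, minus the output file (placeholder command).
CMD='ffmpeg -loop 1 -t 3 -i cat1.jpeg -c:v libx264 -pix_fmt yuv420p'

# Dry run: decode everything but send it to the null muxer, writing no file.
if $CMD -f null - ; then
    # Validation passed: run for real, overwriting any existing output.
    $CMD -y output.mp4
else
    # Validation failed: surface the exact command so the user can debug it.
    echo "Generated command failed: $CMD"
fi
```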
mediagallery/.gitignore DELETED
@@ -1,11 +0,0 @@
1
- .eggs/
2
- dist/
3
- *.pyc
4
- __pycache__/
5
- *.py[cod]
6
- *$py.class
7
- __tmp/*
8
- *.pyi
9
- .mypycache
10
- .ruff_cache
11
- node_modules
mediagallery/README.md DELETED
@@ -1,490 +0,0 @@
1
-
2
- # `gradio_mediagallery`
3
- <a href="https://pypi.org/project/gradio_mediagallery/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_mediagallery"></a>
4
-
5
- Python library for easily interacting with trained machine learning models
6
-
7
- ## Installation
8
-
9
- ```bash
10
- pip install gradio_mediagallery
11
- ```
12
-
13
- ## Usage
14
-
15
- ```python
16
-
17
- import gradio as gr
18
- from gradio_mediagallery import MediaGallery
19
-
20
-
21
- example = MediaGallery().example_value()
22
-
23
- with gr.Blocks() as demo:
24
- with gr.Row():
25
- MediaGallery(label="Blank"), # blank component
26
- MediaGallery(value=example, label="Populated"), # populated component
27
-
28
-
29
- if __name__ == "__main__":
30
- demo.launch()
31
-
32
- ```
33
-
34
- ## `MediaGallery`
35
-
36
- ### Initialization
37
-
38
- <table>
39
- <thead>
40
- <tr>
41
- <th align="left">name</th>
42
- <th align="left" style="width: 25%;">type</th>
43
- <th align="left">default</th>
44
- <th align="left">description</th>
45
- </tr>
46
- </thead>
47
- <tbody>
48
- <tr>
49
- <td align="left"><code>value</code></td>
50
- <td align="left" style="width: 25%;">
51
-
52
- ```python
53
- Sequence[
54
- np.ndarray | PIL.Image.Image | str | Path | tuple
55
- ]
56
- | Callable
57
- | None
58
- ```
59
-
60
- </td>
61
- <td align="left"><code>None</code></td>
62
- <td align="left">List of images or videos to display in the gallery by default. If a function is provided, the function will be called each time the app loads to set the initial value of this component.</td>
63
- </tr>
64
-
65
- <tr>
66
- <td align="left"><code>format</code></td>
67
- <td align="left" style="width: 25%;">
68
-
69
- ```python
70
- str
71
- ```
72
-
73
- </td>
74
- <td align="left"><code>"webp"</code></td>
75
- <td align="left">Format to save images before they are returned to the frontend, such as 'jpeg' or 'png'. This parameter only applies to images that are returned from the prediction function as numpy arrays or PIL Images. The format should be supported by the PIL library.</td>
76
- </tr>
77
-
78
- <tr>
79
- <td align="left"><code>file_types</code></td>
80
- <td align="left" style="width: 25%;">
81
-
82
- ```python
83
- list[str] | None
84
- ```
85
-
86
- </td>
87
- <td align="left"><code>None</code></td>
88
- <td align="left">List of file extensions or types of files to be uploaded (e.g. ['image', '.mp4']), when this is used as an input component. "image" allows only image files to be uploaded, "video" allows only video files to be uploaded, ".mp4" allows only mp4 files to be uploaded, etc. If None, any image and video files types are allowed.</td>
89
- </tr>
90
-
91
- <tr>
92
- <td align="left"><code>label</code></td>
93
- <td align="left" style="width: 25%;">
94
-
95
- ```python
96
- str | I18nData | None
97
- ```
98
-
99
- </td>
100
- <td align="left"><code>None</code></td>
101
- <td align="left">the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.</td>
102
- </tr>
103
-
104
- <tr>
105
- <td align="left"><code>every</code></td>
106
- <td align="left" style="width: 25%;">
107
-
108
- ```python
109
- Timer | float | None
110
- ```
111
-
112
- </td>
113
- <td align="left"><code>None</code></td>
114
- <td align="left">Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.</td>
115
- </tr>
116
-
117
- <tr>
118
- <td align="left"><code>inputs</code></td>
119
- <td align="left" style="width: 25%;">
120
-
121
- ```python
122
- Component | Sequence[Component] | set[Component] | None
123
- ```
124
-
125
- </td>
126
- <td align="left"><code>None</code></td>
127
- <td align="left">Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.</td>
128
- </tr>
129
-
130
- <tr>
131
- <td align="left"><code>show_label</code></td>
132
- <td align="left" style="width: 25%;">
133
-
134
- ```python
135
- bool | None
136
- ```
137
-
138
- </td>
139
- <td align="left"><code>None</code></td>
140
- <td align="left">if True, will display label.</td>
141
- </tr>
142
-
143
- <tr>
144
- <td align="left"><code>container</code></td>
145
- <td align="left" style="width: 25%;">
146
-
147
- ```python
148
- bool
149
- ```
150
-
151
- </td>
152
- <td align="left"><code>True</code></td>
153
- <td align="left">If True, will place the component in a container - providing some extra padding around the border.</td>
154
- </tr>
155
-
156
- <tr>
157
- <td align="left"><code>scale</code></td>
158
- <td align="left" style="width: 25%;">
159
-
160
- ```python
161
- int | None
162
- ```
163
-
164
- </td>
165
- <td align="left"><code>None</code></td>
166
- <td align="left">relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.</td>
167
- </tr>
168
-
169
- <tr>
170
- <td align="left"><code>min_width</code></td>
171
- <td align="left" style="width: 25%;">
172
-
173
- ```python
174
- int
175
- ```
176
-
177
- </td>
178
- <td align="left"><code>160</code></td>
179
- <td align="left">minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.</td>
180
- </tr>
181
-
182
- <tr>
183
- <td align="left"><code>visible</code></td>
184
- <td align="left" style="width: 25%;">
185
-
186
- ```python
187
- bool
188
- ```
189
-
190
- </td>
191
- <td align="left"><code>True</code></td>
192
- <td align="left">If False, component will be hidden.</td>
193
- </tr>
194
-
195
- <tr>
196
- <td align="left"><code>elem_id</code></td>
197
- <td align="left" style="width: 25%;">
198
-
199
- ```python
200
- str | None
201
- ```
202
-
203
- </td>
204
- <td align="left"><code>None</code></td>
205
- <td align="left">An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.</td>
206
- </tr>
207
-
208
- <tr>
209
- <td align="left"><code>elem_classes</code></td>
210
- <td align="left" style="width: 25%;">
211
-
212
- ```python
213
- list[str] | str | None
214
- ```
215
-
216
- </td>
217
- <td align="left"><code>None</code></td>
218
- <td align="left">An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.</td>
219
- </tr>
220
-
221
- <tr>
222
- <td align="left"><code>render</code></td>
223
- <td align="left" style="width: 25%;">
224
-
225
- ```python
226
- bool
227
- ```
228
-
229
- </td>
230
- <td align="left"><code>True</code></td>
231
- <td align="left">If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.</td>
232
- </tr>
233
-
234
- <tr>
235
- <td align="left"><code>key</code></td>
236
- <td align="left" style="width: 25%;">
237
-
238
- ```python
239
- int | str | tuple[int | str, ...] | None
240
- ```
241
-
242
- </td>
243
- <td align="left"><code>None</code></td>
244
- <td align="left">in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component. Properties set in 'preserved_by_key' are not reset across a re-render.</td>
245
- </tr>
246
-
247
- <tr>
248
- <td align="left"><code>preserved_by_key</code></td>
249
- <td align="left" style="width: 25%;">
250
-
251
- ```python
252
- list[str] | str | None
253
- ```
254
-
255
- </td>
256
- <td align="left"><code>"value"</code></td>
257
- <td align="left">A list of parameters from this component's constructor. Inside a gr.render() function, if a component is re-rendered with the same key, these (and only these) parameters will be preserved in the UI (if they have been changed by the user or an event listener) instead of re-rendered based on the values provided during constructor.</td>
258
- </tr>
259
-
260
- <tr>
261
- <td align="left"><code>columns</code></td>
262
- <td align="left" style="width: 25%;">
263
-
264
- ```python
265
- int | None
266
- ```
267
-
268
- </td>
269
- <td align="left"><code>2</code></td>
270
- <td align="left">Represents the number of images that should be shown in one row.</td>
271
- </tr>
272
-
273
- <tr>
274
- <td align="left"><code>rows</code></td>
275
- <td align="left" style="width: 25%;">
276
-
277
- ```python
278
- int | None
279
- ```
280
-
281
- </td>
282
- <td align="left"><code>None</code></td>
283
- <td align="left">Represents the number of rows in the image grid.</td>
284
- </tr>
285
-
286
- <tr>
287
- <td align="left"><code>height</code></td>
288
- <td align="left" style="width: 25%;">
289
-
290
- ```python
291
- int | float | str | None
292
- ```
293
-
294
- </td>
295
- <td align="left"><code>None</code></td>
296
- <td align="left">The height of the gallery component, specified in pixels if a number is passed, or in CSS units if a string is passed. If more images are displayed than can fit in the height, a scrollbar will appear.</td>
297
- </tr>
298
-
299
- <tr>
300
- <td align="left"><code>allow_preview</code></td>
301
- <td align="left" style="width: 25%;">
302
-
303
- ```python
304
- bool
305
- ```
306
-
307
- </td>
308
- <td align="left"><code>True</code></td>
309
- <td align="left">If True, images in the gallery will be enlarged when they are clicked. Default is True.</td>
310
- </tr>
311
-
312
- <tr>
313
- <td align="left"><code>preview</code></td>
314
- <td align="left" style="width: 25%;">
315
-
316
- ```python
317
- bool | None
318
- ```
319
-
320
- </td>
321
- <td align="left"><code>None</code></td>
322
- <td align="left">If True, MediaGallery will start in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size. Only works if allow_preview is True.</td>
323
- </tr>
324
-
325
- <tr>
326
- <td align="left"><code>selected_index</code></td>
327
- <td align="left" style="width: 25%;">
328
-
329
- ```python
330
- int | None
331
- ```
332
-
333
- </td>
334
- <td align="left"><code>None</code></td>
335
- <td align="left">The index of the image that should be initially selected. If None, no image will be selected at start. If provided, will set MediaGallery to preview mode unless allow_preview is set to False.</td>
336
- </tr>
337
-
338
- <tr>
339
- <td align="left"><code>object_fit</code></td>
340
- <td align="left" style="width: 25%;">
341
-
342
- ```python
343
- Literal[
344
- "contain", "cover", "fill", "none", "scale-down"
345
- ]
346
- | None
347
- ```
348
-
349
- </td>
350
- <td align="left"><code>None</code></td>
351
- <td align="left">CSS object-fit property for the thumbnail images in the gallery. Can be "contain", "cover", "fill", "none", or "scale-down".</td>
352
- </tr>
353
-
354
- <tr>
355
- <td align="left"><code>show_share_button</code></td>
356
- <td align="left" style="width: 25%;">
357
-
358
- ```python
359
- bool | None
360
- ```
361
-
362
- </td>
363
- <td align="left"><code>None</code></td>
364
- <td align="left">If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.</td>
365
- </tr>
366
-
367
- <tr>
368
- <td align="left"><code>show_download_button</code></td>
369
- <td align="left" style="width: 25%;">
370
-
371
- ```python
372
- bool | None
373
- ```
374
-
375
- </td>
376
- <td align="left"><code>True</code></td>
377
- <td align="left">If True, will show a download button in the corner of the selected image. If False, the icon does not appear. Default is True.</td>
378
- </tr>
379
-
380
- <tr>
381
- <td align="left"><code>interactive</code></td>
382
- <td align="left" style="width: 25%;">
383
-
384
- ```python
385
- bool | None
386
- ```
387
-
388
- </td>
389
- <td align="left"><code>None</code></td>
390
- <td align="left">If True, the gallery will be interactive, allowing the user to upload images. If False, the gallery will be static. Default is True.</td>
391
- </tr>
392
-
393
- <tr>
394
- <td align="left"><code>type</code></td>
395
- <td align="left" style="width: 25%;">
396
-
397
- ```python
398
- Literal["numpy", "pil", "filepath"]
399
- ```
400
-
401
- </td>
402
- <td align="left"><code>"filepath"</code></td>
403
- <td align="left">The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. If the image is SVG, the `type` is ignored and the filepath of the SVG is returned.</td>
404
- </tr>
405
-
406
- <tr>
407
- <td align="left"><code>show_fullscreen_button</code></td>
408
- <td align="left" style="width: 25%;">
409
-
410
- ```python
411
- bool
412
- ```
413
-
414
- </td>
415
- <td align="left"><code>True</code></td>
416
- <td align="left">If True, will show a fullscreen icon in the corner of the component that allows user to view the gallery in fullscreen mode. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.</td>
417
- </tr>
418
- </tbody></table>
419
-
420
-
421
- ### Events
422
-
423
- | name | description |
424
- |:-----|:------------|
425
- | `select` | Event listener for when the user selects or deselects the MediaGallery. Uses event data gradio.SelectData to carry `value` referring to the label of the MediaGallery, and `selected` to refer to state of the MediaGallery. See EventData documentation on how to use this event data |
426
- | `upload` | This listener is triggered when the user uploads a file into the MediaGallery. |
427
- | `change` | Triggered when the value of the MediaGallery changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. |
428
- | `preview_close` | This event is triggered when the MediaGallery preview is closed by the user |
429
- | `preview_open` | This event is triggered when the MediaGallery preview is opened by the user |
430
-
431
-
432
-
433
- ### User function
434
-
435
- The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
436
-
437
- - When used as an Input, the component only impacts the input signature of the user function.
438
- - When used as an output, the component only impacts the return signature of the user function.
439
-
440
- The code snippet below is accurate in cases where the component is used as both an input and an output.
441
-
442
- - **As output:** passes the list of images or videos as a list of (media, caption) tuples, or a list of (media, None) tuples if no captions are provided (which is usually the case). Images can be a `str` file path, a `numpy` array, or a `PIL.Image` object depending on `type`. Videos are always a `str` file path.
443
- - **As input:** expects the function to return a `list` of images or videos, or a `list` of (media, `str` caption) tuples. Each image can be a `str` file path, a `numpy` array, or a `PIL.Image` object. Each video can be a `str` file path.
444
-
445
- ```python
446
- def predict(
-     value: list[tuple[str, str | None]]
-     | list[tuple[PIL.Image.Image, str | None]]
-     | list[tuple[numpy.ndarray, str | None]]
-     | None
- ) -> (
-     list[
-         numpy.ndarray
-         | PIL.Image.Image
-         | pathlib.Path
-         | str
-         | tuple[numpy.ndarray | PIL.Image.Image | pathlib.Path | str, str]
-     ]
-     | None
- ):
-     return value
489
- ```
490
-
mediagallery/backend/gradio_mediagallery/__init__.py DELETED
@@ -1,4 +0,0 @@
1
-
2
- from .mediagallery import MediaGallery
3
-
4
- __all__ = ['MediaGallery']
mediagallery/backend/gradio_mediagallery/mediagallery.py DELETED
@@ -1,374 +0,0 @@
1
- """gr.Gallery() component."""
2
-
3
- from __future__ import annotations
4
-
5
- from collections.abc import Callable, Sequence
6
- from concurrent.futures import ThreadPoolExecutor
7
- from pathlib import Path
8
- from typing import (
9
- TYPE_CHECKING,
10
- Any,
11
- Literal,
12
- Optional,
13
- Union,
14
- )
15
- from urllib.parse import quote, urlparse
16
-
17
- import numpy as np
18
- import PIL.Image
19
- from gradio_client import handle_file
20
- from gradio_client import utils as client_utils
21
- from gradio_client.documentation import document
22
- from gradio_client.utils import is_http_url_like
23
-
24
- from gradio import image_utils, processing_utils, utils
25
- try:
26
- from gradio import wasm_utils
27
- IS_WASM = wasm_utils.IS_WASM
28
- except ImportError:
29
- IS_WASM = False
30
- from gradio.components.base import Component
31
- from gradio.data_classes import FileData, GradioModel, GradioRootModel, ImageData
32
- from gradio.events import EventListener, Events
33
- from gradio.exceptions import Error
34
- from gradio.i18n import I18nData
35
-
36
- if TYPE_CHECKING:
37
- from gradio.components import Timer
38
-
39
- GalleryMediaType = Union[np.ndarray, PIL.Image.Image, Path, str]
40
- CaptionedGalleryMediaType = tuple[GalleryMediaType, str]
41
-
42
-
43
- class GalleryImage(GradioModel):
44
- image: ImageData
45
- caption: Optional[str] = None
46
-
47
-
48
- class GalleryVideo(GradioModel):
49
- video: FileData
50
- caption: Optional[str] = None
51
-
52
-
53
- class GalleryAudio(GradioModel):
54
- audio: FileData
55
- caption: Optional[str] = None
56
-
57
-
58
- class GalleryData(GradioRootModel):
59
- root: list[Union[GalleryImage, GalleryVideo, GalleryAudio]]
60
-
61
-
62
- # File extension mappings for media type detection
- IMAGE_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.webp', '.gif', '.bmp', '.tiff', '.svg'}
- VIDEO_EXTENSIONS = {'.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.mpg', '.mpeg', '.m4v', '.3gp', '.3g2', '.3gpp'}
- AUDIO_EXTENSIONS = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac'}
-
-
- class MediaGallery(Component):
-     """
-     Creates a gallery component that allows displaying a grid of images or videos, and optionally captions. If used as an input, the user can upload images or videos to the gallery.
-     If used as an output, the user can click on individual images or videos to view them at a higher resolution.
-
-     Demos: fake_gan
-     """
-
-     EVENTS = [
-         Events.select,
-         Events.upload,
-         Events.change,
-         EventListener(
-             "preview_close",
-             doc="This event is triggered when the MediaGallery preview is closed by the user",
-         ),
-         EventListener(
-             "preview_open",
-             doc="This event is triggered when the MediaGallery preview is opened by the user",
-         ),
-     ]
-
-     data_model = GalleryData
-
-     def __init__(
-         self,
-         value: (
-             Sequence[np.ndarray | PIL.Image.Image | str | Path | tuple]
-             | Callable
-             | None
-         ) = None,
-         *,
-         format: str = "webp",
-         file_types: list[str] | None = None,
-         label: str | I18nData | None = None,
-         every: Timer | float | None = None,
-         inputs: Component | Sequence[Component] | set[Component] | None = None,
-         show_label: bool | None = None,
-         container: bool = True,
-         scale: int | None = None,
-         min_width: int = 160,
-         visible: bool = True,
-         elem_id: str | None = None,
-         elem_classes: list[str] | str | None = None,
-         render: bool = True,
-         key: int | str | tuple[int | str, ...] | None = None,
-         preserved_by_key: list[str] | str | None = "value",
-         columns: int | None = 2,
-         rows: int | None = None,
-         height: int | float | str | None = None,
-         allow_preview: bool = True,
-         preview: bool | None = None,
-         selected_index: int | None = None,
-         object_fit: (
-             Literal["contain", "cover", "fill", "none", "scale-down"] | None
-         ) = None,
-         show_share_button: bool | None = None,
-         show_download_button: bool | None = True,
-         interactive: bool | None = None,
-         type: Literal["numpy", "pil", "filepath"] = "filepath",
-         show_fullscreen_button: bool = True,
-     ):
-         """
-         Parameters:
-             value: List of images or videos to display in the gallery by default. If a function is provided, the function will be called each time the app loads to set the initial value of this component.
-             format: Format to save images before they are returned to the frontend, such as 'jpeg' or 'png'. This parameter only applies to images that are returned from the prediction function as numpy arrays or PIL Images. The format should be supported by the PIL library.
-             file_types: List of file extensions or types of files to be uploaded (e.g. ['image', '.mp4']), when this is used as an input component. "image" allows only image files to be uploaded, "video" allows only video files to be uploaded, ".mp4" allows only mp4 files to be uploaded, etc. If None, any image and video file types are allowed.
-             label: the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
-             every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
-             inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
-             show_label: if True, will display label.
-             container: If True, will place the component in a container - providing some extra padding around the border.
-             scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
-             min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
-             visible: If False, component will be hidden.
-             elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
-             elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
-             render: If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
-             key: in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component. Properties set in 'preserved_by_key' are not reset across a re-render.
-             preserved_by_key: A list of parameters from this component's constructor. Inside a gr.render() function, if a component is re-rendered with the same key, these (and only these) parameters will be preserved in the UI (if they have been changed by the user or an event listener) instead of re-rendered based on the values provided during constructor.
-             columns: Represents the number of images that should be shown in one row.
-             rows: Represents the number of rows in the image grid.
-             height: The height of the gallery component, specified in pixels if a number is passed, or in CSS units if a string is passed. If more images are displayed than can fit in the height, a scrollbar will appear.
-             allow_preview: If True, images in the gallery will be enlarged when they are clicked. Default is True.
-             preview: If True, MediaGallery will start in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size. Only works if allow_preview is True.
-             selected_index: The index of the image that should be initially selected. If None, no image will be selected at start. If provided, will set MediaGallery to preview mode unless allow_preview is set to False.
-             object_fit: CSS object-fit property for the thumbnail images in the gallery. Can be "contain", "cover", "fill", "none", or "scale-down".
-             show_share_button: If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.
-             show_download_button: If True, will show a download button in the corner of the selected image. If False, the icon does not appear. Default is True.
-             interactive: If True, the gallery will be interactive, allowing the user to upload images. If False, the gallery will be static. Default is True.
-             type: The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. If the image is SVG, the `type` is ignored and the filepath of the SVG is returned.
-             show_fullscreen_button: If True, will show a fullscreen icon in the corner of the component that allows user to view the gallery in fullscreen mode. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.
-         """
-         self.format = format
-         self.columns = columns
-         self.rows = rows
-         self.height = height
-         self.preview = preview
-         self.object_fit = object_fit
-         self.allow_preview = allow_preview
-         self.show_download_button = (
-             (utils.get_space() is not None)
-             if show_download_button is None
-             else show_download_button
-         )
-         self.selected_index = selected_index
-         self.type = type
-         self.show_fullscreen_button = show_fullscreen_button
-         self.file_types = file_types
-
-         self.show_share_button = (
-             (utils.get_space() is not None)
-             if show_share_button is None
-             else show_share_button
-         )
-         super().__init__(
-             label=label,
-             every=every,
-             inputs=inputs,
-             show_label=show_label,
-             container=container,
-             scale=scale,
-             min_width=min_width,
-             visible=visible,
-             elem_id=elem_id,
-             elem_classes=elem_classes,
-             render=render,
-             key=key,
-             preserved_by_key=preserved_by_key,
-             value=value,
-             interactive=interactive,
-         )
-         self._value_description = f"a list of {'string filepaths' if type == 'filepath' else 'numpy arrays' if type == 'numpy' else 'PIL images'}"
-
-     def preprocess(
-         self, payload: GalleryData | None
-     ) -> (
-         list[tuple[str, str | None]]
-         | list[tuple[PIL.Image.Image, str | None]]
-         | list[tuple[np.ndarray, str | None]]
-         | None
-     ):
-         """
-         Parameters:
-             payload: a list of images or videos, or list of (media, caption) tuples
-         Returns:
-             Passes the list of images or videos as a list of (media, caption) tuples, or a list of (media, None) tuples if no captions are provided (which is usually the case). Images can be a `str` file path, a `numpy` array, or a `PIL.Image` object depending on `type`. Videos are always `str` file path.
-         """
-         if payload is None or not payload.root:
-             return None
-         data = []
-         for gallery_element in payload.root:
-             if isinstance(gallery_element, GalleryVideo):
-                 file_path = gallery_element.video.path
-             elif isinstance(gallery_element, GalleryAudio):
-                 file_path = gallery_element.audio.path
-             else:
-                 file_path = gallery_element.image.path or ""
-             if self.file_types and not client_utils.is_valid_file(
-                 file_path, self.file_types
-             ):
-                 raise Error(
-                     f"Invalid file type. Please upload a file that is one of these formats: {self.file_types}"
-                 )
-             else:
-                 # Return file path for video and audio, convert images based on type
-                 if isinstance(gallery_element, GalleryVideo):
-                     media = gallery_element.video.path
-                 elif isinstance(gallery_element, GalleryAudio):
-                     media = gallery_element.audio.path
-                 else:
-                     media = self.convert_to_type(gallery_element.image.path, self.type)  # type: ignore
-                 data.append((media, gallery_element.caption))
-         return data
-
-     def postprocess(
-         self,
-         value: list[GalleryMediaType | CaptionedGalleryMediaType] | None,
-     ) -> GalleryData:
-         """
-         Parameters:
-             value: Expects the function to return a `list` of images or videos, or `list` of (media, `str` caption) tuples. Each image can be a `str` file path, a `numpy` array, or a `PIL.Image` object. Each video can be a `str` file path.
-         Returns:
-             a list of images or videos, or list of (media, caption) tuples
-         """
-         if value is None:
-             return GalleryData(root=[])
-         if isinstance(value, str):
-             raise ValueError(
-                 "The `value` passed into `gr.Gallery` must be a list of images or videos, or list of (media, caption) tuples."
-             )
-         output = []
-
-         def _save(img):
-             url = None
-             caption = None
-             orig_name = None
-             mime_type = None
-             if isinstance(img, (tuple, list)):
-                 img, caption = img
-             if isinstance(img, np.ndarray):
-                 file = processing_utils.save_img_array_to_cache(
-                     img, cache_dir=self.GRADIO_CACHE, format=self.format
-                 )
-                 file_path = str(utils.abspath(file))
-             elif isinstance(img, PIL.Image.Image):
-                 file = processing_utils.save_pil_to_cache(
-                     img, cache_dir=self.GRADIO_CACHE, format=self.format
-                 )
-                 file_path = str(utils.abspath(file))
-             elif isinstance(img, str):
-                 mime_type = client_utils.get_mimetype(img)
-                 if img.lower().endswith(".svg"):
-                     svg_content = image_utils.extract_svg_content(img)
-                     orig_name = Path(img).name
-                     url = f"data:image/svg+xml,{quote(svg_content)}"
-                     file_path = None
-                 elif is_http_url_like(img):
-                     url = img
-                     orig_name = Path(urlparse(img).path).name
-                     file_path = img
-                 else:
-                     url = None
-                     orig_name = Path(img).name
-                     file_path = img
-             elif isinstance(img, Path):
-                 file_path = str(img)
-                 orig_name = img.name
-                 mime_type = client_utils.get_mimetype(file_path)
-             else:
-                 raise ValueError(f"Cannot process type as image: {type(img)}")
-             # Determine media type from mime_type or file extension
-             if mime_type is not None and "video" in mime_type:
-                 return GalleryVideo(
-                     video=FileData(
-                         path=file_path,  # type: ignore
-                         url=url,
-                         orig_name=orig_name,
-                         mime_type=mime_type,
-                     ),
-                     caption=caption,
-                 )
-             elif mime_type is not None and "audio" in mime_type:
-                 return GalleryAudio(
-                     audio=FileData(
-                         path=file_path,  # type: ignore
-                         url=url,
-                         orig_name=orig_name,
-                         mime_type=mime_type,
-                     ),
-                     caption=caption,
-                 )
-             else:
-                 # Check file extension for audio files (fallback)
-                 ext = Path(orig_name or file_path or "").suffix.lower() if (orig_name or file_path) else ""
-                 if ext in AUDIO_EXTENSIONS:
-                     return GalleryAudio(
-                         audio=FileData(
-                             path=file_path,  # type: ignore
-                             url=url,
-                             orig_name=orig_name,
-                             mime_type=mime_type or "audio/mpeg",
-                         ),
-                         caption=caption,
-                     )
-                 return GalleryImage(
-                     image=ImageData(
-                         path=file_path,
-                         url=url,
-                         orig_name=orig_name,
-                         mime_type=mime_type,
-                     ),
-                     caption=caption,
-                 )
-
-         if IS_WASM:
-             for img in value:
-                 output.append(_save(img))
-         else:
-             with ThreadPoolExecutor() as executor:
-                 for o in executor.map(_save, value):
-                     output.append(o)
-         return GalleryData(root=output)
-
-     @staticmethod
-     def convert_to_type(img: str, type: Literal["filepath", "numpy", "pil"]):
-         if type == "filepath":
-             return img
-         else:
-             converted_image = PIL.Image.open(img)
-             if type == "numpy":
-                 converted_image = np.array(converted_image)
-             return converted_image
-
-     def example_payload(self) -> Any:
-         return [
-             {
-                 "image": handle_file(
-                     "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
-                 )
-             },
-         ]
-
-     def example_value(self) -> Any:
-         return [
-             "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
-         ]
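
Editor's note: the `_save()` fallback above prefers the MIME type and only falls back to file extensions for audio; everything unrecognized is treated as an image. A minimal standalone sketch of that classification logic, for readers skimming the diff (the `classify_media` helper is hypothetical; only the extension set is copied from the deleted module):

```python
from pathlib import Path

# Copied from the deleted module above.
AUDIO_EXTENSIONS = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac'}

def classify_media(path: str | None, mime_type: str | None = None) -> str:
    """Mirror _save(): MIME type first, then an audio-extension fallback.

    Note the deleted code has no video-extension fallback: a video file
    without a MIME type would be classified as an image.
    """
    if mime_type and "video" in mime_type:
        return "video"
    if mime_type and "audio" in mime_type:
        return "audio"
    ext = Path(path or "").suffix.lower()
    if ext in AUDIO_EXTENSIONS:
        return "audio"
    return "image"  # images are the default, as in _save()

assert classify_media("clip.mp4", mime_type="video/mp4") == "video"
assert classify_media("song.flac") == "audio"   # extension fallback
assert classify_media("photo.jpg") == "image"
```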
 
mediagallery/backend/gradio_mediagallery/templates/component/assets/worker-BAOIWoxA.js DELETED
@@ -1 +0,0 @@
- (function(){"use strict";const i="https://unpkg.com/@ffmpeg/[email protected]/dist/umd/ffmpeg-core.js";var E;(function(t){t.LOAD="LOAD",t.EXEC="EXEC",t.FFPROBE="FFPROBE",t.WRITE_FILE="WRITE_FILE",t.READ_FILE="READ_FILE",t.DELETE_FILE="DELETE_FILE",t.RENAME="RENAME",t.CREATE_DIR="CREATE_DIR",t.LIST_DIR="LIST_DIR",t.DELETE_DIR="DELETE_DIR",t.ERROR="ERROR",t.DOWNLOAD="DOWNLOAD",t.PROGRESS="PROGRESS",t.LOG="LOG",t.MOUNT="MOUNT",t.UNMOUNT="UNMOUNT"})(E||(E={}));const f=new Error("unknown message type"),a=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),u=new Error("failed to import ffmpeg-core.js");let r;const O=async({coreURL:t,wasmURL:n,workerURL:e})=>{const o=!r;try{t||(t=i),importScripts(t)}catch{if((!t||t===i)&&(t=i.replace("/umd/","/esm/")),self.createFFmpegCore=(await import(t)).default,!self.createFFmpegCore)throw u}const s=t,c=n||t.replace(/.js$/g,".wasm"),p=e||t.replace(/.js$/g,".worker.js");return r=await self.createFFmpegCore({mainScriptUrlOrBlob:`${s}#${btoa(JSON.stringify({wasmURL:c,workerURL:p}))}`}),r.setLogger(R=>self.postMessage({type:E.LOG,data:R})),r.setProgress(R=>self.postMessage({type:E.PROGRESS,data:R})),o},m=({args:t,timeout:n=-1})=>{r.setTimeout(n),r.exec(...t);const e=r.ret;return r.reset(),e},l=({args:t,timeout:n=-1})=>{r.setTimeout(n),r.ffprobe(...t);const e=r.ret;return r.reset(),e},D=({path:t,data:n})=>(r.FS.writeFile(t,n),!0),S=({path:t,encoding:n})=>r.FS.readFile(t,{encoding:n}),I=({path:t})=>(r.FS.unlink(t),!0),L=({oldPath:t,newPath:n})=>(r.FS.rename(t,n),!0),N=({path:t})=>(r.FS.mkdir(t),!0),A=({path:t})=>{const n=r.FS.readdir(t),e=[];for(const o of n){const s=r.FS.stat(`${t}/${o}`),c=r.FS.isDir(s.mode);e.push({name:o,isDir:c})}return e},k=({path:t})=>(r.FS.rmdir(t),!0),w=({fsType:t,options:n,mountPoint:e})=>{const o=t,s=r.FS.filesystems[o];return s?(r.FS.mount(s,n,e),!0):!1},b=({mountPoint:t})=>(r.FS.unmount(t),!0);self.onmessage=async({data:{id:t,type:n,data:e}})=>{const o=[];let s;try{if(n!==E.LOAD&&!r)throw a;switch(n){case E.LOAD:s=await O(e);break;case E.EXEC:s=m(e);break;case E.FFPROBE:s=l(e);break;case E.WRITE_FILE:s=D(e);break;case E.READ_FILE:s=S(e);break;case E.DELETE_FILE:s=I(e);break;case E.RENAME:s=L(e);break;case E.CREATE_DIR:s=N(e);break;case E.LIST_DIR:s=A(e);break;case E.DELETE_DIR:s=k(e);break;case E.MOUNT:s=w(e);break;case E.UNMOUNT:s=b(e);break;default:throw f}}catch(c){self.postMessage({id:t,type:E.ERROR,data:c.toString()});return}s instanceof Uint8Array&&o.push(s.buffer),self.postMessage({id:t,type:n,data:s},o)}})();
 
 
mediagallery/backend/gradio_mediagallery/templates/component/index.js DELETED
The diff for this file is too large to render. See raw diff
 
mediagallery/backend/gradio_mediagallery/templates/component/style.css DELETED
The diff for this file is too large to render. See raw diff
 
mediagallery/backend/gradio_mediagallery/templates/example/index.js DELETED
@@ -1,308 +0,0 @@
- const {
-   SvelteComponent: M,
-   append_hydration: m,
-   attr: c,
-   children: v,
-   claim_element: u,
-   claim_space: C,
-   claim_text: k,
-   destroy_each: q,
-   detach: o,
-   element: d,
-   empty: j,
-   ensure_array_like: y,
-   get_svelte_dataset: H,
-   init: L,
-   insert_hydration: h,
-   noop: b,
-   safe_not_equal: T,
-   set_data: B,
-   space: G,
-   src_url_equal: g,
-   text: w,
-   toggle_class: _
- } = window.__gradio__svelte__internal;
- function E(r, e, i) {
-   const s = r.slice();
-   return s[3] = e[i], s;
- }
- function I(r) {
-   let e, i, s = y(
-     /*value*/
-     r[0].slice(0, 5)
-   ), l = [];
-   for (let n = 0; n < s.length; n += 1)
-     l[n] = V(E(r, s, n));
-   let t = (
-     /*value*/
-     r[0].length > 5 && D(r)
-   );
-   return {
-     c() {
-       e = d("div");
-       for (let n = 0; n < l.length; n += 1)
-         l[n].c();
-       i = G(), t && t.c(), this.h();
-     },
-     l(n) {
-       e = u(n, "DIV", { class: !0 });
-       var f = v(e);
-       for (let a = 0; a < l.length; a += 1)
-         l[a].l(f);
-       i = C(f), t && t.l(f), f.forEach(o), this.h();
-     },
-     h() {
-       c(e, "class", "images-wrapper svelte-nb996j");
-     },
-     m(n, f) {
-       h(n, e, f);
-       for (let a = 0; a < l.length; a += 1)
-         l[a] && l[a].m(e, null);
-       m(e, i), t && t.m(e, null);
-     },
-     p(n, f) {
-       if (f & /*value*/
-       1) {
-         s = y(
-           /*value*/
-           n[0].slice(0, 5)
-         );
-         let a;
-         for (a = 0; a < s.length; a += 1) {
-           const p = E(n, s, a);
-           l[a] ? l[a].p(p, f) : (l[a] = V(p), l[a].c(), l[a].m(e, i));
-         }
-         for (; a < l.length; a += 1)
-           l[a].d(1);
-         l.length = s.length;
-       }
-       /*value*/
-       n[0].length > 5 ? t ? t.p(n, f) : (t = D(n), t.c(), t.m(e, null)) : t && (t.d(1), t = null);
-     },
-     d(n) {
-       n && o(e), q(l, n), t && t.d();
-     }
-   };
- }
- function N(r) {
-   let e, i = '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="svelte-nb996j"><path d="M9 18V5l12-2v13" class="svelte-nb996j"></path><circle cx="6" cy="18" r="3" class="svelte-nb996j"></circle><circle cx="18" cy="16" r="3" class="svelte-nb996j"></circle></svg>';
-   return {
-     c() {
-       e = d("div"), e.innerHTML = i, this.h();
-     },
-     l(s) {
-       e = u(s, "DIV", { class: !0, "data-svelte-h": !0 }), H(e) !== "svelte-19pyv4n" && (e.innerHTML = i), this.h();
-     },
-     h() {
-       c(e, "class", "image-container audio svelte-nb996j");
-     },
-     m(s, l) {
-       h(s, e, l);
-     },
-     p: b,
-     d(s) {
-       s && o(e);
-     }
-   };
- }
- function O(r) {
-   let e, i, s;
-   return {
-     c() {
-       e = d("div"), i = d("video"), this.h();
-     },
-     l(l) {
-       e = u(l, "DIV", { class: !0 });
-       var t = v(e);
-       i = u(t, "VIDEO", { src: !0, preload: !0, class: !0 }), v(i).forEach(o), t.forEach(o), this.h();
-     },
-     h() {
-       g(i.src, s = /*item*/
-       r[3].video.url) || c(i, "src", s), i.controls = !1, i.muted = !0, c(i, "preload", "metadata"), c(i, "class", "svelte-nb996j"), c(e, "class", "image-container svelte-nb996j");
-     },
-     m(l, t) {
-       h(l, e, t), m(e, i);
-     },
-     p(l, t) {
-       t & /*value*/
-       1 && !g(i.src, s = /*item*/
-       l[3].video.url) && c(i, "src", s);
-     },
-     d(l) {
-       l && o(e);
-     }
-   };
- }
- function S(r) {
-   let e, i, s, l;
-   return {
-     c() {
-       e = d("div"), i = d("img"), this.h();
-     },
-     l(t) {
-       e = u(t, "DIV", { class: !0 });
-       var n = v(e);
-       i = u(n, "IMG", { src: !0, alt: !0, class: !0 }), n.forEach(o), this.h();
-     },
-     h() {
-       g(i.src, s = /*item*/
-       r[3].image.url) || c(i, "src", s), c(i, "alt", l = /*item*/
-       r[3].caption || ""), c(i, "class", "svelte-nb996j"), c(e, "class", "image-container svelte-nb996j");
-     },
-     m(t, n) {
-       h(t, e, n), m(e, i);
-     },
-     p(t, n) {
-       n & /*value*/
-       1 && !g(i.src, s = /*item*/
-       t[3].image.url) && c(i, "src", s), n & /*value*/
-       1 && l !== (l = /*item*/
-       t[3].caption || "") && c(i, "alt", l);
-     },
-     d(t) {
-       t && o(e);
-     }
-   };
- }
- function V(r) {
-   let e;
-   function i(t, n) {
-     if ("image" in /*item*/
-     t[3] && /*item*/
-     t[3].image) return S;
-     if ("video" in /*item*/
-     t[3] && /*item*/
-     t[3].video) return O;
-     if ("audio" in /*item*/
-     t[3] && /*item*/
-     t[3].audio) return N;
-   }
-   let s = i(r), l = s && s(r);
-   return {
-     c() {
-       l && l.c(), e = j();
-     },
-     l(t) {
-       l && l.l(t), e = j();
-     },
-     m(t, n) {
-       l && l.m(t, n), h(t, e, n);
-     },
-     p(t, n) {
-       s === (s = i(t)) && l ? l.p(t, n) : (l && l.d(1), l = s && s(t), l && (l.c(), l.m(e.parentNode, e)));
-     },
-     d(t) {
-       t && o(e), l && l.d(t);
-     }
-   };
- }
- function D(r) {
-   let e, i, s = (
-     /*value*/
-     r[0].length - 5 + ""
-   ), l;
-   return {
-     c() {
-       e = d("div"), i = w("+"), l = w(s), this.h();
-     },
-     l(t) {
-       e = u(t, "DIV", { class: !0 });
-       var n = v(e);
-       i = k(n, "+"), l = k(n, s), n.forEach(o), this.h();
-     },
-     h() {
-       c(e, "class", "more-indicator svelte-nb996j");
-     },
-     m(t, n) {
-       h(t, e, n), m(e, i), m(e, l);
-     },
-     p(t, n) {
-       n & /*value*/
-       1 && s !== (s = /*value*/
-       t[0].length - 5 + "") && B(l, s);
-     },
-     d(t) {
-       t && o(e);
-     }
-   };
- }
- function z(r) {
-   let e, i = (
-     /*value*/
-     r[0] && /*value*/
-     r[0].length > 0 && I(r)
-   );
-   return {
-     c() {
-       e = d("div"), i && i.c(), this.h();
-     },
-     l(s) {
-       e = u(s, "DIV", { class: !0 });
-       var l = v(e);
-       i && i.l(l), l.forEach(o), this.h();
-     },
-     h() {
-       c(e, "class", "container svelte-nb996j"), _(
-         e,
-         "table",
-         /*type*/
-         r[1] === "table"
-       ), _(
-         e,
-         "gallery",
-         /*type*/
-         r[1] === "gallery"
-       ), _(
-         e,
-         "selected",
-         /*selected*/
-         r[2]
-       );
-     },
-     m(s, l) {
-       h(s, e, l), i && i.m(e, null);
-     },
-     p(s, [l]) {
-       /*value*/
-       s[0] && /*value*/
-       s[0].length > 0 ? i ? i.p(s, l) : (i = I(s), i.c(), i.m(e, null)) : i && (i.d(1), i = null), l & /*type*/
-       2 && _(
-         e,
-         "table",
-         /*type*/
-         s[1] === "table"
-       ), l & /*type*/
-       2 && _(
-         e,
-         "gallery",
-         /*type*/
-         s[1] === "gallery"
-       ), l & /*selected*/
-       4 && _(
-         e,
-         "selected",
-         /*selected*/
-         s[2]
-       );
-     },
-     i: b,
-     o: b,
-     d(s) {
-       s && o(e), i && i.d();
-     }
-   };
- }
- function A(r, e, i) {
-   let { value: s } = e, { type: l } = e, { selected: t = !1 } = e;
-   return r.$$set = (n) => {
-     "value" in n && i(0, s = n.value), "type" in n && i(1, l = n.type), "selected" in n && i(2, t = n.selected);
-   }, [s, l, t];
- }
- class F extends M {
-   constructor(e) {
-     super(), L(this, e, A, z, T, { value: 0, type: 1, selected: 2 });
-   }
- }
- export {
-   F as default
- };
 
mediagallery/backend/gradio_mediagallery/templates/example/style.css DELETED
@@ -1 +0,0 @@
- .container.svelte-nb996j.svelte-nb996j{border-radius:var(--radius-lg);overflow:hidden;border:2px solid transparent;box-sizing:border-box}.container.selected.svelte-nb996j.svelte-nb996j{border-color:var(--border-color-accent)}tr:hover .container.svelte-nb996j.svelte-nb996j,tr:hover .images-wrapper.svelte-nb996j.svelte-nb996j,tr:hover .image-container.svelte-nb996j.svelte-nb996j,tr:hover .image-container.svelte-nb996j img.svelte-nb996j,tr:hover .image-container.svelte-nb996j video.svelte-nb996j{transform:none!important;scale:none!important}.container.svelte-nb996j .svelte-nb996j,.images-wrapper.svelte-nb996j.svelte-nb996j,.image-container.svelte-nb996j.svelte-nb996j{box-sizing:border-box}.images-wrapper.svelte-nb996j.svelte-nb996j{display:flex;gap:var(--spacing-sm)}.container.table.svelte-nb996j .images-wrapper.svelte-nb996j{flex-direction:row;align-items:center;padding:var(--spacing-sm);border:1px solid var(--border-color-primary);border-radius:var(--radius-lg);background:var(--background-fill-secondary)}.container.gallery.svelte-nb996j .images-wrapper.svelte-nb996j{flex-direction:row;gap:0}.image-container.svelte-nb996j.svelte-nb996j{position:relative;flex-shrink:0}.container.table.svelte-nb996j .image-container.svelte-nb996j{width:var(--size-12);height:var(--size-12)}.container.gallery.svelte-nb996j .image-container.svelte-nb996j{width:var(--size-20);height:var(--size-20);margin-left:calc(-1 * var(--size-8))}.container.gallery.svelte-nb996j .image-container.svelte-nb996j:first-child{margin-left:0}.more-indicator.svelte-nb996j.svelte-nb996j{display:flex;align-items:center;justify-content:center;font-size:var(--text-sm);font-weight:700;color:var(--body-text-color-subdued);background:var(--background-fill-secondary);border-radius:var(--radius-md)}.container.table.svelte-nb996j .more-indicator.svelte-nb996j{width:var(--size-12);height:var(--size-12)}.container.gallery.svelte-nb996j .more-indicator.svelte-nb996j{width:var(--size-20);height:var(--size-20);margin-left:calc(-1 * var(--size-8))}.image-container.svelte-nb996j img.svelte-nb996j,.image-container.svelte-nb996j video.svelte-nb996j{width:100%;height:100%;object-fit:cover;border-radius:var(--radius-md)}.image-container.audio.svelte-nb996j.svelte-nb996j{display:flex;align-items:center;justify-content:center;background:linear-gradient(135deg,#667eea,#764ba2);border-radius:var(--radius-md);color:#fff}.container.svelte-nb996j.svelte-nb996j,.container.svelte-nb996j .svelte-nb996j,.image-container.svelte-nb996j.svelte-nb996j,.image-container.svelte-nb996j img.svelte-nb996j,.image-container.svelte-nb996j video.svelte-nb996j{transition:none!important}.container.svelte-nb996j.svelte-nb996j:hover,.image-container.svelte-nb996j.svelte-nb996j:hover,.image-container.svelte-nb996j:hover img.svelte-nb996j,.image-container.svelte-nb996j:hover video.svelte-nb996j{transform:none!important;filter:none!important;opacity:1!important}
 
 
mediagallery/demo/__init__.py DELETED
File without changes
mediagallery/demo/app.py DELETED
@@ -1,15 +0,0 @@
-
- import gradio as gr
- from gradio_mediagallery import MediaGallery
-
-
- example = MediaGallery().example_value()
-
- with gr.Blocks() as demo:
-     with gr.Row():
-         MediaGallery(label="Blank"),  # blank component
-         MediaGallery(value=example, label="Populated"),  # populated component
-
-
- if __name__ == "__main__":
-     demo.launch()
 
mediagallery/demo/css.css DELETED
@@ -1,157 +0,0 @@
- html {
-   font-family: Inter;
-   font-size: 16px;
-   font-weight: 400;
-   line-height: 1.5;
-   -webkit-text-size-adjust: 100%;
-   background: #fff;
-   color: #323232;
-   -webkit-font-smoothing: antialiased;
-   -moz-osx-font-smoothing: grayscale;
-   text-rendering: optimizeLegibility;
- }
-
- :root {
-   --space: 1;
-   --vspace: calc(var(--space) * 1rem);
-   --vspace-0: calc(3 * var(--space) * 1rem);
-   --vspace-1: calc(2 * var(--space) * 1rem);
-   --vspace-2: calc(1.5 * var(--space) * 1rem);
-   --vspace-3: calc(0.5 * var(--space) * 1rem);
- }
-
- .app {
-   max-width: 748px !important;
- }
-
- .prose p {
-   margin: var(--vspace) 0;
-   line-height: var(--vspace * 2);
-   font-size: 1rem;
- }
-
- code {
-   font-family: "Inconsolata", sans-serif;
-   font-size: 16px;
- }
-
- h1,
- h1 code {
-   font-weight: 400;
-   line-height: calc(2.5 / var(--space) * var(--vspace));
- }
-
- h1 code {
-   background: none;
-   border: none;
-   letter-spacing: 0.05em;
-   padding-bottom: 5px;
-   position: relative;
-   padding: 0;
- }
-
- h2 {
-   margin: var(--vspace-1) 0 var(--vspace-2) 0;
-   line-height: 1em;
- }
-
- h3,
- h3 code {
-   margin: var(--vspace-1) 0 var(--vspace-2) 0;
-   line-height: 1em;
- }
-
- h4,
- h5,
- h6 {
-   margin: var(--vspace-3) 0 var(--vspace-3) 0;
-   line-height: var(--vspace);
- }
-
- .bigtitle,
- h1,
- h1 code {
-   font-size: calc(8px * 4.5);
-   word-break: break-word;
- }
-
- .title,
- h2,
- h2 code {
-   font-size: calc(8px * 3.375);
-   font-weight: lighter;
-   word-break: break-word;
-   border: none;
-   background: none;
- }
-
- .subheading1,
- h3,
- h3 code {
-   font-size: calc(8px * 1.8);
-   font-weight: 600;
-   border: none;
-   background: none;
-   letter-spacing: 0.1em;
-   text-transform: uppercase;
- }
-
- h2 code {
-   padding: 0;
-   position: relative;
-   letter-spacing: 0.05em;
- }
-
- blockquote {
-   font-size: calc(8px * 1.1667);
-   font-style: italic;
-   line-height: calc(1.1667 * var(--vspace));
-   margin: var(--vspace-2) var(--vspace-2);
- }
-
- .subheading2,
- h4 {
-   font-size: calc(8px * 1.4292);
-   text-transform: uppercase;
-   font-weight: 600;
- }
-
- .subheading3,
- h5 {
-   font-size: calc(8px * 1.2917);
-   line-height: calc(1.2917 * var(--vspace));
-
-   font-weight: lighter;
-   text-transform: uppercase;
-   letter-spacing: 0.15em;
- }
-
- h6 {
-   font-size: calc(8px * 1.1667);
-   font-size: 1.1667em;
-   font-weight: normal;
-   font-style: italic;
-   font-family: "le-monde-livre-classic-byol", serif !important;
-   letter-spacing: 0px !important;
- }
-
- #start .md > *:first-child {
-   margin-top: 0;
- }
-
- h2 + h3 {
-   margin-top: 0;
- }
-
- .md hr {
-   border: none;
-   border-top: 1px solid var(--block-border-color);
-   margin: var(--vspace-2) 0 var(--vspace-2) 0;
- }
- .prose ul {
-   margin: var(--vspace-2) 0 var(--vspace-1) 0;
- }
-
- .gap {
-   gap: 0;
- }
 
mediagallery/demo/space.py DELETED
@@ -1,176 +0,0 @@
-
- import gradio as gr
- from app import demo as app
- import os
-
- _docs = {'MediaGallery': {'description': 'Creates a gallery component that allows displaying a grid of images or videos, and optionally captions. If used as an input, the user can upload images or videos to the gallery.\nIf used as an output, the user can click on individual images or videos to view them at a higher resolution.\n', 'members': {'__init__': {'value': {'type': 'Sequence[\n np.ndarray | PIL.Image.Image | str | Path | tuple\n ]\n | Callable\n | None', 'default': 'None', 'description': 'List of images or videos to display in the gallery by default. If a function is provided, the function will be called each time the app loads to set the initial value of this component.'}, 'format': {'type': 'str', 'default': '"webp"', 'description': "Format to save images before they are returned to the frontend, such as 'jpeg' or 'png'. This parameter only applies to images that are returned from the prediction function as numpy arrays or PIL Images. The format should be supported by the PIL library."}, 'file_types': {'type': 'list[str] | None', 'default': 'None', 'description': 'List of file extensions or types of files to be uploaded (e.g. [\'image\', \'.mp4\']), when this is used as an input component. "image" allows only image files to be uploaded, "video" allows only video files to be uploaded, ".mp4" allows only mp4 files to be uploaded, etc. If None, any image and video file types are allowed.'}, 'label': {'type': 'str | I18nData | None', 'default': 'None', 'description': 'the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.'}, 'every': {'type': 'Timer | float | None', 'default': 'None', 'description': 'Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.'}, 'inputs': {'type': 'Component | Sequence[Component] | set[Component] | None', 'default': 'None', 'description': 'Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.'}, 'show_label': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will display label.'}, 'container': {'type': 'bool', 'default': 'True', 'description': 'If True, will place the component in a container - providing some extra padding around the border.'}, 'scale': {'type': 'int | None', 'default': 'None', 'description': 'relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.'}, 'min_width': {'type': 'int', 'default': '160', 'description': 'minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.'}, 'visible': {'type': 'bool', 'default': 'True', 'description': 'If False, component will be hidden.'}, 'elem_id': {'type': 'str | None', 'default': 'None', 'description': 'An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'elem_classes': {'type': 'list[str] | str | None', 'default': 'None', 'description': 'An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'render': {'type': 'bool', 'default': 'True', 'description': 'If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.'}, 'key': {'type': 'int | str | tuple[int | str, ...] | None', 'default': 'None', 'description': "in a gr.render, Components with the same key across re-renders are treated as the same component, not a new component. Properties set in 'preserved_by_key' are not reset across a re-render."}, 'preserved_by_key': {'type': 'list[str] | str | None', 'default': '"value"', 'description': "A list of parameters from this component's constructor. Inside a gr.render() function, if a component is re-rendered with the same key, these (and only these) parameters will be preserved in the UI (if they have been changed by the user or an event listener) instead of re-rendered based on the values provided during constructor."}, 'columns': {'type': 'int | None', 'default': '2', 'description': 'Represents the number of images that should be shown in one row.'}, 'rows': {'type': 'int | None', 'default': 'None', 'description': 'Represents the number of rows in the image grid.'}, 'height': {'type': 'int | float | str | None', 'default': 'None', 'description': 'The height of the gallery component, specified in pixels if a number is passed, or in CSS units if a string is passed. If more images are displayed than can fit in the height, a scrollbar will appear.'}, 'allow_preview': {'type': 'bool', 'default': 'True', 'description': 'If True, images in the gallery will be enlarged when they are clicked. Default is True.'}, 'preview': {'type': 'bool | None', 'default': 'None', 'description': 'If True, MediaGallery will start in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size. Only works if allow_preview is True.'}, 'selected_index': {'type': 'int | None', 'default': 'None', 'description': 'The index of the image that should be initially selected. If None, no image will be selected at start. If provided, will set MediaGallery to preview mode unless allow_preview is set to False.'}, 'object_fit': {'type': 'Literal[\n "contain", "cover", "fill", "none", "scale-down"\n ]\n | None', 'default': 'None', 'description': 'CSS object-fit property for the thumbnail images in the gallery. Can be "contain", "cover", "fill", "none", or "scale-down".'}, 'show_share_button': {'type': 'bool | None', 'default': 'None', 'description': 'If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.'}, 'show_download_button': {'type': 'bool | None', 'default': 'True', 'description': 'If True, will show a download button in the corner of the selected image. If False, the icon does not appear. Default is True.'}, 'interactive': {'type': 'bool | None', 'default': 'None', 'description': 'If True, the gallery will be interactive, allowing the user to upload images. If False, the gallery will be static. Default is True.'}, 'type': {'type': 'Literal["numpy", "pil", "filepath"]', 'default': '"filepath"', 'description': 'The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. If the image is SVG, the `type` is ignored and the filepath of the SVG is returned.'}, 'show_fullscreen_button': {'type': 'bool', 'default': 'True', 'description': 'If True, will show a fullscreen icon in the corner of the component that allows user to view the gallery in fullscreen mode. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.'}}, 'postprocess': {'value': {'type': 'list[\n typing.Union[\n numpy.ndarray,\n PIL.Image.Image,\n pathlib.Path,\n str,\n tuple[\n typing.Union[\n numpy.ndarray,\n PIL.Image.Image,\n pathlib.Path,\n str,\n ],\n str,\n ],\n ][\n numpy.ndarray,\n PIL.Image.Image,\n pathlib.Path,\n str,\n tuple[\n typing.Union[\n numpy.ndarray,\n PIL.Image.Image,\n pathlib.Path,\n str,\n ][\n numpy.ndarray,\n PIL.Image.Image,\n pathlib.Path,\n str,\n ],\n str,\n ],\n ]\n ]\n | None', 'description': 'Expects the function to return a `list` of images or videos, or `list` of (media, `str` caption) tuples. Each image can be a `str` file path, a `numpy` array, or a `PIL.Image` object. Each video can be a `str` file path.'}}, 'preprocess': {'return': {'type': 'list[tuple[str, str | None]]\n | list[tuple[PIL.Image.Image, str | None]]\n | list[tuple[numpy.ndarray, str | None]]\n | None', 'description': 'Passes the list of images or videos as a list of (media, caption) tuples, or a list of (media, None) tuples if no captions are provided (which is usually the case). Images can be a `str` file path, a `numpy` array, or a `PIL.Image` object depending on `type`. Videos are always `str` file path.'}, 'value': None}}, 'events': {'select': {'type': None, 'default': None, 'description': 'Event listener for when the user selects or deselects the MediaGallery. Uses event data gradio.SelectData to carry `value` referring to the label of the MediaGallery, and `selected` to refer to state of the MediaGallery. See EventData documentation on how to use this event data'}, 'upload': {'type': None, 'default': None, 'description': 'This listener is triggered when the user uploads a file into the MediaGallery.'}, 'change': {'type': None, 'default': None, 'description': 'Triggered when the value of the MediaGallery changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input.'}, 'preview_close': {'type': None, 'default': None, 'description': 'This event is triggered when the MediaGallery preview is closed by the user'}, 'preview_open': {'type': None, 'default': None, 'description': 'This event is triggered when the MediaGallery preview is opened by the user'}}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'MediaGallery': []}}}
-
- abs_path = os.path.join(os.path.dirname(__file__), "css.css")
-
- with gr.Blocks(
-     css=abs_path,
-     theme=gr.themes.Default(
-         font_mono=[
-             gr.themes.GoogleFont("Inconsolata"),
-             "monospace",
-         ],
-     ),
- ) as demo:
-     gr.Markdown(
-         """
- # `gradio_mediagallery`
-
- <div style="display: flex; gap: 7px;">
- <a href="https://pypi.org/project/gradio_mediagallery/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_mediagallery"></a>
- </div>
-
- Python library for easily interacting with trained machine learning models
- """, elem_classes=["md-custom"], header_links=True)
-     app.render()
-     gr.Markdown(
-         """
- ## Installation
-
- ```bash
- pip install gradio_mediagallery
- ```
-
- ## Usage
-
- ```python
-
- import gradio as gr
- from gradio_mediagallery import MediaGallery
-
-
- example = MediaGallery().example_value()
-
- with gr.Blocks() as demo:
-     with gr.Row():
-         MediaGallery(label="Blank"),  # blank component
-         MediaGallery(value=example, label="Populated"),  # populated component
-
-
- if __name__ == "__main__":
-     demo.launch()
-
- ```
- """, elem_classes=["md-custom"], header_links=True)
-
-
-     gr.Markdown("""
- ## `MediaGallery`
-
- ### Initialization
- """, elem_classes=["md-custom"], header_links=True)
-
-     gr.ParamViewer(value=_docs["MediaGallery"]["members"]["__init__"], linkify=[])
-
-
-     gr.Markdown("### Events")
-     gr.ParamViewer(value=_docs["MediaGallery"]["events"], linkify=['Event'])
-
-
-
-
-     gr.Markdown("""
-
- ### User function
-
- The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
-
- - When used as an Input, the component only impacts the input signature of the user function.
- - When used as an output, the component only impacts the return signature of the user function.
-
- The code snippet below is accurate in cases where the component is used as both an input and an output.
-
- - **As input:** Is passed, passes the list of images or videos as a list of (media, caption) tuples, or a list of (media, None) tuples if no captions are provided (which is usually the case). Images can be a `str` file path, a `numpy` array, or a `PIL.Image` object depending on `type`. Videos are always `str` file path.
- - **As output:** Should return, expects the function to return a `list` of images or videos, or `list` of (media, `str` caption) tuples. Each image can be a `str` file path, a `numpy` array, or a `PIL.Image` object. Each video can be a `str` file path.
-
- ```python
- def predict(
-     value: list[tuple[str, str | None]]
-     | list[tuple[PIL.Image.Image, str | None]]
-     | list[tuple[numpy.ndarray, str | None]]
-     | None
- ) -> list[
-     typing.Union[
-         numpy.ndarray,
-         PIL.Image.Image,
-         pathlib.Path,
-         str,
-         tuple[
-             typing.Union[
-                 numpy.ndarray,
-                 PIL.Image.Image,
-                 pathlib.Path,
-                 str,
-             ],
-             str,
-         ],
-     ][
-         numpy.ndarray,
-         PIL.Image.Image,
-         pathlib.Path,
-         str,
-         tuple[
-             typing.Union[
-                 numpy.ndarray,
-                 PIL.Image.Image,
-                 pathlib.Path,
-                 str,
-             ][
-                 numpy.ndarray,
-                 PIL.Image.Image,
-                 pathlib.Path,
-                 str,
-             ],
-             str,
-         ],
-     ]
- ]
- | None:
-     return value
- ```
- """, elem_classes=["md-custom", "MediaGallery-user-fn"], header_links=True)
-
-
-
-
-     demo.load(None, js=r"""function() {
-     const refs = {};
-     const user_fn_refs = {
-         MediaGallery: [], };
-     requestAnimationFrame(() => {
-
-         Object.entries(user_fn_refs).forEach(([key, refs]) => {
-             if (refs.length > 0) {
-                 const el = document.querySelector(`.${key}-user-fn`);
-                 if (!el) return;
-                 refs.forEach(ref => {
-                     el.innerHTML = el.innerHTML.replace(
-                         new RegExp("\\b"+ref+"\\b", "g"),
-                         `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
-                     );
-                 })
-             }
-         })
-
-         Object.entries(refs).forEach(([key, refs]) => {
-             if (refs.length > 0) {
-                 const el = document.querySelector(`.${key}`);
-                 if (!el) return;
-                 refs.forEach(ref => {
-                     el.innerHTML = el.innerHTML.replace(
-                         new RegExp("\\b"+ref+"\\b", "g"),
-                         `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
-                     );
-                 })
-             }
-         })
-     })
- }
-
- """)
-
- demo.launch()
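
Editor's note: the "User function" docs embedded in the demo above describe a (media, caption) tuple contract. A minimal hedged sketch of a handler that satisfies it, assuming `type="filepath"` so every media item arrives as a plain path string (the `caption_all` name is illustrative, not part of the deleted code):

```python
# Minimal sketch of a user function for the documented contract, assuming
# type="filepath": the component passes [(path, caption_or_None), ...] in,
# and accepts a list of (media, caption) tuples back out.
def caption_all(items: list[tuple[str, str | None]] | None) -> list[tuple[str, str]]:
    if not items:
        return []
    # Re-emit each (media, caption) pair, filling in any missing captions.
    return [(path, caption or f"item {i}") for i, (path, caption) in enumerate(items)]
```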
 
mediagallery/frontend/Example.svelte DELETED
@@ -1,171 +0,0 @@
- <script lang="ts">
-   import type { GalleryImage, GalleryVideo, GalleryAudio, GalleryData } from "./types";
-
-   export let value: GalleryData[] | null;
-   export let type: "gallery" | "table";
-   export let selected = false;
- </script>
-
- <div
-   class="container"
-   class:table={type === "table"}
-   class:gallery={type === "gallery"}
-   class:selected
- >
-   {#if value && value.length > 0}
-     <div class="images-wrapper">
-       {#each value.slice(0, 5) as item}
-         {#if "image" in item && item.image}
-           <div class="image-container">
-             <img src={item.image.url} alt={item.caption || ""} />
-           </div>
-         {:else if "video" in item && item.video}
-           <div class="image-container">
-             <video
-               src={item.video.url}
-               controls={false}
-               muted
-               preload="metadata"
-             />
-           </div>
-         {:else if "audio" in item && item.audio}
-           <div class="image-container audio">
-             <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-               <path d="M9 18V5l12-2v13"></path>
-               <circle cx="6" cy="18" r="3"></circle>
-               <circle cx="18" cy="16" r="3"></circle>
-             </svg>
-           </div>
-         {/if}
-       {/each}
-       {#if value.length > 5}
-         <div class="more-indicator">+{value.length - 5}</div>
-       {/if}
-     </div>
-   {/if}
- </div>
-
- <style>
-   .container {
-     border-radius: var(--radius-lg);
-     overflow: hidden;
-     border: 2px solid transparent;
-     box-sizing: border-box;
-   }
-
-   .container.selected {
-     border-color: var(--border-color-accent);
-   }
-
-   /* Prevent any parent hover effects from causing shifts */
-   :global(tr:hover) .container,
-   :global(tr:hover) .images-wrapper,
-   :global(tr:hover) .image-container,
-   :global(tr:hover) .image-container img,
-   :global(tr:hover) .image-container video {
-     transform: none !important;
-     scale: none !important;
-   }
-
-   .container *,
-   .images-wrapper,
-   .image-container {
-     box-sizing: border-box;
-   }
-
-   .images-wrapper {
-     display: flex;
-     gap: var(--spacing-sm);
-   }
-
-   .container.table .images-wrapper {
-     flex-direction: row;
-     align-items: center;
-     padding: var(--spacing-sm);
-     border: 1px solid var(--border-color-primary);
-     border-radius: var(--radius-lg);
-     background: var(--background-fill-secondary);
-   }
-
-   .container.gallery .images-wrapper {
-     flex-direction: row;
-     gap: 0;
-   }
-
-   .image-container {
-     position: relative;
-     flex-shrink: 0;
-   }
-
-   .container.table .image-container {
-     width: var(--size-12);
-     height: var(--size-12);
-   }
-
-   .container.gallery .image-container {
-     width: var(--size-20);
-     height: var(--size-20);
-     margin-left: calc(-1 * var(--size-8));
-   }
-
-   .container.gallery .image-container:first-child {
-     margin-left: 0;
-   }
-
-   .more-indicator {
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     font-size: var(--text-sm);
-     font-weight: bold;
-     color: var(--body-text-color-subdued);
-     background: var(--background-fill-secondary);
-     border-radius: var(--radius-md);
-   }
-
-   .container.table .more-indicator {
-     width: var(--size-12);
-     height: var(--size-12);
-   }
-
-   .container.gallery .more-indicator {
-     width: var(--size-20);
-     height: var(--size-20);
-     margin-left: calc(-1 * var(--size-8));
-   }
-
-   .image-container img,
-   .image-container video {
-     width: 100%;
-     height: 100%;
-     object-fit: cover;
-     border-radius: var(--radius-md);
-   }
-
-   .image-container.audio {
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-     border-radius: var(--radius-md);
-     color: white;
-   }
-
-   /* Remove hover effects */
-   .container,
-   .container *,
-   .image-container,
-   .image-container img,
-   .image-container video {
-     transition: none !important;
-   }
-
-   .container:hover,
-   .image-container:hover,
-   .image-container:hover img,
-   .image-container:hover video {
-     transform: none !important;
-     filter: none !important;
-     opacity: 1 !important;
-   }
- </style>
 
mediagallery/frontend/Index.svelte DELETED
@@ -1,193 +0,0 @@
1
- <script context="module" lang="ts">
2
- export { default as BaseGallery } from "./shared/Gallery.svelte";
3
- export { default as BaseExample } from "./Example.svelte";
4
- </script>
5
-
6
- <script lang="ts">
7
- import type { GalleryImage, GalleryVideo, GalleryAudio, GalleryData } from "./types";
8
- import type { FileData } from "@gradio/client";
9
- import type { Gradio, ShareData, SelectData } from "@gradio/utils";
10
- import { Block, UploadText } from "@gradio/atoms";
11
- import Gallery from "./shared/Gallery.svelte";
12
- import type { LoadingStatus } from "@gradio/statustracker";
13
- import { StatusTracker } from "@gradio/statustracker";
14
- import { createEventDispatcher } from "svelte";
15
- import { BaseFileUpload } from "@gradio/file";
16
-
17
- export let loading_status: LoadingStatus;
18
- export let show_label: boolean;
19
- export let label: string;
20
- export let root: string;
21
- export let elem_id = "";
22
- export let elem_classes: string[] = [];
23
- export let visible = true;
24
- export let value: GalleryData[] | null = null;
25
- export let file_types: string[] | null = ["image", "video", "audio"];
26
- export let container = true;
27
- export let scale: number | null = null;
28
- export let min_width: number | undefined = undefined;
29
- export let columns: number | number[] | undefined = [2];
30
- export let rows: number | number[] | undefined = undefined;
31
- export let height: number | "auto" = "auto";
32
- export let preview: boolean;
33
- export let allow_preview = true;
34
- export let selected_index: number | null = null;
35
- export let object_fit: "contain" | "cover" | "fill" | "none" | "scale-down" =
36
- "cover";
37
- export let show_share_button = false;
38
- export let interactive: boolean;
39
- export let show_download_button = false;
40
- export let gradio: Gradio<{
41
- change: typeof value;
42
- upload: typeof value;
43
- select: SelectData;
44
- share: ShareData;
45
- error: string;
46
- prop_change: Record<string, any>;
47
- clear_status: LoadingStatus;
48
- preview_open: never;
49
- preview_close: never;
50
- }>;
51
- export let show_fullscreen_button = true;
52
- export let fullscreen = false;
53
-
54
- const dispatch = createEventDispatcher();
55
-
56
- $: no_value = value === null ? true : value.length === 0;
57
- $: selected_index, dispatch("prop_change", { selected_index });
58
-
59
- // Audio file extensions for detection
60
- const AUDIO_EXTENSIONS = ['.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac'];
61
-
62
- function isAudioFile(file: FileData): boolean {
-   if (file.mime_type?.includes("audio")) return true;
-   const ext = (file.orig_name || file.path || "").toLowerCase();
-   return AUDIO_EXTENSIONS.some(e => ext.endsWith(e));
- }
-
- async function process_upload_files(
-   files: FileData[]
- ): Promise<GalleryData[]> {
-   const processed_files = await Promise.all(
-     files.map(async (x) => {
-       if (x.path?.toLowerCase().endsWith(".svg") && x.url) {
-         const response = await fetch(x.url);
-         const svgContent = await response.text();
-         return {
-           ...x,
-           url: `data:image/svg+xml,${encodeURIComponent(svgContent)}`
-         };
-       }
-       return x;
-     })
-   );
-
-   return processed_files.map((x): GalleryData => {
-     if (x.mime_type?.includes("video")) {
-       return { video: x, caption: null };
-     } else if (isAudioFile(x)) {
-       return { audio: x, caption: null };
-     } else {
-       return { image: x, caption: null };
-     }
-   });
- }
-
- // Handle adding more files to existing gallery
- async function handle_upload(e: CustomEvent<FileData | FileData[]>) {
-   const files = Array.isArray(e.detail) ? e.detail : [e.detail];
-   const new_items = await process_upload_files(files);
-
-   // Append to existing items instead of replacing
-   if (value && value.length > 0) {
-     value = [...value, ...new_items];
-   } else {
-     value = new_items;
-   }
-
-   gradio.dispatch("upload", value);
-   gradio.dispatch("change", value);
- }
- </script>
-
- <Block
-   {visible}
-   variant="solid"
-   padding={false}
-   {elem_id}
-   {elem_classes}
-   {container}
-   {scale}
-   {min_width}
-   allow_overflow={false}
-   height={typeof height === "number" ? height : undefined}
-   bind:fullscreen
- >
-   <StatusTracker
-     autoscroll={gradio.autoscroll}
-     i18n={gradio.i18n}
-     {...loading_status}
-     on:clear_status={() => gradio.dispatch("clear_status", loading_status)}
-   />
-   {#if interactive && no_value}
-     <!-- Initial upload area when gallery is empty -->
-     <BaseFileUpload
-       value={null}
-       {root}
-       {label}
-       {file_types}
-       max_file_size={gradio.max_file_size}
-       file_count={"multiple"}
-       i18n={gradio.i18n}
-       upload={(...args) => gradio.client.upload(...args)}
-       stream_handler={(...args) => gradio.client.stream(...args)}
-       on:upload={handle_upload}
-       on:error={({ detail }) => {
-         loading_status = loading_status || {};
-         loading_status.status = "error";
-         gradio.dispatch("error", detail);
-       }}
-     >
-       <UploadText i18n={gradio.i18n} type="gallery" />
-     </BaseFileUpload>
-   {:else}
-     <Gallery
-       on:change={() => gradio.dispatch("change", value)}
-       on:select={(e) => gradio.dispatch("select", e.detail)}
-       on:share={(e) => gradio.dispatch("share", e.detail)}
-       on:error={(e) => gradio.dispatch("error", e.detail)}
-       on:preview_open={() => gradio.dispatch("preview_open")}
-       on:preview_close={() => gradio.dispatch("preview_close")}
-       on:fullscreen={({ detail }) => {
-         fullscreen = detail;
-       }}
-       on:upload={handle_upload}
-       {label}
-       {show_label}
-       {columns}
-       {rows}
-       {height}
-       {preview}
-       {object_fit}
-       {interactive}
-       {allow_preview}
-       bind:selected_index
-       bind:value
-       {show_share_button}
-       {show_download_button}
-       i18n={gradio.i18n}
-       _fetch={(...args) => gradio.client.fetch(...args)}
-       {show_fullscreen_button}
-       {fullscreen}
-       {root}
-       {file_types}
-       max_file_size={gradio.max_file_size}
-       upload={(...args) => gradio.client.upload(...args)}
-       stream_handler={(...args) => gradio.client.stream(...args)}
-     />
-   {/if}
- </Block>
-
- <style>
-   /* Component styles are in Gallery.svelte */
- </style>
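For readers following the removal: the upload handler above classifies each uploaded file into one of the gallery's three media kinds. A minimal TypeScript sketch of that classification, with a pared-down `FileData` and a hypothetical `AUDIO_EXTENSIONS` list standing in for the constant defined earlier in Index.svelte (not shown in this hunk):

```ts
// Pared-down stand-in for @gradio/client's FileData; only the fields used here.
interface FileData {
  path?: string;
  url?: string;
  orig_name?: string;
  mime_type?: string;
}

// Hypothetical list; the component's real AUDIO_EXTENSIONS is defined
// earlier in Index.svelte and may differ.
const AUDIO_EXTENSIONS = [".mp3", ".wav", ".ogg", ".flac", ".m4a"];

function isAudioFile(file: FileData): boolean {
  // Trust the MIME type first, then fall back to the file extension.
  if (file.mime_type?.includes("audio")) return true;
  const name = (file.orig_name || file.path || "").toLowerCase();
  return AUDIO_EXTENSIONS.some((ext) => name.endsWith(ext));
}

// Video is detected by MIME type alone; anything that is neither video
// nor audio is treated as an image, mirroring process_upload_files above.
function classify(file: FileData): "video" | "audio" | "image" {
  if (file.mime_type?.includes("video")) return "video";
  if (isAudioFile(file)) return "audio";
  return "image";
}

console.log(classify({ mime_type: "video/mp4" })); // "video"
console.log(classify({ orig_name: "track.mp3" })); // "audio"
console.log(classify({ orig_name: "photo.png" })); // "image"
```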
mediagallery/frontend/gradio.config.js DELETED
@@ -1,9 +0,0 @@
- export default {
-   plugins: [],
-   svelte: {
-     preprocess: [],
-   },
-   build: {
-     target: "modules",
-   },
- };
mediagallery/frontend/package-lock.json DELETED
The diff for this file is too large to render.
 
mediagallery/frontend/package.json DELETED
@@ -1,52 +0,0 @@
- {
-   "name": "gradio_mediagallery",
-   "version": "0.15.24",
-   "description": "Gradio UI packages",
-   "type": "module",
-   "author": "",
-   "license": "ISC",
-   "private": false,
-   "dependencies": {
-     "@gradio/atoms": "0.16.2",
-     "@gradio/client": "1.15.3",
-     "@gradio/icons": "0.12.0",
-     "@gradio/image": "0.22.10",
-     "@gradio/statustracker": "0.10.13",
-     "@gradio/upload": "0.16.8",
-     "@gradio/utils": "0.10.2",
-     "@gradio/video": "0.14.18",
-     "@gradio/file": "0.12.21",
-     "dequal": "^2.0.2"
-   },
-   "devDependencies": {
-     "@gradio/preview": "0.13.2"
-   },
-   "main": "./Index.svelte",
-   "main_changeset": true,
-   "exports": {
-     ".": {
-       "gradio": "./Index.svelte",
-       "svelte": "./dist/Index.svelte",
-       "types": "./dist/Index.svelte.d.ts"
-     },
-     "./package.json": "./package.json",
-     "./base": {
-       "gradio": "./shared/Gallery.svelte",
-       "svelte": "./dist/shared/Gallery.svelte",
-       "types": "./dist/shared/Gallery.svelte.d.ts"
-     },
-     "./example": {
-       "gradio": "./Example.svelte",
-       "svelte": "./dist/Example.svelte",
-       "types": "./dist/Example.svelte.d.ts"
-     }
-   },
-   "peerDependencies": {
-     "svelte": "^4.0.0"
-   },
-   "repository": {
-     "type": "git",
-     "url": "git+https://github.com/gradio-app/gradio.git",
-     "directory": "js/gallery"
-   }
- }
mediagallery/frontend/shared/Gallery.svelte DELETED
@@ -1,1031 +0,0 @@
- <script lang="ts">
-   import {
-     BlockLabel,
-     Empty,
-     ShareButton,
-     IconButton,
-     IconButtonWrapper,
-     FullscreenButton
-   } from "@gradio/atoms";
-   import { Upload } from "@gradio/upload";
-   import type { SelectData, I18nFormatter } from "@gradio/utils";
-   import { Image } from "@gradio/image/shared";
-   import { Video } from "@gradio/video/shared";
-   import { dequal } from "dequal";
-   import { createEventDispatcher, onMount } from "svelte";
-   import { tick } from "svelte";
-   import type { GalleryImage, GalleryVideo, GalleryAudio, GalleryData } from "../types";
-   import { getMediaType, getMediaFile } from "../types";
-
-   import { Download, Image as ImageIcon, Clear, Play } from "@gradio/icons";
-   import { FileData } from "@gradio/client";
-   import type { Client } from "@gradio/client";
-   import { format_gallery_for_sharing } from "./utils";
-
-   export let show_label = true;
-   export let label: string;
-   export let value: GalleryData[] | null = null;
-   export let columns: number | number[] | undefined = [2];
-   export let rows: number | number[] | undefined = undefined;
-   export let height: number | "auto" = "auto";
-   export let preview: boolean;
-   export let allow_preview = true;
-   export let object_fit: "contain" | "cover" | "fill" | "none" | "scale-down" =
-     "cover";
-   export let show_share_button = false;
-   export let show_download_button = false;
-   export let i18n: I18nFormatter;
-   export let selected_index: number | null = null;
-   export let interactive: boolean;
-   export let _fetch: typeof fetch;
-   export let mode: "normal" | "minimal" = "normal";
-   export let show_fullscreen_button = true;
-   export let display_icon_button_wrapper_top_corner = false;
-   export let fullscreen = false;
-   export let root = "";
-   export let file_types: string[] | null = ["image", "video", "audio"];
-   export let max_file_size: number | null = null;
-   export let upload: Client["upload"] | undefined = undefined;
-   export let stream_handler: Client["stream"] | undefined = undefined;
-
-   let is_full_screen = false;
-   let image_container: HTMLElement;
-
-   const dispatch = createEventDispatcher<{
-     change: undefined;
-     select: SelectData;
-     preview_open: undefined;
-     preview_close: undefined;
-     fullscreen: boolean;
-     upload: FileData | FileData[];
-     error: string;
-   }>();
-
-   // tracks whether the value of the gallery was reset
-   let was_reset = true;
-
-   $: was_reset = value == null || value.length === 0 ? true : was_reset;
-
-   let resolved_value: GalleryData[] | null = null;
-
-   $: resolved_value =
-     value == null
-       ? null
-       : (value.map((data) => {
-           if ("video" in data) {
-             return {
-               video: data.video as FileData,
-               caption: data.caption
-             };
-           } else if ("audio" in data) {
-             return {
-               audio: data.audio as FileData,
-               caption: data.caption
-             };
-           } else if ("image" in data) {
-             return { image: data.image as FileData, caption: data.caption };
-           }
-           return {};
-         }) as GalleryData[]);
-
-   let prev_value: GalleryData[] | null = value;
-   if (selected_index == null && preview && value?.length) {
-     selected_index = 0;
-   }
-   let old_selected_index: number | null = selected_index;
-
-   $: if (!dequal(prev_value, value)) {
-     // When value is falsy (clear button or first load),
-     // preview determines the selected image
-     if (was_reset) {
-       selected_index = preview && value?.length ? 0 : null;
-       was_reset = false;
-       // Otherwise we keep the selected_index the same if the
-       // gallery has at least as many elements as it did before
-     } else {
-       if (selected_index !== null && value !== null) {
-         selected_index = Math.max(
-           0,
-           Math.min(selected_index, value.length - 1)
-         );
-       } else {
-         selected_index = null;
-       }
-     }
-     dispatch("change");
-     prev_value = value;
-   }
-
-   $: previous =
-     ((selected_index ?? 0) + (resolved_value?.length ?? 0) - 1) %
-     (resolved_value?.length ?? 0);
-   $: next = ((selected_index ?? 0) + 1) % (resolved_value?.length ?? 0);
-
-   function handle_preview_click(event: MouseEvent): void {
-     const element = event.target as HTMLElement;
-     const x = event.offsetX;
-     const width = element.offsetWidth;
-     const centerX = width / 2;
-
-     if (x < centerX) {
-       selected_index = previous;
-     } else {
-       selected_index = next;
-     }
-   }
-
-   function on_keydown(e: KeyboardEvent): void {
-     switch (e.code) {
-       case "Escape":
-         e.preventDefault();
-         selected_index = null;
-         break;
-       case "ArrowLeft":
-         e.preventDefault();
-         selected_index = previous;
-         break;
-       case "ArrowRight":
-         e.preventDefault();
-         selected_index = next;
-         break;
-       default:
-         break;
-     }
-   }
-
-   $: {
-     if (selected_index !== old_selected_index) {
-       old_selected_index = selected_index;
-       if (selected_index !== null) {
-         if (resolved_value != null) {
-           selected_index = Math.max(
-             0,
-             Math.min(selected_index, resolved_value.length - 1)
-           );
-         }
-         dispatch("select", {
-           index: selected_index,
-           value: resolved_value?.[selected_index]
-         });
-       }
-     }
-   }
-
-   $: if (allow_preview) {
-     scroll_to_img(selected_index);
-   }
-
-   let el: HTMLButtonElement[] = [];
-   let container_element: HTMLDivElement;
-
-   async function scroll_to_img(index: number | null): Promise<void> {
-     if (typeof index !== "number") return;
-     await tick();
-
-     if (el[index] === undefined) return;
-
-     el[index]?.focus();
-
-     const { left: container_left, width: container_width } =
-       container_element.getBoundingClientRect();
-     const { left, width } = el[index].getBoundingClientRect();
-
-     const relative_left = left - container_left;
-
-     const pos =
-       relative_left +
-       width / 2 -
-       container_width / 2 +
-       container_element.scrollLeft;
-
-     if (container_element && typeof container_element.scrollTo === "function") {
-       container_element.scrollTo({
-         left: pos < 0 ? 0 : pos,
-         behavior: "smooth"
-       });
-     }
-   }
-
-   let window_height = 0;
-
-   // Unlike `gr.Image()`, images specified via remote URLs are not cached in the server
-   // and their remote URLs are directly passed to the client as `value[].image.url`.
-   // The `download` attribute of the <a> tag doesn't work for remote URLs (https://developer.mozilla.org/en-US/docs/Web/HTML/Element/a#download),
-   // so we need to download the image via JS as below.
-   async function download(file_url: string, name: string): Promise<void> {
-     let response;
-     try {
-       response = await _fetch(file_url);
-     } catch (error) {
-       if (error instanceof TypeError) {
-         // If CORS is not allowed (https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API/Using_Fetch#checking_that_the_fetch_was_successful),
-         // open the link in a new tab instead, mimicking the behavior of the `download` attribute for remote URLs,
-         // which is not ideal, but a reasonable fallback.
-         window.open(file_url, "_blank", "noreferrer");
-         return;
-       }
-
-       throw error;
-     }
-     const blob = await response.blob();
-     const url = URL.createObjectURL(blob);
-     const link = document.createElement("a");
-     link.href = url;
-     link.download = name;
-     link.click();
-     URL.revokeObjectURL(url);
-   }
-
-   $: selected_media =
-     selected_index != null && resolved_value != null
-       ? resolved_value[selected_index]
-       : null;
-
-   let thumbnails_overflow = false;
-
-   function check_thumbnails_overflow(): void {
-     if (container_element) {
-       thumbnails_overflow =
-         container_element.scrollWidth > container_element.clientWidth;
-     }
-   }
-
-   onMount(() => {
-     check_thumbnails_overflow();
-     document.addEventListener("fullscreenchange", () => {
-       is_full_screen = !!document.fullscreenElement;
-     });
-     window.addEventListener("resize", check_thumbnails_overflow);
-     return () =>
-       window.removeEventListener("resize", check_thumbnails_overflow);
-   });
-
-   $: resolved_value, check_thumbnails_overflow();
-   $: if (container_element) {
-     check_thumbnails_overflow();
-   }
- </script>
-
- <svelte:window bind:innerHeight={window_height} />
-
- {#if show_label}
-   <BlockLabel {show_label} Icon={ImageIcon} label={label || "Gallery"} />
- {/if}
- {#if value == null || resolved_value == null || resolved_value.length === 0}
-   <Empty unpadded_box={true} size="large"><ImageIcon /></Empty>
- {:else}
-   <div class="gallery-container" bind:this={image_container}>
-     {#if selected_media && allow_preview}
-       <button
-         on:keydown={on_keydown}
-         class="preview"
-         class:minimal={mode === "minimal"}
-       >
-         <IconButtonWrapper
-           display_top_corner={display_icon_button_wrapper_top_corner}
-         >
-           {#if show_download_button}
-             <IconButton
-               Icon={Download}
-               label={i18n("common.download")}
-               on:click={() => {
-                 const file = getMediaFile(selected_media);
-                 if (file == null) {
-                   return;
-                 }
-                 const { url, orig_name } = file;
-                 if (url) {
-                   download(url, orig_name ?? "media");
-                 }
-               }}
-             />
-           {/if}
-
-           {#if show_fullscreen_button}
-             <FullscreenButton {fullscreen} on:fullscreen />
-           {/if}
-
-           {#if show_share_button}
-             <div class="icon-button">
-               <ShareButton
-                 {i18n}
-                 on:share
-                 on:error
-                 value={resolved_value}
-                 formatter={format_gallery_for_sharing}
-               />
-             </div>
-           {/if}
-           {#if !is_full_screen}
-             <IconButton
-               Icon={Clear}
-               label="Close"
-               on:click={() => {
-                 selected_index = null;
-                 dispatch("preview_close");
-               }}
-             />
-           {/if}
-         </IconButtonWrapper>
-         <button
-           class="media-button"
-           on:click={"image" in selected_media
-             ? (event) => handle_preview_click(event)
-             : null}
-           style="height: calc(100% - {selected_media.caption
-             ? '80px'
-             : '60px'})"
-           aria-label="detailed view of selected media"
-         >
-           {#if "image" in selected_media}
-             <Image
-               data-testid="detailed-image"
-               src={selected_media.image.url}
-               alt={selected_media.caption || ""}
-               title={selected_media.caption || null}
-               class={selected_media.caption && "with-caption"}
-               loading="lazy"
-             />
-           {:else if "video" in selected_media}
-             <Video
-               src={selected_media.video.url}
-               data-testid={"detailed-video"}
-               alt={selected_media.caption || ""}
-               loading="lazy"
-               loop={false}
-               is_stream={false}
-               muted={false}
-               controls={true}
-             />
-           {:else if "audio" in selected_media}
-             <div class="audio-preview">
-               <div class="audio-icon-large">
-                 <svg xmlns="http://www.w3.org/2000/svg" width="64" height="64" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
-                   <path d="M9 18V5l12-2v13"></path>
-                   <circle cx="6" cy="18" r="3"></circle>
-                   <circle cx="18" cy="16" r="3"></circle>
-                 </svg>
-               </div>
-               <div class="audio-filename">{selected_media.audio.orig_name || "Audio"}</div>
-               <audio
-                 src={selected_media.audio.url}
-                 controls
-                 class="audio-player"
-                 data-testid="detailed-audio"
-               />
-             </div>
-           {/if}
-         </button>
-         {#if selected_media?.caption}
-           <caption class="caption">
-             {selected_media.caption}
-           </caption>
-         {/if}
-         <div
-           bind:this={container_element}
-           class="thumbnails scroll-hide"
-           data-testid="container_el"
-           style="justify-content: {thumbnails_overflow
-             ? 'flex-start'
-             : 'center'};"
-         >
-           {#each resolved_value as media, i}
-             <button
-               bind:this={el[i]}
-               on:click={() => (selected_index = i)}
-               class="thumbnail-item thumbnail-small"
-               class:selected={selected_index === i && mode !== "minimal"}
-               aria-label={"Thumbnail " +
-                 (i + 1) +
-                 " of " +
-                 resolved_value.length}
-             >
-               {#if "image" in media}
-                 <Image
-                   src={media.image.url}
-                   title={media.caption || null}
-                   data-testid={"thumbnail " + (i + 1)}
-                   alt=""
-                   loading="lazy"
-                 />
-               {:else if "video" in media}
-                 <Play />
-                 <Video
-                   src={media.video.url}
-                   title={media.caption || null}
-                   is_stream={false}
-                   data-testid={"thumbnail " + (i + 1)}
-                   alt=""
-                   loading="lazy"
-                   loop={false}
-                 />
-               {:else if "audio" in media}
-                 <div class="audio-thumbnail">
-                   <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-                     <path d="M9 18V5l12-2v13"></path>
-                     <circle cx="6" cy="18" r="3"></circle>
-                     <circle cx="18" cy="16" r="3"></circle>
-                   </svg>
-                 </div>
-               {/if}
-             </button>
-           {/each}
-         </div>
-       </button>
-     {/if}
-
-     <div
-       class="grid-wrap"
-       class:minimal={mode === "minimal"}
-       class:fixed-height={mode !== "minimal" && (!height || height == "auto")}
-       class:hidden={is_full_screen}
-     >
-       <div
-         class="grid-container"
-         style="--grid-cols:{columns}; --grid-rows:{rows}; --object-fit: {object_fit}; height: {height};"
-         class:pt-6={show_label}
-       >
-         {#each resolved_value as entry, i}
-           <div class="thumbnail-wrapper">
-             <button
-               class="thumbnail-item thumbnail-lg"
-               class:selected={selected_index === i}
-               on:click={() => {
-                 if (selected_index === null && allow_preview) {
-                   dispatch("preview_open");
-                 }
-                 selected_index = i;
-               }}
-               aria-label={"Thumbnail " + (i + 1) + " of " + resolved_value.length}
-             >
-               {#if "image" in entry}
-                 <Image
-                   alt={entry.caption || ""}
-                   src={typeof entry.image === "string"
-                     ? entry.image
-                     : entry.image.url}
-                   loading="lazy"
-                 />
-                 <div class="media-type-badge image">
-                   <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-                     <rect x="3" y="3" width="18" height="18" rx="2" ry="2"></rect>
-                     <circle cx="8.5" cy="8.5" r="1.5"></circle>
-                     <polyline points="21 15 16 10 5 21"></polyline>
-                   </svg>
-                 </div>
-               {:else if "video" in entry}
-                 <Play />
-                 <Video
-                   src={entry.video.url}
-                   title={entry.caption || null}
-                   is_stream={false}
-                   data-testid={"thumbnail " + (i + 1)}
-                   alt=""
-                   loading="lazy"
-                   loop={false}
-                 />
-                 <div class="media-type-badge video">
-                   <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-                     <polygon points="5 3 19 12 5 21 5 3"></polygon>
-                   </svg>
-                 </div>
-               {:else if "audio" in entry}
-                 <div class="audio-thumbnail-lg">
-                   <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
-                     <path d="M9 18V5l12-2v13"></path>
-                     <circle cx="6" cy="18" r="3"></circle>
-                     <circle cx="18" cy="16" r="3"></circle>
-                   </svg>
-                 </div>
-                 <div class="media-type-badge audio">
-                   <svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-                     <path d="M9 18V5l12-2v13"></path>
-                     <circle cx="6" cy="18" r="3"></circle>
-                     <circle cx="18" cy="16" r="3"></circle>
-                   </svg>
-                 </div>
-               {/if}
-             </button>
-             <!-- Remove button -->
-             {#if interactive}
-               <button
-                 class="remove-btn"
-                 on:click|stopPropagation={() => {
-                   if (value) {
-                     value = value.filter((_, idx) => idx !== i);
-                     if (selected_index !== null && selected_index >= i) {
-                       selected_index = selected_index > 0 ? selected_index - 1 : null;
-                     }
-                     dispatch("change");
-                   }
-                 }}
-                 aria-label="Remove item"
-               >
-                 <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-                   <line x1="18" y1="6" x2="6" y2="18"></line>
-                   <line x1="6" y1="6" x2="18" y2="18"></line>
-                 </svg>
-               </button>
-             {/if}
-             <!-- Filename label -->
-             <div class="filename-label">
-               {#if "image" in entry}
-                 {entry.image.orig_name || "Image"}
-               {:else if "video" in entry}
-                 {entry.video.orig_name || "Video"}
-               {:else if "audio" in entry}
-                 {entry.audio.orig_name || "Audio"}
-               {/if}
-             </div>
-           </div>
-         {/each}
-       </div>
-     </div>
-     <!-- Add Media button below grid -->
-     {#if interactive && upload && stream_handler}
-       <div class="add-media-bar">
-         <Upload
-           filetype={file_types}
-           file_count="multiple"
-           {max_file_size}
-           {root}
-           {upload}
-           {stream_handler}
-           on:load={(e) => dispatch("upload", e.detail)}
-           on:error={(e) => dispatch("error", e.detail)}
-         >
-           <div class="add-media-btn">
-             <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
-               <line x1="12" y1="5" x2="12" y2="19"></line>
-               <line x1="5" y1="12" x2="19" y2="12"></line>
-             </svg>
-             <span>Add Media</span>
-           </div>
-         </Upload>
-       </div>
-     {/if}
-   </div>
- {/if}
-
- <style lang="postcss">
-   .gallery-container {
-     position: relative;
-     display: flex;
-     flex-direction: column;
-     width: 100%;
-     height: 100%;
-     min-height: 0;
-     overflow: hidden;
-   }
-
-   .image-container {
-     height: 100%;
-     position: relative;
-   }
-   .image-container :global(img),
-   button {
-     width: var(--size-full);
-     height: var(--size-full);
-     object-fit: contain;
-     display: block;
-     border-radius: var(--radius-lg);
-   }
-
-   .preview {
-     display: flex;
-     position: absolute;
-     flex-direction: column;
-     z-index: var(--layer-2);
-     border-radius: calc(var(--block-radius) - var(--block-border-width));
-     -webkit-backdrop-filter: blur(8px);
-     backdrop-filter: blur(8px);
-     width: var(--size-full);
-     height: var(--size-full);
-   }
-
-   .preview.minimal {
-     width: fit-content;
-     height: fit-content;
-   }
-
-   .preview::before {
-     content: "";
-     position: absolute;
-     z-index: var(--layer-below);
-     background: var(--background-fill-primary);
-     opacity: 0.9;
-     width: var(--size-full);
-     height: var(--size-full);
-   }
-
-   .fixed-height {
-     min-height: var(--size-80);
-     max-height: 55vh;
-   }
-
-   @media (--screen-xl) {
-     .fixed-height {
-       min-height: 450px;
-     }
-   }
-
-   .media-button {
-     height: calc(100% - 60px);
-     width: 100%;
-     display: flex;
-   }
-   .media-button :global(img),
-   .media-button :global(video) {
-     width: var(--size-full);
-     height: var(--size-full);
-     object-fit: contain;
-   }
-   .thumbnails :global(img) {
-     object-fit: cover;
-     width: var(--size-full);
-     height: var(--size-full);
-   }
-   .thumbnails :global(svg) {
-     position: absolute;
-     top: var(--size-2);
-     left: var(--size-2);
-     width: 50%;
-     height: 50%;
-     opacity: 50%;
-   }
-   .preview :global(img.with-caption) {
-     height: var(--size-full);
-   }
-
-   .preview.minimal :global(img.with-caption) {
-     height: auto;
-   }
-
-   .selectable {
-     cursor: crosshair;
-   }
-
-   .caption {
-     padding: var(--size-2) var(--size-3);
-     overflow: hidden;
-     color: var(--block-label-text-color);
-     font-weight: var(--weight-semibold);
-     text-align: center;
-     text-overflow: ellipsis;
-     white-space: nowrap;
-     align-self: center;
-   }
-
-   .thumbnails {
-     display: flex;
-     position: absolute;
-     bottom: 0;
-     justify-content: flex-start;
-     align-items: center;
-     gap: var(--spacing-lg);
-     width: var(--size-full);
-     height: var(--size-14);
-     overflow-x: scroll;
-     z-index: var(--layer-5);
-   }
-
-   .thumbnail-item {
-     --ring-color: transparent;
-     position: relative;
-     box-shadow:
-       inset 0 0 0 1px var(--ring-color),
-       var(--shadow-drop);
-     border: 1px solid var(--border-color-primary);
-     border-radius: var(--button-small-radius);
-     background: var(--background-fill-secondary);
-     aspect-ratio: var(--ratio-square);
-     width: var(--size-full);
-     height: var(--size-full);
-     overflow: clip;
-   }
-
-   .thumbnail-item:hover {
-     --ring-color: var(--color-accent);
-     border-color: var(--color-accent);
-     filter: brightness(1.1);
-   }
-
-   .thumbnail-item.selected {
-     --ring-color: var(--color-accent);
-     border-color: var(--color-accent);
-   }
-
-   .thumbnail-item :global(svg) {
-     position: absolute;
-     top: 50%;
-     left: 50%;
-     width: 50%;
-     height: 50%;
-     opacity: 50%;
-     transform: translate(-50%, -50%);
-   }
-
-   .thumbnail-item :global(video) {
-     width: var(--size-full);
-     height: var(--size-full);
-     overflow: hidden;
-     object-fit: cover;
-   }
-
-   .thumbnail-small {
-     flex: none;
-     transform: scale(0.9);
-     transition: 0.075s;
-     width: var(--size-9);
-     height: var(--size-9);
-   }
-   .thumbnail-small.selected {
-     --ring-color: var(--color-accent);
-     transform: scale(1);
-     border-color: var(--color-accent);
-   }
-
-   .thumbnail-small > img {
-     width: var(--size-full);
-     height: var(--size-full);
-     overflow: hidden;
-     object-fit: var(--object-fit);
-   }
-
-   .grid-wrap {
-     position: relative;
-     padding: var(--size-2);
-     max-height: 100%;
-     overflow-y: auto;
-     overflow-x: hidden;
-     flex: 1;
-   }
-
-   .grid-container {
-     display: grid;
-     position: relative;
-     grid-template-columns: repeat(2, minmax(0, 1fr));
-     gap: var(--spacing-md);
-     width: 100%;
-   }
-
-   /* Responsive columns */
-   @media (min-width: 768px) {
-     .grid-container {
-       grid-template-columns: repeat(3, minmax(0, 1fr));
-     }
-   }
-
-   @media (min-width: 1280px) {
-     .grid-container {
-       grid-template-columns: repeat(4, minmax(0, 1fr));
-     }
-   }
-
-   @media (min-width: 1536px) {
-     .grid-container {
-       grid-template-columns: repeat(5, minmax(0, 1fr));
-     }
-   }
-
-   .thumbnail-wrapper {
-     position: relative;
-     width: 100%;
-     padding-bottom: 100%; /* Square aspect ratio using padding trick */
-     min-width: 0;
-     overflow: hidden;
-     border-radius: var(--button-small-radius);
-     background: var(--background-fill-secondary);
-     aspect-ratio: 1 / 1;
-   }
-
-   .thumbnail-lg {
-     position: absolute;
-     top: 0;
-     left: 0;
-     width: 100%;
-     height: 100%;
-     overflow: hidden;
-     padding: 0;
-     margin: 0;
-     border: none;
-     background: transparent;
-     cursor: pointer;
-   }
-
-   /* Force all images and videos to be square cropped */
-   .thumbnail-lg :global(img),
-   .thumbnail-lg :global(video) {
-     position: absolute !important;
-     top: 0 !important;
-     left: 0 !important;
-     width: 100% !important;
-     height: 100% !important;
-     object-fit: cover !important;
-     max-width: none !important;
-     max-height: none !important;
-     min-width: 100% !important;
-     min-height: 100% !important;
-   }
-
-   /* Picture elements from Image component */
-   .thumbnail-lg :global(picture) {
-     position: absolute !important;
-     top: 0 !important;
-     left: 0 !important;
-     width: 100% !important;
-     height: 100% !important;
-     overflow: hidden !important;
-   }
-
-   .grid-wrap.minimal {
-     padding: 0;
-   }
-
-   /* Audio thumbnail styles */
-   .audio-thumbnail {
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     width: 100%;
-     height: 100%;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-     color: white;
-   }
-
-   .audio-thumbnail-lg {
-     position: absolute;
-     top: 0;
-     left: 0;
-     display: flex;
-     flex-direction: column;
-     align-items: center;
-     justify-content: center;
-     width: 100%;
-     height: 100%;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-     color: white;
-     gap: var(--size-2);
-     padding: var(--size-2);
-   }
-
-   .audio-thumbnail-lg .audio-name {
-     font-size: var(--text-sm);
-     text-overflow: ellipsis;
-     overflow: hidden;
-     white-space: nowrap;
-     max-width: 100%;
-     text-align: center;
-   }
-
-   /* Audio preview styles */
-   .audio-preview {
-     display: flex;
-     flex-direction: column;
-     align-items: center;
-     justify-content: center;
-     width: 100%;
-     height: 100%;
-     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-     color: white;
-     gap: var(--size-4);
-     padding: var(--size-8);
-   }
-
-   .audio-icon-large {
-     opacity: 0.8;
-   }
-
-   .audio-filename {
-     font-size: var(--text-lg);
-     font-weight: var(--weight-semibold);
-     text-align: center;
-     max-width: 80%;
-     overflow: hidden;
-     text-overflow: ellipsis;
-     white-space: nowrap;
-   }
-
-   .audio-player {
-     width: 80%;
-     max-width: 400px;
-   }
-
-   /* Media type badge styles */
-   .media-type-badge {
-     position: absolute;
-     bottom: var(--size-1);
-     left: var(--size-1);
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     padding: var(--size-1);
-     border-radius: var(--radius-sm);
-     background: rgba(0, 0, 0, 0.6);
-     color: white;
-     z-index: var(--layer-1);
-   }
-
-   .media-type-badge.image {
-     background: rgba(59, 130, 246, 0.8);
-   }
-
-   .media-type-badge.video {
-     background: rgba(239, 68, 68, 0.8);
-   }
-
-   .media-type-badge.audio {
-     background: rgba(139, 92, 246, 0.8);
-   }
-
-   .media-type-badge svg {
-     width: 12px;
-     height: 12px;
-   }
-
-   /* Remove button */
-   .remove-btn {
-     position: absolute;
-     top: var(--size-1);
-     right: var(--size-1);
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     width: 24px;
-     height: 24px;
-     border-radius: 50%;
-     background: rgba(0, 0, 0, 0.7);
-     color: white;
-     border: none;
-     cursor: pointer;
-     opacity: 0;
-     transition: opacity 0.2s ease, background 0.2s ease;
-     z-index: var(--layer-2);
-   }
-
-   .thumbnail-wrapper:hover .remove-btn {
-     opacity: 1;
-   }
-
-   .remove-btn:hover {
-     background: rgba(239, 68, 68, 0.9);
-   }
-
-   .remove-btn svg {
-     width: 14px;
-     height: 14px;
-   }
-
-   /* Filename label */
-   .filename-label {
-     position: absolute;
-     bottom: 0;
-     left: 0;
-     right: 0;
-     padding: var(--size-1) var(--size-2);
-     background: linear-gradient(transparent, rgba(0, 0, 0, 0.8));
-     color: white;
-     font-size: var(--text-xs);
-     text-overflow: ellipsis;
-     overflow: hidden;
-     white-space: nowrap;
-     pointer-events: none;
-     border-radius: 0 0 var(--button-small-radius) var(--button-small-radius);
-     z-index: var(--layer-1);
-   }
-
-   /* Add Media button bar */
-   .add-media-bar {
-     padding: var(--size-2);
-     border-top: 1px solid var(--border-color-primary);
-   }
-
-   .add-media-btn {
-     display: flex;
-     align-items: center;
-     justify-content: center;
-     gap: var(--size-2);
-     padding: var(--size-2) var(--size-4);
-     background: var(--background-fill-secondary);
-     border: 1px dashed var(--border-color-primary);
-     border-radius: var(--radius-lg);
-     color: var(--body-text-color-subdued);
-     cursor: pointer;
-     transition: all 0.2s ease;
-     width: 100%;
-   }
-
-   .add-media-btn:hover {
-     background: var(--background-fill-primary);
-     border-color: var(--color-accent);
-     color: var(--color-accent);
-   }
-
-   .add-media-btn input {
-     display: none;
-   }
-
-   .add-media-btn svg {
-     flex-shrink: 0;
-   }
- </style>
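One detail worth noting in the removed preview logic: the `previous`/`next` reactive statements implement wrap-around navigation with modular arithmetic, adding `length - 1` instead of subtracting 1 so that stepping left from index 0 never produces a negative index. A self-contained sketch of the same calculation:

```ts
// Wrap-around neighbors for n gallery items; assumes n > 0,
// which holds whenever the preview is open.
function neighbors(i: number, n: number): { previous: number; next: number } {
  return {
    previous: (i + n - 1) % n, // stepping left from 0 wraps to n - 1
    next: (i + 1) % n          // stepping right from n - 1 wraps to 0
  };
}

console.log(neighbors(0, 5)); // { previous: 4, next: 1 }
console.log(neighbors(4, 5)); // { previous: 3, next: 0 }
```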
mediagallery/frontend/shared/utils.ts DELETED
@@ -1,21 +0,0 @@
- import { uploadToHuggingFace } from "@gradio/utils";
- import type { GalleryData } from "../types";
- import { getMediaFile } from "../types";
-
- export async function format_gallery_for_sharing(
-   value: GalleryData[] | null
- ): Promise<string> {
-   if (!value) return "";
-   let urls = await Promise.all(
-     value.map(async (item) => {
-       const file = getMediaFile(item);
-       if (!file || !file.url) return "";
-       return await uploadToHuggingFace(file.url, "url");
-     })
-   );
-
-   return `<div style="display: flex; flex-wrap: wrap; gap: 16px">${urls
-     .filter(url => url)
-     .map((url) => `<img src="${url}" style="height: 400px" />`)
-     .join("")}</div>`;
- }
mediagallery/frontend/tsconfig.json DELETED
@@ -1,14 +0,0 @@
- {
-   "compilerOptions": {
-     "allowJs": true,
-     "checkJs": true,
-     "esModuleInterop": true,
-     "forceConsistentCasingInFileNames": true,
-     "resolveJsonModule": true,
-     "skipLibCheck": true,
-     "sourceMap": true,
-     "strict": true,
-     "verbatimModuleSyntax": true
-   },
-   "exclude": ["node_modules", "dist", "./gradio.config.js"]
- }
mediagallery/frontend/types.ts DELETED
@@ -1,33 +0,0 @@
- import type { FileData } from "@gradio/client";
-
- export interface GalleryImage {
-   image: FileData;
-   caption: string | null;
- }
-
- export interface GalleryVideo {
-   video: FileData;
-   caption: string | null;
- }
-
- export interface GalleryAudio {
-   audio: FileData;
-   caption: string | null;
- }
-
- export type GalleryData = GalleryImage | GalleryVideo | GalleryAudio;
-
- // Helper to detect media type
- export type MediaType = "image" | "video" | "audio";
-
- export function getMediaType(item: GalleryData): MediaType {
-   if ("video" in item) return "video";
-   if ("audio" in item) return "audio";
-   return "image";
- }
-
- export function getMediaFile(item: GalleryData): FileData {
-   if ("video" in item) return item.video;
-   if ("audio" in item) return item.audio;
-   return item.image;
- }
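The `GalleryData` union removed here is discriminated by which key is present (`image`, `video`, or `audio`) rather than by an explicit tag field, which is what lets `in` checks narrow the type. A small self-contained usage sketch; the inlined `FileData` is a pared-down stand-in for the @gradio/client type:

```ts
interface FileData {
  url?: string;
  orig_name?: string;
}

type GalleryData =
  | { image: FileData; caption: string | null }
  | { video: FileData; caption: string | null }
  | { audio: FileData; caption: string | null };

// The `in` operator narrows the union, so each branch is fully typed.
function getMediaFile(item: GalleryData): FileData {
  if ("video" in item) return item.video;
  if ("audio" in item) return item.audio;
  return item.image;
}

const items: GalleryData[] = [
  { image: { orig_name: "photo.png" }, caption: "a photo" },
  { audio: { orig_name: "track.mp3" }, caption: null }
];

for (const item of items) {
  console.log(getMediaFile(item).orig_name); // "photo.png", then "track.mp3"
}
```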
mediagallery/pyproject.toml DELETED
@@ -1,54 +0,0 @@
- [build-system]
- requires = [
-   "hatchling",
-   "hatch-requirements-txt",
-   "hatch-fancy-pypi-readme>=22.5.0",
- ]
- build-backend = "hatchling.build"
-
- [project]
- name = "gradio_mediagallery"
- version = "0.0.1"
- description = "Python library for easily interacting with trained machine learning models"
- readme = "README.md"
- license = "Apache-2.0"
- requires-python = ">=3.8"
- authors = [{ name = "YOUR NAME", email = "[email protected]" }]
- keywords = [
-   "gradio-custom-component",
-   "gradio-template-Gallery"
- ]
- # Add dependencies here
- dependencies = ["gradio>=4.0,<6.0"]
- classifiers = [
-   'Development Status :: 3 - Alpha',
-   'Operating System :: OS Independent',
-   'Programming Language :: Python :: 3',
-   'Programming Language :: Python :: 3 :: Only',
-   'Programming Language :: Python :: 3.8',
-   'Programming Language :: Python :: 3.9',
-   'Programming Language :: Python :: 3.10',
-   'Programming Language :: Python :: 3.11',
-   'Topic :: Scientific/Engineering',
-   'Topic :: Scientific/Engineering :: Artificial Intelligence',
-   'Topic :: Scientific/Engineering :: Visualization',
- ]
-
- # The repository and space URLs are optional, but recommended.
- # Adding a repository URL will create a badge in the auto-generated README that links to the repository.
- # Adding a space URL will create a badge in the auto-generated README that links to the space.
- # This will make it easy for people to find your deployed demo or source code when they
- # encounter your project in the wild.
-
- # [project.urls]
- # repository = "your github repository"
- # space = "your space url"
-
- [project.optional-dependencies]
- dev = ["build", "twine"]
-
- [tool.hatch.build]
- artifacts = ["/backend/gradio_mediagallery/templates", "*.pyi"]
-
- [tool.hatch.build.targets.wheel]
- packages = ["/backend/gradio_mediagallery"]
requirements.txt CHANGED
@@ -1,3 +1,3 @@
  openai>=1.55.0
- gradio==5.34.1
+ gradio==5.6.0
  moviepy==1