Yelorix committed
Commit 5d077e1 · verified · 1 Parent(s): 8fa82f9

Upload folder using huggingface_hub

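For reference, a commit like this is typically produced with `huggingface_hub`'s `upload_folder` API (a minimal sketch; the repo ID below matches this Space, everything else is illustrative):

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path=".",                       # local folder to upload
    repo_id="Yelorix/gradio_vistimeline",  # target Space
    repo_type="space",
)
```
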
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +7 -0
  2. .gitignore +27 -0
  3. README.md +193 -12
  4. __init__.py +0 -0
  5. app.py +475 -0
  6. custom_time_control.js +121 -0
  7. requirements.txt +1 -0
  8. src/.gitignore +27 -0
  9. src/README.md +193 -0
  10. src/backend/gradio_vistimeline/__init__.py +5 -0
  11. src/backend/gradio_vistimeline/model.py +35 -0
  12. src/backend/gradio_vistimeline/templates/component/index.js +0 -0
  13. src/backend/gradio_vistimeline/templates/component/style.css +0 -0
  14. src/backend/gradio_vistimeline/templates/example/index.js +151 -0
  15. src/backend/gradio_vistimeline/templates/example/style.css +1 -0
  16. src/backend/gradio_vistimeline/vistimeline.py +74 -0
  17. src/backend/gradio_vistimeline/vistimeline.pyi +305 -0
  18. src/demo/__init__.py +0 -0
  19. src/demo/app.py +475 -0
  20. src/demo/custom_time_control.js +121 -0
  21. src/demo/requirements.txt +1 -0
  22. src/frontend/Example.svelte +110 -0
  23. src/frontend/Index.svelte +440 -0
  24. src/frontend/gradio.config.js +9 -0
  25. src/frontend/package-lock.json +0 -0
  26. src/frontend/package.json +41 -0
  27. src/frontend/tsconfig.json +9 -0
  28. src/package.json +6 -0
  29. src/pyproject.toml +45 -0
  30. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER +1 -0
  31. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst +28 -0
  32. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA +93 -0
  33. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD +14 -0
  34. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL +5 -0
  35. src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt +1 -0
  36. src/venv/Lib/site-packages/PIL/BdfFontFile.py +133 -0
  37. src/venv/Lib/site-packages/PIL/BlpImagePlugin.py +493 -0
  38. src/venv/Lib/site-packages/PIL/BmpImagePlugin.py +511 -0
  39. src/venv/Lib/site-packages/PIL/BufrStubImagePlugin.py +76 -0
  40. src/venv/Lib/site-packages/PIL/ContainerIO.py +173 -0
  41. src/venv/Lib/site-packages/PIL/CurImagePlugin.py +75 -0
  42. src/venv/Lib/site-packages/PIL/DcxImagePlugin.py +80 -0
  43. src/venv/Lib/site-packages/PIL/DdsImagePlugin.py +575 -0
  44. src/venv/Lib/site-packages/PIL/EpsImagePlugin.py +474 -0
  45. src/venv/Lib/site-packages/PIL/ExifTags.py +381 -0
  46. src/venv/Lib/site-packages/PIL/FitsImagePlugin.py +152 -0
  47. src/venv/Lib/site-packages/PIL/FliImagePlugin.py +175 -0
  48. src/venv/Lib/site-packages/PIL/FontFile.py +134 -0
  49. src/venv/Lib/site-packages/PIL/FpxImagePlugin.py +257 -0
  50. src/venv/Lib/site-packages/PIL/FtexImagePlugin.py +115 -0
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/gradio/frpc_windows_amd64_v0.3 filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/gradio/templates/frontend/assets/Canvas3D-CBtQcOmf.js.map filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Canvas3D.kwvn8xU9.js.br filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/gradio/templates/node/build/server/chunks/Canvas3D-CBRlDVSB.js.map filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/gradio/templates/node/build/server/chunks/PlotlyPlot-P3y0I-J2.js.map filter=lfs diff=lfs merge=lfs -text
+ src/venv/Lib/site-packages/numpy.libs/libscipy_openblas64_-c16e4918366c6bc1f1cd71e28ca36fc0.dll filter=lfs diff=lfs merge=lfs -text
+ src/venv/Scripts/ruff.exe filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,27 @@
+ *.py[cod]
+ __pycache__/
+ *.so
+ .Python
+ .eggs/
+ *.egg-info/
+ dist/
+ build/
+ .pytest_cache/
+ .gradio/
+ .gradio/certificate.pem
+
+ node_modules/
+ .svelte-kit/
+ .vite/
+
+ venv/
+ .env/
+ .venv/
+ env/
+
+ .vscode/
+ .idea/
+ *.swp
+ *.swo
+
+ .DS_Store
README.md CHANGED
@@ -1,12 +1,193 @@
- ---
- title: Gradio Vistimeline
- emoji: 📉
- colorFrom: purple
- colorTo: pink
- sdk: gradio
- sdk_version: 5.9.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ tags: [gradio-custom-component, timeline, vis timeline, vis-timeline, vis.js]
+ title: gradio_vistimeline
+ short_description: Gradio implementation of the vis.js Timeline library
+ colorFrom: blue
+ colorTo: yellow
+ sdk: gradio
+ pinned: false
+ app_file: app.py
+ ---
+ <h1 style='text-align: center; margin-bottom: 1rem'> Gradio vis.js Timeline </h1>
+
+ <div style="display: flex; flex-direction: row; justify-content: center">
+ <img style="display: block; padding-right: 5px; height: 20px;" alt="Static Badge" src="https://img.shields.io/pypi/v/gradio_vistimeline">
+ <a href="https://github.com/Yelorix/gradio-vis-timeline" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/github-white?logo=github&logoColor=black"></a>
+ <a href="https://huggingface.co/spaces/Yelorix/gradio_vistimeline" target="_blank"><img alt="Open in Spaces" src="https://huggingface.co/datasets/huggingface/badges/resolve/main/open-in-hf-spaces-sm-dark.svg"></a>
+ </div>
+
+ A Gradio component that implements the [vis.js Timeline](https://github.com/visjs/vis-timeline) visualization library, allowing you to create interactive timelines in your Gradio apps.
+
+ **Resources:**
+ - [Timeline Examples](https://visjs.github.io/vis-timeline/examples/timeline/)
+ - [Timeline Documentation](https://visjs.github.io/vis-timeline/docs/timeline/)
+ - [Dataset Documentation](https://visjs.github.io/vis-data/data/dataset.html)
+
+ ## Installation
+
+ ```bash
+ pip install gradio_vistimeline
+ ```
+
+ ## Usage
+
+ ```python
+ import gradio as gr
+ from gradio_vistimeline import VisTimeline
+
+ demo = gr.Interface(
+     lambda x: x,
+     VisTimeline(
+         value={
+             "items": [
+                 {"content": "Item 1", "start": "2024-12-2", "end": "2024-12-10"},
+                 {"content": "Item 2", "start": "2024-12-14"}
+             ]
+         },
+         options={
+             "start": "2024-12-1",
+             "end": "2024-12-15",
+             "editable": True
+         }
+     ),
+     "json"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
+ ```
+
+ ## `VisTimeline`
+
+ ### Features
+
+ - Interactive timeline visualization
+ - Integration of vis.js Timeline includes:
+   - Items
+   - Ranges
+   - Points
+   - Background items
+   - Groups
+   - Subgroups
+ - Pass an options object during instantiation
+ - Styled with Gradio's style variables
+ - Gradio events for editing data and selecting items
+
+ ### Value Data Format
+
+ The timeline accepts a value in the following format:
+
+ ```python
+ {
+     "groups": [
+         {
+             "id": "group_id",
+             "content": "Group Name"  # Optional
+         }
+     ],
+     "items": [
+         {
+             "content": "Item content",
+             "start": "2024-01-01",  # ISO date string or Unix timestamp
+             "end": "2024-01-05",    # Optional
+             "group": "group_id",    # Optional
+         }
+     ]
+ }
+ ```
+
+ Or as a VisTimelineData object:
+
+ ```python
+ from gradio_vistimeline import VisTimelineGroup, VisTimelineItem, VisTimelineData
+
+ value = VisTimelineData(
+     groups=[
+         VisTimelineGroup(
+             id="group_id",
+             content="Group Name"  # Optional
+         )
+     ],
+     items=[
+         VisTimelineItem(
+             content="Item content",
+             start="2024-01-01",  # ISO date string or Unix timestamp
+             end="2024-01-05",    # Optional
+             group="group_id"     # Optional
+         )
+     ]
+ )
+ ```
+
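+ Since `VisTimelineData` is a Pydantic model, a value can also be deserialized straight from JSON. A minimal sketch (the payload is illustrative; the demo app in this repo uses the same `model_validate_json` call):
+
+ ```python
+ from gradio_vistimeline import VisTimelineData
+
+ raw = '{"groups": [{"id": 0, "content": "Group"}], "items": [{"content": "Item", "start": "2024-01-01"}]}'
+ data = VisTimelineData.model_validate_json(raw)  # raises on invalid payloads
+ print(data.items[0].content)  # "Item"
+ ```
+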
+ ### Events
+
+ | name | description |
+ |:-----|:------------|
+ | `load` | Triggered when the component is mounted for the first time |
+ | `change` | Triggered when the timeline value changes through any means |
+ | `input` | Triggered when a user directly modifies timeline items (add/remove/update) |
+ | `select` | Triggered when clicking the timeline |
+ | `item_select` | Triggered when items are selected or unselected |
+
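+ Handlers attach to these events like any other Gradio component event. A condensed sketch, adapted from the demo app in this repo (`timeline` and `status_box` are illustrative components):
+
+ ```python
+ def on_item_select(timeline, event: gr.EventData):
+     # event._data holds the selected item IDs (str or int)
+     return f"Selected IDs: {event._data}"
+
+ timeline.change(lambda: "value changed", outputs=[status_box])
+ timeline.item_select(on_item_select, inputs=[timeline], outputs=[status_box])
+ ```
+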
+ ### Configuration
+
+ #### vis.js Timeline Options
+
+ The component accepts all configuration options supported by vis.js Timeline. Some commonly used options:
+
+ ```python
+ options = {
+     "editable": True,         # Enable item editing
+     "multiselect": True,      # Allow selecting multiple items
+     "showCurrentTime": True,  # Show a marker for the current time
+     "stack": True,            # Stack overlapping items
+     "zoomable": True          # Allow zooming the timeline
+ }
+ ```
+
+ For a complete list of options, see the [vis.js Timeline documentation](https://visjs.github.io/vis-timeline/docs/timeline/).
+
+ #### Component-Specific Options
+
+ **Data Synchronization**
+ ```python
+ VisTimeline(
+     value=value,
+     preserve_old_content_on_value_change=True  # Default: False
+ )
+ ```
+ Controls how the timeline updates its groups and items DataSets when the component value changes:
+ - `False`: Clears and reinitializes all DataSets to ensure perfect sync with the Gradio component value
+ - `True`: Merges new data with existing content (updates existing items, adds new ones, removes missing ones)
+
+ It defaults to `False` so that the rendered timeline always matches the component value.
+
+ Setting it to `True` reduces visual flicker when dragging items around.
+ In this mode, desync is only a real risk when you edit item IDs or add/remove properties of existing items.
+
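+ For instance, a handler that returns a lightly modified value benefits from merging, since unchanged items keep their client-side state. A condensed sketch (assumes the payload arrives as `VisTimelineData` models, which is the component's `data_model`):
+
+ ```python
+ timeline = VisTimeline(value=value, preserve_old_content_on_value_change=True)
+
+ def shift_items(data):
+     for item in data.items:
+         item.start = "2024-01-02"  # updates merge into the existing DataSet client-side
+     return data
+
+ timeline.input(shift_items, inputs=[timeline], outputs=[timeline])
+ ```
+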
+ **JavaScript Integration**
+ ```python
+ VisTimeline(
+     value=value,
+     elem_id="my-timeline"  # Optional
+ )
+ ```
+ When `elem_id` is set, the timeline instance becomes available in JavaScript as `window.visTimelineInstances["my-timeline"]`, allowing easy access through custom scripts.
+
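+ The demo app in this repo uses this hook to drive a custom time bar from a JavaScript callback. A condensed sketch of the pattern (`fit()` is a standard vis.js Timeline method; the button is illustrative):
+
+ ```python
+ timeline = VisTimeline(value=value, elem_id="my-timeline")
+ fit_button = gr.Button("Fit timeline")
+ fit_button.click(
+     fn=None,
+     js='() => window.visTimelineInstances["my-timeline"].fit()'
+ )
+ ```
+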
+ **Styling Items**
+ The component provides pre-defined color classes matching Gradio's theme colors. Apply them by setting an item's `className`:
+
+ ```python
+ item = {
+     "content": "Colored item",
+     "start": "2024-01-01",
+     "className": "color-primary-500"  # Uses Gradio's primary-500 color
+ }
+ ```
+
+ Available color classes follow the pattern:
+ - `color-primary-[50-950]`
+ - `color-secondary-[50-950]`
+ - `color-neutral-[50-950]`
+
+ Custom styles can be applied by defining your own CSS classes.
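+
+ For example, a custom class can be supplied through the `css` argument of `gr.Blocks` (the class name and styling below are illustrative):
+
+ ```python
+ css = ".my-highlight { border-color: red; }"
+
+ with gr.Blocks(css=css) as demo:
+     VisTimeline(value={"items": [
+         {"content": "Important", "start": "2024-01-01", "className": "my-highlight"}
+     ]})
+ ```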
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,475 @@
+ import datetime as dt
+ import gradio as gr
+ import json
+ import numpy as np
+ import os
+ from datetime import timedelta, date, datetime
+ from gradio_vistimeline import VisTimeline, VisTimelineData
+
+ # --- Region: Handlers for demo tab 1 ---
+ def pull_from_timeline(timeline):
+     """Convert timeline data to a JSON string for display"""
+     if hasattr(timeline, "model_dump"):
+         data = timeline.model_dump(exclude_none=True)
+     else:
+         data = timeline
+     return json.dumps(data, indent=2)
+
+ def push_to_timeline(json_str):
+     """Convert a JSON string to timeline data"""
+     try:
+         return VisTimelineData.model_validate_json(json_str)
+     except Exception as e:
+         print(f"Error parsing JSON: {e}")
+         return VisTimelineData(groups=[], items=[])
+
+ def on_timeline_change():
+     return f"Most recent value change event:\n{get_now()}"
+
+ def on_timeline_input(event_data: gr.EventData):
+     return f"Most recent input event:\nAction: '{event_data._data}' at {get_now()}"
+
+ def on_timeline_select():
+     return f"Most recent timeline selected event:\n{get_now()}"
+
+ def on_item_select(timeline, event_data: gr.EventData):
+     selected_ids = event_data._data  # A collection of selected item IDs that can be str or int: ["example", 0, "example2"]
+     items = timeline.items
+
+     if selected_ids:
+         first_id = selected_ids[0]
+         for item in items:
+             if item.id == first_id:
+                 content = getattr(item, 'content', 'Unknown')
+                 return f"Currently selected item:\nContent: \"{content}\"\nID: \"{first_id}\""
+
+     return "Currently selected item:\nNone"
+
+ # --- Region: Handlers for demo tab 2 ---
+ def update_table(timeline):
+     if hasattr(timeline, "model_dump"):
+         data = timeline.model_dump(exclude_none=True)
+     else:
+         data = timeline
+
+     items = data["items"]
+     track_length_ms = get_grouped_item_end_in_ms(items, "track-length")
+
+     rows = []
+     for item in items:
+         if item["content"] != "":
+             duration = calculate_and_format_duration(item["start"], item.get("end"), track_length_ms)
+             rows.append([
+                 item["content"],
+                 format_date_to_milliseconds(item["start"]),
+                 duration
+             ])
+
+     return gr.DataFrame(
+         value=rows,
+         headers=["Item Name", "Start Time", "Duration"]
+     )
+
+ def update_audio(timeline):
+     """
+     Handler function for generating audio from timeline data.
+     Returns audio data in the format expected by Gradio's Audio component.
+     """
+     audio_data, sample_rate = generate_audio_from_timeline(timeline)
+     # Convert to the correct shape and data type for Gradio Audio
+     # Gradio expects a 2D array with shape (samples, channels)
+     audio_data = audio_data.reshape(-1, 1)  # Make it 2D with 1 channel
+     return (sample_rate, audio_data)
+
+ def generate_audio_from_timeline(timeline_data, sample_rate=44100):
+     """
+     Generate audio from timeline items containing frequency information.
+
+     Args:
+         timeline_data: Timeline data containing items with start/end times in milliseconds
+         sample_rate: Audio sample rate in Hz (default 44100)
+
+     Returns:
+         Tuple of (audio_data: np.ndarray, sample_rate: int)
+     """
+     # Get all items from the timeline
+     if hasattr(timeline_data, "model_dump"):
+         data = timeline_data.model_dump(exclude_none=True)
+     else:
+         data = timeline_data
+
+     items = data["items"]
+
+     # Find the track length from the background item
+     track_length_ms = get_grouped_item_end_in_ms(items, "track-length")
+
+     # Convert milliseconds to samples
+     total_samples = int((track_length_ms / 1000) * sample_rate)
+
+     # Initialize an empty audio buffer
+     audio_buffer = np.zeros(total_samples)
+
+     # Frequency mapping
+     freq_map = {
+         1: 440.0,
+         2: 554.37,
+         3: 659.26
+     }
+     # Generate sine waves for each item
+     for item in items:
+         item_id = item.get("id", 0)
+         start_time = parse_date_to_milliseconds(item["start"])
+         end_time = parse_date_to_milliseconds(item.get("end", 0))  # Items without an end are skipped below
+
+         # Skip items that are completely outside the valid range
+         if end_time <= 0 or start_time >= track_length_ms or start_time >= end_time:
+             continue
+
+         # Clamp times to the valid range
+         start_time = max(0, min(start_time, track_length_ms))
+         end_time = max(0, min(end_time, track_length_ms))
+
+         if item_id in freq_map:
+             freq = freq_map[item_id]
+
+             # Convert millisecond timestamps to sample indices
+             start_sample = int((start_time / 1000) * sample_rate)
+             end_sample = int((end_time / 1000) * sample_rate)
+
+             # Generate a time array for this segment
+             t = np.arange(start_sample, end_sample)
+
+             # Generate a sine wave with a fade-in/fade-out envelope
+             duration = end_sample - start_sample
+             envelope = np.ones(duration)
+             fade_samples = min(int(0.10 * sample_rate), duration // 2)  # 100 ms fade or half the duration
+             if fade_samples > 0:  # Guard against assigning an empty fade to the whole buffer
+                 envelope[:fade_samples] = np.linspace(0, 1, fade_samples)
+                 envelope[-fade_samples:] = np.linspace(1, 0, fade_samples)
+
+             wave = 0.2 * envelope * np.sin(2 * np.pi * freq * t / sample_rate)
+
+             # Add to the buffer
+             audio_buffer[start_sample:end_sample] += wave
+
+     # Normalize to prevent clipping
+     max_val = np.max(np.abs(audio_buffer))
+     if max_val > 0:
+         audio_buffer = audio_buffer / max_val
+
+     return (audio_buffer, sample_rate)
+
+ # Helper function to get the hard-coded track length from the timeline value
+ def get_grouped_item_end_in_ms(items, group_id):
+     default_length = 6000
+     for item in items:
+         if item.get("group") == group_id:
+             return parse_date_to_milliseconds(item.get("end", default_length))
+     return default_length
+
+ # --- Region: Demo-specific datetime helper functions ---
+ def calculate_and_format_duration(start_date, end_date, max_range):
+     """Calculate the seconds between two datetime inputs and format the result with up to 2 decimals."""
+     if not end_date:
+         return "0s"
+
+     # Convert dates to milliseconds
+     start_ms = max(0, parse_date_to_milliseconds(start_date))
+     end_ms = min(max_range, parse_date_to_milliseconds(end_date))
+
+     if end_ms < start_ms:
+         return "0s"
+
+     # Calculate the duration in seconds
+     duration = (end_ms - start_ms) / 1000
+
+     # Format to remove trailing zeroes after rounding to 2 decimal places
+     formatted_duration = f"{duration:.2f}".rstrip("0").rstrip(".")
+     return f"{formatted_duration}s"
+
+ def format_date_to_milliseconds(date):
+     """Format input (ISO8601 string or milliseconds) to mm:ss.SSS."""
+     date_in_milliseconds = max(0, parse_date_to_milliseconds(date))
+     time = timedelta(milliseconds=date_in_milliseconds)
+
+     # Format the timedelta into mm:ss.SSS
+     minutes, seconds = divmod(time.seconds, 60)
+     milliseconds_part = time.microseconds // 1000
+     return f"{minutes:02}:{seconds:02}.{milliseconds_part:03}"
+
+ def parse_date_to_milliseconds(date):
+     """Convert input (ISO8601 string or milliseconds) to milliseconds"""
+     if isinstance(date, int):  # Input is already in milliseconds (Unix timestamp)
+         return date
+     elif isinstance(date, str):  # Input is an ISO8601 datetime string
+         parsed = datetime.fromisoformat(date.replace("Z", "+00:00"))
+         epoch = datetime(1970, 1, 1, tzinfo=parsed.tzinfo)  # Calculate the difference from the Unix epoch
+         return int((parsed - epoch).total_seconds() * 1000)
+     else:
+         return 0  # Fallback for unsupported types
+
+ def get_now():
+     """Returns the current time in HH:MM:SS format"""
+     return datetime.now().strftime("%H:%M:%S")
+
+ TIMELINE_ID = "dateless_timeline"
+ AUDIO_ID = "timeline-audio"
+
+ # Example of how to access the timeline through JavaScript:
+ # in this case, binding the timeline's custom time bar to stay in sync with the audio component.
+ # Read the JavaScript file
+ js_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'custom_time_control.js')
+ with open(js_path, 'r') as f:
+     js_content = f.read()
+ script = f"""<script>{js_content}</script>"""
+ style = f"""<style>.vis-custom-time.{TIMELINE_ID} {{pointer-events: none !important;}}</style>"""
+ head = script + style
+
+ # --- Region: Gradio ---
+ with gr.Blocks(head=head) as demo:
+     today = date.today()
+     day_offset = lambda days: (today + dt.timedelta(days=days)).isoformat()
+
+     gr.Markdown("# Vis.js Timeline Component Demo")
+
+     with gr.Tabs():
+         # --- Tab 1: Basic Timeline with Events ---
+         with gr.Tab("Basic Timeline Demo"):
+             # Timeline component
+             basic_timeline = VisTimeline(
+                 value={
+                     "groups": [{"id": 0, "content": ""}],
+                     "items": []
+                 },
+                 options={
+                     "start": day_offset(-1),
+                     "end": day_offset(20),
+                     "editable": True,
+                     "format": {  # These are the default values, so defining this is normally unnecessary; this demo must because two timelines with different formats share one page
+                         "minorLabels": {
+                             "millisecond": "SSS",
+                             "second": "ss",
+                             "minute": "HH:mm",
+                             "hour": "HH:mm",
+                         }
+                     }
+                 },
+                 label="Interactive Timeline",
+                 interactive=True
+             )
+
+             gr.Markdown("### Events")
+
+             # Event listener outputs
+             with gr.Row():
+                 change_textbox = gr.Textbox(value="Most recent value change event:", label="Change:", lines=3, interactive=False)
+                 input_textbox = gr.Textbox(value="Most recent user input event:", label="Input:", lines=3, interactive=False)
+                 select_textbox = gr.Textbox(value="Most recent timeline selected event:", label="Select:", lines=3, interactive=False)
+                 item_select_textbox = gr.Textbox(value="Currently selected item:\nNone", label="Currently selected item:", lines=3, interactive=False)
+
+             # Examples and JSON area in two columns
+             with gr.Row():
+                 # Left column: Examples
+                 with gr.Column():
+                     gr.Markdown("### Timeline Examples")
+                     gr.Examples(
+                         examples=[
+                             {
+                                 "groups": [{"id": 0, "content": ""}],
+                                 "items": [
+                                     {"content": "Working", "group": 0, "start": day_offset(1), "end": day_offset(5)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(5), "end": day_offset(7)},
+                                     {"content": "Working", "group": 0, "start": day_offset(7), "end": day_offset(11)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(11), "end": day_offset(13)},
+                                     {"content": "Working", "group": 0, "start": day_offset(13), "end": day_offset(17)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(17), "end": day_offset(19)},
+                                     {"content": "Working", "group": 0, "start": day_offset(19), "end": day_offset(23)},
+                                 ],
+                                 "description": "DateTime ranges"
+                             },
+                             {
+                                 "groups": [{"id": 0, "content": "Group"}],
+                                 "items": [
+                                     {"id": 0, "content": "Simple item", "group": 0, "start": day_offset(9)}
+                                 ]
+                             },
+                             {
+                                 "groups": [{"id": 0, "content": "Worker 1"}, {"id": 1, "content": "Worker 2"}],
+                                 "items": [
+                                     {"content": "Working", "group": 0, "start": day_offset(1), "end": day_offset(5)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(5), "end": day_offset(7)},
+                                     {"content": "Working", "group": 0, "start": day_offset(7), "end": day_offset(11)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(11), "end": day_offset(13)},
+                                     {"content": "Working", "group": 0, "start": day_offset(13), "end": day_offset(17)},
+                                     {"content": "Resting", "group": 0, "start": day_offset(17), "end": day_offset(19)},
+                                     {"content": "Working", "group": 0, "start": day_offset(19), "end": day_offset(23)},
+                                     {"content": "Working", "group": 1, "start": day_offset(-3), "end": day_offset(2)},
+                                     {"content": "Resting", "group": 1, "start": day_offset(2), "end": day_offset(4)},
+                                     {"content": "Working", "group": 1, "start": day_offset(4), "end": day_offset(8)},
+                                     {"content": "Resting", "group": 1, "start": day_offset(8), "end": day_offset(10)},
+                                     {"content": "Working", "group": 1, "start": day_offset(10), "end": day_offset(14)},
+                                     {"content": "Resting", "group": 1, "start": day_offset(14), "end": day_offset(16)},
+                                     {"content": "Working", "group": 1, "start": day_offset(16), "end": day_offset(20)}
+                                 ],
+                                 "description": "DateTime ranges in groups"
+                             },
+                             {
+                                 "groups": [{"id": 1, "content": "Group 1"}, {"id": 2, "content": "Group 2"}],
+                                 "items": [
+                                     {"id": "A", "content": "Period A", "start": day_offset(1), "end": day_offset(7), "type": "background", "group": 1},
+                                     {"id": "B", "content": "Period B", "start": day_offset(8), "end": day_offset(11), "type": "background", "group": 2},
+                                     {"id": "C", "content": "Period C", "start": day_offset(12), "end": day_offset(17), "type": "background"},
+                                     {"content": "Range inside period A", "start": day_offset(2), "end": day_offset(6), "group": 1},
+                                     {"content": "Item inside period C", "group": 2, "start": day_offset(14)}
+                                 ],
+                                 "description": "Background type example"
+                             },
+                             {
+                                 "groups": [{"id": 1, "content": "Group 1"}, {"id": 2, "content": "Group 2"}],
+                                 "items": [
+                                     {"content": "Range item", "group": 1, "start": day_offset(7), "end": day_offset(14)},
+                                     {"content": "Point item", "group": 2, "start": day_offset(7), "type": "point"},
+                                     {"content": "Point item with a longer name", "group": 2, "start": day_offset(7), "type": "point"},
+                                 ],
+                                 "description": "Point type example"
+                             },
+                             {
+                                 "groups": [{"id": 1, "content": "Group 1", "subgroupStack": {"A": True, "B": True}}, {"id": 2, "content": "Group 2"}],
+                                 "items": [
+                                     {"content": "Subgroup 2 Background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "A", "subgroupOrder": 0},
+                                     {"content": "Subgroup 2 Item", "start": day_offset(5), "end": day_offset(7), "group": 1, "subgroup": "A", "subgroupOrder": 0},
+                                     {"content": "Subgroup 1 Background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "B", "subgroupOrder": 1},
+                                     {"content": "Subgroup 1 Item", "start": day_offset(8), "end": day_offset(10), "group": 1, "subgroup": "B", "subgroupOrder": 1},
+                                     {"content": "Full group background", "start": day_offset(5), "end": day_offset(9), "type": "background", "group": 2},
+                                     {"content": "No subgroup item 1", "start": day_offset(10), "end": day_offset(12), "group": 2},
+                                     {"content": "No subgroup item 2", "start": day_offset(13), "end": day_offset(15), "group": 2}
+                                 ],
+                                 "description": "Subgroups with backgrounds and items"
+                             },
+                             {
+                                 "groups": [{"id": 1, "content": "Group 1", "subgroupStack": {"A": True, "B": True}}, {"id": 2, "content": "Group 2"}],
+                                 "items": [
+                                     {"content": "Subgroup 2 background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "A", "subgroupOrder": 0},
+                                     {"content": "Subgroup 2 range", "start": day_offset(5), "end": day_offset(7), "group": 1, "subgroup": "A", "subgroupOrder": 0},
+                                     {"content": "Subgroup 2 item", "start": day_offset(10), "group": 1, "subgroup": "A"},
+                                     {"content": "Subgroup 1 background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "B", "subgroupOrder": 1},
+                                     {"content": "Subgroup 1 range", "start": day_offset(8), "end": day_offset(10), "group": 1, "subgroup": "B", "subgroupOrder": 1},
+                                     {"content": "Subgroup 1 item", "start": day_offset(14), "group": 1, "subgroup": "B"},
+                                     {"content": "No subgroup item", "start": day_offset(12), "group": 1},
+                                     {"content": "Full group background", "start": day_offset(5), "end": day_offset(9), "type": "background", "group": 2},
+                                     {"content": "No subgroup range 1", "start": day_offset(11), "end": day_offset(13), "group": 2},
+                                     {"content": "No subgroup range 2", "start": day_offset(15), "end": day_offset(17), "group": 2},
+                                     {"content": "No subgroup point", "start": day_offset(1), "group": 2, "type": "point"}
+                                 ],
+                                 "description": "Combination of item and group types"
+                             }
+                         ],
+                         inputs=basic_timeline
+                     )
+
+                 # Right column: JSON staging area
+                 with gr.Column():
+                     gr.Markdown("### Serialized Timeline Value")
+                     json_textbox = gr.Textbox(label="JSON", lines=4)
+                     with gr.Row():
+                         pull_button = gr.Button("Pull Timeline into JSON")
+                         push_button = gr.Button("Push JSON onto Timeline", variant="primary")
+
+             # Event handlers
+             basic_timeline.change(fn=on_timeline_change, outputs=[change_textbox])  # Triggered when the value of the timeline changes by any means
+             basic_timeline.input(fn=on_timeline_input, outputs=[input_textbox])  # Triggered when the value changes directly through user input on the component (dragging, adding & removing items)
+             basic_timeline.select(fn=on_timeline_select, outputs=[select_textbox])  # Triggered when the timeline is clicked
+             basic_timeline.item_select(fn=on_item_select, inputs=[basic_timeline], outputs=[item_select_textbox])  # Triggered when items are selected or unselected
+
+             pull_button.click(fn=pull_from_timeline, inputs=[basic_timeline], outputs=[json_textbox])  # Example of using the timeline as an input
+             push_button.click(fn=push_to_timeline, inputs=[json_textbox], outputs=[basic_timeline])  # Example of using the timeline as an output
+
+         # --- Tab 2: Timeline without date ---
+         with gr.Tab("Timeline Without Date"):
+             audio_output = gr.Audio(label="Generated Audio", type="numpy", elem_id=AUDIO_ID)
+
+             dateless_timeline = VisTimeline(
+                 value={
+                     "groups": [{"id": "track-length", "content": ""}, {"id": 1, "content": ""}, {"id": 2, "content": ""}, {"id": 3, "content": ""}],
+                     "items": [
+                         {"content": "", "group": "track-length", "selectable": False, "type": "background", "start": 0, "end": 6000, "className": "color-primary-600"},
+                         {"id": 1, "content": "440.00Hz", "group": 1, "selectable": False, "start": 0, "end": 1500},
+                         {"id": 2, "content": "554.37Hz", "group": 2, "selectable": False, "start": 2000, "end": 3500},
+                         {"id": 3, "content": "659.26Hz", "group": 3, "selectable": False, "start": 4000, "end": 5500}
+                     ]
+                 },
+                 options={
+                     "moment": "+00:00",  # Force the timeline into a fixed UTC offset timezone
+                     "showCurrentTime": False,
+                     "editable": {
+                         "add": False,
+                         "remove": False,
+                         "updateGroup": False,
+                         "updateTime": True
+                     },
+                     "itemsAlwaysDraggable": {  # So dragging does not require selection first
+                         "item": True,
+                         "range": True
+                     },
+                     "showMajorLabels": False,  # Hides the month & year labels
+                     "format": {
+                         "minorLabels": {  # Force the minor labels into a format that does not include weekdays or months
+                             "millisecond": "mm:ss.SSS",
+                             "second": "mm:ss",
+                             "minute": "mm:ss",
+                             "hour": "HH:mm:ss"
+                         }
+                     },
+                     "start": 0,       # The timeline starts at the Unix epoch
+                     "end": 6000,      # The initial visible range ends at 6 seconds (Unix timestamp in milliseconds)
+                     "min": 0,         # Restrict navigation: the timeline cannot be scrolled further left than 0 seconds
+                     "max": 7000,      # Restrict navigation: the timeline cannot be scrolled further right than 7 seconds
+                     "zoomMin": 1000,  # Allow zooming in until the visible window spans 1000 milliseconds
+                 },
+                 label="Timeline without date labels, with restrictions on navigation and zoom. You can drag and resize items without having to select them first.",
+                 elem_id=TIMELINE_ID  # This also makes the timeline instance accessible in JavaScript via 'window.visTimelineInstances["your elem_id"]'
+             )
+
+             table = gr.DataFrame(
+                 headers=["Item Name", "Start Time", "Duration"],
+                 label="Timeline Items",
+                 interactive=False
+             )
+
+             generate_audio_button = gr.Button("Generate Audio")
+
+             # Event handlers
+             dateless_timeline.change(fn=update_table, inputs=[dateless_timeline], outputs=[table])
+             dateless_timeline.load(fn=update_table, inputs=[dateless_timeline], outputs=[table])
+
+             generate_audio_button.click(
+                 fn=update_audio,
+                 inputs=[dateless_timeline],
+                 outputs=[audio_output],
+             ).then(
+                 fn=None,
+                 inputs=None,
+                 outputs=None,
+                 js=f'() => initAudioSync("{TIMELINE_ID}", "{AUDIO_ID}", 6000)'
+             )
+
+         # --- Tab 3: Links to documentation and examples ---
+         with gr.Tab("Documentation & More Examples"):
+             gr.Markdown("""
+             ## Vis.js Timeline Examples
+             A collection of HTML/CSS/JavaScript snippets displaying various properties and use-cases:
+             [https://visjs.github.io/vis-timeline/examples/timeline/](https://visjs.github.io/vis-timeline/examples/timeline/)
+             <br><br>
+             ## Vis.js Timeline Documentation
+             The official documentation of the timeline:
+             [https://visjs.github.io/vis-timeline/docs/timeline/](https://visjs.github.io/vis-timeline/docs/timeline/)
+             <br><br>
+             ## Vis.js DataSet Documentation
+             The official documentation of the DataSet model:
+             [https://visjs.github.io/vis-data/data/dataset.html](https://visjs.github.io/vis-data/data/dataset.html)
+             """)
+
+ if __name__ == "__main__":
+     demo.launch(show_api=False)
custom_time_control.js ADDED
@@ -0,0 +1,121 @@
+ function manageTimeBar(elemId, time) {
+     if (!window.visTimelineInstances) {
+         console.error(`Timeline instances collection not found`);
+         return;
+     }
+
+     const timeline = window.visTimelineInstances[elemId];
+     if (!timeline) {
+         console.error(`Timeline instance ${elemId} not found`);
+         return;
+     }
+
+     if (!window.customTimeBarIds) {
+         window.customTimeBarIds = {};
+     }
+
+     try {
+         timeline.setCustomTime(time, elemId);
+     } catch (e) {
+         timeline.addCustomTime(time, elemId);
+     }
+ }
+
+ function setTimeBarDirect(elemId, time) {
+     manageTimeBar(elemId, time);
+ }
+
+ function setTimeBarNormalized(elemId, start, end, normalizedPos) {
+     const time = start + (end - start) * normalizedPos;
+     manageTimeBar(elemId, time);
+ }
+
+ class AudioTimelineSync {
+     constructor(timelineId, audioId, trackLength) {
+         this.timelineId = timelineId;
+         this.trackLength = trackLength;
+         const container = document.getElementById(audioId);
+
+         // Find the progress element through the shadow DOM
+         const waveform = container.querySelector('#waveform');
+         if (!waveform) {
+             console.error('Waveform container not found');
+             return;
+         }
+
+         // Access the shadow root and find the progress element
+         const shadowRoot = waveform.querySelector('div').shadowRoot;
+         this.progressElement = shadowRoot.querySelector('div[part="progress"]');
+
+         if (!this.progressElement) {
+             console.error('Progress element not found');
+             return;
+         }
+
+         this.setupProgressObserver();
+     }
+
+     setupProgressObserver() {
+         // Create a mutation observer to watch for style changes on the audio component's progress element.
+         // The style reflects the playback position of the audio source, even when the audio is not playing but the time bar is being dragged by the cursor.
+         this.observer = new MutationObserver((mutations) => {
+             mutations.forEach((mutation) => {
+                 if (mutation.type === 'attributes' && mutation.attributeName === 'style') {
+                     this.onProgressUpdate();
+                 }
+             });
+         });
+
+         // Observe the progress element for style changes
+         this.observer.observe(this.progressElement, {
+             attributes: true,
+             attributeFilter: ['style']
+         });
+     }
+
+     onProgressUpdate() {
+         const style = this.progressElement.style;
+         const widthStr = style.width;
+         if (!widthStr) return;
+
+         // Convert the percentage string to a number (e.g., "70.7421%" -> 0.707421)
+         const percentage = parseFloat(widthStr) / 100;
+         this.syncTimeBarToPlayback(percentage);
+     }
+
+     syncTimeBarToPlayback(normalizedPosition) {
+         const timeline = window.visTimelineInstances[this.timelineId];
+         if (timeline) {
+             setTimeBarNormalized(this.timelineId, 0, this.trackLength, normalizedPosition);
+         }
+     }
+
+     cleanup() {
+         // Disconnect the observer
+         if (this.observer) {
+             this.observer.disconnect();
+             this.observer = null;
+         }
+     }
+ }
+
+ function initAudioSync(timelineId, audioId, trackLength) {
+     try {
+         // Initialize the syncs container if it doesn't exist
+         if (!window.audioTimelineSyncs) {
+             window.audioTimelineSyncs = {};
+         }
+
+         // Clean up the existing sync, if any
+         if (window.audioTimelineSyncs[timelineId]) {
+             window.audioTimelineSyncs[timelineId].cleanup();
+         }
+
+         // Create a new sync instance
+         window.audioTimelineSyncs[timelineId] = new AudioTimelineSync(timelineId, audioId, trackLength);
+     } catch (error) {
+         console.error('Error initializing audio sync:', error);
+     }
+
+     return null;
+ }
requirements.txt ADDED
@@ -0,0 +1 @@
+ gradio_vistimeline
src/.gitignore ADDED
Identical to the top-level .gitignore diff above.
src/README.md ADDED
Identical to the top-level README.md diff above.
src/backend/gradio_vistimeline/__init__.py ADDED
@@ -0,0 +1,5 @@
+
+ from .vistimeline import VisTimeline
+ from .model import VisTimelineData, VisTimelineItem, VisTimelineGroup
+
+ __all__ = ['VisTimeline', 'VisTimelineGroup', 'VisTimelineItem', 'VisTimelineData']
src/backend/gradio_vistimeline/model.py ADDED
@@ -0,0 +1,35 @@
+ from typing import Any, Callable, Optional, List, Union
+ from gradio.data_classes import GradioModel
+
+ class VisTimelineGroup(GradioModel):
+     id: Union[str, int]
+     content: str
+     className: Optional[str] = None
+     style: Optional[str] = None
+     order: Optional[Union[str, int]] = None
+     subgroupOrder: Optional[Union[str, Callable]] = None
+     subgroupStack: Optional[Union[bool, dict]] = None
+     subgroupVisibility: Optional[dict] = None
+     title: Optional[str] = None
+     visible: Optional[bool] = None
+     nestedGroups: Optional[List[Union[str, int]]] = None
+     showNested: Optional[bool] = None
+
+ class VisTimelineItem(GradioModel):
+     id: Optional[Union[str, int]] = None
+     content: str
+     start: str
+     end: Optional[str] = None
+     group: Optional[Union[str, int]] = None
+     className: Optional[str] = None
+     align: Optional[str] = None
+     style: Optional[str] = None
+     title: Optional[str] = None
+     type: Optional[str] = None  # 'box', 'point', 'range', or 'background'
+     selectable: Optional[bool] = None
+     limitSize: Optional[bool] = None
+     subgroup: Optional[Union[str, int]] = None
+
+ class VisTimelineData(GradioModel):
+     groups: List[Union[VisTimelineGroup, dict[str, Any]]]
+     items: List[Union[VisTimelineItem, dict[str, Any]]]
src/backend/gradio_vistimeline/templates/component/index.js ADDED
The diff for this file is too large to render. See raw diff
 
src/backend/gradio_vistimeline/templates/component/style.css ADDED
The diff for this file is too large to render. See raw diff
 
src/backend/gradio_vistimeline/templates/example/index.js ADDED
@@ -0,0 +1,151 @@
+ const {
+   SvelteComponent: M,
+   append_hydration: w,
+   attr: S,
+   children: k,
+   claim_element: T,
+   claim_text: I,
+   detach: _,
+   element: x,
+   init: L,
+   insert_hydration: R,
+   noop: C,
+   safe_not_equal: U,
+   set_data: V,
+   text: q,
+   toggle_class: d
+ } = window.__gradio__svelte__internal;
+ function j(i) {
+   let e, t, a = E(
+     /*value*/
+     i[0]
+   ) + "", l;
+   return {
+     c() {
+       e = x("div"), t = x("div"), l = q(a), this.h();
+     },
+     l(c) {
+       e = T(c, "DIV", { class: !0 });
+       var u = k(e);
+       t = T(u, "DIV", { class: !0 });
+       var s = k(t);
+       l = I(s, a), s.forEach(_), u.forEach(_), this.h();
+     },
+     h() {
+       S(t, "class", "example-content svelte-1rustph"), S(e, "class", "example-container svelte-1rustph"), d(
+         e,
+         "table",
+         /*type*/
+         i[1] === "table"
+       ), d(
+         e,
+         "gallery",
+         /*type*/
+         i[1] === "gallery"
+       ), d(
+         e,
+         "selected",
+         /*selected*/
+         i[2]
+       );
+     },
+     m(c, u) {
+       R(c, e, u), w(e, t), w(t, l);
+     },
+     p(c, [u]) {
+       u & /*value*/
+       1 && a !== (a = E(
+         /*value*/
+         c[0]
+       ) + "") && V(l, a), u & /*type*/
+       2 && d(
+         e,
+         "table",
+         /*type*/
+         c[1] === "table"
+       ), u & /*type*/
+       2 && d(
+         e,
+         "gallery",
+         /*type*/
+         c[1] === "gallery"
+       ), u & /*selected*/
+       4 && d(
+         e,
+         "selected",
+         /*selected*/
+         c[2]
+       );
+     },
+     i: C,
+     o: C,
+     d(c) {
+       c && _(e);
+     }
+   };
+ }
+ function E(i) {
+   const e = i.items.length, t = i.groups.length, a = z(i.items);
+   return `${i.description ? i.description : `${e} item${e !== 1 ? "s" : ""} in ${t} group${t !== 1 ? "s" : ""}`}
+ ${a}`;
+ }
+ function z(i) {
+   if (i.length === 0) return "";
+   const e = i.flatMap((l) => [new Date(l.start), l.end ? new Date(l.end) : null]).filter((l) => l !== null), t = new Date(Math.min(...e.map((l) => l.getTime()))), a = new Date(Math.max(...e.map((l) => l.getTime())));
+   return t.getTime() === a.getTime() ? f(t, !0) : `${f(t)} - ${f(a)}`;
+ }
+ function f(i, e = !1) {
+   return e ? i.toLocaleDateString("en-US", {
+     month: "long",
+     day: "numeric",
+     year: "numeric"
+   }) : i.toLocaleDateString("en-US", {
+     month: "short",
+     day: "numeric",
+     year: "numeric"
+   });
+ }
+ function A(i, e, t) {
+   let { value: a } = e, { type: l } = e, { selected: c = !1 } = e, { options: u = void 0 } = e, { preserve_old_content_on_value_change: s = void 0 } = e, { label: m = void 0 } = e, { interactive: r = void 0 } = e, { visible: o = void 0 } = e, { elem_id: g = void 0 } = e, { elem_classes: h = void 0 } = e, { key: v = void 0 } = e, { samples_dir: y = void 0 } = e, { index: b = void 0 } = e, { root: D = void 0 } = e;
+   return i.$$set = (n) => {
+     "value" in n && t(0, a = n.value), "type" in n && t(1, l = n.type), "selected" in n && t(2, c = n.selected), "options" in n && t(3, u = n.options), "preserve_old_content_on_value_change" in n && t(4, s = n.preserve_old_content_on_value_change), "label" in n && t(5, m = n.label), "interactive" in n && t(6, r = n.interactive), "visible" in n && t(7, o = n.visible), "elem_id" in n && t(8, g = n.elem_id), "elem_classes" in n && t(9, h = n.elem_classes), "key" in n && t(10, v = n.key), "samples_dir" in n && t(11, y = n.samples_dir), "index" in n && t(12, b = n.index), "root" in n && t(13, D = n.root);
+   }, [
+     a,
+     l,
+     c,
+     u,
+     s,
+     m,
+     r,
+     o,
+     g,
+     h,
+     v,
+     y,
+     b,
+     D
+   ];
+ }
+ class B extends M {
+   constructor(e) {
+     super(), L(this, e, A, j, U, {
+       value: 0,
+       type: 1,
+       selected: 2,
+       options: 3,
+       preserve_old_content_on_value_change: 4,
+       label: 5,
+       interactive: 6,
+       visible: 7,
+       elem_id: 8,
+       elem_classes: 9,
+       key: 10,
+       samples_dir: 11,
+       index: 12,
+       root: 13
+     });
+   }
+ }
+ export {
+   B as default
+ };
src/backend/gradio_vistimeline/templates/example/style.css ADDED
@@ -0,0 +1 @@
+ .example-container.svelte-1rustph{border:var(--button-border-width) solid var(--button-secondary-border-color)!important;background:var(--button-secondary-background-fill)!important;color:var(--button-secondary-text-color)!important;border-radius:var(--button-large-radius)!important;transition:all .2s ease;cursor:pointer;overflow:hidden}.example-content.svelte-1rustph{padding:var(--spacing-md);font-size:var(--text-sm);white-space:pre-line;line-height:1.4}.selected.svelte-1rustph{border:var(--button-border-width) solid var(--button-secondary-border-color-hover)!important;background:var(--button-secondary-background-fill-hover)!important;color:var(--button-secondary-text-color-hover)!important;border-radius:var(--button-large-radius)!important}.gallery.svelte-1rustph{min-width:100px}
src/backend/gradio_vistimeline/vistimeline.py ADDED
@@ -0,0 +1,74 @@
+ from __future__ import annotations
+ from typing import Any, Callable, Union
+ from gradio.components import Component
+ from gradio.events import Events
+ from .model import VisTimelineData
+
+ class VisTimeline(Component):
+     """
+     Custom Gradio component integrating vis.js Timeline.
+     """
+     data_model = VisTimelineData
+     EVENTS = [Events.load, Events.change, Events.input, "item_select", Events.select]
+
+     def __init__(
+         self,
+         value: Union[VisTimelineData, dict[str, Any], Callable, None] = None,
+         options: dict[str, Any] | None = None,
+         preserve_old_content_on_value_change: bool = False,
+         *,
+         label: str | None = None,
+         interactive: bool | None = True,
+         visible: bool = True,
+         elem_id: str | None = None,
+         elem_classes: list[str] | str | None = None,
+         render: bool = True,
+         key: int | str | None = None,
+     ):
+         self.value = self._get_default_value_if_none(value)
+         self.options = options or {}
+         self.preserve_old_content_on_value_change = preserve_old_content_on_value_change
+
+         super().__init__(
+             value=self.value,
+             label=label,
+             interactive=interactive,
+             visible=visible,
+             elem_id=elem_id,
+             elem_classes=elem_classes,
+             render=render,
+             key=key
+         )
+
+     def preprocess(self, payload: Union[VisTimelineData, dict[str, Any], None]) -> Union[VisTimelineData, dict[str, Any], None]:
+         return self._get_default_value_if_none(payload)
+
+     def postprocess(self, value: Union[VisTimelineData, dict[str, Any], None]) -> Union[VisTimelineData, dict[str, Any], None]:
+         def remove_first_level_none_properties(obj):
+             return {key: value for key, value in obj.items() if value is not None}
+
+         value = self._get_default_value_if_none(value)
+
+         if isinstance(value, VisTimelineData):
+             value.groups = [remove_first_level_none_properties(vars(group)) for group in value.groups]
+             value.items = [remove_first_level_none_properties(vars(item)) for item in value.items]
+         elif isinstance(value, dict):
+             value["groups"] = [remove_first_level_none_properties(group) for group in value.get("groups", [])]
+             value["items"] = [remove_first_level_none_properties(item) for item in value.get("items", [])]
+
+         return value
+
+     def example_payload(self) -> Any:
+         return {
+             "groups": [{"id": 0, "content": "Group 1"}],
+             "items": [{"content": "Item 1", "group": 0, "start": "2024-01-01"}]
+         }
+
+     def example_value(self) -> dict[str, Any]:
+         return self.example_payload()
+
+     def _get_default_value_if_none(self, value):
+         if isinstance(value, VisTimelineData):
+             return value or VisTimelineData(groups=[], items=[])
+         else:
+             return value or {"groups": [], "items": []}
src/backend/gradio_vistimeline/vistimeline.pyi ADDED
@@ -0,0 +1,305 @@
Lines 1-75 duplicate vistimeline.py above; the generated stub then adds typed event signatures:
76
+ from typing import Callable, Literal, Sequence, Any, TYPE_CHECKING
77
+ from gradio.blocks import Block
78
+ if TYPE_CHECKING:
79
+ from gradio.components import Timer
80
+
81
+
82
+ def load(self,
83
+ fn: Callable[..., Any] | None = None,
84
+ inputs: Block | Sequence[Block] | set[Block] | None = None,
85
+ outputs: Block | Sequence[Block] | None = None,
86
+ api_name: str | None | Literal[False] = None,
87
+ scroll_to_output: bool = False,
88
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
89
+ queue: bool | None = None,
90
+ batch: bool = False,
91
+ max_batch_size: int = 4,
92
+ preprocess: bool = True,
93
+ postprocess: bool = True,
94
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
95
+ every: Timer | float | None = None,
96
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
97
+ js: str | None = None,
98
+ concurrency_limit: int | None | Literal["default"] = "default",
99
+ concurrency_id: str | None = None,
100
+ show_api: bool = True,
101
+
102
+ ) -> Dependency:
103
+ """
104
+ Parameters:
105
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
106
+ inputs: list of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
107
+ outputs: list of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
108
+ api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
109
+ scroll_to_output: if True, will scroll to output component on completion
110
+ show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
111
+ queue: if True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
112
+ batch: if True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
113
+ max_batch_size: maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
114
+ preprocess: if False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
115
+ postprocess: if False, will not run postprocessing of component data before returning 'fn' output to the browser.
116
+ cancels: a list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
117
+ every: continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
118
+ trigger_mode: if "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
119
+ js: optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
120
+ concurrency_limit: if set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
121
+ concurrency_id: if set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
122
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
123
+
124
+ """
125
+ ...
126
+
127
+ def change(self,
128
+ fn: Callable[..., Any] | None = None,
129
+ inputs: Block | Sequence[Block] | set[Block] | None = None,
130
+ outputs: Block | Sequence[Block] | None = None,
131
+ api_name: str | None | Literal[False] = None,
132
+ scroll_to_output: bool = False,
133
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
134
+ queue: bool | None = None,
135
+ batch: bool = False,
136
+ max_batch_size: int = 4,
137
+ preprocess: bool = True,
138
+ postprocess: bool = True,
139
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
140
+ every: Timer | float | None = None,
141
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
142
+ js: str | None = None,
143
+ concurrency_limit: int | None | Literal["default"] = "default",
144
+ concurrency_id: str | None = None,
145
+ show_api: bool = True,
146
+
147
+ ) -> Dependency:
148
+ """
149
+ Parameters:
150
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
151
+ inputs: list of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
152
+ outputs: list of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
153
+ api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
154
+ scroll_to_output: if True, will scroll to output component on completion
155
+ show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
156
+ queue: if True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
157
+ batch: if True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
158
+ max_batch_size: maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
159
+ preprocess: if False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
160
+ postprocess: if False, will not run postprocessing of component data before returning 'fn' output to the browser.
161
+ cancels: a list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
162
+ every: continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
163
+ trigger_mode: if "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
164
+ js: optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
165
+ concurrency_limit: if set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
166
+ concurrency_id: if set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
167
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
168
+
169
+ """
170
+ ...
171
+
172
+ def input(self,
173
+ fn: Callable[..., Any] | None = None,
174
+ inputs: Block | Sequence[Block] | set[Block] | None = None,
175
+ outputs: Block | Sequence[Block] | None = None,
176
+ api_name: str | None | Literal[False] = None,
177
+ scroll_to_output: bool = False,
178
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
179
+ queue: bool | None = None,
180
+ batch: bool = False,
181
+ max_batch_size: int = 4,
182
+ preprocess: bool = True,
183
+ postprocess: bool = True,
184
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
185
+ every: Timer | float | None = None,
186
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
187
+ js: str | None = None,
188
+ concurrency_limit: int | None | Literal["default"] = "default",
189
+ concurrency_id: str | None = None,
190
+ show_api: bool = True,
191
+
192
+ ) -> Dependency:
193
+ """
194
+ Parameters:
195
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
196
+ inputs: list of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
197
+ outputs: list of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
198
+ api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
199
+ scroll_to_output: if True, will scroll to output component on completion
200
+ show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
201
+ queue: if True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
202
+ batch: if True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
203
+ max_batch_size: maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
204
+ preprocess: if False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
205
+ postprocess: if False, will not run postprocessing of component data before returning 'fn' output to the browser.
206
+ cancels: a list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
207
+ every: continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
208
+ trigger_mode: if "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
209
+ js: optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
210
+ concurrency_limit: if set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
211
+ concurrency_id: if set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
212
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
213
+
214
+ """
215
+ ...
216
+
217
+ def item_select(self,
218
+ fn: Callable[..., Any] | None = None,
219
+ inputs: Block | Sequence[Block] | set[Block] | None = None,
220
+ outputs: Block | Sequence[Block] | None = None,
221
+ api_name: str | None | Literal[False] = None,
222
+ scroll_to_output: bool = False,
223
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
224
+ queue: bool | None = None,
225
+ batch: bool = False,
226
+ max_batch_size: int = 4,
227
+ preprocess: bool = True,
228
+ postprocess: bool = True,
229
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
230
+ every: Timer | float | None = None,
231
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
232
+ js: str | None = None,
233
+ concurrency_limit: int | None | Literal["default"] = "default",
234
+ concurrency_id: str | None = None,
235
+ show_api: bool = True,
236
+
237
+ ) -> Dependency:
238
+ """
239
+ Parameters:
240
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
241
+ inputs: list of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
242
+ outputs: list of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
243
+ api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
244
+ scroll_to_output: if True, will scroll to output component on completion
245
+ show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
246
+ queue: if True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
247
+ batch: if True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
248
+ max_batch_size: maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
249
+ preprocess: if False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
250
+ postprocess: if False, will not run postprocessing of component data before returning 'fn' output to the browser.
251
+ cancels: a list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
252
+ every: continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
253
+ trigger_mode: if "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
254
+ js: optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
255
+ concurrency_limit: if set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
256
+ concurrency_id: if set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
257
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
258
+
259
+ """
260
+ ...
261
+
262
+ def select(self,
263
+ fn: Callable[..., Any] | None = None,
264
+ inputs: Block | Sequence[Block] | set[Block] | None = None,
265
+ outputs: Block | Sequence[Block] | None = None,
266
+ api_name: str | None | Literal[False] = None,
267
+ scroll_to_output: bool = False,
268
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
269
+ queue: bool | None = None,
270
+ batch: bool = False,
271
+ max_batch_size: int = 4,
272
+ preprocess: bool = True,
273
+ postprocess: bool = True,
274
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
275
+ every: Timer | float | None = None,
276
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
277
+ js: str | None = None,
278
+ concurrency_limit: int | None | Literal["default"] = "default",
279
+ concurrency_id: str | None = None,
280
+ show_api: bool = True,
281
+
282
+ ) -> Dependency:
283
+ """
284
+ Parameters:
285
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
286
+ inputs: list of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
287
+ outputs: list of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
288
+ api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
289
+ scroll_to_output: if True, will scroll to output component on completion
290
+ show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
291
+ queue: if True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
292
+ batch: if True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
293
+ max_batch_size: maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
294
+ preprocess: if False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
295
+ postprocess: if False, will not run postprocessing of component data before returning 'fn' output to the browser.
296
+ cancels: a list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
297
+ every: continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
298
+ trigger_mode: if "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
299
+ js: optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
300
+ concurrency_limit: if set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
301
+ concurrency_id: if set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
302
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
303
+
304
+ """
305
+ ...
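The stub above documents the five supported listeners (load, change, input, item_select, select) and their shared parameters. A minimal wiring sketch (handler bodies are illustrative; item_select receives the selected item IDs through gr.EventData, as the demo app below also shows):

```python
import gradio as gr
from gradio_vistimeline import VisTimeline

def on_item_select(event_data: gr.EventData) -> str:
    # event_data._data carries the selected item IDs (str or int), as in the demo app
    return f"Selected ids: {event_data._data}"

with gr.Blocks() as demo:
    timeline = VisTimeline(label="Timeline")
    status = gr.Textbox(label="Status")

    timeline.change(fn=lambda: "value changed", outputs=status)    # fires on any value change
    timeline.input(fn=lambda: "direct user edit", outputs=status)  # fires only on user interaction
    timeline.item_select(fn=on_item_select, outputs=status)        # custom item (de)selection event

demo.launch()
```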
src/demo/__init__.py ADDED
File without changes
src/demo/app.py ADDED
@@ -0,0 +1,475 @@
1
+ import datetime as dt
2
+ import gradio as gr
3
+ import json
4
+ import numpy as np
5
+ import os
6
+ from datetime import timedelta, date, datetime
7
+ from gradio_vistimeline import VisTimeline, VisTimelineData
8
+
9
+ # --- Region: Handlers for demo tab 1 ---
10
+ def pull_from_timeline(timeline):
11
+ """Convert timeline data to JSON string for display"""
12
+ if hasattr(timeline, "model_dump"):
13
+ data = timeline.model_dump(exclude_none=True)
14
+ else:
15
+ data = timeline
16
+ return json.dumps(data, indent=2)
17
+
18
+ def push_to_timeline(json_str):
19
+ """Convert JSON string to timeline data"""
20
+ try:
21
+ return VisTimelineData.model_validate_json(json_str)
22
+ except Exception as e:
23
+ print(f"Error parsing JSON: {e}")
24
+ return VisTimelineData(groups=[], items=[])
25
+
26
+ def on_timeline_change():
27
+ return f"Most recent value change event:\n{get_now()}"
28
+
29
+ def on_timeline_input(event_data: gr.EventData):
30
+ return f"Most recent input event:\nAction: '{event_data._data}' at {get_now()}"
31
+
32
+ def on_timeline_select():
33
+ return f"Most recent timeline selected event:\n{get_now()}"
34
+
35
+ def on_item_select(timeline, event_data: gr.EventData):
36
+ selected_ids = event_data._data # A collection of selected item IDs that can be str or int: ["example", 0, "example2"]
37
+ items = timeline.items
38
+
39
+ if selected_ids:
40
+ first_id = selected_ids[0]
41
+ for item in items:
42
+ if item.id == first_id:
43
+ content = getattr(item, 'content', 'Unknown')
44
+ return f"Currently selected item:\nContent: \"{content}\"\nID: \"{first_id}\""
45
+
46
+ return "Currently selected item:\nNone"
47
+
48
+ # --- Region: Table handler for demo tab 2 ---
49
+ def update_table(timeline):
50
+ if hasattr(timeline, "model_dump"):
51
+ data = timeline.model_dump(exclude_none=True)
52
+ else:
53
+ data = timeline
54
+
55
+ items = data["items"]
56
+ track_length_ms = get_grouped_item_end_in_ms(items, "track-length")
57
+
58
+ rows = []
59
+ for item in items:
60
+ if item["content"] != "":
61
+ duration = calculate_and_format_duration(item["start"], item.get("end"), track_length_ms)
62
+ rows.append([
63
+ item["content"],
64
+ format_date_to_milliseconds(item["start"]),
65
+ duration
66
+ ])
67
+
68
+ return gr.DataFrame(
69
+ value=rows,
70
+ headers=["Item Name", "Start Time", "Duration"]
71
+ )
72
+
73
+ # --- Region: Audio handlers for demo tab 2 ---
74
+ def update_audio(timeline):
75
+ """
76
+ Handler function for generating audio from timeline data.
77
+ Returns audio data in the format expected by Gradio's Audio component.
78
+ """
79
+ audio_data, sample_rate = generate_audio_from_timeline(timeline)
80
+ # Convert to correct shape and data type for Gradio Audio
81
+ # Gradio expects a 2D array with shape (samples, channels)
82
+ audio_data = audio_data.reshape(-1, 1) # Make it 2D with 1 channel
83
+ return (sample_rate, audio_data)
84
+
85
+ def generate_audio_from_timeline(timeline_data, sample_rate=44100):
86
+ """
87
+ Generate audio from timeline items containing frequency information.
88
+
89
+ Args:
90
+ timeline_data: Timeline data containing items with start/end times in milliseconds
91
+ sample_rate: Audio sample rate in Hz (default 44100)
92
+
93
+ Returns:
94
+ Tuple of (audio_data: np.ndarray, sample_rate: int)
95
+ """
96
+ # Get all items from the timeline
97
+ if hasattr(timeline_data, "model_dump"):
98
+ data = timeline_data.model_dump(exclude_none=True)
99
+ else:
100
+ data = timeline_data
101
+
102
+ items = data["items"]
103
+
104
+ # Find the track length from the background item
105
+ track_length_ms = get_grouped_item_end_in_ms(items, "track-length")
106
+
107
+ # Convert milliseconds to samples
108
+ total_samples = int((track_length_ms / 1000) * sample_rate)
109
+
110
+ # Initialize empty audio buffer
111
+ audio_buffer = np.zeros(total_samples)
112
+
113
+ # Frequency mapping
114
+ freq_map = {
115
+ 1: 440.0,
116
+ 2: 554.37,
117
+ 3: 659.26
118
+ }
119
+ # Generate sine waves for each item
120
+ for item in items:
121
+ item_id = item.get("id", 0) # Avoid shadowing the built-in id()
122
+ start_time = parse_date_to_milliseconds(item["start"])
123
+ end_time = parse_date_to_milliseconds(item.get("end", 0)) # Items without an "end" are skipped by the range check below
124
+
125
+ # Skip items that are completely outside the valid range
126
+ if end_time <= 0 or start_time >= track_length_ms or start_time >= end_time:
127
+ continue
128
+
129
+ # Clamp times to valid range
130
+ start_time = max(0, min(start_time, track_length_ms))
131
+ end_time = max(0, min(end_time, track_length_ms))
132
+
133
+ if item_id in freq_map:
134
+ freq = freq_map[item_id]
135
+
136
+ # Convert millisecond timestamps to sample indices
137
+ start_sample = int((start_time / 1000) * sample_rate)
138
+ end_sample = int((end_time / 1000) * sample_rate)
139
+
140
+ # Generate time array for this segment
141
+ t = np.arange(start_sample, end_sample)
142
+
143
+ # Generate sine wave
144
+ duration = end_sample - start_sample
145
+ envelope = np.ones(duration)
146
+ fade_samples = min(int(0.10 * sample_rate), duration // 2) # 100ms fade or half duration
147
+ envelope[:fade_samples] = np.linspace(0, 1, fade_samples)
148
+ envelope[duration - fade_samples:] = np.linspace(1, 0, fade_samples) # Index from the front so a zero-length fade is a no-op ([-0:] would select the whole array)
149
+
150
+ wave = 0.2 * envelope * np.sin(2 * np.pi * freq * t / sample_rate)
151
+
152
+ # Add to buffer
153
+ audio_buffer[start_sample:end_sample] += wave
154
+
155
+ # Normalize to prevent clipping
156
+ max_val = np.max(np.abs(audio_buffer))
157
+ if max_val > 0:
158
+ audio_buffer = audio_buffer / max_val
159
+
160
+ return (audio_buffer, sample_rate)
161
+
162
+ # Helper function to get hard-coded track-length from timeline value
163
+ def get_grouped_item_end_in_ms(items, group_id):
164
+ default_length = 6000
165
+ for item in items:
166
+ if item.get("group") == group_id:
167
+ return parse_date_to_milliseconds(item.get("end", default_length))
168
+ return default_length
169
+
170
+ # --- Region: Demo-specific datetime helper functions ---
171
+ def calculate_and_format_duration(start_date, end_date, max_range):
172
+ """Calculate the seconds between two datetime inputs and format the result with up to 2 decimals."""
173
+ if not end_date:
174
+ return "0s"
175
+
176
+ # Convert dates to milliseconds
177
+ start_ms = max(0, parse_date_to_milliseconds(start_date))
178
+ end_ms = min(max_range, parse_date_to_milliseconds(end_date))
179
+
180
+ if end_ms < start_ms:
181
+ return "0s"
182
+
183
+ # Calculate duration in seconds
184
+ duration = (end_ms - start_ms) / 1000
185
+
186
+ # Format to remove trailing zeroes after rounding to 2 decimal places
187
+ formatted_duration = f"{duration:.2f}".rstrip("0").rstrip(".")
188
+ return f"{formatted_duration}s"
189
+
190
+ def format_date_to_milliseconds(date):
191
+ """Format input (ISO8601 string or milliseconds) to mm:ss.SSS."""
192
+ date_in_milliseconds = max(0, parse_date_to_milliseconds(date))
193
+ time = timedelta(milliseconds=date_in_milliseconds)
194
+
195
+ # Format timedelta into mm:ss.SSS
196
+ minutes, seconds = divmod(time.seconds, 60)
197
+ milliseconds_part = time.microseconds // 1000
198
+ return f"{minutes:02}:{seconds:02}.{milliseconds_part:03}"
199
+
200
+ def parse_date_to_milliseconds(date):
201
+ """Convert input (ISO8601 string or milliseconds) milliseconds"""
202
+ if isinstance(date, int): # Input is already in milliseconds (Unix timestamp)
203
+ return date
204
+ elif isinstance(date, str): # Input is ISO8601 datetime string
205
+ parsed = datetime.fromisoformat(date.replace("Z", "+00:00")) # Named 'parsed' to avoid shadowing the 'dt' module alias imported at the top
206
+ epoch = datetime(1970, 1, 1, tzinfo=parsed.tzinfo) # Calculate difference from Unix epoch
207
+ return int((parsed - epoch).total_seconds() * 1000)
208
+ else:
209
+ return 0 # Fallback for unsupported types
210
+
211
+ def get_now():
212
+ """Returns current time in HH:MM:SS format"""
213
+ return datetime.now().strftime("%H:%M:%S")
214
+
215
+ TIMELINE_ID = "dateless_timeline"
216
+ AUDIO_ID = "timeline-audio"
217
+
218
+ # Example of how to access the timeline through JavaScript
219
+ # In this case, it keeps the timeline's custom time bar in sync with the audio component
220
+ # Read the JavaScript file
221
+ js_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'custom_time_control.js')
222
+ with open(js_path, 'r') as f:
223
+ js_content = f.read()
224
+ script = f"""<script>{js_content}</script>"""
225
+ style = f"""<style>.vis-custom-time.{TIMELINE_ID} {{pointer-events: none !important;}}</style>"""
226
+ head = script + style
227
+
228
+ # --- Region: Gradio ---
229
+ with gr.Blocks(head=head) as demo:
230
+ today = date.today()
231
+ day_offset = lambda days: (today + dt.timedelta(days=days)).isoformat()
232
+
233
+ gr.Markdown("# Vis.js Timeline Component Demo")
234
+
235
+ with gr.Tabs():
236
+ # --- Tab 1: Basic Timeline with Events ---
237
+ with gr.Tab("Basic Timeline Demo"):
238
+ # Timeline component
239
+ basic_timeline = VisTimeline(
240
+ value={
241
+ "groups": [{"id": 0, "content": ""}],
242
+ "items": []
243
+ },
244
+ options={
245
+ "start": day_offset(-1),
246
+ "end": day_offset(20),
247
+ "editable": True,
248
+ "format": { # You don't need to define this as these are the default values, but for this demo it is necessary because of two timelines with different formats on one page
249
+ "minorLabels": {
250
+ "millisecond": "SSS",
251
+ "second": "ss",
252
+ "minute": "HH:mm",
253
+ "hour": "HH:mm",
254
+ }
255
+ }
256
+ },
257
+ label="Interactive Timeline",
258
+ interactive=True
259
+ )
260
+
261
+ gr.Markdown("### Events")
262
+
263
+ # Event listener outputs
264
+ with gr.Row():
265
+ change_textbox = gr.Textbox(value="Most recent value change event:", label="Change:", lines=3, interactive=False)
266
+ input_textbox = gr.Textbox(value="Most recent user input event:", label="Input:", lines=3, interactive=False)
267
+ select_textbox = gr.Textbox(value="Most recent timeline selected event:", label="Select:", lines=3, interactive=False)
268
+ item_select_textbox = gr.Textbox(value="Currently selected item:\nNone", label="Currently selected item:", lines=3, interactive=False)
269
+
270
+ # Examples and JSON area in two columns
271
+ with gr.Row():
272
+ # Left column: Examples
273
+ with gr.Column():
274
+ gr.Markdown("### Timeline Examples")
275
+ gr.Examples(
276
+ examples=[
277
+ {
278
+ "groups": [{"id": 0, "content": ""}],
279
+ "items": [
280
+ {"content": "Working", "group": 0, "start": day_offset(1), "end": day_offset(5)},
281
+ {"content": "Resting", "group": 0, "start": day_offset(5), "end": day_offset(7)},
282
+ {"content": "Working", "group": 0, "start": day_offset(7), "end": day_offset(11)},
283
+ {"content": "Resting", "group": 0, "start": day_offset(11), "end": day_offset(13)},
284
+ {"content": "Working", "group": 0, "start": day_offset(13), "end": day_offset(17)},
285
+ {"content": "Resting", "group": 0, "start": day_offset(17), "end": day_offset(19)},
286
+ {"content": "Working", "group": 0, "start": day_offset(19), "end": day_offset(23)},
287
+ ],
288
+ "description": "DateTime ranges"
289
+ },
290
+ {
291
+ "groups": [{"id": 0, "content": "Group"}],
292
+ "items": [
293
+ {"id": 0, "content": "Simple item", "group": 0, "start": day_offset(9)}
294
+ ]
295
+ },
296
+ {
297
+ "groups": [{"id": 0, "content": "Worker 1"}, {"id": 1, "content": "Worker 2"}],
298
+ "items": [
299
+ {"content": "Working", "group": 0, "start": day_offset(1), "end": day_offset(5)},
300
+ {"content": "Resting", "group": 0, "start": day_offset(5), "end": day_offset(7)},
301
+ {"content": "Working", "group": 0, "start": day_offset(7), "end": day_offset(11)},
302
+ {"content": "Resting", "group": 0, "start": day_offset(11), "end": day_offset(13)},
303
+ {"content": "Working", "group": 0, "start": day_offset(13), "end": day_offset(17)},
304
+ {"content": "Resting", "group": 0, "start": day_offset(17), "end": day_offset(19)},
305
+ {"content": "Working", "group": 0, "start": day_offset(19), "end": day_offset(23)},
306
+ {"content": "Working", "group": 1, "start": day_offset(-3), "end": day_offset(2)},
307
+ {"content": "Resting", "group": 1, "start": day_offset(2), "end": day_offset(4)},
308
+ {"content": "Working", "group": 1, "start": day_offset(4), "end": day_offset(8)},
309
+ {"content": "Resting", "group": 1, "start": day_offset(8), "end": day_offset(10)},
310
+ {"content": "Working", "group": 1, "start": day_offset(10), "end": day_offset(14)},
311
+ {"content": "Resting", "group": 1, "start": day_offset(14), "end": day_offset(16)},
312
+ {"content": "Working", "group": 1, "start": day_offset(16), "end": day_offset(20)}
313
+ ],
314
+ "description": "DateTime ranges in groups"
315
+ },
316
+ {
317
+ "groups": [{"id": 1, "content": "Group 1"}, {"id": 2, "content": "Group 2"}],
318
+ "items": [
319
+ {"id": "A", "content": "Period A", "start": day_offset(1), "end": day_offset(7), "type": "background", "group": 1 },
320
+ {"id": "B", "content": "Period B", "start": day_offset(8), "end": day_offset(11), "type": "background", "group": 2 },
321
+ {"id": "C", "content": "Period C", "start": day_offset(12), "end": day_offset(17), "type": "background" },
322
+ {"content": "Range inside period A", "start": day_offset(2), "end": day_offset(6), "group": 1 },
323
+ {"content": "Item inside period C", "group": 2, "start": day_offset(14) }
324
+ ],
325
+ "description": "Background type example"
326
+ },
327
+ {
328
+ "groups": [{"id": 1, "content": "Group 1"}, {"id": 2, "content": "Group 2"}],
329
+ "items": [
330
+ {"content": "Range item", "group": 1, "start": day_offset(7), "end": day_offset(14) },
331
+ {"content": "Point item", "group": 2, "start": day_offset(7), "type": "point" },
332
+ {"content": "Point item with a longer name", "group": 2, "start": day_offset(7), "type": "point" },
333
+ ],
334
+ "description": "Point type example"
335
+ },
336
+ {
337
+ "groups": [{"id": 1, "content": "Group 1", "subgroupStack": {"A": True, "B": True}}, {"id": 2, "content": "Group 2" }],
338
+ "items": [
339
+ {"content": "Subgroup 2 Background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "A", "subgroupOrder": 0},
340
+ {"content": "Subgroup 2 Item", "start": day_offset(5), "end": day_offset(7), "group": 1, "subgroup": "A", "subgroupOrder": 0},
341
+ {"content": "Subgroup 1 Background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "B", "subgroupOrder": 1},
342
+ {"content": "Subgroup 1 Item", "start": day_offset(8), "end": day_offset(10), "group": 1, "subgroup": "B", "subgroupOrder": 1},
343
+ {"content": "Full group background", "start": day_offset(5), "end": day_offset(9), "type": "background", "group": 2},
344
+ {"content": "No subgroup item 1", "start": day_offset(10), "end": day_offset(12), "group": 2},
345
+ {"content": "No subgroup item 2", "start": day_offset(13), "end": day_offset(15), "group": 2}
346
+
347
+ ],
348
+ "description": "Subgroups with backgrounds and items"
349
+ },
350
+ {
351
+ "groups": [{"id": 1, "content": "Group 1", "subgroupStack": {"A": True, "B": True}}, {"id": 2, "content": "Group 2" }],
352
+ "items": [
353
+ {"content": "Subgroup 2 background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "A", "subgroupOrder": 0},
354
+ {"content": "Subgroup 2 range", "start": day_offset(5), "end": day_offset(7), "group": 1, "subgroup": "A", "subgroupOrder": 0},
355
+ {"content": "Subgroup 2 item", "start": day_offset(10), "group": 1, "subgroup": "A" },
356
+ {"content": "Subgroup 1 background", "start": day_offset(0), "end": day_offset(4), "type": "background", "group": 1, "subgroup": "B", "subgroupOrder": 1},
357
+ {"content": "Subgroup 1 range", "start": day_offset(8), "end": day_offset(10), "group": 1, "subgroup": "B", "subgroupOrder": 1},
358
+ {"content": "Subgroup 1 item", "start": day_offset(14), "group": 1, "subgroup": "B" },
359
+ {"content": "No subgroup item", "start": day_offset(12), "group": 1},
360
+ {"content": "Full group background", "start": day_offset(5), "end": day_offset(9), "type": "background", "group": 2},
361
+ {"content": "No subgroup range 1", "start": day_offset(11), "end": day_offset(13), "group": 2},
362
+ {"content": "No subgroup range 2", "start": day_offset(15), "end": day_offset(17), "group": 2},
363
+ {"content": "No subgroup point", "start": day_offset(1), "group": 2, "type": "point" }
364
+
365
+ ],
366
+ "description": "Combination of item and group types"
367
+ }
368
+ ],
369
+ inputs=basic_timeline
370
+ )
371
+
372
+ # Right column: JSON staging area
373
+ with gr.Column():
374
+ gr.Markdown("### Serialized Timeline Value")
375
+ json_textbox = gr.Textbox(label="JSON", lines=4)
376
+ with gr.Row():
377
+ pull_button = gr.Button("Pull Timeline into JSON")
378
+ push_button = gr.Button("Push JSON onto Timeline", variant="primary")
379
+
380
+ # Event handlers
381
+ basic_timeline.change(fn=on_timeline_change, outputs=[change_textbox]) # Triggered when the value of the timeline changes by any means
382
+ basic_timeline.input(fn=on_timeline_input, outputs=[input_textbox]) # Triggered when the value of the timeline changes, caused directly by a user input on the component (dragging, adding & removing items)
383
+ basic_timeline.select(fn=on_timeline_select, outputs=[select_textbox]) # Triggered when the timeline is clicked
384
+ basic_timeline.item_select(fn=on_item_select, inputs=[basic_timeline], outputs=[item_select_textbox]) # Triggered when items are selected or unselected
385
+
386
+ pull_button.click(fn=pull_from_timeline, inputs=[basic_timeline], outputs=[json_textbox]) # Example of using the timeline as an input
387
+ push_button.click(fn=push_to_timeline, inputs=[json_textbox], outputs=[basic_timeline]) # Example of using the timeline as an output
388
+
389
+ # --- Tab 2: Timeline without date ---
390
+ with gr.Tab("Timeline Without Date"):
391
+ audio_output = gr.Audio(label="Generated Audio", type="numpy", elem_id=AUDIO_ID)
392
+
393
+ dateless_timeline = VisTimeline(
394
+ value={
395
+ "groups": [{"id": "track-length", "content": ""}, {"id": 1, "content": ""}, {"id": 2, "content": ""}, {"id": 3, "content": ""}],
396
+ "items": [
397
+ {"content": "", "group": "track-length", "selectable": False, "type": "background", "start": 0, "end": 6000, "className": "color-primary-600"},
398
+ {"id": 1, "content": "440.00Hz", "group": 1, "selectable": False, "start": 0, "end": 1500},
399
+ {"id": 2, "content": "554.37Hz", "group": 2, "selectable": False, "start": 2000, "end": 3500},
400
+ {"id": 3, "content": "659.26Hz", "group": 3, "selectable": False, "start": 4000, "end": 5500}
401
+ ]},
402
+ options={
403
+ "moment": "+00:00", # Force the timeline into a certain UTC offset timezone
404
+ "showCurrentTime": False,
405
+ "editable": {
406
+ "add": False,
407
+ "remove": False,
408
+ "updateGroup": False,
409
+ "updateTime": True
410
+ },
411
+ "itemsAlwaysDraggable": { # So dragging does not require selection first
412
+ "item": True,
413
+ "range": True
414
+ },
415
+ "showMajorLabels": False, # This hides the month & year labels
416
+ "format": {
417
+ "minorLabels": { # Force the minor labels into a format that does not include weekdays or months
418
+ "millisecond": "mm:ss.SSS",
419
+ "second": "mm:ss",
420
+ "minute": "mm:ss",
421
+ "hour": "HH:mm:ss"
422
+ }
423
+ },
424
+ "start": 0, # Timeline will start at unix epoch
425
+ "end": 6000, # Initial timeline range will end at 1 minute (unix timestamp in milliseconds)
426
+ "min": 0, # Restrict timeline navigation, timeline can not be scrolled further to the left than 0 seconds
427
+ "max": 7000, # Restrict timeline navigation, timeline can not be scrolled further to the right than 70 seconds
428
+ "zoomMin": 1000, # Allow zoom in up until the entire timeline spans 1000 milliseconds
429
+ },
430
+ label="Timeline without date labels, with restrictions on navigation and zoom. You can drag and resize items without having to select them first.",
431
+ elem_id=TIMELINE_ID # This will also make the timeline instance accessible in JavaScript via 'window.visTimelineInstances["your elem_id"]'
432
+ )
433
+
434
+ table = gr.DataFrame(
435
+ headers=["Item Name", "Start Time", "Duration"],
436
+ label="Timeline Items",
437
+ interactive=False
438
+ )
439
+
440
+ generate_audio_button = gr.Button("Generate Audio")
441
+
442
+ # Event handlers
443
+ dateless_timeline.change(fn=update_table, inputs=[dateless_timeline], outputs=[table])
444
+ dateless_timeline.load(fn=update_table, inputs=[dateless_timeline], outputs=[table])
445
+ # Generate the audio, then re-initialize the JS sync between the audio player and the timeline's custom time bar
446
+ generate_audio_button.click(
448
+ fn=update_audio,
449
+ inputs=[dateless_timeline],
450
+ outputs=[audio_output],
451
+ ).then(
452
+ fn=None,
453
+ inputs=None,
454
+ outputs=None,
455
+ js=f'() => initAudioSync("{TIMELINE_ID}", "{AUDIO_ID}", 6000)'
456
+ )
457
+
458
+ # --- Tab 3: Links to documentation and examples ---
459
+ with gr.Tab("Documentation & More Examples"):
460
+ gr.Markdown("""
461
+ ## Vis.js Timeline Examples
462
+ A collection of HTML/CSS/JavaScript snippets displaying various properties and use-cases:
463
+ [https://visjs.github.io/vis-timeline/examples/timeline/](https://visjs.github.io/vis-timeline/examples/timeline/)
464
+ <br><br>
465
+ ## Vis.js Timeline Documentation
466
+ The official documentation of the timeline:
467
+ [https://visjs.github.io/vis-timeline/docs/timeline/](https://visjs.github.io/vis-timeline/docs/timeline/)
468
+ <br><br>
469
+ ## Vis.js DataSet Documentation
470
+ The official documentation of the DataSet model:
471
+ [https://visjs.github.io/vis-data/data/dataset.html](https://visjs.github.io/vis-data/data/dataset.html)
472
+ """)
473
+
474
+ if __name__ == "__main__":
475
+ demo.launch(show_api=False)
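The synthesis step inside generate_audio_from_timeline is easy to lose amid the clamping logic, so here is the per-item tone generation in isolation (a sketch using the demo's constants; the tone helper is illustrative and not part of the package, and it uses a relative time axis where the demo uses absolute sample indices):

```python
import numpy as np

def tone(freq_hz: float, start_ms: int, end_ms: int, sample_rate: int = 44100) -> np.ndarray:
    """Sine wave at freq_hz spanning [start_ms, end_ms), with a linear fade in/out."""
    n = int((end_ms - start_ms) / 1000 * sample_rate)  # duration in samples
    t = np.arange(n)
    envelope = np.ones(n)
    fade = min(int(0.10 * sample_rate), n // 2)  # 100 ms fade, or half the duration
    if fade > 0:
        envelope[:fade] = np.linspace(0, 1, fade)
        envelope[-fade:] = np.linspace(1, 0, fade)
    return 0.2 * envelope * np.sin(2 * np.pi * freq_hz * t / sample_rate)

wave = tone(440.0, 0, 1500)  # the demo's first item: 440 Hz from 0 ms to 1500 ms
print(wave.shape)            # (66150,) samples at 44.1 kHz
```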
src/demo/custom_time_control.js ADDED
@@ -0,0 +1,121 @@
1
+ function manageTimeBar(elemId, time) {
2
+ if (!window.visTimelineInstances) {
3
+ console.error(`Timeline instances collection not found`);
4
+ return;
5
+ }
6
+
7
+ const timeline = window.visTimelineInstances[elemId];
8
+ if (!timeline) {
9
+ console.error(`Timeline instance ${elemId} not found`);
10
+ return;
11
+ }
12
+
13
+ if (!window.customTimeBarIds) {
14
+ window.customTimeBarIds = {};
15
+ }
16
+
17
+ try {
18
+ timeline.setCustomTime(time, elemId); // throws if a bar with this id does not exist yet
19
+ } catch (e) {
20
+ timeline.addCustomTime(time, elemId); // so create the bar on first use
21
+ }
22
+ }
23
+
24
+ function setTimeBarDirect(elemId, time) {
25
+ manageTimeBar(elemId, time);
26
+ }
27
+
28
+ function setTimeBarNormalized(elemId, start, end, normalizedPos) {
29
+ const time = start + (end - start) * normalizedPos;
30
+ manageTimeBar(elemId, time);
31
+ }
32
+
33
+ class AudioTimelineSync {
34
+ constructor(timelineId, audioId, trackLength) {
35
+ this.timelineId = timelineId;
36
+ this.trackLength = trackLength;
37
+ const container = document.getElementById(audioId);
38
+
39
+ // Find the progress element through shadow DOM
40
+ const waveform = container.querySelector('#waveform');
41
+ if (!waveform) {
42
+ console.error('Waveform container not found');
43
+ return;
44
+ }
45
+
46
+ // Access shadow root and find progress element
47
+ const shadowRoot = waveform.querySelector('div').shadowRoot;
48
+ this.progressElement = shadowRoot.querySelector('div[part="progress"]');
49
+
50
+ if (!this.progressElement) {
51
+ console.error('Progress element not found');
52
+ return;
53
+ }
54
+
55
+ this.setupProgressObserver();
56
+ }
57
+
58
+ setupProgressObserver() {
59
+ // Create mutation observer to watch for style changes to a specific progress element of the audio component
60
+ // The style is defined by the completion of the audio source, even when the audio is not playing but the time bar is being dragged by the cursor.
61
+ this.observer = new MutationObserver((mutations) => {
62
+ mutations.forEach((mutation) => {
63
+ if (mutation.type === 'attributes' && mutation.attributeName === 'style') {
64
+ this.onProgressUpdate();
65
+ }
66
+ });
67
+ });
68
+
69
+ // Observe the progress element for style changes
70
+ this.observer.observe(this.progressElement, {
71
+ attributes: true,
72
+ attributeFilter: ['style']
73
+ });
74
+ }
75
+
76
+ onProgressUpdate() {
77
+ const style = this.progressElement.style;
78
+ const widthStr = style.width;
79
+ if (!widthStr) return;
80
+
81
+ // Convert percentage string to number (e.g., "70.7421%" -> 0.707421)
82
+ const percentage = parseFloat(widthStr) / 100;
83
+ this.syncTimeBarToPlayback(percentage);
84
+ }
85
+
86
+ syncTimeBarToPlayback(normalizedPosition) {
87
+ const timeline = window.visTimelineInstances[this.timelineId];
88
+ if (timeline) {
89
+ setTimeBarNormalized(this.timelineId, 0, this.trackLength, normalizedPosition);
90
+ }
91
+ }
92
+
93
+ cleanup() {
94
+ // Disconnect observer
95
+ if (this.observer) {
96
+ this.observer.disconnect();
97
+ this.observer = null;
98
+ }
99
+ }
100
+ }
101
+
102
+ function initAudioSync(timelineId, audioId, trackLength) {
103
+ try {
104
+ // Initialize syncs container if it doesn't exist
105
+ if (!window.audioTimelineSyncs) {
106
+ window.audioTimelineSyncs = {};
107
+ }
108
+
109
+ // Cleanup existing sync if any
110
+ if (window.audioTimelineSyncs[timelineId]) {
111
+ window.audioTimelineSyncs[timelineId].cleanup();
112
+ }
113
+
114
+ // Create new sync instance
115
+ window.audioTimelineSyncs[timelineId] = new AudioTimelineSync(timelineId, audioId, trackLength);
116
+ } catch (error) {
117
+ console.error('Error initializing audio sync:', error);
118
+ }
119
+
120
+ return null;
121
+ }
src/demo/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ gradio_vistimeline
src/frontend/Example.svelte ADDED
@@ -0,0 +1,110 @@
1
+ <script lang="ts">
2
+ interface TimelineExample {
3
+ groups: Array<{id: number, content: string}>;
4
+ items: Array<{
5
+ id: number | string,
6
+ content: string,
7
+ group?: number,
8
+ start: string,
9
+ end?: string
10
+ }>;
11
+ description?: string;
12
+ }
13
+
14
+ export let value: TimelineExample;
15
+ export let type: "gallery" | "table";
16
+ export let selected = false;
17
+
18
+ export let options: Record<string, any> | undefined = undefined;
19
+ export let preserve_old_content_on_value_change: boolean | undefined = undefined;
20
+ export let label: string | undefined = undefined;
21
+ export let interactive: boolean | undefined = undefined;
22
+ export let visible: boolean | undefined = undefined;
23
+ export let elem_id: string | undefined = undefined;
24
+ export let elem_classes: string[] | undefined = undefined;
25
+ export let key: string | undefined = undefined;
26
+ export let samples_dir: string | undefined = undefined;
27
+ export let index: number | undefined = undefined;
28
+ export let root: any = undefined;
29
+
30
+ function formatSummary(example: TimelineExample): string {
31
+ const itemCount = example.items.length;
32
+ const groupCount = example.groups.length;
33
+ const dateRange = getDateRange(example.items);
34
+
35
+ const summary = example.description
36
+ ? example.description
37
+ : `${itemCount} item${itemCount !== 1 ? 's' : ''} in ${groupCount} group${groupCount !== 1 ? 's' : ''}`;
38
+
39
+ return `${summary}\n${dateRange}`;
40
+ }
41
+
42
+ function getDateRange(items: TimelineExample['items']): string {
43
+ if (items.length === 0) return '';
44
+
45
+ const dates = items.flatMap(item => [new Date(item.start), item.end ? new Date(item.end) : null])
46
+ .filter((date): date is Date => date !== null);
47
+
48
+ const minDate = new Date(Math.min(...dates.map(d => d.getTime())));
49
+ const maxDate = new Date(Math.max(...dates.map(d => d.getTime())));
50
+
51
+ if (minDate.getTime() === maxDate.getTime()) {
52
+ return formatDate(minDate, true);
53
+ }
54
+
55
+ return `${formatDate(minDate)} - ${formatDate(maxDate)}`;
56
+ }
57
+
58
+ function formatDate(date: Date, fullFormat: boolean = false): string {
59
+ if (fullFormat) {
60
+ return date.toLocaleDateString('en-US', {
61
+ month: 'long',
62
+ day: 'numeric',
63
+ year: 'numeric'
64
+ });
65
+ }
66
+ return date.toLocaleDateString('en-US', {
67
+ month: 'short',
68
+ day: 'numeric',
69
+ year: 'numeric'
70
+ });
71
+ }
72
+ </script>
73
+
74
+ <div
75
+ class:table={type === "table"}
76
+ class:gallery={type === "gallery"}
77
+ class:selected
78
+ class="example-container"
79
+ >
80
+ <div class="example-content">
81
+ {formatSummary(value)}
82
+ </div>
83
+ </div>
84
+
85
+ <style>
86
+ .example-container {
87
+ border: var(--button-border-width) solid var(--button-secondary-border-color) !important;
88
+ background: var(--button-secondary-background-fill) !important;
89
+ color: var(--button-secondary-text-color) !important;
90
+ border-radius: var(--button-large-radius) !important;
91
+ transition: all 0.2s ease;
92
+ cursor: pointer;
93
+ overflow: hidden;
94
+ }
95
+ .example-content {
96
+ padding: var(--spacing-md);
97
+ font-size: var(--text-sm);
98
+ white-space: pre-line;
99
+ line-height: 1.4;
100
+ }
101
+ .selected {
102
+ border: var(--button-border-width) solid var(--button-secondary-border-color-hover) !important;
103
+ background: var(--button-secondary-background-fill-hover) !important;
104
+ color: var(--button-secondary-text-color-hover) !important;
105
+ border-radius: var(--button-large-radius) !important;
106
+ }
107
+ .gallery {
108
+ min-width: 100px;
109
+ }
110
+ </style>
src/frontend/Index.svelte ADDED
@@ -0,0 +1,440 @@
+ <script lang="ts">
+     import { DataSet, Timeline, moment } from 'vis-timeline/standalone';
+     import 'vis-timeline/styles/vis-timeline-graph2d.css';
+
+     import type { Gradio } from "@gradio/utils";
+     import { Block } from "@gradio/atoms";
+     import { StatusTracker } from "@gradio/statustracker";
+     import type { LoadingStatus } from "@gradio/statustracker";
+
+     import { onMount } from 'svelte';
+
+     export let gradio: Gradio<{
+         load: any,
+         change: any,
+         input: any,
+         item_select: any,
+         select: any
+     }>;
+
+     export let value = { groups: [], items: [] };
+     export let options: Record<string, any> = {};
+     export let preserve_old_content_on_value_change: boolean = false;
+
+     export let label: string | null = null;
+     export let interactive: boolean = true;
+     export let visible: boolean = true;
+     export let elem_id: string = "";
+     export let elem_classes: string[] = [];
+     export let loading_status: LoadingStatus | undefined = undefined;
+
+     let container: HTMLDivElement;
+     let timeline: Timeline;
+     let groupsDataSet = new DataSet();
+     let itemsDataSet = new DataSet();
+     let inputLock = false;
+     let timelineHasGroups = false;
+     let isMounted = false;
+
+     function updateBackendState() {
+         const currentGroups = groupsDataSet ? groupsDataSet.get() : null;
+         const currentItems = itemsDataSet ? itemsDataSet.get() : null;
+         value = { groups: currentGroups, items: currentItems };
+     }
+
+     function updateFrontEndState() {
+         inputLock = true;
+
+         try {
+             let selection: any[] | null = null;
+
+             if (!preserve_old_content_on_value_change && timeline) {
+                 selection = timeline.getSelection();
+             }
+
+             const newValueHasGroups = value.groups.length > 0;
+
+             if (newValueHasGroups != timelineHasGroups && isMounted) {
+                 instantiateTimeline(newValueHasGroups);
+             }
+
+             tryUpdateDataSet(value.groups, groupsDataSet);
+             tryUpdateDataSet(value.items, itemsDataSet);
+
+             if (selection && selection.length > 0) {
+                 timeline.setSelection(selection);
+             }
+         } catch (error) {
+             console.error("Error updating frontend state:", error);
+         } finally {
+             inputLock = false;
+         }
+     }
+
+     function tryUpdateDataSet(newData, dataSet) {
+         try {
+             if (newData) {
+                 if (preserve_old_content_on_value_change) {
+                     removeOldDataFromDataSet(newData, dataSet);
+                     dataSet.update(newData);
+                 } else {
+                     dataSet.clear();
+                     dataSet.add(newData);
+                 }
+             } else {
+                 dataSet.clear();
+             }
+         } catch (error) {
+             console.error("Error updating timeline content datasets:", error);
+         }
+     }
+
+     function removeOldDataFromDataSet(newData, dataSet) {
+         if (newData) {
+             const newIds = newData.map(item => item.id);
+             const currentItems = dataSet.get();
+             const itemIdsToRemove = currentItems.filter(item => !newIds.includes(item.id)).map(item => item.id);
+
+             if (itemIdsToRemove && itemIdsToRemove.length > 0) {
+                 dataSet.remove(itemIdsToRemove);
+             }
+         }
+     }
+
+     function instantiateTimeline(hasGroups) {
+         declareGlobalWindow();
+         parseOptions();
+
+         if (timeline) {
+             removeTimelineFromGlobalWindow();
+             timeline.destroy();
+         }
+
+         timelineHasGroups = hasGroups;
+
+         if (hasGroups) {
+             timeline = new Timeline(container, itemsDataSet, groupsDataSet, options);
+         } else {
+             timeline = new Timeline(container, itemsDataSet, options);
+         }
+
+         addTimelineToGlobalWindow();
+         listenToTimelineEvents();
+     }
+
+     function declareGlobalWindow() {
+         if (!window.visTimelineInstances) {
+             window.visTimelineInstances = {};
+         }
+     }
+
+     function removeTimelineFromGlobalWindow() {
+         if (elem_id && window.visTimelineInstances[elem_id]) {
+             delete window.visTimelineInstances[elem_id];
+         }
+     }
+
+     function addTimelineToGlobalWindow() {
+         if (elem_id) {
+             window.visTimelineInstances[elem_id] = timeline;
+         }
+     }
+
+     function parseOptions() {
+         if (options && typeof options.moment === 'string' && options.moment.trim() !== '') {
+             const offsetString = options.moment.trim();
+             options.moment = function (date) {
+                 return moment(date).utcOffset(offsetString);
+             };
+         }
+     }
+
+     function listenToTimelineEvents() {
+         timeline.on("click", (properties) => {
+             gradio.dispatch("select");
+         });
+
+         timeline.on("select", (properties) => {
+             if (!inputLock) {
+                 gradio.dispatch("item_select", properties.items);
+             }
+         });
+     }
+
+     function listenToUserInput(dataSet) {
+         ["update", "add", "remove"].forEach((eventType) => {
+             dataSet.on(eventType, (name, payload) => {
+                 if (!inputLock) {
+                     gradio.dispatch("input", eventType);
+                     updateBackendState();
+                 }
+             });
+         });
+     }
+
+     function addResizeObserver() {
+         const observer = new ResizeObserver((entries) => {
+             for (const entry of entries) {
+                 const { width, height } = entry.contentRect;
+
+                 if (width > 0 && height > 0 && timeline) {
+                     timeline.redraw();
+                 }
+             }
+         });
+
+         observer.observe(container);
+     }
+
+     function onValueChange() {
+         gradio.dispatch("change");
+     }
+
+     onMount(() => {
+         instantiateTimeline(groupsDataSet.get().length > 0);
+
+         if (!interactive) {
+             timeline.setOptions({ editable: false });
+         }
+
+         isMounted = true;
+
+         updateFrontEndState();
+         onValueChange();
+
+         listenToUserInput(itemsDataSet);
+         listenToUserInput(groupsDataSet);
+         addResizeObserver();
+
+         gradio.dispatch("load");
+     });
+
+     $: if (value) {
+         updateFrontEndState();
+         onValueChange();
+     }
+ </script>
+
+ <div class:hidden={!visible}>
+     <Block {elem_id} {elem_classes} allow_overflow={false} padding={true}>
+         {#if loading_status}
+             <StatusTracker
+                 autoscroll={gradio.autoscroll}
+                 i18n={gradio.i18n}
+                 {...loading_status}
+             />
+         {/if}
+         {#if label}<label for="{elem_id}" class="gr-vistimeline-label">{label}</label>{/if}
+         <div class="gr-vistimeline" bind:this={container}></div>
+     </Block>
+ </div>
+
+ <style>
+     .hidden {
+         display: none !important;
+     }
+     .gr-vistimeline-label {
+         display: inline-block;
+         position: relative;
+         z-index: var(--layer-4);
+         border: solid var(--block-title-border-width) var(--block-title-border-color);
+         border-radius: var(--block-title-radius);
+         background: var(--block-title-background-fill);
+         padding: var(--block-title-padding);
+         color: var(--block-title-text-color);
+         font-weight: var(--block-title-text-weight);
+         font-size: var(--block-title-text-size);
+         line-height: var(--line-sm);
+         margin-bottom: var(--spacing-lg);
+     }
+     .gr-vistimeline :global(.vis-timeline) {
+         border-radius: var(--block-radius) !important;
+         border-color: var(--block-border-color) !important;
+     }
+     .gr-vistimeline :global(.vis-item) {
+         border-radius: var(--block-radius) !important;
+         border-color: var(--neutral-400) !important;
+         background: var(--button-secondary-background-fill) !important;
+         color: var(--body-text-color) !important;
+         font-family: var(--font) !important;
+     }
+     .gr-vistimeline :global(.vis-item.vis-selected) {
+         border-color: var(--primary-500) !important;
+         background: var(--primary-400) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.vis-item.vis-line) {
+         width: 0px !important;
+         border-radius: 0px !important;
+         border-top-width: 0px !important;
+         border-right-width: 0px !important;
+         border-bottom-width: 0px !important;
+         border-left-width: 1px !important;
+     }
+     .gr-vistimeline :global(.vis-delete), .gr-vistimeline :global(.vis-delete-rtl) {
+         background-color: transparent !important;
+     }
+     .gr-vistimeline :global(.vis-delete::after), .gr-vistimeline :global(.vis-delete-rtl::after) {
+         color: var(--button-cancel-background-fill) !important;
+     }
+     .gr-vistimeline :global(.vis-time-axis .vis-text) {
+         color: var(--block-title-text-color) !important;
+         font-size: var(--text-md) !important;
+         padding-left: var(--spacing-sm) !important;
+     }
+     .gr-vistimeline :global(.vis-time-axis .vis-grid.vis-minor), .gr-vistimeline :global(.vis-time-axis .vis-grid.vis-major) {
+         border-color: var(--block-border-color) !important;
+     }
+     .gr-vistimeline :global(.vis-panel), .gr-vistimeline :global(.vis-group), .gr-vistimeline :global(.vis-labelset .vis-label) {
+         border-color: var(--block-border-color) !important;
+     }
+     .gr-vistimeline :global(.vis-labelset .vis-label) {
+         color: var(--block-title-text-color) !important;
+     }
+     .gr-vistimeline :global(.vis-panel) {
+         border-bottom-width: 2px !important;
+     }
+     .gr-vistimeline :global(.vis-panel.vis-center), .gr-vistimeline :global(.vis-panel.vis-bottom) {
+         border-left-width: 2px !important;
+     }
+     .gr-vistimeline :global(.vis-current-time) {
+         background-color: var(--primary-500) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.vis-custom-time) {
+         background-color: var(--primary-600) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-50) {
+         background-color: var(--primary-50) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-100) {
+         background-color: var(--primary-100) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-200) {
+         background-color: var(--primary-200) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-300) {
+         background-color: var(--primary-300) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-400) {
+         background-color: var(--primary-400) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-500) {
+         background-color: var(--primary-500) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-600) {
+         background-color: var(--primary-600) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-700) {
+         background-color: var(--primary-700) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-800) {
+         background-color: var(--primary-800) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-900) {
+         background-color: var(--primary-900) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-primary-950) {
+         background-color: var(--primary-950) !important;
+         color: var(--button-primary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-50) {
+         background-color: var(--secondary-50) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-100) {
+         background-color: var(--secondary-100) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-200) {
+         background-color: var(--secondary-200) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-300) {
+         background-color: var(--secondary-300) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-400) {
+         background-color: var(--secondary-400) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-500) {
+         background-color: var(--secondary-500) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-600) {
+         background-color: var(--secondary-600) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-700) {
+         background-color: var(--secondary-700) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-800) {
+         background-color: var(--secondary-800) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-900) {
+         background-color: var(--secondary-900) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-secondary-950) {
+         background-color: var(--secondary-950) !important;
+         color: var(--button-secondary-text-color) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-50) {
+         background-color: var(--neutral-50) !important;
+         color: var(--neutral-950) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-100) {
+         background-color: var(--neutral-100) !important;
+         color: var(--neutral-950) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-200) {
+         background-color: var(--neutral-200) !important;
+         color: var(--neutral-950) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-300) {
+         background-color: var(--neutral-300) !important;
+         color: var(--neutral-950) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-400) {
+         background-color: var(--neutral-400) !important;
+         color: var(--neutral-950) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-500) {
+         background-color: var(--neutral-500) !important;
+         color: var(--neutral-50) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-600) {
+         background-color: var(--neutral-600) !important;
+         color: var(--neutral-50) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-700) {
+         background-color: var(--neutral-700) !important;
+         color: var(--neutral-50) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-800) {
+         background-color: var(--neutral-800) !important;
+         color: var(--neutral-50) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-900) {
+         background-color: var(--neutral-900) !important;
+         color: var(--neutral-50) !important;
+     }
+     .gr-vistimeline :global(.color-neutral-950) {
+         background-color: var(--neutral-950) !important;
+         color: var(--neutral-50) !important;
+     }
+ </style>
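
Two details of Index.svelte surface directly in Python usage: parseOptions() turns a string moment option into a moment(date).utcOffset(...) factory, and addTimelineToGlobalWindow() only registers the instance under window.visTimelineInstances when an elem_id is set. A hedged sketch, with VisTimeline as the assumed class name:

from gradio_vistimeline import VisTimeline  # assumed class name

VisTimeline(
    value={"groups": [], "items": []},
    options={
        "moment": "+00:00",  # string offsets become a moment().utcOffset() factory
        "editable": True,    # other vis-timeline options pass through unchanged
    },
    elem_id="my-timeline",   # required for the window.visTimelineInstances lookup
)
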
src/frontend/gradio.config.js ADDED
@@ -0,0 +1,9 @@
+ export default {
+     plugins: [],
+     svelte: {
+         preprocess: [],
+     },
+     build: {
+         target: "modules",
+     },
+ };
src/frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
src/frontend/package.json ADDED
@@ -0,0 +1,41 @@
+ {
+     "name": "gradio_vistimeline",
+     "version": "0.3.8",
+     "description": "Gradio UI packages",
+     "type": "module",
+     "author": "",
+     "license": "ISC",
+     "private": false,
+     "main_changeset": true,
+     "exports": {
+         ".": {
+             "gradio": "./Index.svelte",
+             "svelte": "./dist/Index.svelte",
+             "types": "./dist/Index.svelte.d.ts"
+         },
+         "./example": {
+             "gradio": "./Example.svelte",
+             "svelte": "./dist/Example.svelte",
+             "types": "./dist/Example.svelte.d.ts"
+         },
+         "./package.json": "./package.json"
+     },
+     "dependencies": {
+         "@gradio/atoms": "0.11.2",
+         "@gradio/icons": "0.8.1",
+         "@gradio/statustracker": "0.9.6",
+         "@gradio/utils": "0.9.0",
+         "vis-timeline": "^7.7.3"
+     },
+     "devDependencies": {
+         "@gradio/preview": "0.13.0"
+     },
+     "peerDependencies": {
+         "svelte": "^4.2.19"
+     },
+     "repository": {
+         "type": "git",
+         "url": "git+https://github.com/gradio-app/gradio.git",
+         "directory": "js/simpletextbox"
+     }
+ }
src/frontend/tsconfig.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "compilerOptions": {
+         "verbatimModuleSyntax": true,
+         "strict": true,
+         "moduleResolution": "bundler",
+         "isolatedModules": true
+     }
+ }
+
src/package.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "dependencies": {
+         "@gradio/atoms": "^0.11.2",
+         "@gradio/statustracker": "^0.9.6"
+     }
+ }
src/pyproject.toml ADDED
@@ -0,0 +1,45 @@
+ [build-system]
+ requires = [
+     "hatchling",
+     "hatch-requirements-txt",
+     "hatch-fancy-pypi-readme>=22.5.0",
+ ]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "gradio_vistimeline"
+ version = "1.0.1"
+ description = "Gradio implementation for the vis.js Timeline visualization library"
+ readme = "README.md"
+ license = "Apache-2.0"
+ requires-python = ">=3.10"
+ authors = [{ name = "Yelorix", email = "[email protected]" }]
+ keywords = ["gradio-custom-component", "timeline", "vis timeline", "vis-timeline", "vis.js"]
+
+ dependencies = ["gradio>=4.0,<6.0"]
+ classifiers = [
+     'Development Status :: 3 - Alpha',
+     'Operating System :: OS Independent',
+     'Programming Language :: Python :: 3',
+     'Programming Language :: Python :: 3 :: Only',
+     'Programming Language :: Python :: 3.10',
+     'Programming Language :: Python :: 3.11',
+     'Topic :: Scientific/Engineering',
+     'Topic :: Scientific/Engineering :: Artificial Intelligence',
+     'Topic :: Scientific/Engineering :: Visualization',
+ ]
+
+ [project.urls]
+ repository = "https://github.com/Yelorix/gradio-vis-timeline"
+ space = "https://huggingface.co/spaces/Yelorix/gradio_vistimeline"
+
+ [project.optional-dependencies]
+ dev = ["build", "twine"]
+
+ [tool.hatch.build]
+ artifacts = ["/backend/gradio_vistimeline/templates", "*.pyi"]
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["/backend/gradio_vistimeline"]
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst ADDED
@@ -0,0 +1,28 @@
+ Copyright 2010 Pallets
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1.  Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+ 2.  Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+
+ 3.  Neither the name of the copyright holder nor the names of its
+     contributors may be used to endorse or promote products derived from
+     this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,93 @@
+ Metadata-Version: 2.1
+ Name: MarkupSafe
+ Version: 2.1.5
+ Summary: Safely add untrusted strings to HTML/XML markup.
+ Home-page: https://palletsprojects.com/p/markupsafe/
+ Maintainer: Pallets
+ Maintainer-email: [email protected]
+ License: BSD-3-Clause
+ Project-URL: Donate, https://palletsprojects.com/donate
+ Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+ Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+ Project-URL: Source Code, https://github.com/pallets/markupsafe/
+ Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+ Project-URL: Chat, https://discord.gg/pallets
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Environment :: Web Environment
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+ Classifier: Topic :: Text Processing :: Markup :: HTML
+ Requires-Python: >=3.7
+ Description-Content-Type: text/x-rst
+ License-File: LICENSE.rst
+
+ MarkupSafe
+ ==========
+
+ MarkupSafe implements a text object that escapes characters so it is
+ safe to use in HTML and XML. Characters that have special meanings are
+ replaced so that they display as the actual characters. This mitigates
+ injection attacks, meaning untrusted user input can safely be displayed
+ on a page.
+
+
+ Installing
+ ----------
+
+ Install and update using `pip`_:
+
+ .. code-block:: text
+
+     pip install -U MarkupSafe
+
+ .. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+     >>> from markupsafe import Markup, escape
+
+     >>> # escape replaces special characters and wraps in Markup
+     >>> escape("<script>alert(document.cookie);</script>")
+     Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+     >>> # wrap in Markup to mark text "safe" and prevent escaping
+     >>> Markup("<strong>Hello</strong>")
+     Markup('<strong>Hello</strong>')
+
+     >>> escape(Markup("<strong>Hello</strong>"))
+     Markup('<strong>Hello</strong>')
+
+     >>> # Markup is a str subclass
+     >>> # methods and operators escape their arguments
+     >>> template = Markup("Hello <em>{name}</em>")
+     >>> template.format(name='"World"')
+     Markup('Hello <em>&#34;World&#34;</em>')
+
+
+ Donate
+ ------
+
+ The Pallets organization develops and supports MarkupSafe and other
+ popular packages. In order to grow the community of contributors and
+ users, and allow the maintainers to devote more time to the projects,
+ `please donate today`_.
+
+ .. _please donate today: https://palletsprojects.com/donate
+
+
+ Links
+ -----
+
+ - Documentation: https://markupsafe.palletsprojects.com/
+ - Changes: https://markupsafe.palletsprojects.com/changes/
+ - PyPI Releases: https://pypi.org/project/MarkupSafe/
+ - Source Code: https://github.com/pallets/markupsafe/
+ - Issue Tracker: https://github.com/pallets/markupsafe/issues/
+ - Chat: https://discord.gg/pallets
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+ MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
+ MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096
+ MarkupSafe-2.1.5.dist-info/RECORD,,
+ MarkupSafe-2.1.5.dist-info/WHEEL,sha256=j9Aissza3750LQHFAQyYerNjmkEON1-8w_RaZNFtKSs,102
+ MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+ markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290
+ markupsafe/__pycache__/__init__.cpython-312.pyc,,
+ markupsafe/__pycache__/_native.cpython-312.pyc,,
+ markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776
+ markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403
+ markupsafe/_speedups.cp312-win_amd64.pyd,sha256=CLz8k0mpvM-dgLP0eSHpGYHm8shlGxXoCinA12zgHsY,15872
+ markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238
+ markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.42.0)
+ Root-Is-Purelib: false
+ Tag: cp312-cp312-win_amd64
+
src/venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ markupsafe
src/venv/Lib/site-packages/PIL/BdfFontFile.py ADDED
@@ -0,0 +1,133 @@
+ #
+ # The Python Imaging Library
+ # $Id$
+ #
+ # bitmap distribution font (bdf) file parser
+ #
+ # history:
+ # 1996-05-16 fl   created (as bdf2pil)
+ # 1997-08-25 fl   converted to FontFile driver
+ # 2001-05-25 fl   removed bogus __init__ call
+ # 2002-11-20 fl   robustification (from Kevin Cazabon, Dmitry Vasiliev)
+ # 2003-04-22 fl   more robustification (from Graham Dumpleton)
+ #
+ # Copyright (c) 1997-2003 by Secret Labs AB.
+ # Copyright (c) 1997-2003 by Fredrik Lundh.
+ #
+ # See the README file for information on usage and redistribution.
+ #
+
+ """
+ Parse X Bitmap Distribution Format (BDF)
+ """
+ from __future__ import annotations
+
+ from typing import BinaryIO
+
+ from . import FontFile, Image
+
+ bdf_slant = {
+     "R": "Roman",
+     "I": "Italic",
+     "O": "Oblique",
+     "RI": "Reverse Italic",
+     "RO": "Reverse Oblique",
+     "OT": "Other",
+ }
+
+ bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"}
+
+
+ def bdf_char(
+     f: BinaryIO,
+ ) -> (
+     tuple[
+         str,
+         int,
+         tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
+         Image.Image,
+     ]
+     | None
+ ):
+     # skip to STARTCHAR
+     while True:
+         s = f.readline()
+         if not s:
+             return None
+         if s[:9] == b"STARTCHAR":
+             break
+     id = s[9:].strip().decode("ascii")
+
+     # load symbol properties
+     props = {}
+     while True:
+         s = f.readline()
+         if not s or s[:6] == b"BITMAP":
+             break
+         i = s.find(b" ")
+         props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
+
+     # load bitmap
+     bitmap = bytearray()
+     while True:
+         s = f.readline()
+         if not s or s[:7] == b"ENDCHAR":
+             break
+         bitmap += s[:-1]
+
+     # The word BBX
+     # followed by the width in x (BBw), height in y (BBh),
+     # and x and y displacement (BBxoff0, BByoff0)
+     # of the lower left corner from the origin of the character.
+     width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())
+
+     # The word DWIDTH
+     # followed by the width in x and y of the character in device pixels.
+     dwx, dwy = (int(p) for p in props["DWIDTH"].split())
+
+     bbox = (
+         (dwx, dwy),
+         (x_disp, -y_disp - height, width + x_disp, -y_disp),
+         (0, 0, width, height),
+     )
+
+     try:
+         im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
+     except ValueError:
+         # deal with zero-width characters
+         im = Image.new("1", (width, height))
+
+     return id, int(props["ENCODING"]), bbox, im
+
+
+ class BdfFontFile(FontFile.FontFile):
+     """Font file plugin for the X11 BDF format."""
+
+     def __init__(self, fp: BinaryIO) -> None:
+         super().__init__()
+
+         s = fp.readline()
+         if s[:13] != b"STARTFONT 2.1":
+             msg = "not a valid BDF file"
+             raise SyntaxError(msg)
+
+         props = {}
+         comments = []
+
+         while True:
+             s = fp.readline()
+             if not s or s[:13] == b"ENDPROPERTIES":
+                 break
+             i = s.find(b" ")
+             props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
+             if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
+                 if s.find(b"LogicalFontDescription") < 0:
+                     comments.append(s[i + 1 : -1].decode("ascii"))
+
+         while True:
+             c = bdf_char(fp)
+             if not c:
+                 break
+             id, ch, (xy, dst, src), im = c
+             if 0 <= ch < len(self.glyph):
+                 self.glyph[ch] = xy, dst, src, im
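
As a usage note, BdfFontFile inherits FontFile.save(), so a BDF font can be converted to PIL's bitmap font format in a few lines; the input path below is hypothetical.

from PIL import BdfFontFile

with open("courier.bdf", "rb") as fp:  # hypothetical .bdf file
    font = BdfFontFile.BdfFontFile(fp)

font.save("courier")  # writes courier.pil and courier.pbm
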
src/venv/Lib/site-packages/PIL/BlpImagePlugin.py ADDED
@@ -0,0 +1,493 @@
+ """
+ Blizzard Mipmap Format (.blp)
+ Jerome Leclanche <[email protected]>
+
+ The contents of this file are hereby released in the public domain (CC0)
+ Full text of the CC0 license:
+     https://creativecommons.org/publicdomain/zero/1.0/
+
+ BLP1 files, used mostly in Warcraft III, are not fully supported.
+ All types of BLP2 files used in World of Warcraft are supported.
+
+ The BLP file structure consists of a header, up to 16 mipmaps of the
+ texture
+
+ Texture sizes must be powers of two, though the two dimensions do
+ not have to be equal; 512x256 is valid, but 512x200 is not.
+ The first mipmap (mipmap #0) is the full size image; each subsequent
+ mipmap halves both dimensions. The final mipmap should be 1x1.
+
+ BLP files come in many different flavours:
+ * JPEG-compressed (type == 0) - only supported for BLP1.
+ * RAW images (type == 1, encoding == 1). Each mipmap is stored as an
+   array of 8-bit values, one per pixel, left to right, top to bottom.
+   Each value is an index to the palette.
+ * DXT-compressed (type == 1, encoding == 2):
+   - DXT1 compression is used if alpha_encoding == 0.
+     - An additional alpha bit is used if alpha_depth == 1.
+   - DXT3 compression is used if alpha_encoding == 1.
+   - DXT5 compression is used if alpha_encoding == 7.
+ """
+
+ from __future__ import annotations
+
+ import abc
+ import os
+ import struct
+ from enum import IntEnum
+ from io import BytesIO
+ from typing import IO
+
+ from . import Image, ImageFile
+
+
+ class Format(IntEnum):
+     JPEG = 0
+
+
+ class Encoding(IntEnum):
+     UNCOMPRESSED = 1
+     DXT = 2
+     UNCOMPRESSED_RAW_BGRA = 3
+
+
+ class AlphaEncoding(IntEnum):
+     DXT1 = 0
+     DXT3 = 1
+     DXT5 = 7
+
+
+ def unpack_565(i: int) -> tuple[int, int, int]:
+     return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3
+
+
+ def decode_dxt1(
+     data: bytes, alpha: bool = False
+ ) -> tuple[bytearray, bytearray, bytearray, bytearray]:
+     """
+     input: one "row" of data (i.e. will produce 4*width pixels)
+     """
+
+     blocks = len(data) // 8  # number of blocks in row
+     ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+     for block_index in range(blocks):
+         # Decode next 8-byte block.
+         idx = block_index * 8
+         color0, color1, bits = struct.unpack_from("<HHI", data, idx)
+
+         r0, g0, b0 = unpack_565(color0)
+         r1, g1, b1 = unpack_565(color1)
+
+         # Decode this block into 4x4 pixels
+         # Accumulate the results onto our 4 row accumulators
+         for j in range(4):
+             for i in range(4):
+                 # get next control op and generate a pixel
+
+                 control = bits & 3
+                 bits = bits >> 2
+
+                 a = 0xFF
+                 if control == 0:
+                     r, g, b = r0, g0, b0
+                 elif control == 1:
+                     r, g, b = r1, g1, b1
+                 elif control == 2:
+                     if color0 > color1:
+                         r = (2 * r0 + r1) // 3
+                         g = (2 * g0 + g1) // 3
+                         b = (2 * b0 + b1) // 3
+                     else:
+                         r = (r0 + r1) // 2
+                         g = (g0 + g1) // 2
+                         b = (b0 + b1) // 2
+                 elif control == 3:
+                     if color0 > color1:
+                         r = (2 * r1 + r0) // 3
+                         g = (2 * g1 + g0) // 3
+                         b = (2 * b1 + b0) // 3
+                     else:
+                         r, g, b, a = 0, 0, 0, 0
+
+                 if alpha:
+                     ret[j].extend([r, g, b, a])
+                 else:
+                     ret[j].extend([r, g, b])
+
+     return ret
+
+
+ def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
+     """
+     input: one "row" of data (i.e. will produce 4*width pixels)
+     """
+
+     blocks = len(data) // 16  # number of blocks in row
+     ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+     for block_index in range(blocks):
+         idx = block_index * 16
+         block = data[idx : idx + 16]
+         # Decode next 16-byte block.
+         bits = struct.unpack_from("<8B", block)
+         color0, color1 = struct.unpack_from("<HH", block, 8)
+
+         (code,) = struct.unpack_from("<I", block, 12)
+
+         r0, g0, b0 = unpack_565(color0)
+         r1, g1, b1 = unpack_565(color1)
+
+         for j in range(4):
+             high = False  # Do we want the higher bits?
+             for i in range(4):
+                 alphacode_index = (4 * j + i) // 2
+                 a = bits[alphacode_index]
+                 if high:
+                     high = False
+                     a >>= 4
+                 else:
+                     high = True
+                     a &= 0xF
+                 a *= 17  # We get a value between 0 and 15
+
+                 color_code = (code >> 2 * (4 * j + i)) & 0x03
+
+                 if color_code == 0:
+                     r, g, b = r0, g0, b0
+                 elif color_code == 1:
+                     r, g, b = r1, g1, b1
+                 elif color_code == 2:
+                     r = (2 * r0 + r1) // 3
+                     g = (2 * g0 + g1) // 3
+                     b = (2 * b0 + b1) // 3
+                 elif color_code == 3:
+                     r = (2 * r1 + r0) // 3
+                     g = (2 * g1 + g0) // 3
+                     b = (2 * b1 + b0) // 3
+
+                 ret[j].extend([r, g, b, a])
+
+     return ret
+
+
+ def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
+     """
+     input: one "row" of data (i.e. will produce 4 * width pixels)
+     """
+
+     blocks = len(data) // 16  # number of blocks in row
+     ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+     for block_index in range(blocks):
+         idx = block_index * 16
+         block = data[idx : idx + 16]
+         # Decode next 16-byte block.
+         a0, a1 = struct.unpack_from("<BB", block)
+
+         bits = struct.unpack_from("<6B", block, 2)
+         alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
+         alphacode2 = bits[0] | (bits[1] << 8)
+
+         color0, color1 = struct.unpack_from("<HH", block, 8)
+
+         (code,) = struct.unpack_from("<I", block, 12)
+
+         r0, g0, b0 = unpack_565(color0)
+         r1, g1, b1 = unpack_565(color1)
+
+         for j in range(4):
+             for i in range(4):
+                 # get next control op and generate a pixel
+                 alphacode_index = 3 * (4 * j + i)
+
+                 if alphacode_index <= 12:
+                     alphacode = (alphacode2 >> alphacode_index) & 0x07
+                 elif alphacode_index == 15:
+                     alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
+                 else:  # alphacode_index >= 18 and alphacode_index <= 45
+                     alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07
+
+                 if alphacode == 0:
+                     a = a0
+                 elif alphacode == 1:
+                     a = a1
+                 elif a0 > a1:
+                     a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
+                 elif alphacode == 6:
+                     a = 0
+                 elif alphacode == 7:
+                     a = 255
+                 else:
+                     a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5
+
+                 color_code = (code >> 2 * (4 * j + i)) & 0x03
+
+                 if color_code == 0:
+                     r, g, b = r0, g0, b0
+                 elif color_code == 1:
+                     r, g, b = r1, g1, b1
+                 elif color_code == 2:
+                     r = (2 * r0 + r1) // 3
+                     g = (2 * g0 + g1) // 3
+                     b = (2 * b0 + b1) // 3
+                 elif color_code == 3:
+                     r = (2 * r1 + r0) // 3
+                     g = (2 * g1 + g0) // 3
+                     b = (2 * b1 + b0) // 3
+
+                 ret[j].extend([r, g, b, a])
+
+     return ret
+
+
+ class BLPFormatError(NotImplementedError):
+     pass
+
+
+ def _accept(prefix: bytes) -> bool:
+     return prefix[:4] in (b"BLP1", b"BLP2")
+
+
+ class BlpImageFile(ImageFile.ImageFile):
+     """
+     Blizzard Mipmap Format
+     """
+
+     format = "BLP"
+     format_description = "Blizzard Mipmap Format"
+
+     def _open(self) -> None:
+         self.magic = self.fp.read(4)
+
+         self.fp.seek(5, os.SEEK_CUR)
+         (self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))
+
+         self.fp.seek(2, os.SEEK_CUR)
+         self._size = struct.unpack("<II", self.fp.read(8))
+
+         if self.magic in (b"BLP1", b"BLP2"):
+             decoder = self.magic.decode()
+         else:
+             msg = f"Bad BLP magic {repr(self.magic)}"
+             raise BLPFormatError(msg)
+
+         self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
+         self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]
+
+
+ class _BLPBaseDecoder(ImageFile.PyDecoder):
+     _pulls_fd = True
+
+     def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
+         try:
+             self._read_blp_header()
+             self._load()
+         except struct.error as e:
+             msg = "Truncated BLP file"
+             raise OSError(msg) from e
+         return -1, 0
+
+     @abc.abstractmethod
+     def _load(self) -> None:
+         pass
+
+     def _read_blp_header(self) -> None:
+         assert self.fd is not None
+         self.fd.seek(4)
+         (self._blp_compression,) = struct.unpack("<i", self._safe_read(4))
+
+         (self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
+         (self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
+         (self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
+         self.fd.seek(1, os.SEEK_CUR)  # mips
+
+         self.size = struct.unpack("<II", self._safe_read(8))
+
+         if isinstance(self, BLP1Decoder):
+             # Only present for BLP1
+             (self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
+             self.fd.seek(4, os.SEEK_CUR)  # subtype
+
+         self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
+         self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))
+
+     def _safe_read(self, length: int) -> bytes:
+         assert self.fd is not None
+         return ImageFile._safe_read(self.fd, length)
+
+     def _read_palette(self) -> list[tuple[int, int, int, int]]:
+         ret = []
+         for i in range(256):
+             try:
+                 b, g, r, a = struct.unpack("<4B", self._safe_read(4))
+             except struct.error:
+                 break
+             ret.append((b, g, r, a))
+         return ret
+
+     def _read_bgra(self, palette: list[tuple[int, int, int, int]]) -> bytearray:
+         data = bytearray()
+         _data = BytesIO(self._safe_read(self._blp_lengths[0]))
+         while True:
+             try:
+                 (offset,) = struct.unpack("<B", _data.read(1))
+             except struct.error:
+                 break
+             b, g, r, a = palette[offset]
+             d: tuple[int, ...] = (r, g, b)
+             if self._blp_alpha_depth:
+                 d += (a,)
+             data.extend(d)
+         return data
+
+
+ class BLP1Decoder(_BLPBaseDecoder):
+     def _load(self) -> None:
+         if self._blp_compression == Format.JPEG:
+             self._decode_jpeg_stream()
+
+         elif self._blp_compression == 1:
+             if self._blp_encoding in (4, 5):
+                 palette = self._read_palette()
+                 data = self._read_bgra(palette)
+                 self.set_as_raw(data)
+             else:
+                 msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
+                 raise BLPFormatError(msg)
+         else:
+             msg = f"Unsupported BLP compression {repr(self._blp_encoding)}"
+             raise BLPFormatError(msg)
+
+     def _decode_jpeg_stream(self) -> None:
+         from .JpegImagePlugin import JpegImageFile
+
+         (jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
+         jpeg_header = self._safe_read(jpeg_header_size)
+         assert self.fd is not None
+         self._safe_read(self._blp_offsets[0] - self.fd.tell())  # What IS this?
+         data = self._safe_read(self._blp_lengths[0])
+         data = jpeg_header + data
+         image = JpegImageFile(BytesIO(data))
+         Image._decompression_bomb_check(image.size)
+         if image.mode == "CMYK":
+             decoder_name, extents, offset, args = image.tile[0]
+             assert isinstance(args, tuple)
+             image.tile = [
+                 ImageFile._Tile(decoder_name, extents, offset, (args[0], "CMYK"))
+             ]
+         r, g, b = image.convert("RGB").split()
+         reversed_image = Image.merge("RGB", (b, g, r))
+         self.set_as_raw(reversed_image.tobytes())
+
+
+ class BLP2Decoder(_BLPBaseDecoder):
+     def _load(self) -> None:
+         palette = self._read_palette()
+
+         assert self.fd is not None
+         self.fd.seek(self._blp_offsets[0])
+
+         if self._blp_compression == 1:
+             # Uncompressed or DirectX compression
+
+             if self._blp_encoding == Encoding.UNCOMPRESSED:
+                 data = self._read_bgra(palette)
+
+             elif self._blp_encoding == Encoding.DXT:
+                 data = bytearray()
+                 if self._blp_alpha_encoding == AlphaEncoding.DXT1:
+                     linesize = (self.size[0] + 3) // 4 * 8
+                     for yb in range((self.size[1] + 3) // 4):
+                         for d in decode_dxt1(
+                             self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
+                         ):
+                             data += d
+
+                 elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
+                     linesize = (self.size[0] + 3) // 4 * 16
+                     for yb in range((self.size[1] + 3) // 4):
+                         for d in decode_dxt3(self._safe_read(linesize)):
+                             data += d
+
+                 elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
+                     linesize = (self.size[0] + 3) // 4 * 16
+                     for yb in range((self.size[1] + 3) // 4):
+                         for d in decode_dxt5(self._safe_read(linesize)):
+                             data += d
+                 else:
+                     msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
+                     raise BLPFormatError(msg)
+             else:
+                 msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
+                 raise BLPFormatError(msg)
+
+         else:
+             msg = f"Unknown BLP compression {repr(self._blp_compression)}"
+             raise BLPFormatError(msg)
+
+         self.set_as_raw(data)
+
+
+ class BLPEncoder(ImageFile.PyEncoder):
+     _pushes_fd = True
+
+     def _write_palette(self) -> bytes:
+         data = b""
+         assert self.im is not None
+         palette = self.im.getpalette("RGBA", "RGBA")
+         for i in range(len(palette) // 4):
+             r, g, b, a = palette[i * 4 : (i + 1) * 4]
+             data += struct.pack("<4B", b, g, r, a)
+         while len(data) < 256 * 4:
+             data += b"\x00" * 4
+         return data
+
+     def encode(self, bufsize: int) -> tuple[int, int, bytes]:
+         palette_data = self._write_palette()
+
+         offset = 20 + 16 * 4 * 2 + len(palette_data)
+         data = struct.pack("<16I", offset, *((0,) * 15))
+
+         assert self.im is not None
+         w, h = self.im.size
+         data += struct.pack("<16I", w * h, *((0,) * 15))
+
+         data += palette_data
+
+         for y in range(h):
+             for x in range(w):
+                 data += struct.pack("<B", self.im.getpixel((x, y)))
+
+         return len(data), 0, data
+
+
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+     if im.mode != "P":
+         msg = "Unsupported BLP image mode"
+         raise ValueError(msg)
+
+     magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
+     fp.write(magic)
+
+     assert im.palette is not None
+     fp.write(struct.pack("<i", 1))  # Uncompressed or DirectX compression
+     fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
+     fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
+     fp.write(struct.pack("<b", 0))  # alpha encoding
+     fp.write(struct.pack("<b", 0))  # mips
+     fp.write(struct.pack("<II", *im.size))
+     if magic == b"BLP1":
+         fp.write(struct.pack("<i", 5))
+         fp.write(struct.pack("<i", 0))
+
+     ImageFile._save(im, fp, [ImageFile._Tile("BLP", (0, 0) + im.size, 0, im.mode)])
+
+
+ Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
+ Image.register_extension(BlpImageFile.format, ".blp")
+ Image.register_decoder("BLP1", BLP1Decoder)
+ Image.register_decoder("BLP2", BLP2Decoder)
+
+ Image.register_save(BlpImageFile.format, _save)
+ Image.register_encoder("BLP", BLPEncoder)
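
Since the plugin registers itself with Image.register_open and register_extension, BLP textures load like any other Pillow format; "texture.blp" below is a hypothetical file.

from PIL import Image

im = Image.open("texture.blp")          # dispatches to BlpImageFile
print(im.format, im.mode, im.size)      # "BLP", "RGB" or "RGBA", power-of-two dims
im.convert("RGBA").save("texture.png")  # re-encode with any other plugin
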
src/venv/Lib/site-packages/PIL/BmpImagePlugin.py ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # BMP file handler
6
+ #
7
+ # Windows (and OS/2) native bitmap storage format.
8
+ #
9
+ # history:
10
+ # 1995-09-01 fl Created
11
+ # 1996-04-30 fl Added save
12
+ # 1997-08-27 fl Fixed save of 1-bit images
13
+ # 1998-03-06 fl Load P images as L where possible
14
+ # 1998-07-03 fl Load P images as 1 where possible
15
+ # 1998-12-29 fl Handle small palettes
16
+ # 2002-12-30 fl Fixed load of 1-bit palette images
17
+ # 2003-04-21 fl Fixed load of 1-bit monochrome images
18
+ # 2003-04-23 fl Added limited support for BI_BITFIELDS compression
19
+ #
20
+ # Copyright (c) 1997-2003 by Secret Labs AB
21
+ # Copyright (c) 1995-2003 by Fredrik Lundh
22
+ #
23
+ # See the README file for information on usage and redistribution.
24
+ #
25
+ from __future__ import annotations
26
+
27
+ import os
28
+ from typing import IO, Any
29
+
30
+ from . import Image, ImageFile, ImagePalette
31
+ from ._binary import i16le as i16
32
+ from ._binary import i32le as i32
33
+ from ._binary import o8
34
+ from ._binary import o16le as o16
35
+ from ._binary import o32le as o32
36
+
37
+ #
38
+ # --------------------------------------------------------------------
39
+ # Read BMP file
40
+
41
+ BIT2MODE = {
42
+ # bits => mode, rawmode
43
+ 1: ("P", "P;1"),
44
+ 4: ("P", "P;4"),
45
+ 8: ("P", "P"),
46
+ 16: ("RGB", "BGR;15"),
47
+ 24: ("RGB", "BGR"),
48
+ 32: ("RGB", "BGRX"),
49
+ }
50
+
51
+
52
+ def _accept(prefix: bytes) -> bool:
53
+ return prefix[:2] == b"BM"
54
+
55
+
56
+ def _dib_accept(prefix: bytes) -> bool:
57
+ return i32(prefix) in [12, 40, 52, 56, 64, 108, 124]
58
+
59
+
60
+ # =============================================================================
61
+ # Image plugin for the Windows BMP format.
62
+ # =============================================================================
63
+ class BmpImageFile(ImageFile.ImageFile):
64
+ """Image plugin for the Windows Bitmap format (BMP)"""
65
+
66
+ # ------------------------------------------------------------- Description
67
+ format_description = "Windows Bitmap"
68
+ format = "BMP"
69
+
70
+ # -------------------------------------------------- BMP Compression values
71
+ COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
72
+ for k, v in COMPRESSIONS.items():
73
+ vars()[k] = v
74
+
75
+ def _bitmap(self, header: int = 0, offset: int = 0) -> None:
76
+ """Read relevant info about the BMP"""
77
+ read, seek = self.fp.read, self.fp.seek
78
+ if header:
79
+ seek(header)
80
+ # read bmp header size @offset 14 (this is part of the header size)
81
+ file_info: dict[str, bool | int | tuple[int, ...]] = {
82
+ "header_size": i32(read(4)),
83
+ "direction": -1,
84
+ }
85
+
86
+ # -------------------- If requested, read header at a specific position
87
+ # read the rest of the bmp header, without its size
88
+ assert isinstance(file_info["header_size"], int)
89
+ header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)
90
+
91
+ # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1
92
+ # ----- This format has different offsets because of width/height types
93
+ # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER
94
+ if file_info["header_size"] == 12:
95
+ file_info["width"] = i16(header_data, 0)
96
+ file_info["height"] = i16(header_data, 2)
97
+ file_info["planes"] = i16(header_data, 4)
98
+ file_info["bits"] = i16(header_data, 6)
99
+ file_info["compression"] = self.COMPRESSIONS["RAW"]
100
+ file_info["palette_padding"] = 3
101
+
102
+ # --------------------------------------------- Windows Bitmap v3 to v5
103
+ # 40: BITMAPINFOHEADER
104
+ # 52: BITMAPV2HEADER
105
+ # 56: BITMAPV3HEADER
106
+ # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER
107
+ # 108: BITMAPV4HEADER
108
+ # 124: BITMAPV5HEADER
109
+ elif file_info["header_size"] in (40, 52, 56, 64, 108, 124):
110
+ file_info["y_flip"] = header_data[7] == 0xFF
111
+ file_info["direction"] = 1 if file_info["y_flip"] else -1
112
+ file_info["width"] = i32(header_data, 0)
113
+ file_info["height"] = (
114
+ i32(header_data, 4)
115
+ if not file_info["y_flip"]
116
+ else 2**32 - i32(header_data, 4)
117
+ )
118
+ file_info["planes"] = i16(header_data, 8)
119
+ file_info["bits"] = i16(header_data, 10)
120
+ file_info["compression"] = i32(header_data, 12)
121
+ # byte size of pixel data
122
+ file_info["data_size"] = i32(header_data, 16)
123
+ file_info["pixels_per_meter"] = (
124
+ i32(header_data, 20),
125
+ i32(header_data, 24),
126
+ )
127
+ file_info["colors"] = i32(header_data, 28)
128
+ file_info["palette_padding"] = 4
129
+ assert isinstance(file_info["pixels_per_meter"], tuple)
130
+ self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
131
+ if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
132
+ masks = ["r_mask", "g_mask", "b_mask"]
133
+ if len(header_data) >= 48:
134
+ if len(header_data) >= 52:
135
+ masks.append("a_mask")
136
+ else:
137
+ file_info["a_mask"] = 0x0
138
+ for idx, mask in enumerate(masks):
139
+ file_info[mask] = i32(header_data, 36 + idx * 4)
140
+ else:
141
+ # 40 byte headers only have the three components in the
142
+ # bitfields masks, ref:
143
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
144
+ # See also
145
+ # https://github.com/python-pillow/Pillow/issues/1293
146
+ # There is a 4th component in the RGBQuad, in the alpha
147
+ # location, but it is listed as a reserved component,
148
+ # and it is not generally an alpha channel
149
+ file_info["a_mask"] = 0x0
150
+ for mask in masks:
151
+ file_info[mask] = i32(read(4))
152
+ assert isinstance(file_info["r_mask"], int)
153
+ assert isinstance(file_info["g_mask"], int)
154
+ assert isinstance(file_info["b_mask"], int)
155
+ assert isinstance(file_info["a_mask"], int)
156
+ file_info["rgb_mask"] = (
157
+ file_info["r_mask"],
158
+ file_info["g_mask"],
159
+ file_info["b_mask"],
160
+ )
161
+ file_info["rgba_mask"] = (
162
+ file_info["r_mask"],
163
+ file_info["g_mask"],
164
+ file_info["b_mask"],
165
+ file_info["a_mask"],
166
+ )
167
+ else:
168
+ msg = f"Unsupported BMP header type ({file_info['header_size']})"
169
+ raise OSError(msg)
170
+
171
+ # ------------------ Special case : header is reported 40, which
172
+ # ---------------------- is shorter than real size for bpp >= 16
173
+ assert isinstance(file_info["width"], int)
174
+ assert isinstance(file_info["height"], int)
175
+ self._size = file_info["width"], file_info["height"]
176
+
177
+ # ------- If color count was not found in the header, compute from bits
178
+ assert isinstance(file_info["bits"], int)
179
+ file_info["colors"] = (
180
+ file_info["colors"]
181
+ if file_info.get("colors", 0)
182
+ else (1 << file_info["bits"])
183
+ )
184
+ assert isinstance(file_info["colors"], int)
185
+ if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
186
+ offset += 4 * file_info["colors"]
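+ # the stated data offset pointed straight past the header, so step over the colour table too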
187
+
188
+ # ---------------------- Check bit depth for unusual unsupported values
189
+ self._mode, raw_mode = BIT2MODE.get(file_info["bits"], ("", ""))
190
+ if not self.mode:
191
+ msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
192
+ raise OSError(msg)
193
+
194
+ # ---------------- Process BMP with Bitfields compression (not palette)
195
+ decoder_name = "raw"
196
+ if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
197
+ SUPPORTED: dict[int, list[tuple[int, ...]]] = {
198
+ 32: [
199
+ (0xFF0000, 0xFF00, 0xFF, 0x0),
200
+ (0xFF000000, 0xFF0000, 0xFF00, 0x0),
201
+ (0xFF000000, 0xFF00, 0xFF, 0x0),
202
+ (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
203
+ (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
204
+ (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
205
+ (0xFF000000, 0xFF00, 0xFF, 0xFF0000),
206
+ (0x0, 0x0, 0x0, 0x0),
207
+ ],
208
+ 24: [(0xFF0000, 0xFF00, 0xFF)],
209
+ 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
210
+ }
211
+ MASK_MODES = {
212
+ (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
213
+ (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
214
+ (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR",
215
+ (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
216
+ (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
217
+ (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
218
+ (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR",
219
+ (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
220
+ (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
221
+ (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
222
+ (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
223
+ }
224
+ if file_info["bits"] in SUPPORTED:
225
+ if (
226
+ file_info["bits"] == 32
227
+ and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
228
+ ):
229
+ assert isinstance(file_info["rgba_mask"], tuple)
230
+ raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
231
+ self._mode = "RGBA" if "A" in raw_mode else self.mode
232
+ elif (
233
+ file_info["bits"] in (24, 16)
234
+ and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
235
+ ):
236
+ assert isinstance(file_info["rgb_mask"], tuple)
237
+ raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
238
+ else:
239
+ msg = "Unsupported BMP bitfields layout"
240
+ raise OSError(msg)
241
+ else:
242
+ msg = "Unsupported BMP bitfields layout"
243
+ raise OSError(msg)
244
+ elif file_info["compression"] == self.COMPRESSIONS["RAW"]:
245
+ if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
246
+ raw_mode, self._mode = "BGRA", "RGBA"
247
+ elif file_info["compression"] in (
248
+ self.COMPRESSIONS["RLE8"],
249
+ self.COMPRESSIONS["RLE4"],
250
+ ):
251
+ decoder_name = "bmp_rle"
252
+ else:
253
+ msg = f"Unsupported BMP compression ({file_info['compression']})"
254
+ raise OSError(msg)
255
+
256
+ # --------------- Once the header is processed, process the palette/LUT
257
+ if self.mode == "P": # Paletted for 1, 4 and 8 bit images
258
+ # ---------------------------------------------------- 1-bit images
259
+ if not (0 < file_info["colors"] <= 65536):
260
+ msg = f"Unsupported BMP Palette size ({file_info['colors']})"
261
+ raise OSError(msg)
262
+ else:
263
+ assert isinstance(file_info["palette_padding"], int)
264
+ padding = file_info["palette_padding"]
265
+ palette = read(padding * file_info["colors"])
266
+ grayscale = True
267
+ indices = (
268
+ (0, 255)
269
+ if file_info["colors"] == 2
270
+ else list(range(file_info["colors"]))
271
+ )
272
+
273
+ # ----------------- Check if grayscale and ignore palette if so
274
+ for ind, val in enumerate(indices):
275
+ rgb = palette[ind * padding : ind * padding + 3]
276
+ if rgb != o8(val) * 3:
277
+ grayscale = False
278
+
279
+ # ------- If all colors are gray, white or black, ditch palette
280
+ if grayscale:
281
+ self._mode = "1" if file_info["colors"] == 2 else "L"
282
+ raw_mode = self.mode
283
+ else:
284
+ self._mode = "P"
285
+ self.palette = ImagePalette.raw(
286
+ "BGRX" if padding == 4 else "BGR", palette
287
+ )
288
+
289
+ # ---------------------------- Finally set the tile data for the plugin
290
+ self.info["compression"] = file_info["compression"]
291
+ args: list[Any] = [raw_mode]
292
+ if decoder_name == "bmp_rle":
293
+ args.append(file_info["compression"] == self.COMPRESSIONS["RLE4"])
294
+ else:
295
+ assert isinstance(file_info["width"], int)
296
+ args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
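+ # stride: bytes per row, rounded up to a 4-byte (DWORD) boundary as BMP requires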
297
+ args.append(file_info["direction"])
298
+ self.tile = [
299
+ ImageFile._Tile(
300
+ decoder_name,
301
+ (0, 0, file_info["width"], file_info["height"]),
302
+ offset or self.fp.tell(),
303
+ tuple(args),
304
+ )
305
+ ]
306
+
307
+ def _open(self) -> None:
308
+ """Open file, check magic number and read header"""
309
+ # read 14 bytes: magic number, filesize, reserved, header final offset
310
+ head_data = self.fp.read(14)
311
+ # choke if the file does not have the required magic bytes
312
+ if not _accept(head_data):
313
+ msg = "Not a BMP file"
314
+ raise SyntaxError(msg)
315
+ # read the start position of the BMP image data (u32)
316
+ offset = i32(head_data, 10)
317
+ # load bitmap information (offset=raster info)
318
+ self._bitmap(offset=offset)
319
+
320
+
321
+ class BmpRleDecoder(ImageFile.PyDecoder):
322
+ _pulls_fd = True
323
+
324
+ def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
325
+ assert self.fd is not None
326
+ rle4 = self.args[1]
327
+ data = bytearray()
328
+ x = 0
329
+ dest_length = self.state.xsize * self.state.ysize
330
+ while len(data) < dest_length:
331
+ pixels = self.fd.read(1)
332
+ byte = self.fd.read(1)
333
+ if not pixels or not byte:
334
+ break
335
+ num_pixels = pixels[0]
336
+ if num_pixels:
337
+ # encoded mode
338
+ if x + num_pixels > self.state.xsize:
339
+ # Too much data for row
340
+ num_pixels = max(0, self.state.xsize - x)
341
+ if rle4:
342
+ first_pixel = o8(byte[0] >> 4)
343
+ second_pixel = o8(byte[0] & 0x0F)
344
+ for index in range(num_pixels):
345
+ if index % 2 == 0:
346
+ data += first_pixel
347
+ else:
348
+ data += second_pixel
349
+ else:
350
+ data += byte * num_pixels
351
+ x += num_pixels
352
+ else:
353
+ if byte[0] == 0:
354
+ # end of line
355
+ while len(data) % self.state.xsize != 0:
356
+ data += b"\x00"
357
+ x = 0
358
+ elif byte[0] == 1:
359
+ # end of bitmap
360
+ break
361
+ elif byte[0] == 2:
362
+ # delta
363
+ bytes_read = self.fd.read(2)
364
+ if len(bytes_read) < 2:
365
+ break
366
+ right, up = bytes_read
367
+ data += b"\x00" * (right + up * self.state.xsize)
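+ # pixels skipped by the delta are left as value 0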
368
+ x = len(data) % self.state.xsize
369
+ else:
370
+ # absolute mode
371
+ if rle4:
372
+ # 2 pixels per byte
373
+ byte_count = byte[0] // 2
374
+ bytes_read = self.fd.read(byte_count)
375
+ for byte_read in bytes_read:
376
+ data += o8(byte_read >> 4)
377
+ data += o8(byte_read & 0x0F)
378
+ else:
379
+ byte_count = byte[0]
380
+ bytes_read = self.fd.read(byte_count)
381
+ data += bytes_read
382
+ if len(bytes_read) < byte_count:
383
+ break
384
+ x += byte[0]
385
+
386
+ # align to 16-bit word boundary
387
+ if self.fd.tell() % 2 != 0:
388
+ self.fd.seek(1, os.SEEK_CUR)
389
+ rawmode = "L" if self.mode == "L" else "P"
390
+ self.set_as_raw(bytes(data), rawmode, (0, self.args[-1]))
391
+ return -1, 0
392
+
393
+
394
+ # =============================================================================
395
+ # Image plugin for the DIB format (BMP alias)
396
+ # =============================================================================
397
+ class DibImageFile(BmpImageFile):
398
+ format = "DIB"
399
+ format_description = "Windows Bitmap"
400
+
401
+ def _open(self) -> None:
402
+ self._bitmap()
403
+
404
+
405
+ #
406
+ # --------------------------------------------------------------------
407
+ # Write BMP file
408
+
409
+
410
+ SAVE = {
411
+ "1": ("1", 1, 2),
412
+ "L": ("L", 8, 256),
413
+ "P": ("P", 8, 256),
414
+ "RGB": ("BGR", 24, 0),
415
+ "RGBA": ("BGRA", 32, 0),
416
+ }
417
+
418
+
419
+ def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
420
+ _save(im, fp, filename, False)
421
+
422
+
423
+ def _save(
424
+ im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
425
+ ) -> None:
426
+ try:
427
+ rawmode, bits, colors = SAVE[im.mode]
428
+ except KeyError as e:
429
+ msg = f"cannot write mode {im.mode} as BMP"
430
+ raise OSError(msg) from e
431
+
432
+ info = im.encoderinfo
433
+
434
+ dpi = info.get("dpi", (96, 96))
435
+
436
+ # 1 meter == 39.3701 inches
437
+ ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)
438
+
439
+ stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
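+ # output rows are whole bytes, padded up to a multiple of four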
440
+ header = 40 # or 64 for OS/2 version 2
441
+ image = stride * im.size[1]
442
+
443
+ if im.mode == "1":
444
+ palette = b"".join(o8(i) * 4 for i in (0, 255))
445
+ elif im.mode == "L":
446
+ palette = b"".join(o8(i) * 4 for i in range(256))
447
+ elif im.mode == "P":
448
+ palette = im.im.getpalette("RGB", "BGRX")
449
+ colors = len(palette) // 4
450
+ else:
451
+ palette = None
452
+
453
+ # bitmap header
454
+ if bitmap_header:
455
+ offset = 14 + header + colors * 4
456
+ file_size = offset + image
457
+ if file_size > 2**32 - 1:
458
+ msg = "File size is too large for the BMP format"
459
+ raise ValueError(msg)
460
+ fp.write(
461
+ b"BM" # file type (magic)
462
+ + o32(file_size) # file size
463
+ + o32(0) # reserved
464
+ + o32(offset) # image data offset
465
+ )
466
+
467
+ # bitmap info header
468
+ fp.write(
469
+ o32(header) # info header size
470
+ + o32(im.size[0]) # width
471
+ + o32(im.size[1]) # height
472
+ + o16(1) # planes
473
+ + o16(bits) # depth
474
+ + o32(0) # compression (0=uncompressed)
475
+ + o32(image) # size of bitmap
476
+ + o32(ppm[0]) # resolution
477
+ + o32(ppm[1]) # resolution
478
+ + o32(colors) # colors used
479
+ + o32(colors) # colors important
480
+ )
481
+
482
+ fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
483
+
484
+ if palette:
485
+ fp.write(palette)
486
+
487
+ ImageFile._save(
488
+ im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]
489
+ )
490
+
491
+
492
+ #
493
+ # --------------------------------------------------------------------
494
+ # Registry
495
+
496
+
497
+ Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
498
+ Image.register_save(BmpImageFile.format, _save)
499
+
500
+ Image.register_extension(BmpImageFile.format, ".bmp")
501
+
502
+ Image.register_mime(BmpImageFile.format, "image/bmp")
503
+
504
+ Image.register_decoder("bmp_rle", BmpRleDecoder)
505
+
506
+ Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
507
+ Image.register_save(DibImageFile.format, _dib_save)
508
+
509
+ Image.register_extension(DibImageFile.format, ".dib")
510
+
511
+ Image.register_mime(DibImageFile.format, "image/bmp")
src/venv/Lib/site-packages/PIL/BufrStubImagePlugin.py ADDED
@@ -0,0 +1,76 @@
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # BUFR stub adapter
6
+ #
7
+ # Copyright (c) 1996-2003 by Fredrik Lundh
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+ from __future__ import annotations
12
+
13
+ from typing import IO
14
+
15
+ from . import Image, ImageFile
16
+
17
+ _handler = None
18
+
19
+
20
+ def register_handler(handler: ImageFile.StubHandler | None) -> None:
21
+ """
22
+ Install application-specific BUFR image handler.
23
+
24
+ :param handler: Handler object.
25
+ """
26
+ global _handler
27
+ _handler = handler
28
+
29
+
30
+ # --------------------------------------------------------------------
31
+ # Image adapter
32
+
33
+
34
+ def _accept(prefix: bytes) -> bool:
35
+ return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"
36
+
37
+
38
+ class BufrStubImageFile(ImageFile.StubImageFile):
39
+ format = "BUFR"
40
+ format_description = "BUFR"
41
+
42
+ def _open(self) -> None:
43
+ offset = self.fp.tell()
44
+
45
+ if not _accept(self.fp.read(4)):
46
+ msg = "Not a BUFR file"
47
+ raise SyntaxError(msg)
48
+
49
+ self.fp.seek(offset)
50
+
51
+ # make something up
52
+ self._mode = "F"
53
+ self._size = 1, 1
54
+
55
+ loader = self._load()
56
+ if loader:
57
+ loader.open(self)
58
+
59
+ def _load(self) -> ImageFile.StubHandler | None:
60
+ return _handler
61
+
62
+
63
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
64
+ if _handler is None or not hasattr(_handler, "save"):
65
+ msg = "BUFR save handler not installed"
66
+ raise OSError(msg)
67
+ _handler.save(im, fp, filename)
68
+
69
+
70
+ # --------------------------------------------------------------------
71
+ # Registry
72
+
73
+ Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
74
+ Image.register_save(BufrStubImageFile.format, _save)
75
+
76
+ Image.register_extension(BufrStubImageFile.format, ".bufr")
src/venv/Lib/site-packages/PIL/ContainerIO.py ADDED
@@ -0,0 +1,173 @@
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # a class to read from a container file
6
+ #
7
+ # History:
8
+ # 1995-06-18 fl Created
9
+ # 1995-09-07 fl Added readline(), readlines()
10
+ #
11
+ # Copyright (c) 1997-2001 by Secret Labs AB
12
+ # Copyright (c) 1995 by Fredrik Lundh
13
+ #
14
+ # See the README file for information on usage and redistribution.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ import io
19
+ from collections.abc import Iterable
20
+ from typing import IO, AnyStr, NoReturn
21
+
22
+
23
+ class ContainerIO(IO[AnyStr]):
24
+ """
25
+ A file object that provides read access to a part of an existing
26
+ file (for example a TAR file).
27
+ """
28
+
29
+ def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
30
+ """
31
+ Create file object.
32
+
33
+ :param file: Existing file.
34
+ :param offset: Start of region, in bytes.
35
+ :param length: Size of region, in bytes.
36
+ """
37
+ self.fh: IO[AnyStr] = file
38
+ self.pos = 0
39
+ self.offset = offset
40
+ self.length = length
41
+ self.fh.seek(offset)
42
+
43
+ ##
44
+ # Always false.
45
+
46
+ def isatty(self) -> bool:
47
+ return False
48
+
49
+ def seekable(self) -> bool:
50
+ return True
51
+
52
+ def seek(self, offset: int, mode: int = io.SEEK_SET) -> int:
53
+ """
54
+ Move file pointer.
55
+
56
+ :param offset: Offset in bytes.
57
+ :param mode: Starting position. Use 0 for beginning of region, 1
58
+ for current offset, and 2 for end of region. You cannot move
59
+ the pointer outside the defined region.
60
+ :returns: Offset from start of region, in bytes.
61
+ """
62
+ if mode == 1:
63
+ self.pos = self.pos + offset
64
+ elif mode == 2:
65
+ self.pos = self.length + offset
66
+ else:
67
+ self.pos = offset
68
+ # clamp
69
+ self.pos = max(0, min(self.pos, self.length))
70
+ self.fh.seek(self.offset + self.pos)
71
+ return self.pos
72
+
73
+ def tell(self) -> int:
74
+ """
75
+ Get current file pointer.
76
+
77
+ :returns: Offset from start of region, in bytes.
78
+ """
79
+ return self.pos
80
+
81
+ def readable(self) -> bool:
82
+ return True
83
+
84
+ def read(self, n: int = -1) -> AnyStr:
85
+ """
86
+ Read data.
87
+
88
+ :param n: Number of bytes to read. If omitted, zero or negative,
89
+ read until end of region.
90
+ :returns: An 8-bit string.
91
+ """
92
+ if n > 0:
93
+ n = min(n, self.length - self.pos)
94
+ else:
95
+ n = self.length - self.pos
96
+ if n <= 0: # EOF
97
+ return b"" if "b" in self.fh.mode else "" # type: ignore[return-value]
98
+ self.pos = self.pos + n
99
+ return self.fh.read(n)
100
+
101
+ def readline(self, n: int = -1) -> AnyStr:
102
+ """
103
+ Read a line of text.
104
+
105
+ :param n: Number of bytes to read. If omitted, zero or negative,
106
+ read until end of line.
107
+ :returns: An 8-bit string.
108
+ """
109
+ s: AnyStr = b"" if "b" in self.fh.mode else "" # type: ignore[assignment]
110
+ newline_character = b"\n" if "b" in self.fh.mode else "\n"
111
+ while True:
112
+ c = self.read(1)
113
+ if not c:
114
+ break
115
+ s = s + c
116
+ if c == newline_character or len(s) == n:
117
+ break
118
+ return s
119
+
120
+ def readlines(self, n: int | None = -1) -> list[AnyStr]:
121
+ """
122
+ Read multiple lines of text.
123
+
124
+ :param n: Number of lines to read. If omitted, zero, negative or None,
125
+ read until end of region.
126
+ :returns: A list of 8-bit strings.
127
+ """
128
+ lines = []
129
+ while True:
130
+ s = self.readline()
131
+ if not s:
132
+ break
133
+ lines.append(s)
134
+ if len(lines) == n:
135
+ break
136
+ return lines
137
+
138
+ def writable(self) -> bool:
139
+ return False
140
+
141
+ def write(self, b: AnyStr) -> NoReturn:
142
+ raise NotImplementedError()
143
+
144
+ def writelines(self, lines: Iterable[AnyStr]) -> NoReturn:
145
+ raise NotImplementedError()
146
+
147
+ def truncate(self, size: int | None = None) -> int:
148
+ raise NotImplementedError()
149
+
150
+ def __enter__(self) -> ContainerIO[AnyStr]:
151
+ return self
152
+
153
+ def __exit__(self, *args: object) -> None:
154
+ self.close()
155
+
156
+ def __iter__(self) -> ContainerIO[AnyStr]:
157
+ return self
158
+
159
+ def __next__(self) -> AnyStr:
160
+ line = self.readline()
161
+ if not line:
162
+ msg = "end of region"
163
+ raise StopIteration(msg)
164
+ return line
165
+
166
+ def fileno(self) -> int:
167
+ return self.fh.fileno()
168
+
169
+ def flush(self) -> None:
170
+ self.fh.flush()
171
+
172
+ def close(self) -> None:
173
+ self.fh.close()
src/venv/Lib/site-packages/PIL/CurImagePlugin.py ADDED
@@ -0,0 +1,75 @@
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # Windows Cursor support for PIL
6
+ #
7
+ # notes:
8
+ # uses BmpImagePlugin.py to read the bitmap data.
9
+ #
10
+ # history:
11
+ # 96-05-27 fl Created
12
+ #
13
+ # Copyright (c) Secret Labs AB 1997.
14
+ # Copyright (c) Fredrik Lundh 1996.
15
+ #
16
+ # See the README file for information on usage and redistribution.
17
+ #
18
+ from __future__ import annotations
19
+
20
+ from . import BmpImagePlugin, Image, ImageFile
21
+ from ._binary import i16le as i16
22
+ from ._binary import i32le as i32
23
+
24
+ #
25
+ # --------------------------------------------------------------------
26
+
27
+
28
+ def _accept(prefix: bytes) -> bool:
29
+ return prefix[:4] == b"\0\0\2\0"
30
+
31
+
32
+ ##
33
+ # Image plugin for Windows Cursor files.
34
+
35
+
36
+ class CurImageFile(BmpImagePlugin.BmpImageFile):
37
+ format = "CUR"
38
+ format_description = "Windows Cursor"
39
+
40
+ def _open(self) -> None:
41
+ offset = self.fp.tell()
42
+
43
+ # check magic
44
+ s = self.fp.read(6)
45
+ if not _accept(s):
46
+ msg = "not a CUR file"
47
+ raise SyntaxError(msg)
48
+
49
+ # pick the largest cursor in the file
50
+ m = b""
51
+ for i in range(i16(s, 4)):
52
+ s = self.fp.read(16)
53
+ if not m:
54
+ m = s
55
+ elif s[0] > m[0] and s[1] > m[1]:
56
+ m = s
57
+ if not m:
58
+ msg = "No cursors were found"
59
+ raise TypeError(msg)
60
+
61
+ # load as bitmap
62
+ self._bitmap(i32(m, 12) + offset)
63
+
64
+ # patch up the bitmap height
65
+ self._size = self.size[0], self.size[1] // 2
66
+ d, e, o, a = self.tile[0]
67
+ self.tile[0] = ImageFile._Tile(d, (0, 0) + self.size, o, a)
68
+
69
+
70
+ #
71
+ # --------------------------------------------------------------------
72
+
73
+ Image.register_open(CurImageFile.format, CurImageFile, _accept)
74
+
75
+ Image.register_extension(CurImageFile.format, ".cur")
src/venv/Lib/site-packages/PIL/DcxImagePlugin.py ADDED
@@ -0,0 +1,80 @@
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # DCX file handling
6
+ #
7
+ # DCX is a container file format defined by Intel, commonly used
8
+ # for fax applications. Each DCX file consists of a directory
9
+ # (a list of file offsets) followed by a set of (usually 1-bit)
10
+ # PCX files.
11
+ #
12
+ # History:
13
+ # 1995-09-09 fl Created
14
+ # 1996-03-20 fl Properly derived from PcxImageFile.
15
+ # 1998-07-15 fl Renamed offset attribute to avoid name clash
16
+ # 2002-07-30 fl Fixed file handling
17
+ #
18
+ # Copyright (c) 1997-98 by Secret Labs AB.
19
+ # Copyright (c) 1995-96 by Fredrik Lundh.
20
+ #
21
+ # See the README file for information on usage and redistribution.
22
+ #
23
+ from __future__ import annotations
24
+
25
+ from . import Image
26
+ from ._binary import i32le as i32
27
+ from .PcxImagePlugin import PcxImageFile
28
+
29
+ MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?
30
+
31
+
32
+ def _accept(prefix: bytes) -> bool:
33
+ return len(prefix) >= 4 and i32(prefix) == MAGIC
34
+
35
+
36
+ ##
37
+ # Image plugin for the Intel DCX format.
38
+
39
+
40
+ class DcxImageFile(PcxImageFile):
41
+ format = "DCX"
42
+ format_description = "Intel DCX"
43
+ _close_exclusive_fp_after_loading = False
44
+
45
+ def _open(self) -> None:
46
+ # Header
47
+ s = self.fp.read(4)
48
+ if not _accept(s):
49
+ msg = "not a DCX file"
50
+ raise SyntaxError(msg)
51
+
52
+ # Component directory
53
+ self._offset = []
54
+ for i in range(1024):
55
+ offset = i32(self.fp.read(4))
56
+ if not offset:
57
+ break
58
+ self._offset.append(offset)
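+ # the directory holds at most 1024 entries; a zero offset terminates it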
59
+
60
+ self._fp = self.fp
61
+ self.frame = -1
62
+ self.n_frames = len(self._offset)
63
+ self.is_animated = self.n_frames > 1
64
+ self.seek(0)
65
+
66
+ def seek(self, frame: int) -> None:
67
+ if not self._seek_check(frame):
68
+ return
69
+ self.frame = frame
70
+ self.fp = self._fp
71
+ self.fp.seek(self._offset[frame])
72
+ PcxImageFile._open(self)
73
+
74
+ def tell(self) -> int:
75
+ return self.frame
76
+
77
+
78
+ Image.register_open(DcxImageFile.format, DcxImageFile, _accept)
79
+
80
+ Image.register_extension(DcxImageFile.format, ".dcx")
src/venv/Lib/site-packages/PIL/DdsImagePlugin.py ADDED
@@ -0,0 +1,575 @@
1
+ """
2
+ A Pillow loader for .dds files (S3TC-compressed aka DXTC)
3
+ Jerome Leclanche <[email protected]>
4
+
5
+ Documentation:
6
+ https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt
7
+
8
+ The contents of this file are hereby released in the public domain (CC0)
9
+ Full text of the CC0 license:
10
+ https://creativecommons.org/publicdomain/zero/1.0/
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import io
16
+ import struct
17
+ import sys
18
+ from enum import IntEnum, IntFlag
19
+ from typing import IO
20
+
21
+ from . import Image, ImageFile, ImagePalette
22
+ from ._binary import i32le as i32
23
+ from ._binary import o8
24
+ from ._binary import o32le as o32
25
+
26
+ # Magic ("DDS ")
27
+ DDS_MAGIC = 0x20534444
28
+
29
+
30
+ # DDS flags
31
+ class DDSD(IntFlag):
32
+ CAPS = 0x1
33
+ HEIGHT = 0x2
34
+ WIDTH = 0x4
35
+ PITCH = 0x8
36
+ PIXELFORMAT = 0x1000
37
+ MIPMAPCOUNT = 0x20000
38
+ LINEARSIZE = 0x80000
39
+ DEPTH = 0x800000
40
+
41
+
42
+ # DDS caps
43
+ class DDSCAPS(IntFlag):
44
+ COMPLEX = 0x8
45
+ TEXTURE = 0x1000
46
+ MIPMAP = 0x400000
47
+
48
+
49
+ class DDSCAPS2(IntFlag):
50
+ CUBEMAP = 0x200
51
+ CUBEMAP_POSITIVEX = 0x400
52
+ CUBEMAP_NEGATIVEX = 0x800
53
+ CUBEMAP_POSITIVEY = 0x1000
54
+ CUBEMAP_NEGATIVEY = 0x2000
55
+ CUBEMAP_POSITIVEZ = 0x4000
56
+ CUBEMAP_NEGATIVEZ = 0x8000
57
+ VOLUME = 0x200000
58
+
59
+
60
+ # Pixel Format
61
+ class DDPF(IntFlag):
62
+ ALPHAPIXELS = 0x1
63
+ ALPHA = 0x2
64
+ FOURCC = 0x4
65
+ PALETTEINDEXED8 = 0x20
66
+ RGB = 0x40
67
+ LUMINANCE = 0x20000
68
+
69
+
70
+ # dxgiformat.h
71
+ class DXGI_FORMAT(IntEnum):
72
+ UNKNOWN = 0
73
+ R32G32B32A32_TYPELESS = 1
74
+ R32G32B32A32_FLOAT = 2
75
+ R32G32B32A32_UINT = 3
76
+ R32G32B32A32_SINT = 4
77
+ R32G32B32_TYPELESS = 5
78
+ R32G32B32_FLOAT = 6
79
+ R32G32B32_UINT = 7
80
+ R32G32B32_SINT = 8
81
+ R16G16B16A16_TYPELESS = 9
82
+ R16G16B16A16_FLOAT = 10
83
+ R16G16B16A16_UNORM = 11
84
+ R16G16B16A16_UINT = 12
85
+ R16G16B16A16_SNORM = 13
86
+ R16G16B16A16_SINT = 14
87
+ R32G32_TYPELESS = 15
88
+ R32G32_FLOAT = 16
89
+ R32G32_UINT = 17
90
+ R32G32_SINT = 18
91
+ R32G8X24_TYPELESS = 19
92
+ D32_FLOAT_S8X24_UINT = 20
93
+ R32_FLOAT_X8X24_TYPELESS = 21
94
+ X32_TYPELESS_G8X24_UINT = 22
95
+ R10G10B10A2_TYPELESS = 23
96
+ R10G10B10A2_UNORM = 24
97
+ R10G10B10A2_UINT = 25
98
+ R11G11B10_FLOAT = 26
99
+ R8G8B8A8_TYPELESS = 27
100
+ R8G8B8A8_UNORM = 28
101
+ R8G8B8A8_UNORM_SRGB = 29
102
+ R8G8B8A8_UINT = 30
103
+ R8G8B8A8_SNORM = 31
104
+ R8G8B8A8_SINT = 32
105
+ R16G16_TYPELESS = 33
106
+ R16G16_FLOAT = 34
107
+ R16G16_UNORM = 35
108
+ R16G16_UINT = 36
109
+ R16G16_SNORM = 37
110
+ R16G16_SINT = 38
111
+ R32_TYPELESS = 39
112
+ D32_FLOAT = 40
113
+ R32_FLOAT = 41
114
+ R32_UINT = 42
115
+ R32_SINT = 43
116
+ R24G8_TYPELESS = 44
117
+ D24_UNORM_S8_UINT = 45
118
+ R24_UNORM_X8_TYPELESS = 46
119
+ X24_TYPELESS_G8_UINT = 47
120
+ R8G8_TYPELESS = 48
121
+ R8G8_UNORM = 49
122
+ R8G8_UINT = 50
123
+ R8G8_SNORM = 51
124
+ R8G8_SINT = 52
125
+ R16_TYPELESS = 53
126
+ R16_FLOAT = 54
127
+ D16_UNORM = 55
128
+ R16_UNORM = 56
129
+ R16_UINT = 57
130
+ R16_SNORM = 58
131
+ R16_SINT = 59
132
+ R8_TYPELESS = 60
133
+ R8_UNORM = 61
134
+ R8_UINT = 62
135
+ R8_SNORM = 63
136
+ R8_SINT = 64
137
+ A8_UNORM = 65
138
+ R1_UNORM = 66
139
+ R9G9B9E5_SHAREDEXP = 67
140
+ R8G8_B8G8_UNORM = 68
141
+ G8R8_G8B8_UNORM = 69
142
+ BC1_TYPELESS = 70
143
+ BC1_UNORM = 71
144
+ BC1_UNORM_SRGB = 72
145
+ BC2_TYPELESS = 73
146
+ BC2_UNORM = 74
147
+ BC2_UNORM_SRGB = 75
148
+ BC3_TYPELESS = 76
149
+ BC3_UNORM = 77
150
+ BC3_UNORM_SRGB = 78
151
+ BC4_TYPELESS = 79
152
+ BC4_UNORM = 80
153
+ BC4_SNORM = 81
154
+ BC5_TYPELESS = 82
155
+ BC5_UNORM = 83
156
+ BC5_SNORM = 84
157
+ B5G6R5_UNORM = 85
158
+ B5G5R5A1_UNORM = 86
159
+ B8G8R8A8_UNORM = 87
160
+ B8G8R8X8_UNORM = 88
161
+ R10G10B10_XR_BIAS_A2_UNORM = 89
162
+ B8G8R8A8_TYPELESS = 90
163
+ B8G8R8A8_UNORM_SRGB = 91
164
+ B8G8R8X8_TYPELESS = 92
165
+ B8G8R8X8_UNORM_SRGB = 93
166
+ BC6H_TYPELESS = 94
167
+ BC6H_UF16 = 95
168
+ BC6H_SF16 = 96
169
+ BC7_TYPELESS = 97
170
+ BC7_UNORM = 98
171
+ BC7_UNORM_SRGB = 99
172
+ AYUV = 100
173
+ Y410 = 101
174
+ Y416 = 102
175
+ NV12 = 103
176
+ P010 = 104
177
+ P016 = 105
178
+ OPAQUE_420 = 106
179
+ YUY2 = 107
180
+ Y210 = 108
181
+ Y216 = 109
182
+ NV11 = 110
183
+ AI44 = 111
184
+ IA44 = 112
185
+ P8 = 113
186
+ A8P8 = 114
187
+ B4G4R4A4_UNORM = 115
188
+ P208 = 130
189
+ V208 = 131
190
+ V408 = 132
191
+ SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189
192
+ SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190
193
+
194
+
195
+ class D3DFMT(IntEnum):
196
+ UNKNOWN = 0
197
+ R8G8B8 = 20
198
+ A8R8G8B8 = 21
199
+ X8R8G8B8 = 22
200
+ R5G6B5 = 23
201
+ X1R5G5B5 = 24
202
+ A1R5G5B5 = 25
203
+ A4R4G4B4 = 26
204
+ R3G3B2 = 27
205
+ A8 = 28
206
+ A8R3G3B2 = 29
207
+ X4R4G4B4 = 30
208
+ A2B10G10R10 = 31
209
+ A8B8G8R8 = 32
210
+ X8B8G8R8 = 33
211
+ G16R16 = 34
212
+ A2R10G10B10 = 35
213
+ A16B16G16R16 = 36
214
+ A8P8 = 40
215
+ P8 = 41
216
+ L8 = 50
217
+ A8L8 = 51
218
+ A4L4 = 52
219
+ V8U8 = 60
220
+ L6V5U5 = 61
221
+ X8L8V8U8 = 62
222
+ Q8W8V8U8 = 63
223
+ V16U16 = 64
224
+ A2W10V10U10 = 67
225
+ D16_LOCKABLE = 70
226
+ D32 = 71
227
+ D15S1 = 73
228
+ D24S8 = 75
229
+ D24X8 = 77
230
+ D24X4S4 = 79
231
+ D16 = 80
232
+ D32F_LOCKABLE = 82
233
+ D24FS8 = 83
234
+ D32_LOCKABLE = 84
235
+ S8_LOCKABLE = 85
236
+ L16 = 81
237
+ VERTEXDATA = 100
238
+ INDEX16 = 101
239
+ INDEX32 = 102
240
+ Q16W16V16U16 = 110
241
+ R16F = 111
242
+ G16R16F = 112
243
+ A16B16G16R16F = 113
244
+ R32F = 114
245
+ G32R32F = 115
246
+ A32B32G32R32F = 116
247
+ CxV8U8 = 117
248
+ A1 = 118
249
+ A2B10G10R10_XR_BIAS = 119
250
+ BINARYBUFFER = 199
251
+
252
+ UYVY = i32(b"UYVY")
253
+ R8G8_B8G8 = i32(b"RGBG")
254
+ YUY2 = i32(b"YUY2")
255
+ G8R8_G8B8 = i32(b"GRGB")
256
+ DXT1 = i32(b"DXT1")
257
+ DXT2 = i32(b"DXT2")
258
+ DXT3 = i32(b"DXT3")
259
+ DXT4 = i32(b"DXT4")
260
+ DXT5 = i32(b"DXT5")
261
+ DX10 = i32(b"DX10")
262
+ BC4S = i32(b"BC4S")
263
+ BC4U = i32(b"BC4U")
264
+ BC5S = i32(b"BC5S")
265
+ BC5U = i32(b"BC5U")
266
+ ATI1 = i32(b"ATI1")
267
+ ATI2 = i32(b"ATI2")
268
+ MULTI2_ARGB8 = i32(b"MET1")
269
+
270
+
271
+ # Backward compatibility layer
272
+ module = sys.modules[__name__]
273
+ for item in DDSD:
274
+ assert item.name is not None
275
+ setattr(module, f"DDSD_{item.name}", item.value)
276
+ for item1 in DDSCAPS:
277
+ assert item1.name is not None
278
+ setattr(module, f"DDSCAPS_{item1.name}", item1.value)
279
+ for item2 in DDSCAPS2:
280
+ assert item2.name is not None
281
+ setattr(module, f"DDSCAPS2_{item2.name}", item2.value)
282
+ for item3 in DDPF:
283
+ assert item3.name is not None
284
+ setattr(module, f"DDPF_{item3.name}", item3.value)
285
+
286
+ DDS_FOURCC = DDPF.FOURCC
287
+ DDS_RGB = DDPF.RGB
288
+ DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS
289
+ DDS_LUMINANCE = DDPF.LUMINANCE
290
+ DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS
291
+ DDS_ALPHA = DDPF.ALPHA
292
+ DDS_PAL8 = DDPF.PALETTEINDEXED8
293
+
294
+ DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
295
+ DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT
296
+ DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH
297
+ DDS_HEADER_FLAGS_PITCH = DDSD.PITCH
298
+ DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE
299
+
300
+ DDS_HEIGHT = DDSD.HEIGHT
301
+ DDS_WIDTH = DDSD.WIDTH
302
+
303
+ DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE
304
+ DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP
305
+ DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX
306
+
307
+ DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX
308
+ DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX
309
+ DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY
310
+ DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY
311
+ DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ
312
+ DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ
313
+
314
+ DXT1_FOURCC = D3DFMT.DXT1
315
+ DXT3_FOURCC = D3DFMT.DXT3
316
+ DXT5_FOURCC = D3DFMT.DXT5
317
+
318
+ DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS
319
+ DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM
320
+ DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB
321
+ DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS
322
+ DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM
323
+ DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM
324
+ DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16
325
+ DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16
326
+ DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS
327
+ DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM
328
+ DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB
329
+
330
+
331
+ class DdsImageFile(ImageFile.ImageFile):
332
+ format = "DDS"
333
+ format_description = "DirectDraw Surface"
334
+
335
+ def _open(self) -> None:
336
+ if not _accept(self.fp.read(4)):
337
+ msg = "not a DDS file"
338
+ raise SyntaxError(msg)
339
+ (header_size,) = struct.unpack("<I", self.fp.read(4))
340
+ if header_size != 124:
341
+ msg = f"Unsupported header size {repr(header_size)}"
342
+ raise OSError(msg)
343
+ header_bytes = self.fp.read(header_size - 4)
344
+ if len(header_bytes) != 120:
345
+ msg = f"Incomplete header: {len(header_bytes)} bytes"
346
+ raise OSError(msg)
347
+ header = io.BytesIO(header_bytes)
348
+
349
+ flags, height, width = struct.unpack("<3I", header.read(12))
350
+ self._size = (width, height)
351
+ extents = (0, 0) + self.size
352
+
353
+ pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
354
+ struct.unpack("<11I", header.read(44)) # reserved
355
+
356
+ # pixel format
357
+ pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
358
+ n = 0
359
+ rawmode = None
360
+ if pfflags & DDPF.RGB:
361
+ # Texture contains uncompressed RGB data
362
+ if pfflags & DDPF.ALPHAPIXELS:
363
+ self._mode = "RGBA"
364
+ mask_count = 4
365
+ else:
366
+ self._mode = "RGB"
367
+ mask_count = 3
368
+
369
+ masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
370
+ self.tile = [ImageFile._Tile("dds_rgb", extents, 0, (bitcount, masks))]
371
+ return
372
+ elif pfflags & DDPF.LUMINANCE:
373
+ if bitcount == 8:
374
+ self._mode = "L"
375
+ elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
376
+ self._mode = "LA"
377
+ else:
378
+ msg = f"Unsupported bitcount {bitcount} for {pfflags}"
379
+ raise OSError(msg)
380
+ elif pfflags & DDPF.PALETTEINDEXED8:
381
+ self._mode = "P"
382
+ self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
383
+ self.palette.mode = "RGBA"
384
+ elif pfflags & DDPF.FOURCC:
385
+ offset = header_size + 4
386
+ if fourcc == D3DFMT.DXT1:
387
+ self._mode = "RGBA"
388
+ self.pixel_format = "DXT1"
389
+ n = 1
390
+ elif fourcc == D3DFMT.DXT3:
391
+ self._mode = "RGBA"
392
+ self.pixel_format = "DXT3"
393
+ n = 2
394
+ elif fourcc == D3DFMT.DXT5:
395
+ self._mode = "RGBA"
396
+ self.pixel_format = "DXT5"
397
+ n = 3
398
+ elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
399
+ self._mode = "L"
400
+ self.pixel_format = "BC4"
401
+ n = 4
402
+ elif fourcc == D3DFMT.BC5S:
403
+ self._mode = "RGB"
404
+ self.pixel_format = "BC5S"
405
+ n = 5
406
+ elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
407
+ self._mode = "RGB"
408
+ self.pixel_format = "BC5"
409
+ n = 5
410
+ elif fourcc == D3DFMT.DX10:
411
+ offset += 20
412
+ # ignoring flags which pertain to volume textures and cubemaps
413
+ (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
414
+ self.fp.read(16)
415
+ if dxgi_format in (
416
+ DXGI_FORMAT.BC1_UNORM,
417
+ DXGI_FORMAT.BC1_TYPELESS,
418
+ ):
419
+ self._mode = "RGBA"
420
+ self.pixel_format = "BC1"
421
+ n = 1
422
+ elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
423
+ self._mode = "L"
424
+ self.pixel_format = "BC4"
425
+ n = 4
426
+ elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
427
+ self._mode = "RGB"
428
+ self.pixel_format = "BC5"
429
+ n = 5
430
+ elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
431
+ self._mode = "RGB"
432
+ self.pixel_format = "BC5S"
433
+ n = 5
434
+ elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
435
+ self._mode = "RGB"
436
+ self.pixel_format = "BC6H"
437
+ n = 6
438
+ elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
439
+ self._mode = "RGB"
440
+ self.pixel_format = "BC6HS"
441
+ n = 6
442
+ elif dxgi_format in (
443
+ DXGI_FORMAT.BC7_TYPELESS,
444
+ DXGI_FORMAT.BC7_UNORM,
445
+ DXGI_FORMAT.BC7_UNORM_SRGB,
446
+ ):
447
+ self._mode = "RGBA"
448
+ self.pixel_format = "BC7"
449
+ n = 7
450
+ if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
451
+ self.info["gamma"] = 1 / 2.2
452
+ elif dxgi_format in (
453
+ DXGI_FORMAT.R8G8B8A8_TYPELESS,
454
+ DXGI_FORMAT.R8G8B8A8_UNORM,
455
+ DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
456
+ ):
457
+ self._mode = "RGBA"
458
+ if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
459
+ self.info["gamma"] = 1 / 2.2
460
+ else:
461
+ msg = f"Unimplemented DXGI format {dxgi_format}"
462
+ raise NotImplementedError(msg)
463
+ else:
464
+ msg = f"Unimplemented pixel format {repr(fourcc)}"
465
+ raise NotImplementedError(msg)
466
+ else:
467
+ msg = f"Unknown pixel format flags {pfflags}"
468
+ raise NotImplementedError(msg)
469
+
470
+ if n:
471
+ self.tile = [
472
+ ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
473
+ ]
474
+ else:
475
+ self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]
476
+
477
+ def load_seek(self, pos: int) -> None:
478
+ pass
479
+
480
+
481
+ class DdsRgbDecoder(ImageFile.PyDecoder):
482
+ _pulls_fd = True
483
+
484
+ def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
485
+ assert self.fd is not None
486
+ bitcount, masks = self.args
487
+
488
+ # Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
489
+ # Calculate how many zeros each mask is padded with
490
+ mask_offsets = []
491
+ # And the maximum value of each channel without the padding
492
+ mask_totals = []
493
+ for mask in masks:
494
+ offset = 0
495
+ if mask != 0:
496
+ while mask >> (offset + 1) << (offset + 1) == mask:
497
+ offset += 1
498
+ mask_offsets.append(offset)
499
+ mask_totals.append(mask >> offset)
500
+
501
+ data = bytearray()
502
+ bytecount = bitcount // 8
503
+ dest_length = self.state.xsize * self.state.ysize * len(masks)
504
+ while len(data) < dest_length:
505
+ value = int.from_bytes(self.fd.read(bytecount), "little")
506
+ for i, mask in enumerate(masks):
507
+ masked_value = value & mask
508
+ # Remove the zero padding, and scale it to 8 bits
509
+ data += o8(
510
+ int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
511
+ )
512
+ self.set_as_raw(data)
513
+ return -1, 0
514
+
515
+
516
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
517
+ if im.mode not in ("RGB", "RGBA", "L", "LA"):
518
+ msg = f"cannot write mode {im.mode} as DDS"
519
+ raise OSError(msg)
520
+
521
+ alpha = im.mode[-1] == "A"
522
+ if im.mode[0] == "L":
523
+ pixel_flags = DDPF.LUMINANCE
524
+ rawmode = im.mode
525
+ if alpha:
526
+ rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
527
+ else:
528
+ rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
529
+ else:
530
+ pixel_flags = DDPF.RGB
531
+ rawmode = im.mode[::-1]
532
+ rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]
533
+
534
+ if alpha:
535
+ r, g, b, a = im.split()
536
+ im = Image.merge("RGBA", (a, r, g, b))
537
+ if alpha:
538
+ pixel_flags |= DDPF.ALPHAPIXELS
539
+ rgba_mask.append(0xFF000000 if alpha else 0)
540
+
541
+ flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PITCH | DDSD.PIXELFORMAT
542
+ bitcount = len(im.getbands()) * 8
543
+ pitch = (im.width * bitcount + 7) // 8
544
+
545
+ fp.write(
546
+ o32(DDS_MAGIC)
547
+ + struct.pack(
548
+ "<7I",
549
+ 124, # header size
550
+ flags, # flags
551
+ im.height,
552
+ im.width,
553
+ pitch,
554
+ 0, # depth
555
+ 0, # mipmaps
556
+ )
557
+ + struct.pack("11I", *((0,) * 11)) # reserved
558
+ # pfsize, pfflags, fourcc, bitcount
559
+ + struct.pack("<4I", 32, pixel_flags, 0, bitcount)
560
+ + struct.pack("<4I", *rgba_mask) # dwRGBABitMask
561
+ + struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
562
+ )
563
+ ImageFile._save(
564
+ im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]
565
+ )
566
+
567
+
568
+ def _accept(prefix: bytes) -> bool:
569
+ return prefix[:4] == b"DDS "
570
+
571
+
572
+ Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
573
+ Image.register_decoder("dds_rgb", DdsRgbDecoder)
574
+ Image.register_save(DdsImageFile.format, _save)
575
+ Image.register_extension(DdsImageFile.format, ".dds")
src/venv/Lib/site-packages/PIL/EpsImagePlugin.py ADDED
@@ -0,0 +1,474 @@
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # EPS file handling
6
+ #
7
+ # History:
8
+ # 1995-09-01 fl Created (0.1)
9
+ # 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
10
+ # 1996-08-22 fl Don't choke on floating point BoundingBox values
11
+ # 1996-08-23 fl Handle files from Macintosh (0.3)
12
+ # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
13
+ # 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
14
+ # 2014-05-07 e Handling of EPS with binary preview and fixed resolution
15
+ # resizing
16
+ #
17
+ # Copyright (c) 1997-2003 by Secret Labs AB.
18
+ # Copyright (c) 1995-2003 by Fredrik Lundh
19
+ #
20
+ # See the README file for information on usage and redistribution.
21
+ #
22
+ from __future__ import annotations
23
+
24
+ import io
25
+ import os
26
+ import re
27
+ import subprocess
28
+ import sys
29
+ import tempfile
30
+ from typing import IO
31
+
32
+ from . import Image, ImageFile
33
+ from ._binary import i32le as i32
34
+
35
+ # --------------------------------------------------------------------
36
+
37
+
38
+ split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
39
+ field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
40
+
41
+ gs_binary: str | bool | None = None
42
+ gs_windows_binary = None
43
+
44
+
45
+ def has_ghostscript() -> bool:
46
+ global gs_binary, gs_windows_binary
47
+ if gs_binary is None:
48
+ if sys.platform.startswith("win"):
49
+ if gs_windows_binary is None:
50
+ import shutil
51
+
52
+ for binary in ("gswin32c", "gswin64c", "gs"):
53
+ if shutil.which(binary) is not None:
54
+ gs_windows_binary = binary
55
+ break
56
+ else:
57
+ gs_windows_binary = False
58
+ gs_binary = gs_windows_binary
59
+ else:
60
+ try:
61
+ subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
62
+ gs_binary = "gs"
63
+ except OSError:
64
+ gs_binary = False
65
+ return gs_binary is not False
66
+
67
+
68
+ def Ghostscript(
69
+ tile: list[ImageFile._Tile],
70
+ size: tuple[int, int],
71
+ fp: IO[bytes],
72
+ scale: int = 1,
73
+ transparency: bool = False,
74
+ ) -> Image.core.ImagingCore:
75
+ """Render an image using Ghostscript"""
76
+ global gs_binary
77
+ if not has_ghostscript():
78
+ msg = "Unable to locate Ghostscript on paths"
79
+ raise OSError(msg)
80
+ assert isinstance(gs_binary, str)
81
+
82
+ # Unpack decoder tile
83
+ args = tile[0].args
84
+ assert isinstance(args, tuple)
85
+ length, bbox = args
86
+
87
+ # Hack to support hi-res rendering
88
+ scale = int(scale) or 1
89
+ width = size[0] * scale
90
+ height = size[1] * scale
91
+ # resolution is dependent on bbox and size
92
+ res_x = 72.0 * width / (bbox[2] - bbox[0])
93
+ res_y = 72.0 * height / (bbox[3] - bbox[1])
94
+
95
+ out_fd, outfile = tempfile.mkstemp()
96
+ os.close(out_fd)
97
+
98
+ infile_temp = None
99
+ if hasattr(fp, "name") and os.path.exists(fp.name):
100
+ infile = fp.name
101
+ else:
102
+ in_fd, infile_temp = tempfile.mkstemp()
103
+ os.close(in_fd)
104
+ infile = infile_temp
105
+
106
+ # Ignore length and offset!
107
+ # Ghostscript can read it
108
+ # Copy whole file to read in Ghostscript
109
+ with open(infile_temp, "wb") as f:
110
+ # fetch length of fp
111
+ fp.seek(0, io.SEEK_END)
112
+ fsize = fp.tell()
113
+ # ensure start position
114
+ # go back
115
+ fp.seek(0)
116
+ lengthfile = fsize
117
+ while lengthfile > 0:
118
+ s = fp.read(min(lengthfile, 100 * 1024))
119
+ if not s:
120
+ break
121
+ lengthfile -= len(s)
122
+ f.write(s)
123
+
124
+ if transparency:
125
+ # "RGBA"
126
+ device = "pngalpha"
127
+ else:
128
+ # "pnmraw" automatically chooses between
129
+ # PBM ("1"), PGM ("L"), and PPM ("RGB").
130
+ device = "pnmraw"
131
+
132
+ # Build Ghostscript command
133
+ command = [
134
+ gs_binary,
135
+ "-q", # quiet mode
136
+ f"-g{width:d}x{height:d}", # set output geometry (pixels)
137
+ f"-r{res_x:f}x{res_y:f}", # set input DPI (dots per inch)
138
+ "-dBATCH", # exit after processing
139
+ "-dNOPAUSE", # don't pause between pages
140
+ "-dSAFER", # safe mode
141
+ f"-sDEVICE={device}",
142
+ f"-sOutputFile={outfile}", # output file
143
+ # adjust for image origin
144
+ "-c",
145
+ f"{-bbox[0]} {-bbox[1]} translate",
146
+ "-f",
147
+ infile, # input file
148
+ # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
149
+ "-c",
150
+ "showpage",
151
+ ]
152
+
153
+ # push data through Ghostscript
154
+ try:
155
+ startupinfo = None
156
+ if sys.platform.startswith("win"):
157
+ startupinfo = subprocess.STARTUPINFO()
158
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
159
+ subprocess.check_call(command, startupinfo=startupinfo)
160
+ with Image.open(outfile) as out_im:
161
+ out_im.load()
162
+ return out_im.im.copy()
163
+ finally:
164
+ try:
165
+ os.unlink(outfile)
166
+ if infile_temp:
167
+ os.unlink(infile_temp)
168
+ except OSError:
169
+ pass
170
+
171
+
172
+ def _accept(prefix: bytes) -> bool:
173
+ return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
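+ # 0xC6D3D0C5 is b"\xc5\xd0\xd3\xc6" read little-endian: the DOS EPS binary header magic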
174
+
175
+
176
+ ##
177
+ # Image plugin for Encapsulated PostScript. This plugin supports only
178
+ # a few variants of this format.
179
+
180
+
181
+ class EpsImageFile(ImageFile.ImageFile):
182
+ """EPS File Parser for the Python Imaging Library"""
183
+
184
+ format = "EPS"
185
+ format_description = "Encapsulated Postscript"
186
+
187
+ mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}
188
+
189
+ def _open(self) -> None:
190
+ (length, offset) = self._find_offset(self.fp)
191
+
192
+ # go to offset - start of "%!PS"
193
+ self.fp.seek(offset)
194
+
195
+ self._mode = "RGB"
196
+
197
+ # When reading header comments, the first comment is used.
198
+ # When reading trailer comments, the last comment is used.
199
+ bounding_box: list[int] | None = None
200
+ imagedata_size: tuple[int, int] | None = None
201
+
202
+ byte_arr = bytearray(255)
203
+ bytes_mv = memoryview(byte_arr)
204
+ bytes_read = 0
205
+ reading_header_comments = True
206
+ reading_trailer_comments = False
207
+ trailer_reached = False
208
+
209
+ def check_required_header_comments() -> None:
210
+ """
211
+ The EPS specification requires that some headers exist.
212
+ This should be checked when the header comments formally end,
213
+ when image data starts, or when the file ends, whichever comes first.
214
+ """
215
+ if "PS-Adobe" not in self.info:
216
+ msg = 'EPS header missing "%!PS-Adobe" comment'
217
+ raise SyntaxError(msg)
218
+ if "BoundingBox" not in self.info:
219
+ msg = 'EPS header missing "%%BoundingBox" comment'
220
+ raise SyntaxError(msg)
221
+
222
+ def read_comment(s: str) -> bool:
223
+ nonlocal bounding_box, reading_trailer_comments
224
+ try:
225
+ m = split.match(s)
226
+ except re.error as e:
227
+ msg = "not an EPS file"
228
+ raise SyntaxError(msg) from e
229
+
230
+ if not m:
231
+ return False
232
+
233
+ k, v = m.group(1, 2)
234
+ self.info[k] = v
235
+ if k == "BoundingBox":
236
+ if v == "(atend)":
237
+ reading_trailer_comments = True
238
+ elif not bounding_box or (trailer_reached and reading_trailer_comments):
239
+ try:
240
+ # Note: The DSC spec says that BoundingBox
241
+ # fields should be integers, but some drivers
242
+ # put floating point values there anyway.
243
+ bounding_box = [int(float(i)) for i in v.split()]
244
+ except Exception:
245
+ pass
246
+ return True
247
+
248
+ while True:
249
+ byte = self.fp.read(1)
250
+ if byte == b"":
251
+ # if we didn't read a byte we must be at the end of the file
252
+ if bytes_read == 0:
253
+ if reading_header_comments:
254
+ check_required_header_comments()
255
+ break
256
+ elif byte in b"\r\n":
257
+ # if we read a line ending character, ignore it and parse what
258
+ # we have already read. if we haven't read any other characters,
259
+ # continue reading
260
+ if bytes_read == 0:
261
+ continue
262
+ else:
263
+ # ASCII/hexadecimal lines in an EPS file must not exceed
264
+ # 255 characters, not including line ending characters
265
+ if bytes_read >= 255:
266
+ # only enforce this for lines starting with a "%",
267
+ # otherwise assume it's binary data
268
+ if byte_arr[0] == ord("%"):
269
+ msg = "not an EPS file"
270
+ raise SyntaxError(msg)
271
+ else:
272
+ if reading_header_comments:
273
+ check_required_header_comments()
274
+ reading_header_comments = False
275
+ # reset bytes_read so we can keep reading
276
+ # data until the end of the line
277
+ bytes_read = 0
278
+ byte_arr[bytes_read] = byte[0]
279
+ bytes_read += 1
280
+ continue
281
+
282
+ if reading_header_comments:
283
+ # Load EPS header
284
+
285
+ # if this line doesn't start with a "%",
286
+ # or does start with "%%EndComments",
287
+ # then we've reached the end of the header/comments
288
+ if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
289
+ check_required_header_comments()
290
+ reading_header_comments = False
291
+ continue
292
+
293
+ s = str(bytes_mv[:bytes_read], "latin-1")
294
+ if not read_comment(s):
295
+ m = field.match(s)
296
+ if m:
297
+ k = m.group(1)
298
+ if k[:8] == "PS-Adobe":
299
+ self.info["PS-Adobe"] = k[9:]
300
+ else:
301
+ self.info[k] = ""
302
+ elif s[0] == "%":
303
+ # handle non-DSC PostScript comments that some
304
+ # tools mistakenly put in the Comments section
305
+ pass
306
+ else:
307
+ msg = "bad EPS header"
308
+ raise OSError(msg)
309
+ elif bytes_mv[:11] == b"%ImageData:":
310
+ # Check for an "ImageData" descriptor
311
+ # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096
312
+
313
+ # If we've already read an "ImageData" descriptor,
314
+ # don't read another one.
315
+ if imagedata_size:
316
+ bytes_read = 0
317
+ continue
318
+
319
+ # Values:
320
+ # columns
321
+ # rows
322
+ # bit depth (1 or 8)
323
+ # mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
324
+ # number of padding channels
325
+ # block size (number of bytes per row per channel)
326
+ # binary/ascii (1: binary, 2: ascii)
327
+ # data start identifier (the image data follows after a single line
328
+ # consisting only of this quoted value)
329
+ image_data_values = byte_arr[11:bytes_read].split(None, 7)
330
+ columns, rows, bit_depth, mode_id = (
331
+ int(value) for value in image_data_values[:4]
332
+ )
333
+
334
+ if bit_depth == 1:
335
+ self._mode = "1"
336
+ elif bit_depth == 8:
337
+ try:
338
+ self._mode = self.mode_map[mode_id]
339
+ except KeyError:  # an unknown mode id is missing from mode_map
340
+ break
341
+ else:
342
+ break
343
+
344
+ # Parse the columns and rows after checking the bit depth and mode
345
+ # in case the bit depth and/or mode are invalid.
346
+ imagedata_size = columns, rows
347
+ elif bytes_mv[:5] == b"%%EOF":
348
+ break
349
+ elif trailer_reached and reading_trailer_comments:
350
+ # Load EPS trailer
351
+ s = str(bytes_mv[:bytes_read], "latin-1")
352
+ read_comment(s)
353
+ elif bytes_mv[:9] == b"%%Trailer":
354
+ trailer_reached = True
355
+ bytes_read = 0
356
+
357
+ # A "BoundingBox" is always required,
358
+ # even if an "ImageData" descriptor size exists.
359
+ if not bounding_box:
360
+ msg = "cannot determine EPS bounding box"
361
+ raise OSError(msg)
362
+
363
+ # An "ImageData" size takes precedence over the "BoundingBox".
364
+ self._size = imagedata_size or (
365
+ bounding_box[2] - bounding_box[0],
366
+ bounding_box[3] - bounding_box[1],
367
+ )
368
+
369
+ self.tile = [
370
+ ImageFile._Tile("eps", (0, 0) + self.size, offset, (length, bounding_box))
371
+ ]
372
+
373
+ def _find_offset(self, fp: IO[bytes]) -> tuple[int, int]:
374
+ s = fp.read(4)
375
+
376
+ if s == b"%!PS":
377
+ # for HEAD without binary preview
378
+ fp.seek(0, io.SEEK_END)
379
+ length = fp.tell()
380
+ offset = 0
381
+ elif i32(s) == 0xC6D3D0C5:
382
+ # FIX for: Some EPS file not handled correctly / issue #302
383
+ # EPS can contain binary data
384
+ # or start directly with latin coding
385
+ # more info see:
386
+ # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
387
+ s = fp.read(8)
388
+ offset = i32(s)
389
+ length = i32(s, 4)
390
+ else:
391
+ msg = "not an EPS file"
392
+ raise SyntaxError(msg)
393
+
394
+ return length, offset
395
+
396
+ def load(
397
+ self, scale: int = 1, transparency: bool = False
398
+ ) -> Image.core.PixelAccess | None:
399
+ # Load EPS via Ghostscript
400
+ if self.tile:
401
+ self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
402
+ self._mode = self.im.mode
403
+ self._size = self.im.size
404
+ self.tile = []
405
+ return Image.Image.load(self)
406
+
407
+ def load_seek(self, pos: int) -> None:
408
+ # we can't incrementally load, so force ImageFile.parser to
409
+ # use our custom load method by defining this method.
410
+ pass
411
+
412
+
413
+ # --------------------------------------------------------------------
414
+
415
+
416
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None:
417
+ """EPS Writer for the Python Imaging Library."""
418
+
419
+ # make sure image data is available
420
+ im.load()
421
+
422
+ # determine PostScript image mode
423
+ if im.mode == "L":
424
+ operator = (8, 1, b"image")
425
+ elif im.mode == "RGB":
426
+ operator = (8, 3, b"false 3 colorimage")
427
+ elif im.mode == "CMYK":
428
+ operator = (8, 4, b"false 4 colorimage")
429
+ else:
430
+ msg = "image mode is not supported"
431
+ raise ValueError(msg)
432
+
433
+ if eps:
434
+ # write EPS header
435
+ fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
436
+ fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
437
+ # fp.write("%%CreationDate: %s"...)
438
+ fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
439
+ fp.write(b"%%Pages: 1\n")
440
+ fp.write(b"%%EndComments\n")
441
+ fp.write(b"%%Page: 1 1\n")
442
+ fp.write(b"%%ImageData: %d %d " % im.size)
443
+ fp.write(b'%d %d 0 1 1 "%s"\n' % operator)
444
+
445
+ # image header
446
+ fp.write(b"gsave\n")
447
+ fp.write(b"10 dict begin\n")
448
+ fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
449
+ fp.write(b"%d %d scale\n" % im.size)
450
+ fp.write(b"%d %d 8\n" % im.size) # <= bits
451
+ fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
452
+ fp.write(b"{ currentfile buf readhexstring pop } bind\n")
453
+ fp.write(operator[2] + b"\n")
454
+ if hasattr(fp, "flush"):
455
+ fp.flush()
456
+
457
+ ImageFile._save(im, fp, [ImageFile._Tile("eps", (0, 0) + im.size, 0, None)])
458
+
459
+ fp.write(b"\n%%%%EndBinary\n")
460
+ fp.write(b"grestore end\n")
461
+ if hasattr(fp, "flush"):
462
+ fp.flush()
463
+
464
+
465
+ # --------------------------------------------------------------------
466
+
467
+
468
+ Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
469
+
470
+ Image.register_save(EpsImageFile.format, _save)
471
+
472
+ Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])
473
+
474
+ Image.register_mime(EpsImageFile.format, "application/postscript")
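The writer above needs no external tools, but `EpsImageFile.load()` shells out to Ghostscript, so reading an EPS back requires a Ghostscript binary on the system. A minimal round-trip sketch (not part of the commit; standard Pillow usage under that assumption):

```python
from PIL import Image

# Writing goes through _save() above: EPS header, hex pixel data, trailer.
im = Image.new("RGB", (32, 32), "red")
im.save("swatch.eps")

# Reading rasterizes via Ghostscript; scale multiplies the
# bounding-box size before rasterization.
with Image.open("swatch.eps") as eps:
    eps.load(scale=2)
    print(eps.mode, eps.size)  # RGB (64, 64)
```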
src/venv/Lib/site-packages/PIL/ExifTags.py ADDED
@@ -0,0 +1,381 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# EXIF tags
+#
+# Copyright (c) 2003 by Secret Labs AB
+#
+# See the README file for information on usage and redistribution.
+#
+
+"""
+This module provides constants and clear-text names for various
+well-known EXIF tags.
+"""
+from __future__ import annotations
+
+from enum import IntEnum
+
+
+class Base(IntEnum):
+    # possibly incomplete
+    InteropIndex = 0x0001
+    ProcessingSoftware = 0x000B
+    NewSubfileType = 0x00FE
+    SubfileType = 0x00FF
+    ImageWidth = 0x0100
+    ImageLength = 0x0101
+    BitsPerSample = 0x0102
+    Compression = 0x0103
+    PhotometricInterpretation = 0x0106
+    Thresholding = 0x0107
+    CellWidth = 0x0108
+    CellLength = 0x0109
+    FillOrder = 0x010A
+    DocumentName = 0x010D
+    ImageDescription = 0x010E
+    Make = 0x010F
+    Model = 0x0110
+    StripOffsets = 0x0111
+    Orientation = 0x0112
+    SamplesPerPixel = 0x0115
+    RowsPerStrip = 0x0116
+    StripByteCounts = 0x0117
+    MinSampleValue = 0x0118
+    MaxSampleValue = 0x0119
+    XResolution = 0x011A
+    YResolution = 0x011B
+    PlanarConfiguration = 0x011C
+    PageName = 0x011D
+    FreeOffsets = 0x0120
+    FreeByteCounts = 0x0121
+    GrayResponseUnit = 0x0122
+    GrayResponseCurve = 0x0123
+    T4Options = 0x0124
+    T6Options = 0x0125
+    ResolutionUnit = 0x0128
+    PageNumber = 0x0129
+    TransferFunction = 0x012D
+    Software = 0x0131
+    DateTime = 0x0132
+    Artist = 0x013B
+    HostComputer = 0x013C
+    Predictor = 0x013D
+    WhitePoint = 0x013E
+    PrimaryChromaticities = 0x013F
+    ColorMap = 0x0140
+    HalftoneHints = 0x0141
+    TileWidth = 0x0142
+    TileLength = 0x0143
+    TileOffsets = 0x0144
+    TileByteCounts = 0x0145
+    SubIFDs = 0x014A
+    InkSet = 0x014C
+    InkNames = 0x014D
+    NumberOfInks = 0x014E
+    DotRange = 0x0150
+    TargetPrinter = 0x0151
+    ExtraSamples = 0x0152
+    SampleFormat = 0x0153
+    SMinSampleValue = 0x0154
+    SMaxSampleValue = 0x0155
+    TransferRange = 0x0156
+    ClipPath = 0x0157
+    XClipPathUnits = 0x0158
+    YClipPathUnits = 0x0159
+    Indexed = 0x015A
+    JPEGTables = 0x015B
+    OPIProxy = 0x015F
+    JPEGProc = 0x0200
+    JpegIFOffset = 0x0201
+    JpegIFByteCount = 0x0202
+    JpegRestartInterval = 0x0203
+    JpegLosslessPredictors = 0x0205
+    JpegPointTransforms = 0x0206
+    JpegQTables = 0x0207
+    JpegDCTables = 0x0208
+    JpegACTables = 0x0209
+    YCbCrCoefficients = 0x0211
+    YCbCrSubSampling = 0x0212
+    YCbCrPositioning = 0x0213
+    ReferenceBlackWhite = 0x0214
+    XMLPacket = 0x02BC
+    RelatedImageFileFormat = 0x1000
+    RelatedImageWidth = 0x1001
+    RelatedImageLength = 0x1002
+    Rating = 0x4746
+    RatingPercent = 0x4749
+    ImageID = 0x800D
+    CFARepeatPatternDim = 0x828D
+    BatteryLevel = 0x828F
+    Copyright = 0x8298
+    ExposureTime = 0x829A
+    FNumber = 0x829D
+    IPTCNAA = 0x83BB
+    ImageResources = 0x8649
+    ExifOffset = 0x8769
+    InterColorProfile = 0x8773
+    ExposureProgram = 0x8822
+    SpectralSensitivity = 0x8824
+    GPSInfo = 0x8825
+    ISOSpeedRatings = 0x8827
+    OECF = 0x8828
+    Interlace = 0x8829
+    TimeZoneOffset = 0x882A
+    SelfTimerMode = 0x882B
+    SensitivityType = 0x8830
+    StandardOutputSensitivity = 0x8831
+    RecommendedExposureIndex = 0x8832
+    ISOSpeed = 0x8833
+    ISOSpeedLatitudeyyy = 0x8834
+    ISOSpeedLatitudezzz = 0x8835
+    ExifVersion = 0x9000
+    DateTimeOriginal = 0x9003
+    DateTimeDigitized = 0x9004
+    OffsetTime = 0x9010
+    OffsetTimeOriginal = 0x9011
+    OffsetTimeDigitized = 0x9012
+    ComponentsConfiguration = 0x9101
+    CompressedBitsPerPixel = 0x9102
+    ShutterSpeedValue = 0x9201
+    ApertureValue = 0x9202
+    BrightnessValue = 0x9203
+    ExposureBiasValue = 0x9204
+    MaxApertureValue = 0x9205
+    SubjectDistance = 0x9206
+    MeteringMode = 0x9207
+    LightSource = 0x9208
+    Flash = 0x9209
+    FocalLength = 0x920A
+    Noise = 0x920D
+    ImageNumber = 0x9211
+    SecurityClassification = 0x9212
+    ImageHistory = 0x9213
+    TIFFEPStandardID = 0x9216
+    MakerNote = 0x927C
+    UserComment = 0x9286
+    SubsecTime = 0x9290
+    SubsecTimeOriginal = 0x9291
+    SubsecTimeDigitized = 0x9292
+    AmbientTemperature = 0x9400
+    Humidity = 0x9401
+    Pressure = 0x9402
+    WaterDepth = 0x9403
+    Acceleration = 0x9404
+    CameraElevationAngle = 0x9405
+    XPTitle = 0x9C9B
+    XPComment = 0x9C9C
+    XPAuthor = 0x9C9D
+    XPKeywords = 0x9C9E
+    XPSubject = 0x9C9F
+    FlashPixVersion = 0xA000
+    ColorSpace = 0xA001
+    ExifImageWidth = 0xA002
+    ExifImageHeight = 0xA003
+    RelatedSoundFile = 0xA004
+    ExifInteroperabilityOffset = 0xA005
+    FlashEnergy = 0xA20B
+    SpatialFrequencyResponse = 0xA20C
+    FocalPlaneXResolution = 0xA20E
+    FocalPlaneYResolution = 0xA20F
+    FocalPlaneResolutionUnit = 0xA210
+    SubjectLocation = 0xA214
+    ExposureIndex = 0xA215
+    SensingMethod = 0xA217
+    FileSource = 0xA300
+    SceneType = 0xA301
+    CFAPattern = 0xA302
+    CustomRendered = 0xA401
+    ExposureMode = 0xA402
+    WhiteBalance = 0xA403
+    DigitalZoomRatio = 0xA404
+    FocalLengthIn35mmFilm = 0xA405
+    SceneCaptureType = 0xA406
+    GainControl = 0xA407
+    Contrast = 0xA408
+    Saturation = 0xA409
+    Sharpness = 0xA40A
+    DeviceSettingDescription = 0xA40B
+    SubjectDistanceRange = 0xA40C
+    ImageUniqueID = 0xA420
+    CameraOwnerName = 0xA430
+    BodySerialNumber = 0xA431
+    LensSpecification = 0xA432
+    LensMake = 0xA433
+    LensModel = 0xA434
+    LensSerialNumber = 0xA435
+    CompositeImage = 0xA460
+    CompositeImageCount = 0xA461
+    CompositeImageExposureTimes = 0xA462
+    Gamma = 0xA500
+    PrintImageMatching = 0xC4A5
+    DNGVersion = 0xC612
+    DNGBackwardVersion = 0xC613
+    UniqueCameraModel = 0xC614
+    LocalizedCameraModel = 0xC615
+    CFAPlaneColor = 0xC616
+    CFALayout = 0xC617
+    LinearizationTable = 0xC618
+    BlackLevelRepeatDim = 0xC619
+    BlackLevel = 0xC61A
+    BlackLevelDeltaH = 0xC61B
+    BlackLevelDeltaV = 0xC61C
+    WhiteLevel = 0xC61D
+    DefaultScale = 0xC61E
+    DefaultCropOrigin = 0xC61F
+    DefaultCropSize = 0xC620
+    ColorMatrix1 = 0xC621
+    ColorMatrix2 = 0xC622
+    CameraCalibration1 = 0xC623
+    CameraCalibration2 = 0xC624
+    ReductionMatrix1 = 0xC625
+    ReductionMatrix2 = 0xC626
+    AnalogBalance = 0xC627
+    AsShotNeutral = 0xC628
+    AsShotWhiteXY = 0xC629
+    BaselineExposure = 0xC62A
+    BaselineNoise = 0xC62B
+    BaselineSharpness = 0xC62C
+    BayerGreenSplit = 0xC62D
+    LinearResponseLimit = 0xC62E
+    CameraSerialNumber = 0xC62F
+    LensInfo = 0xC630
+    ChromaBlurRadius = 0xC631
+    AntiAliasStrength = 0xC632
+    ShadowScale = 0xC633
+    DNGPrivateData = 0xC634
+    MakerNoteSafety = 0xC635
+    CalibrationIlluminant1 = 0xC65A
+    CalibrationIlluminant2 = 0xC65B
+    BestQualityScale = 0xC65C
+    RawDataUniqueID = 0xC65D
+    OriginalRawFileName = 0xC68B
+    OriginalRawFileData = 0xC68C
+    ActiveArea = 0xC68D
+    MaskedAreas = 0xC68E
+    AsShotICCProfile = 0xC68F
+    AsShotPreProfileMatrix = 0xC690
+    CurrentICCProfile = 0xC691
+    CurrentPreProfileMatrix = 0xC692
+    ColorimetricReference = 0xC6BF
+    CameraCalibrationSignature = 0xC6F3
+    ProfileCalibrationSignature = 0xC6F4
+    AsShotProfileName = 0xC6F6
+    NoiseReductionApplied = 0xC6F7
+    ProfileName = 0xC6F8
+    ProfileHueSatMapDims = 0xC6F9
+    ProfileHueSatMapData1 = 0xC6FA
+    ProfileHueSatMapData2 = 0xC6FB
+    ProfileToneCurve = 0xC6FC
+    ProfileEmbedPolicy = 0xC6FD
+    ProfileCopyright = 0xC6FE
+    ForwardMatrix1 = 0xC714
+    ForwardMatrix2 = 0xC715
+    PreviewApplicationName = 0xC716
+    PreviewApplicationVersion = 0xC717
+    PreviewSettingsName = 0xC718
+    PreviewSettingsDigest = 0xC719
+    PreviewColorSpace = 0xC71A
+    PreviewDateTime = 0xC71B
+    RawImageDigest = 0xC71C
+    OriginalRawFileDigest = 0xC71D
+    SubTileBlockSize = 0xC71E
+    RowInterleaveFactor = 0xC71F
+    ProfileLookTableDims = 0xC725
+    ProfileLookTableData = 0xC726
+    OpcodeList1 = 0xC740
+    OpcodeList2 = 0xC741
+    OpcodeList3 = 0xC74E
+    NoiseProfile = 0xC761
+
+
+"""Maps EXIF tags to tag names."""
+TAGS = {
+    **{i.value: i.name for i in Base},
+    0x920C: "SpatialFrequencyResponse",
+    0x9214: "SubjectLocation",
+    0x9215: "ExposureIndex",
+    0x828E: "CFAPattern",
+    0x920B: "FlashEnergy",
+    0x9216: "TIFF/EPStandardID",
+}
+
+
+class GPS(IntEnum):
+    GPSVersionID = 0
+    GPSLatitudeRef = 1
+    GPSLatitude = 2
+    GPSLongitudeRef = 3
+    GPSLongitude = 4
+    GPSAltitudeRef = 5
+    GPSAltitude = 6
+    GPSTimeStamp = 7
+    GPSSatellites = 8
+    GPSStatus = 9
+    GPSMeasureMode = 10
+    GPSDOP = 11
+    GPSSpeedRef = 12
+    GPSSpeed = 13
+    GPSTrackRef = 14
+    GPSTrack = 15
+    GPSImgDirectionRef = 16
+    GPSImgDirection = 17
+    GPSMapDatum = 18
+    GPSDestLatitudeRef = 19
+    GPSDestLatitude = 20
+    GPSDestLongitudeRef = 21
+    GPSDestLongitude = 22
+    GPSDestBearingRef = 23
+    GPSDestBearing = 24
+    GPSDestDistanceRef = 25
+    GPSDestDistance = 26
+    GPSProcessingMethod = 27
+    GPSAreaInformation = 28
+    GPSDateStamp = 29
+    GPSDifferential = 30
+    GPSHPositioningError = 31
+
+
+"""Maps EXIF GPS tags to tag names."""
+GPSTAGS = {i.value: i.name for i in GPS}
+
+
+class Interop(IntEnum):
+    InteropIndex = 1
+    InteropVersion = 2
+    RelatedImageFileFormat = 4096
+    RelatedImageWidth = 4097
+    RelatedImageHeight = 4098
+
+
+class IFD(IntEnum):
+    Exif = 34665
+    GPSInfo = 34853
+    Makernote = 37500
+    Interop = 40965
+    IFD1 = -1
+
+
+class LightSource(IntEnum):
+    Unknown = 0
+    Daylight = 1
+    Fluorescent = 2
+    Tungsten = 3
+    Flash = 4
+    Fine = 9
+    Cloudy = 10
+    Shade = 11
+    DaylightFluorescent = 12
+    DayWhiteFluorescent = 13
+    CoolWhiteFluorescent = 14
+    WhiteFluorescent = 15
+    StandardLightA = 17
+    StandardLightB = 18
+    StandardLightC = 19
+    D55 = 20
+    D65 = 21
+    D75 = 22
+    D50 = 23
+    ISO = 24
+    Other = 255
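Since `TAGS` and `GPSTAGS` are plain dicts keyed by numeric tag id, decoding EXIF metadata into readable names is a lookup. A short sketch of typical usage (the input file name is hypothetical):

```python
from PIL import Image
from PIL.ExifTags import GPSTAGS, IFD, TAGS

with Image.open("photo.jpg") as im:  # hypothetical input file
    exif = im.getexif()
    for tag_id, value in exif.items():
        # Fall back to the raw numeric id for tags not in the table
        print(TAGS.get(tag_id, tag_id), value)

    # GPS tags live in their own IFD and use the GPSTAGS names
    for tag_id, value in exif.get_ifd(IFD.GPSInfo).items():
        print(GPSTAGS.get(tag_id, tag_id), value)
```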
src/venv/Lib/site-packages/PIL/FitsImagePlugin.py ADDED
@@ -0,0 +1,152 @@
+#
+# The Python Imaging Library
+# $Id$
+#
+# FITS file handling
+#
+# Copyright (c) 1998-2003 by Fredrik Lundh
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import gzip
+import math
+
+from . import Image, ImageFile
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix[:6] == b"SIMPLE"
+
+
+class FitsImageFile(ImageFile.ImageFile):
+    format = "FITS"
+    format_description = "FITS"
+
+    def _open(self) -> None:
+        assert self.fp is not None
+
+        headers: dict[bytes, bytes] = {}
+        header_in_progress = False
+        decoder_name = ""
+        while True:
+            header = self.fp.read(80)
+            if not header:
+                msg = "Truncated FITS file"
+                raise OSError(msg)
+            keyword = header[:8].strip()
+            if keyword in (b"SIMPLE", b"XTENSION"):
+                header_in_progress = True
+            elif headers and not header_in_progress:
+                # This is now a data unit
+                break
+            elif keyword == b"END":
+                # Seek to the end of the header unit
+                self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
+                if not decoder_name:
+                    decoder_name, offset, args = self._parse_headers(headers)
+
+                header_in_progress = False
+                continue
+
+            if decoder_name:
+                # Keep going to read past the headers
+                continue
+
+            value = header[8:].split(b"/")[0].strip()
+            if value.startswith(b"="):
+                value = value[1:].strip()
+            if not headers and (not _accept(keyword) or value != b"T"):
+                msg = "Not a FITS file"
+                raise SyntaxError(msg)
+            headers[keyword] = value
+
+        if not decoder_name:
+            msg = "No image data"
+            raise ValueError(msg)
+
+        offset += self.fp.tell() - 80
+        self.tile = [ImageFile._Tile(decoder_name, (0, 0) + self.size, offset, args)]
+
+    def _get_size(
+        self, headers: dict[bytes, bytes], prefix: bytes
+    ) -> tuple[int, int] | None:
+        naxis = int(headers[prefix + b"NAXIS"])
+        if naxis == 0:
+            return None
+
+        if naxis == 1:
+            return 1, int(headers[prefix + b"NAXIS1"])
+        else:
+            return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])
+
+    def _parse_headers(
+        self, headers: dict[bytes, bytes]
+    ) -> tuple[str, int, tuple[str | int, ...]]:
+        prefix = b""
+        decoder_name = "raw"
+        offset = 0
+        if (
+            headers.get(b"XTENSION") == b"'BINTABLE'"
+            and headers.get(b"ZIMAGE") == b"T"
+            and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
+        ):
+            no_prefix_size = self._get_size(headers, prefix) or (0, 0)
+            number_of_bits = int(headers[b"BITPIX"])
+            offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)
+
+            prefix = b"Z"
+            decoder_name = "fits_gzip"
+
+        size = self._get_size(headers, prefix)
+        if not size:
+            return "", 0, ()
+
+        self._size = size
+
+        number_of_bits = int(headers[prefix + b"BITPIX"])
+        if number_of_bits == 8:
+            self._mode = "L"
+        elif number_of_bits == 16:
+            self._mode = "I;16"
+        elif number_of_bits == 32:
+            self._mode = "I"
+        elif number_of_bits in (-32, -64):
+            self._mode = "F"
+
+        args: tuple[str | int, ...]
+        if decoder_name == "raw":
+            args = (self.mode, 0, -1)
+        else:
+            args = (number_of_bits,)
+        return decoder_name, offset, args
+
+
+class FitsGzipDecoder(ImageFile.PyDecoder):
+    _pulls_fd = True
+
+    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
+        assert self.fd is not None
+        value = gzip.decompress(self.fd.read())
+
+        rows = []
+        offset = 0
+        number_of_bits = min(self.args[0] // 8, 4)
+        for y in range(self.state.ysize):
+            row = bytearray()
+            for x in range(self.state.xsize):
+                row += value[offset + (4 - number_of_bits) : offset + 4]
+                offset += 4
+            rows.append(row)
+        self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row]))
+        return -1, 0
+
+
+# --------------------------------------------------------------------
+# Registry
+
+Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
+Image.register_decoder("fits_gzip", FitsGzipDecoder)
+
+Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
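Once registered, FITS files open through the normal `Image.open` path; `_parse_headers` above picks the mode from the BITPIX card (8 → "L", 16 → "I;16", 32 → "I", -32/-64 → "F"), and gzip-compressed BINTABLE extensions are routed to `FitsGzipDecoder`. A sketch with a hypothetical file:

```python
from PIL import Image

with Image.open("observation.fits") as im:  # hypothetical input file
    print(im.mode, im.size)  # mode chosen from the BITPIX header card
```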
src/venv/Lib/site-packages/PIL/FliImagePlugin.py ADDED
@@ -0,0 +1,175 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# FLI/FLC file handling.
+#
+# History:
+# 95-09-01 fl Created
+# 97-01-03 fl Fixed parser, setup decoder tile
+# 98-07-15 fl Renamed offset attribute to avoid name clash
+#
+# Copyright (c) Secret Labs AB 1997-98.
+# Copyright (c) Fredrik Lundh 1995-97.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import os
+
+from . import Image, ImageFile, ImagePalette
+from ._binary import i16le as i16
+from ._binary import i32le as i32
+from ._binary import o8
+
+#
+# decoder
+
+
+def _accept(prefix: bytes) -> bool:
+    return (
+        len(prefix) >= 6
+        and i16(prefix, 4) in [0xAF11, 0xAF12]
+        and i16(prefix, 14) in [0, 3]  # flags
+    )
+
+
+##
+# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
+# method to load individual frames.
+
+
+class FliImageFile(ImageFile.ImageFile):
+    format = "FLI"
+    format_description = "Autodesk FLI/FLC Animation"
+    _close_exclusive_fp_after_loading = False
+
+    def _open(self) -> None:
+        # HEAD
+        s = self.fp.read(128)
+        if not (_accept(s) and s[20:22] == b"\x00\x00"):
+            msg = "not an FLI/FLC file"
+            raise SyntaxError(msg)
+
+        # frames
+        self.n_frames = i16(s, 6)
+        self.is_animated = self.n_frames > 1
+
+        # image characteristics
+        self._mode = "P"
+        self._size = i16(s, 8), i16(s, 10)
+
+        # animation speed
+        duration = i32(s, 16)
+        magic = i16(s, 4)
+        if magic == 0xAF11:
+            duration = (duration * 1000) // 70
+        self.info["duration"] = duration
+
+        # look for palette
+        palette = [(a, a, a) for a in range(256)]
+
+        s = self.fp.read(16)
+
+        self.__offset = 128
+
+        if i16(s, 4) == 0xF100:
+            # prefix chunk; ignore it
+            self.__offset = self.__offset + i32(s)
+            self.fp.seek(self.__offset)
+            s = self.fp.read(16)
+
+        if i16(s, 4) == 0xF1FA:
+            # look for palette chunk
+            number_of_subchunks = i16(s, 6)
+            chunk_size: int | None = None
+            for _ in range(number_of_subchunks):
+                if chunk_size is not None:
+                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
+                s = self.fp.read(6)
+                chunk_type = i16(s, 4)
+                if chunk_type in (4, 11):
+                    self._palette(palette, 2 if chunk_type == 11 else 0)
+                    break
+                chunk_size = i32(s)
+                if not chunk_size:
+                    break
+
+        self.palette = ImagePalette.raw(
+            "RGB", b"".join(o8(r) + o8(g) + o8(b) for (r, g, b) in palette)
+        )
+
+        # set things up to decode first frame
+        self.__frame = -1
+        self._fp = self.fp
+        self.__rewind = self.fp.tell()
+        self.seek(0)
+
+    def _palette(self, palette: list[tuple[int, int, int]], shift: int) -> None:
+        # load palette
+
+        i = 0
+        for e in range(i16(self.fp.read(2))):
+            s = self.fp.read(2)
+            i = i + s[0]
+            n = s[1]
+            if n == 0:
+                n = 256
+            s = self.fp.read(n * 3)
+            for n in range(0, len(s), 3):
+                r = s[n] << shift
+                g = s[n + 1] << shift
+                b = s[n + 2] << shift
+                palette[i] = (r, g, b)
+                i += 1
+
+    def seek(self, frame: int) -> None:
+        if not self._seek_check(frame):
+            return
+        if frame < self.__frame:
+            self._seek(0)
+
+        for f in range(self.__frame + 1, frame + 1):
+            self._seek(f)
+
+    def _seek(self, frame: int) -> None:
+        if frame == 0:
+            self.__frame = -1
+            self._fp.seek(self.__rewind)
+            self.__offset = 128
+        else:
+            # ensure that the previous frame was loaded
+            self.load()
+
+        if frame != self.__frame + 1:
+            msg = f"cannot seek to frame {frame}"
+            raise ValueError(msg)
+        self.__frame = frame
+
+        # move to next frame
+        self.fp = self._fp
+        self.fp.seek(self.__offset)
+
+        s = self.fp.read(4)
+        if not s:
+            msg = "missing frame size"
+            raise EOFError(msg)
+
+        framesize = i32(s)
+
+        self.decodermaxblock = framesize
+        self.tile = [ImageFile._Tile("fli", (0, 0) + self.size, self.__offset, None)]
+
+        self.__offset += framesize
+
+    def tell(self) -> int:
+        return self.__frame
+
+
+#
+# registry
+
+Image.register_open(FliImageFile.format, FliImageFile, _accept)
+
+Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
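As the plugin comment says, individual frames are loaded with `seek`; since FLI frames are deltas that must decode in order, `seek()` rewinds to frame 0 internally when asked to step backwards. A sketch that dumps every frame of a hypothetical animation:

```python
from PIL import Image

with Image.open("intro.fli") as im:  # hypothetical input file
    print(im.n_frames, im.info["duration"])  # frame count, ms per frame
    for frame in range(im.n_frames):
        im.seek(frame)  # deltas decode in order; seek() rewinds as needed
        im.convert("RGB").save(f"frame_{frame:03d}.png")
```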
src/venv/Lib/site-packages/PIL/FontFile.py ADDED
@@ -0,0 +1,134 @@
+#
+# The Python Imaging Library
+# $Id$
+#
+# base class for raster font file parsers
+#
+# history:
+# 1997-06-05 fl created
+# 1997-08-19 fl restrict image width
+#
+# Copyright (c) 1997-1998 by Secret Labs AB
+# Copyright (c) 1997-1998 by Fredrik Lundh
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import os
+from typing import BinaryIO
+
+from . import Image, _binary
+
+WIDTH = 800
+
+
+def puti16(
+    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
+) -> None:
+    """Write network order (big-endian) 16-bit sequence"""
+    for v in values:
+        if v < 0:
+            v += 65536
+        fp.write(_binary.o16be(v))
+
+
+class FontFile:
+    """Base class for raster font file handlers."""
+
+    bitmap: Image.Image | None = None
+
+    def __init__(self) -> None:
+        self.info: dict[bytes, bytes | int] = {}
+        self.glyph: list[
+            tuple[
+                tuple[int, int],
+                tuple[int, int, int, int],
+                tuple[int, int, int, int],
+                Image.Image,
+            ]
+            | None
+        ] = [None] * 256
+
+    def __getitem__(self, ix: int) -> (
+        tuple[
+            tuple[int, int],
+            tuple[int, int, int, int],
+            tuple[int, int, int, int],
+            Image.Image,
+        ]
+        | None
+    ):
+        return self.glyph[ix]
+
+    def compile(self) -> None:
+        """Create metrics and bitmap"""
+
+        if self.bitmap:
+            return
+
+        # create bitmap large enough to hold all data
+        h = w = maxwidth = 0
+        lines = 1
+        for glyph in self.glyph:
+            if glyph:
+                d, dst, src, im = glyph
+                h = max(h, src[3] - src[1])
+                w = w + (src[2] - src[0])
+                if w > WIDTH:
+                    lines += 1
+                    w = src[2] - src[0]
+                maxwidth = max(maxwidth, w)
+
+        xsize = maxwidth
+        ysize = lines * h
+
+        if xsize == 0 and ysize == 0:
+            return
+
+        self.ysize = h
+
+        # paste glyphs into bitmap
+        self.bitmap = Image.new("1", (xsize, ysize))
+        self.metrics: list[
+            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
+            | None
+        ] = [None] * 256
+        x = y = 0
+        for i in range(256):
+            glyph = self[i]
+            if glyph:
+                d, dst, src, im = glyph
+                xx = src[2] - src[0]
+                x0, y0 = x, y
+                x = x + xx
+                if x > WIDTH:
+                    x, y = 0, y + h
+                    x0, y0 = x, y
+                    x = xx
+                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
+                self.bitmap.paste(im.crop(src), s)
+                self.metrics[i] = d, dst, s
+
+    def save(self, filename: str) -> None:
+        """Save font"""
+
+        self.compile()
+
+        # font data
+        if not self.bitmap:
+            msg = "No bitmap created"
+            raise ValueError(msg)
+        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
+
+        # font metrics
+        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
+            fp.write(b"PILfont\n")
+            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
+            fp.write(b"DATA\n")
+            for id in range(256):
+                m = self.metrics[id]
+                if not m:
+                    puti16(fp, (0,) * 10)
+                else:
+                    puti16(fp, m[0] + m[1] + m[2])
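`FontFile` only supplies the compile/save machinery; concrete parsers such as `BdfFontFile` (also vendored in this commit) fill `self.glyph` and inherit `save()`, which writes the `.pil` metrics file and the bitmap sheet shown above. A conversion sketch with a hypothetical input font:

```python
from PIL import BdfFontFile, ImageFont

with open("courier.bdf", "rb") as fp:  # hypothetical BDF bitmap font
    font = BdfFontFile.BdfFontFile(fp)  # a FontFile subclass
font.save("courier")  # writes courier.pil plus the bitmap sheet

pil_font = ImageFont.load("courier.pil")  # ready for ImageDraw.text()
```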
src/venv/Lib/site-packages/PIL/FpxImagePlugin.py ADDED
@@ -0,0 +1,257 @@
+#
+# THIS IS WORK IN PROGRESS
+#
+# The Python Imaging Library.
+# $Id$
+#
+# FlashPix support for PIL
+#
+# History:
+# 97-01-25 fl Created (reads uncompressed RGB images only)
+#
+# Copyright (c) Secret Labs AB 1997.
+# Copyright (c) Fredrik Lundh 1997.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import olefile
+
+from . import Image, ImageFile
+from ._binary import i32le as i32
+
+# we map from colour field tuples to (mode, rawmode) descriptors
+MODES = {
+    # opacity
+    (0x00007FFE,): ("A", "L"),
+    # monochrome
+    (0x00010000,): ("L", "L"),
+    (0x00018000, 0x00017FFE): ("RGBA", "LA"),
+    # photo YCC
+    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
+    (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
+    # standard RGB (NIFRGB)
+    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
+    (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
+}
+
+
+#
+# --------------------------------------------------------------------
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix[:8] == olefile.MAGIC
+
+
+##
+# Image plugin for the FlashPix images.
+
+
+class FpxImageFile(ImageFile.ImageFile):
+    format = "FPX"
+    format_description = "FlashPix"
+
+    def _open(self) -> None:
+        #
+        # read the OLE directory and see if this is a likely
+        # to be a FlashPix file
+
+        try:
+            self.ole = olefile.OleFileIO(self.fp)
+        except OSError as e:
+            msg = "not an FPX file; invalid OLE file"
+            raise SyntaxError(msg) from e
+
+        root = self.ole.root
+        if not root or root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
+            msg = "not an FPX file; bad root CLSID"
+            raise SyntaxError(msg)
+
+        self._open_index(1)
+
+    def _open_index(self, index: int = 1) -> None:
+        #
+        # get the Image Contents Property Set
+
+        prop = self.ole.getproperties(
+            [f"Data Object Store {index:06d}", "\005Image Contents"]
+        )
+
+        # size (highest resolution)
+
+        assert isinstance(prop[0x1000002], int)
+        assert isinstance(prop[0x1000003], int)
+        self._size = prop[0x1000002], prop[0x1000003]
+
+        size = max(self.size)
+        i = 1
+        while size > 64:
+            size = size // 2
+            i += 1
+        self.maxid = i - 1
+
+        # mode. instead of using a single field for this, flashpix
+        # requires you to specify the mode for each channel in each
+        # resolution subimage, and leaves it to the decoder to make
+        # sure that they all match. for now, we'll cheat and assume
+        # that this is always the case.
+
+        id = self.maxid << 16
+
+        s = prop[0x2000002 | id]
+
+        if not isinstance(s, bytes) or (bands := i32(s, 4)) > 4:
+            msg = "Invalid number of bands"
+            raise OSError(msg)
+
+        # note: for now, we ignore the "uncalibrated" flag
+        colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))
+
+        self._mode, self.rawmode = MODES[colors]
+
+        # load JPEG tables, if any
+        self.jpeg = {}
+        for i in range(256):
+            id = 0x3000001 | (i << 16)
+            if id in prop:
+                self.jpeg[i] = prop[id]
+
+        self._open_subimage(1, self.maxid)
+
+    def _open_subimage(self, index: int = 1, subimage: int = 0) -> None:
+        #
+        # setup tile descriptors for a given subimage
+
+        stream = [
+            f"Data Object Store {index:06d}",
+            f"Resolution {subimage:04d}",
+            "Subimage 0000 Header",
+        ]
+
+        fp = self.ole.openstream(stream)
+
+        # skip prefix
+        fp.read(28)
+
+        # header stream
+        s = fp.read(36)
+
+        size = i32(s, 4), i32(s, 8)
+        # tilecount = i32(s, 12)
+        tilesize = i32(s, 16), i32(s, 20)
+        # channels = i32(s, 24)
+        offset = i32(s, 28)
+        length = i32(s, 32)
+
+        if size != self.size:
+            msg = "subimage mismatch"
+            raise OSError(msg)
+
+        # get tile descriptors
+        fp.seek(28 + offset)
+        s = fp.read(i32(s, 12) * length)
+
+        x = y = 0
+        xsize, ysize = size
+        xtile, ytile = tilesize
+        self.tile = []
+
+        for i in range(0, len(s), length):
+            x1 = min(xsize, x + xtile)
+            y1 = min(ysize, y + ytile)
+
+            compression = i32(s, i + 8)
+
+            if compression == 0:
+                self.tile.append(
+                    ImageFile._Tile(
+                        "raw",
+                        (x, y, x1, y1),
+                        i32(s, i) + 28,
+                        (self.rawmode,),
+                    )
+                )
+
+            elif compression == 1:
+                # FIXME: the fill decoder is not implemented
+                self.tile.append(
+                    ImageFile._Tile(
+                        "fill",
+                        (x, y, x1, y1),
+                        i32(s, i) + 28,
+                        (self.rawmode, s[12:16]),
+                    )
+                )
+
+            elif compression == 2:
+                internal_color_conversion = s[14]
+                jpeg_tables = s[15]
+                rawmode = self.rawmode
+
+                if internal_color_conversion:
+                    # The image is stored as usual (usually YCbCr).
+                    if rawmode == "RGBA":
+                        # For "RGBA", data is stored as YCbCrA based on
+                        # negative RGB. The following trick works around
+                        # this problem :
+                        jpegmode, rawmode = "YCbCrK", "CMYK"
+                    else:
+                        jpegmode = None  # let the decoder decide
+
+                else:
+                    # The image is stored as defined by rawmode
+                    jpegmode = rawmode
+
+                self.tile.append(
+                    ImageFile._Tile(
+                        "jpeg",
+                        (x, y, x1, y1),
+                        i32(s, i) + 28,
+                        (rawmode, jpegmode),
+                    )
+                )
+
+                # FIXME: jpeg tables are tile dependent; the prefix
+                # data must be placed in the tile descriptor itself!
+
+                if jpeg_tables:
+                    self.tile_prefix = self.jpeg[jpeg_tables]
+
+            else:
+                msg = "unknown/invalid compression"
+                raise OSError(msg)
+
+            x = x + xtile
+            if x >= xsize:
+                x, y = 0, y + ytile
+                if y >= ysize:
+                    break  # isn't really required
+
+        self.stream = stream
+        self._fp = self.fp
+        self.fp = None
+
+    def load(self) -> Image.core.PixelAccess | None:
+        if not self.fp:
+            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])
+
+        return ImageFile.ImageFile.load(self)
+
+    def close(self) -> None:
+        self.ole.close()
+        super().close()
+
+    def __exit__(self, *args: object) -> None:
+        self.ole.close()
+        super().__exit__()
+
+
+#
+# --------------------------------------------------------------------
+
+
+Image.register_open(FpxImageFile.format, FpxImageFile, _accept)
+
+Image.register_extension(FpxImageFile.format, ".fpx")
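FlashPix support depends on `olefile`, imported unconditionally at the top of the module; when that optional dependency is missing, Pillow simply skips registering this plugin. Usage is otherwise the standard open path, sketched with a hypothetical file:

```python
from PIL import Image  # FPX additionally requires the olefile package

with Image.open("scan.fpx") as im:  # hypothetical input file
    im.load()  # streams "Subimage 0000 Data" out of the OLE container
    print(im.mode, im.size)
```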
src/venv/Lib/site-packages/PIL/FtexImagePlugin.py ADDED
@@ -0,0 +1,115 @@
+"""
+A Pillow loader for .ftc and .ftu files (FTEX)
+Jerome Leclanche <[email protected]>
+
+The contents of this file are hereby released in the public domain (CC0)
+Full text of the CC0 license:
+https://creativecommons.org/publicdomain/zero/1.0/
+
+Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
+
+The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
+packed custom format called FTEX. This file format uses file extensions FTC
+and FTU.
+* FTC files are compressed textures (using standard texture compression).
+* FTU files are not compressed.
+Texture File Format
+The FTC and FTU texture files both use the same format. This
+has the following structure:
+{header}
+{format_directory}
+{data}
+Where:
+{header} = {
+    u32:magic,
+    u32:version,
+    u32:width,
+    u32:height,
+    u32:mipmap_count,
+    u32:format_count
+}
+
+* The "magic" number is "FTEX".
+* "width" and "height" are the dimensions of the texture.
+* "mipmap_count" is the number of mipmaps in the texture.
+* "format_count" is the number of texture formats (different versions of the
+same texture) in this file.
+
+{format_directory} = format_count * { u32:format, u32:where }
+
+The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
+uncompressed textures.
+The texture data for a format starts at the position "where" in the file.
+
+Each set of texture data in the file has the following structure:
+{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
+* "mipmap_size" is the number of bytes in that mip level. For compressed
+textures this is the size of the texture data compressed with DXT1. For 24 bit
+uncompressed textures, this is 3 * width * height. Following this are the image
+bytes for that mipmap level.
+
+Note: All data is stored in little-Endian (Intel) byte order.
+"""
+
+from __future__ import annotations
+
+import struct
+from enum import IntEnum
+from io import BytesIO
+
+from . import Image, ImageFile
+
+MAGIC = b"FTEX"
+
+
+class Format(IntEnum):
+    DXT1 = 0
+    UNCOMPRESSED = 1
+
+
+class FtexImageFile(ImageFile.ImageFile):
+    format = "FTEX"
+    format_description = "Texture File Format (IW2:EOC)"
+
+    def _open(self) -> None:
+        if not _accept(self.fp.read(4)):
+            msg = "not an FTEX file"
+            raise SyntaxError(msg)
+        struct.unpack("<i", self.fp.read(4))  # version
+        self._size = struct.unpack("<2i", self.fp.read(8))
+        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))
+
+        self._mode = "RGB"
+
+        # Only support single-format files.
+        # I don't know of any multi-format file.
+        assert format_count == 1
+
+        format, where = struct.unpack("<2i", self.fp.read(8))
+        self.fp.seek(where)
+        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))
+
+        data = self.fp.read(mipmap_size)
+
+        if format == Format.DXT1:
+            self._mode = "RGBA"
+            self.tile = [ImageFile._Tile("bcn", (0, 0) + self.size, 0, (1,))]
+        elif format == Format.UNCOMPRESSED:
+            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
+        else:
+            msg = f"Invalid texture compression format: {repr(format)}"
+            raise ValueError(msg)
+
+        self.fp.close()
+        self.fp = BytesIO(data)
+
+    def load_seek(self, pos: int) -> None:
+        pass
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix[:4] == MAGIC
+
+
+Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
+Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
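Because the FTEX docstring pins down the container layout exactly, a valid minimal file can be assembled by hand, which is handy for exercising the loader. The sketch below builds a one-pixel uncompressed RGB texture following the `{header}`, `{format_directory}`, `{data}` structure documented above:

```python
import struct
from io import BytesIO

from PIL import Image

# {header}: magic, then version, width, height, mipmap_count, format_count
header = b"FTEX" + struct.pack("<5i", 1, 1, 1, 1, 1)
# {format_directory}: one entry, format 1 (uncompressed), data at offset 32
directory = struct.pack("<2i", 1, 32)
# {data}: mipmap_size (3 * width * height) followed by the RGB bytes
data = struct.pack("<i", 3) + b"\xff\x00\x00"

im = Image.open(BytesIO(header + directory + data))
print(im.mode, im.size)     # RGB (1, 1)
print(im.getpixel((0, 0)))  # (255, 0, 0)
```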