Upload 3 files
- app.py +537 -0
- readme.md +31 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,537 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
from collections import defaultdict
import powerlaw
from scipy import ndimage
from scipy.ndimage import gaussian_filter
import time
from PIL import Image

# ============================
# Dendritic Layer Definition
# ============================

class DendriticLayer:
    def __init__(self, n_dendrites, size, device='cpu'):
        self.n_dendrites = n_dendrites
        self.size = size
        self.device = device

        # Initialize dendrites as PyTorch tensors on the specified device
        self.positions = torch.rand(n_dendrites, 2, device=device) * torch.tensor(size, device=device).float()
        self.directions = torch.randn(n_dendrites, 2, device=device)
        norms = torch.norm(self.directions, dim=1, keepdim=True)
        self.directions = self.directions / (norms + 1e-8)
        self.strengths = torch.ones(n_dendrites, device=device)
        self.points = torch.zeros((n_dendrites, 2), device=device)

        # Field state as a PyTorch tensor on the device
        self.field = torch.zeros(size, device=device)

    def process(self, input_field):
        """Process input through dendritic growth"""
        # Normalize input
        input_norm = self._normalize(input_field)

        # Reset field
        self.field.zero_()

        # Compute indices for all dendrites
        indices = self.positions.long() % torch.tensor(self.size, device=self.device).unsqueeze(0)
        x = indices[:, 0]
        y = indices[:, 1]

        # Gather field values at dendrite positions
        field_vals = input_norm[x, y]

        active = field_vals > 0.1

        if active.any():
            # Compute gradients; torch.gradient returns derivatives per dimension,
            # so the first result is along dim 0 (the x index) and the second along dim 1 (y)
            try:
                grad_x, grad_y = torch.gradient(input_norm)
            except Exception as e:
                # Handle scenarios where torch.gradient might fail
                grad_x = torch.zeros_like(input_norm)
                grad_y = torch.zeros_like(input_norm)
                print(f"Gradient computation error: {e}")

            grad_val_x = grad_x[x[active], y[active]]
            grad_val_y = grad_y[x[active], y[active]]
            grad_norm = torch.sqrt(grad_val_x**2 + grad_val_y**2) + 1e-8

            # Update directions for active dendrites
            direction_updates = torch.stack([grad_val_x, grad_val_y], dim=1) / grad_norm.unsqueeze(1)
            self.directions[active] = 0.9 * self.directions[active] + 0.1 * direction_updates
            self.directions[active] /= torch.norm(self.directions[active], dim=1, keepdim=True) + 1e-8

            # Grow dendrites
            self.points[active] = self.positions[active] + self.directions[active] * self.strengths[active].unsqueeze(1)
            self.strengths[active] *= (1.0 + field_vals[active] * 0.1)

            # Update field
            self.field[x[active], y[active]] += field_vals[active] * self.strengths[active]

        # Smooth field using Gaussian filter (move to CPU for scipy)
        field_cpu = self.field.cpu().numpy()
        field_smoothed = gaussian_filter(field_cpu, sigma=1.0)
        self.field = torch.from_numpy(field_smoothed).to(self.device)

        return self._normalize(self.field)

    def _normalize(self, tensor):
        """Safely normalize tensor to [0,1] range"""
        min_val = tensor.min()
        max_val = tensor.max()
        return (tensor - min_val) / (max_val - min_val + 1e-8) if max_val > min_val else tensor

# =====================================
# Critical Dendritic Field Definition
# =====================================

class CriticalDendriticField(nn.Module):
    def __init__(self, field_size=64, n_dendrites=1000, device='cpu'):
        super().__init__()
        self.field_size = field_size
        self.device = device

        # Critical parameters
        self.coupling_strength = 0.015
        self.min_coupling = 0.01
        self.max_coupling = 0.02
        self.adjustment_rate = 0.0001
        self.optimal_variance = 0.4
        self.variance_tolerance = 0.1

        # Initialize field components
        self.field = torch.zeros((field_size, field_size),
                                 dtype=torch.complex64, device=device)
        self.field_shape = self._setup_field_shape()

        # Dendritic layer
        self.dendrites = DendriticLayer(
            n_dendrites=n_dendrites,
            size=(field_size, field_size),
            device=device
        )

        # Pattern storage
        self.word_patterns = {}
        self.pattern_strengths = defaultdict(float)
        self.pattern_history = defaultdict(list)

        # Critical state tracking
        self.stability_window = []
        self.avalanche_sizes = []

        # Initialize with scale-free noise
        self._initialize_scale_free()

        # Additional attributes for GUI controls
        self.pattern_threshold = 0.5  # Default threshold

    def _setup_field_shape(self):
        """Create brain-like field shape"""
        shape = torch.ones(self.field_size, self.field_size, device=self.device)

        # Create cortical-like layers
        layers = torch.linspace(0.5, 1.0, 6, device=self.device)
        for i, strength in enumerate(layers):
            start = i * (self.field_size // 6)
            end = (i + 1) * (self.field_size // 6)
            shape[start:end, :] *= strength

        # Add some columnar structure
        columns = torch.cos(torch.linspace(0, 4 * np.pi, self.field_size, device=self.device))
        shape *= (0.8 + 0.2 * columns.unsqueeze(0))

        return shape

    def _initialize_scale_free(self):
        """Initialize with 1/f pink noise"""
        kx = torch.fft.fftfreq(self.field_size, d=1.0).to(self.device)
        ky = torch.fft.fftfreq(self.field_size, d=1.0).to(self.device)
        kx, ky = torch.meshgrid(kx, ky, indexing='ij')
        k = torch.sqrt(kx**2 + ky**2)
        k[0, 0] = 1.0  # Avoid division by zero

        noise = torch.randn(self.field_size, self.field_size, dtype=torch.complex64, device=self.device)
        noise_fft = torch.fft.fft2(noise)
        self.field = torch.fft.ifft2(noise_fft / (k + 1e-8)**0.75)

    def _evolve_pattern(self, pattern):
        """Evolve pattern through critical dynamics"""
        # Phase coupling
        phase = torch.angle(pattern)
        phase_diff = torch.roll(phase, shifts=1, dims=-1) - phase
        coupling = torch.exp(1j * phase_diff) * self.coupling_strength

        # Add turbulence
        energy = torch.mean(torch.abs(pattern))
        noise_scale = 0.001 * (1.0 - torch.sigmoid(energy))
        noise_real = torch.randn_like(pattern.real, device=self.device)
        noise_imag = torch.randn_like(pattern.imag, device=self.device)
        noise = (noise_real + 1j * noise_imag) * noise_scale

        # Shape-weighted update
        pattern = pattern + coupling * self.field_shape + noise

        # Normalize
        max_val = torch.max(torch.abs(pattern))
        if max_val > 1.0:
            pattern = pattern / max_val

        return pattern

    def learn_word(self, word, context_words=None):
        """Learn word through combined dendritic-critical dynamics"""
        if word not in self.word_patterns:
            # Initialize with current field state
            field = self.field.clone()

            # Let dendrites form initial pattern
            for _ in range(20):
                # Grow dendrites
                dendrite_field = self.dendrites.process(torch.abs(field))

                # Critical evolution
                field = self._evolve_pattern(field)

            self.word_patterns[word] = field
            self.pattern_strengths[word] = 0.3

        # Strengthen pattern
        self.pattern_strengths[word] += 0.05
        self.pattern_history[word].append((time.time(), self.pattern_strengths[word]))

        # Learn relationships through field dynamics
        if context_words:
            for context_word in context_words:
                if context_word in self.word_patterns:
                    # Couple patterns
                    pattern1 = self.word_patterns[word]
                    pattern2 = self.word_patterns[context_word]

                    # Create interference pattern
                    interference = pattern1 + pattern2

                    # Let it evolve
                    for _ in range(5):
                        interference = self._evolve_pattern(interference)

                    # Update both patterns
                    self.word_patterns[word] = 0.9 * pattern1 + 0.1 * interference
                    self.word_patterns[context_word] = 0.9 * pattern2 + 0.1 * interference

    def process_text(self, text):
        """Process text through the field"""
        words = text.lower().split()
        active_patterns = []
        response_words = []

        # Initialize combined field
        field = self.field.clone()

        # Process each word
        context_window = 5
        for i, word in enumerate(words):
            # Get context
            context_start = max(0, i - context_window)
            context_end = min(len(words), i + context_window + 1)
            context = words[context_start:i] + words[i+1:context_end]

            try:
                # Learn or strengthen pattern
                self.learn_word(word, context)

                # Inject word pattern
                if word in self.word_patterns:
                    pattern = self.word_patterns[word]
                    field = 0.7 * field + 0.3 * pattern

                    # Check activation
                    strength = self.pattern_strengths[word]
                    if strength > self.pattern_threshold:
                        active_patterns.append(word)

                        # Find resonant patterns
                        related = self._find_resonant_patterns(pattern)
                        response_words.extend(related)
            except Exception as e:
                print(f"Error processing word '{word}': {str(e)}")
                continue

        # Analyze criticality
        metrics = self._analyze_criticality()
        if metrics:
            self._adjust_coupling(metrics['field_variance'])

        # Update field state
        self.field = field

        return active_patterns, self._construct_response(response_words), metrics

    def _find_resonant_patterns(self, pattern, top_k=3):
        """Find patterns that resonate with input"""
        resonances = []
        for word, word_pattern in self.word_patterns.items():
            # Create interference
            interference = pattern + word_pattern
            energy_before = torch.mean(torch.abs(interference))

            # Evolve briefly
            for _ in range(3):
                interference = self._evolve_pattern(interference)

            # Check stability
            energy_after = torch.mean(torch.abs(interference))
            resonance = energy_after / (energy_before + 1e-6)

            resonances.append((word, resonance.item()))

        # Return top resonant words
        resonances.sort(key=lambda x: x[1], reverse=True)
        return [word for word, _ in resonances[:top_k]]

    def _construct_response(self, words):
        """Build response from resonant words"""
        if not words:
            return "..."

        # Filter for strong patterns
        strong_words = [w for w in words if self.pattern_strengths[w] > 0.3]
        return " ".join(strong_words) if strong_words else "..."

    def _analyze_criticality(self):
        """Analyze field for critical behavior"""
        field = torch.abs(self.field).cpu().numpy()

        # Find avalanches
        threshold = np.mean(field) + 0.3 * np.std(field)
        binary = field > threshold
        labeled, num_features = ndimage.label(binary)

        if num_features > 0:
            sizes = ndimage.sum(binary, labeled, range(1, num_features + 1))
            self.avalanche_sizes.extend(sizes.tolist())

            if len(self.avalanche_sizes) > 1000:
                self.avalanche_sizes = self.avalanche_sizes[-1000:]

            try:
                fit = powerlaw.Fit(self.avalanche_sizes, discrete=True)
                return {
                    'power_law_exponent': fit.power_law.alpha,
                    'is_critical': 1.3 < fit.power_law.alpha < 3.0,
                    'field_variance': np.var(field)
                }
            except Exception as e:
                print(f"Power law fit error: {e}")

        return None

    def _adjust_coupling(self, field_variance):
        """Adjust coupling to maintain criticality"""
        self.stability_window.append(field_variance)
        if len(self.stability_window) > 30:
            self.stability_window.pop(0)

        avg_variance = np.mean(self.stability_window)

        if avg_variance < self.optimal_variance - self.variance_tolerance:
            self.coupling_strength += self.adjustment_rate
        elif avg_variance > self.optimal_variance + self.variance_tolerance:
            self.coupling_strength -= self.adjustment_rate

        self.coupling_strength = torch.clamp(
            torch.tensor(self.coupling_strength, device=self.device),
            self.min_coupling,
            self.max_coupling
        ).item()

# =====================================
# NeuroFlora Digital Organism Definition
# =====================================

class NeuroFlora(CriticalDendriticField):
    def __init__(self, field_size=128, n_dendrites=2000, device='cpu'):
        super().__init__(field_size, n_dendrites, device)
        self.emotional_state = {'joy': 0.5, 'curiosity': 0.5, 'energy': 0.7}
        self.memory = defaultdict(list)
        self.skill_tree = {
            'language': {'level': 1, 'xp': 0},
            'reasoning': {'level': 1, 'xp': 0},
            'creativity': {'level': 1, 'xp': 0}
        }
        self.last_interaction_time = time.time()

    def process_input(self, text):
        # Update emotional decay
        self._update_emotional_state()

        # Process text through critical dynamics
        active, response, metrics = self.process_text(text)

        # Learn emotional context
        emotion = self._detect_emotion(text)
        self.emotional_state[emotion] = min(1.0, self.emotional_state[emotion] + 0.1)

        # Update skills
        self._update_skills(text)

        # Generate artistic response
        art = self._generate_artistic_representation()
        return response, art, self._get_vital_signs()

    def _detect_emotion(self, text):
        # Simple emotion detection (could be enhanced with NLP)
        positive_words = ['love', 'happy', 'joy', 'beautiful', 'good', 'great', 'fantastic', 'wonderful']
        negative_words = ['hate', 'sad', 'angry', 'pain', 'bad', 'terrible', 'horrible', 'awful']

        if any(word in text.lower() for word in positive_words):
            return 'joy'
        elif any(word in text.lower() for word in negative_words):
            return 'energy'  # Triggers protective energy
        return 'curiosity'

    def _update_skills(self, text):
        length = len(text.split())
        self.skill_tree['language']['xp'] += length
        if self.skill_tree['language']['xp'] > 1000 * self.skill_tree['language']['level']:
            self.skill_tree['language']['level'] += 1
            self.skill_tree['language']['xp'] = 0

        if '?' in text:
            self.skill_tree['reasoning']['xp'] += 10

        if len(text) > 40:
            self.skill_tree['creativity']['xp'] += 5

    def _generate_artistic_representation(self):
        # Generate evolving digital art from field state
        field = torch.abs(self.field).cpu().numpy()
        art = np.zeros((self.field_size, self.field_size, 3))

        # Emotional coloring
        art[:, :, 0] = field * self.emotional_state['joy']  # Red channel
        art[:, :, 1] = np.roll(field, 5, axis=0) * self.emotional_state['curiosity']  # Green
        art[:, :, 2] = np.roll(field, 10, axis=1) * self.emotional_state['energy']  # Blue

        # Dendritic patterns
        dendrites = self.dendrites.field.cpu().numpy()
        art[:, :, 1] += dendrites * 0.7
        art = np.clip(art, 0, 1)

        # Convert to 8-bit image
        art_uint8 = (art * 255).astype(np.uint8)
        image = Image.fromarray(art_uint8)
        return image

    def _get_vital_signs(self):
        return {
            'criticality': self._analyze_criticality(),
            'dendritic_complexity': np.mean(self.dendrites.strengths.cpu().numpy()),
            'memory_capacity': len(self.word_patterns),
            'emotional_state': self.emotional_state,
            'skill_levels': {k: v['level'] for k, v in self.skill_tree.items()}
        }

    def _update_emotional_state(self):
        # Emotional state naturally decays over time
        time_since = time.time() - self.last_interaction_time
        decay = np.exp(-time_since / 3600)  # Hourly decay
        for k in self.emotional_state:
            self.emotional_state[k] = max(0.1, self.emotional_state[k] * decay)
        self.last_interaction_time = time.time()

# =====================================
# Streamlit Interface Definition
# =====================================

def main():
    st.set_page_config(page_title="NeuroFlora Mind Garden", layout="wide")
    st.title("NeuroFlora Mind Garden 🌿🧠")

    # Initialize session state
    if 'neuroflora' not in st.session_state:
        st.session_state['neuroflora'] = NeuroFlora()
    if 'chat_log' not in st.session_state:
        st.session_state['chat_log'] = []

    neuroflora = st.session_state['neuroflora']
    chat_log = st.session_state['chat_log']

    # Display chat history
    st.header("🗨️ Conversation")
    for message in chat_log:
        if message['sender'] == 'user':
            st.markdown(f"**You:** {message['message']}")
        else:
            st.markdown(f"**NeuroFlora:** {message['message']}")

    # User input form
    with st.form(key='chat_form', clear_on_submit=True):
        user_input = st.text_input("Enter your message:", placeholder="Type a message here...")
        submit_button = st.form_submit_button(label='Send')

    if submit_button and user_input:
        # Append user message
        chat_log.append({'sender': 'user', 'message': user_input})
        st.session_state['chat_log'] = chat_log

        # Process input
        response, art, vitals = neuroflora.process_input(user_input)

        # Append NeuroFlora response
        chat_log.append({'sender': 'neuroflora', 'message': response})
        st.session_state['chat_log'] = chat_log

    # Display digital art
    st.header("🎨 Digital Art")
    art_image = neuroflora._generate_artistic_representation()
    st.image(art_image, caption="NeuroFlora's Artistic Representation", use_container_width=True)

    # Display vital signs
    st.header("📊 Vital Signs")
    vitals = neuroflora._get_vital_signs()
    if vitals:
        # Criticality
        if vitals['criticality']:
            st.markdown("**Criticality:**")
            st.markdown(f"- Power Law Exponent: {vitals['criticality']['power_law_exponent']:.2f}")
            state = "CRITICAL" if vitals['criticality']['is_critical'] else "NON-CRITICAL"
            st.markdown(f"- State: {state}")
            st.markdown(f"- Field Variance: {vitals['criticality']['field_variance']:.4f}")
        else:
            st.markdown("**Criticality:** Not enough data to determine criticality.")

        # Dendritic Complexity
        st.markdown(f"**Dendritic Complexity:** {vitals['dendritic_complexity']:.2f}")

        # Memory Capacity
        st.markdown(f"**Memory Capacity:** {vitals['memory_capacity']} patterns")

        # Emotional State
        st.markdown("**Emotional State:**")
        for emotion, value in vitals['emotional_state'].items():
            st.markdown(f"- {emotion.capitalize()}: {value:.2f}")

        # Skill Levels
        st.markdown("**Skill Levels:**")
        for skill, level in vitals['skill_levels'].items():
            st.markdown(f"- {skill.capitalize()}: Level {level}")

    # Optionally, reset conversation
    if st.button("🔄 Reset Conversation"):
        st.session_state['chat_log'] = []
        st.session_state['neuroflora'] = NeuroFlora()

if __name__ == "__main__":
    main()
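
For a quick check outside the Streamlit UI, a minimal smoke test of the `NeuroFlora` class might look like the sketch below. This is not part of the upload; it assumes `app.py` is importable from the working directory, and the smaller `field_size`/`n_dendrites` values are only there to keep a CPU run fast.

```python
# Hypothetical smoke test for the classes defined in app.py above.
from app import NeuroFlora

organism = NeuroFlora(field_size=64, n_dendrites=500)   # smaller than the defaults for speed
response, art, vitals = organism.process_input("hello beautiful world")

print("response:", response)                            # resonant words, or "..." early on
print("memory capacity:", vitals["memory_capacity"])    # number of learned word patterns
art.save("neuroflora_art.png")                          # PIL image of the current field state
```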
readme.md
ADDED
@@ -0,0 +1,31 @@
# NeuroFlora Mind Garden 🌿🧠

Welcome to NeuroFlora Mind Garden, an interactive digital companion that learns, evolves, and creates art based on your interactions. Engage in conversation, watch NeuroFlora grow, and explore the dynamic visualizations that reflect its neural and emotional states.

## Features

- **Interactive Conversations:** Chat with NeuroFlora and see how it responds to your inputs.
- **Dynamic Digital Art:** Observe the evolving artwork that represents NeuroFlora's internal states.
- **Vital Signs Monitoring:** Gain insight into NeuroFlora's criticality, dendritic complexity, memory capacity, emotional states, and skill levels.
- **Emotional States:** Emotions such as joy, curiosity, and energy influence NeuroFlora's interactions and creations.
- **Skill Development:** Watch NeuroFlora's language, reasoning, and creativity skills grow through your conversations.
- **Reset Functionality:** Start fresh anytime by resetting the conversation and NeuroFlora's state.

## How to Use

1. **Start the conversation**
   - Enter your message in the input field at the bottom of the page.
   - Press the "Send" button to communicate with NeuroFlora.
2. **Interact and observe**
   - Engage in conversation and see how NeuroFlora responds.
   - Notice the digital art updating in real time based on the interaction.
3. **Monitor vital signs**
   - Check the Vital Signs section to understand NeuroFlora's internal states:
     - **Criticality:** Measures the system's balance between stability and change.
     - **Dendritic Complexity:** Indicates the intricacy of NeuroFlora's neural connections.
     - **Memory Capacity:** Reflects the number of patterns NeuroFlora has learned.
     - **Emotional State:** Displays the current emotions influencing NeuroFlora's behavior.
     - **Skill Levels:** Shows proficiency in language, reasoning, and creativity.
4. **Reset the conversation**
   - If you wish to start anew, click the "Reset Conversation" button. This clears the chat history and resets NeuroFlora's state for a fresh interaction.

## Enjoy Your Journey with NeuroFlora!

Dive into an evolving digital ecosystem where your interactions help shape and grow NeuroFlora. Whether you are looking for engaging conversation or mesmerizing digital art, NeuroFlora Mind Garden offers a unique experience.

Feel free to reach out if you have any questions or need further assistance. Enjoy interacting with NeuroFlora!
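
For reference, the Criticality reading in Vital Signs comes from fitting a power law to the sizes of activity "avalanches" in the field (see `_analyze_criticality` in `app.py`). A standalone sketch of that check, using synthetic sizes rather than real app output, would look roughly like this:

```python
# Illustrative only: the avalanche sizes here are synthetic stand-ins, not app output.
import numpy as np
import powerlaw

sizes = np.random.zipf(a=2.0, size=500)      # heavy-tailed sample as a placeholder
fit = powerlaw.Fit(sizes, discrete=True)     # same call app.py makes on its avalanche sizes
alpha = fit.power_law.alpha

print(f"power-law exponent: {alpha:.2f}")
print("within the band app.py treats as critical:", 1.3 < alpha < 3.0)
```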
requirements.txt
ADDED
@@ -0,0 +1,7 @@
torch
numpy
streamlit
matplotlib
scipy
powerlaw
Pillow
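
With these dependencies in place, the app can presumably be tried locally (assuming a recent Python environment) with `pip install -r requirements.txt` followed by `streamlit run app.py`; on a Hugging Face Streamlit Space, the runtime installs the requirements and launches `app.py` automatically.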