Fixes re: zognia's testing #2

Merged
skeh merged 11 commits from feat/zogpog into main 2025-01-24 08:28:14 +00:00
16 changed files with 203 additions and 124 deletions

View file

@@ -27,6 +27,8 @@ class TwitchProcess(ChatProcess):
                  botname=None, emote_res=4.0,
                  # EventSub options
                  eventsub=True, eventsub_host='wss://ovtk.skeh.site/twitch',
+                 # BTTV integration
+                 bttv=False,
                  # Inheritance boilerplate
                  **kwargs):
         super().__init__(*args, **kwargs)
@@ -61,7 +63,7 @@ class TwitchProcess(ChatProcess):
         self.eventsub = TwitchEventSub(self.api, eventsub_host)
         self._sources.append(self.eventsub)

-        self.bttv = BTTV(target_data['user']['id'])
+        self.bttv = BTTV(target_data['user']['id']) if bttv else None

     def loop(self, next_state):
@@ -98,7 +100,7 @@ class TwitchProcess(ChatProcess):
         for event in chain(*(source.read(0.1) for source in self._sources)):
             # Retarget event
             event.via = self._name
-            if isinstance(event, Message):
+            if self.bttv and isinstance(event, Message):
                 event = self.bttv.hydrate(event)
             self.publish(event)
         return 0

View file

@@ -25,71 +25,62 @@ os.close(old_stderr)

 logger = logging.getLogger(__name__)

-def check_rate(index, channels, rate):
-    try:
-        return pyaudio.is_format_supported(rate,
-                                           output_channels=channels,
-                                           output_device=index,
-                                           output_format=pya.paFloat32)
-    except ValueError:
-        return False
-
-alt_rates = [44100, 48000]
-
 class Clip:
-    def __init__(self, path, output_index, buffer_length=2048, speed=1, force_stereo=True):
-        _raw, native_rate = librosa.load(path, sr=None, dtype='float32', mono=False)
-        self._channels = _raw.shape[0] if len(_raw.shape) == 2 else 1
-        if force_stereo and self._channels == 1:
-            _raw = np.resize(_raw, (2,*_raw.shape))
-            self._channels = 2
-        target_samplerate = native_rate
-        if not check_rate(output_index, self._channels , native_rate):
-            try:
-                target_samplerate = next((rate for rate in alt_rates if check_rate(output_index, self._channels , rate)))
-            except StopIteration:
-                logger.warn('Target audio device does not claim to support any sample rates! Attempting playback at native rate')
-        self._samplerate = target_samplerate
-        if native_rate != self._samplerate:
-            _raw = librosa.resample(_raw, native_rate, self._samplerate, fix=True, scale=True)
-        self._raw = np.ascontiguousarray(self._stereo_transpose(_raw), dtype='float32')
+    def __init__(self, path, samplerate=None, speed=1, keep_pitch=True, force_stereo=True):
+        self.path = path
+        raw, native_rate = librosa.load(self.path, sr=None, dtype='float32', mono=False)
+        self.channels = raw.shape[0] if len(raw.shape) == 2 else 1
+        if force_stereo and self.channels == 1:
+            raw = np.resize(raw, (2,*raw.shape))
+            self.channels = 2
+        self.samplerate = samplerate or native_rate
+        if native_rate != self.samplerate:
+            raw = librosa.resample(raw, native_rate, self.samplerate, fix=True, scale=True)
+        self.raw = np.ascontiguousarray(self._stereo_transpose(raw), dtype='float32')
         if speed != 1:
-            self.stretch(speed)
+            self.stretch(speed, keep_pitch=keep_pitch)

-        self._pos = 0
-        self._playing = False
+    @property
+    def length(self):
+        return self.raw.shape[0] / self.samplerate
+
+    def _stereo_transpose(self, ndata):
+        return ndata if self.channels == 1 else ndata.T
+
+    def stretch(self, speed, keep_pitch=True):
+        if keep_pitch:
+            stretched = tsm.wsola(self._stereo_transpose(self.raw), speed)
+        else:
+            stretched = librosa.resample(self._stereo_transpose(self.raw), self.samplerate * (1 / speed), self.samplerate, fix=False, scale=True)
+        self.raw = np.ascontiguousarray(self._stereo_transpose(stretched), dtype='float32')
+
+    def save(self, filename):
+        soundfile.write(filename, self._stereo_transpose(self.raw), self.samplerate)
+
+
+class Stream:
+    def __init__(self, clip, output_index, buffer_length=4096):
+        self.clip = clip
+        self.pos = 0
+        self.playing = False

         self._end_event = AioEvent()
         self._stream = pyaudio.open(
             output_device_index=output_index,
             format=pya.paFloat32,
-            channels=self._channels,
-            rate=self._samplerate,
+            channels=self.clip.channels,
+            rate=self.clip.samplerate,
             frames_per_buffer=buffer_length,
             output=True,
             stream_callback=self._read_callback,
             start=False)

-    @property
-    def length(self):
-        return self._raw.shape[0] / self._samplerate
-
-    def _stereo_transpose(self, ndata):
-        return ndata if self._channels == 1 else ndata.T
-
-    def stretch(self, speed):
-        stretched = tsm.wsola(self._stereo_transpose(self._raw), speed)
-        self._raw = np.ascontiguousarray(self._stereo_transpose(stretched), dtype='float32')
-
-    def save(self, filename):
-        soundfile.write(filename, self._stereo_transpose(self._raw), self._samplerate)
-
     def _play(self):
-        self._playing = True
-        self._pos = 0
+        self.playing = True
+        self.pos = 0
         if not self._stream.is_active():
             self._stream.start_stream()
@@ -97,38 +88,49 @@ class Clip:
     def play(self):
         self._end_event.clear()
         self._play()
-        self._end_event.wait(timeout=self.length)
+        self._end_event.wait(timeout=self.clip.length)

     async def aplay(self):
         self._end_event.clear()
         self._play()
         try:
-            await self._end_event.coro_wait(timeout=self.length)
+            await self._end_event.coro_wait(timeout=self.clip.length)
         except asyncio.CancelledError:
-            self._playing = False
+            self.playing = False
             self._stream.stop_stream()

     def close(self):
+        self._stream.stop_stream()
         self._stream.close()

     def _read_callback(self, in_data, frame_count, time_info, status):
-        if self._channels > 1:
-            buffer = np.zeros((frame_count, self._channels), dtype='float32')
+        if self.clip.channels > 1:
+            buffer = np.zeros((frame_count, self.clip.channels), dtype='float32')
         else:
             buffer = np.zeros((frame_count,), dtype='float32')
-        if self._playing:
-            newpos = self._pos + frame_count
-            clip_chunk = self._raw[self._pos:newpos]
-            self._pos = newpos
+        if self.playing:
+            newpos = self.pos + frame_count
+            clip_chunk = self.clip.raw[self.pos:newpos]
+            self.pos = newpos
             buffer[0:clip_chunk.shape[0]] = clip_chunk
-            if self._pos >= self._raw.shape[0]:
-                self._playing = False
+            if self.pos >= self.clip.raw.shape[0]:
+                self.playing = False
                 self._end_event.set()
         return buffer, pya.paContinue

+    @staticmethod
+    def check_rate(index, channels, rate):
+        try:
+            return pyaudio.is_format_supported(rate,
+                                               output_channels=channels,
+                                               output_device=index,
+                                               output_format=pya.paFloat32)
+        except ValueError:
+            return False
+
     @staticmethod
     def find_output_index(output):
         if output is None:
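
Net effect of this file: Clip now only decodes, resamples, and time-stretches audio, while the new Stream owns the PyAudio handle, playback position, and callback. A rough sketch of how the two pieces are meant to be combined (the file name and the default-device lookup are illustrative assumptions, not taken from this diff):

    clip = Clip('alert.wav', samplerate=48000, speed=1.25, keep_pitch=True)

    # One Clip can back several Streams; each Stream opens its own PyAudio output.
    output_index = Stream.find_output_index(None)  # assumed: None falls back to the default device
    stream = Stream(clip, output_index, buffer_length=4096)

    stream.play()   # blocking wait; use `await stream.aplay()` inside a running event loop
    stream.close()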

View file

@@ -219,9 +219,9 @@ class MainProcess:
                 plugin_module = import_or_reload_mod(module_name,
                                                      default_package='ovtk_audiencekit.plugins',
                                                      external=False)
-                plugin = plugin_module.Plugin(self.chat_processes, self.event_queue, plugin_name, global_ctx,
-                                              **node.props, **secrets_for_mod, _children=node.nodes)
+                plugin = plugin_module.Plugin(self.chat_processes, self.event_queue, plugin_name, global_ctx)
                 self.plugins[plugin_name] = plugin
+                await plugin._setup(*node.args[1:], **node.props, **secrets_for_mod)
                 # Register UI with webserver
                 self.webserver.register_blueprint(plugin.blueprint)
             except Exception as e:

View file

@@ -86,6 +86,18 @@ class PluginBase(ABC):
                 raise e
             raise PluginError(self._name, str(e)) from e

+    async def _setup(self, *args, **kwargs):
+        try:
+            res = self.setup(*args, **kwargs)
+            if asyncio.iscoroutinefunction(self.setup):
+                return await res
+            else:
+                return res
+        except Exception as e:
+            if isinstance(e, KeyboardInterrupt):
+                raise e
+            raise PluginError(self._name, str(e)) from e
+
     # Base class helpers
     def broadcast(self, event):
         """Send event to every active chat"""
@@ -123,6 +135,10 @@ class PluginBase(ABC):
         self._event_queue.put_nowait(event)

     # User-defined
+    async def setup(self, *args, **kwargs):
+        """Called when plugin is being loaded."""
+        pass
+
     def close(self):
         """Called when plugin is about to be unloaded. Use this to safely close any resouces if needed"""
         pass
@@ -143,7 +159,7 @@ class PluginBase(ABC):
         pass

     @abstractmethod
-    async def run(self, _children=None, _ctx={}, **kwargs):
+    async def run(self, *args, _children=None, _ctx={}, **kwargs):
         """
         Run plugin action, either due to a definition in the config, or due to another plugin
         """

View file

@@ -1,3 +1,3 @@
 from .WebsocketServerProcess import WebsocketServerProcess
 from .MainProcess import MainProcess
-from .Clip import Clip
+from .Audio import Clip, Stream

View file

@@ -1,27 +1,80 @@
 import asyncio
+from collections import deque
+
+import maya

 from ovtk_audiencekit.plugins import PluginBase
-from ovtk_audiencekit.core import Clip
+from ovtk_audiencekit.core import Clip, Stream


 class AudioAlert(PluginBase):
-    def __init__(self, *args, output=None, buffer_length=2048, cutoff_prevention_buffers=None, **kwargs):
-        super().__init__(*args, **kwargs)
-        if cutoff_prevention_buffers:
-            self.logger.info('`cutoff_prevention_buffers` are depricated')
+    def setup(self, output=None, timeout_min=1, sample_rate=None, buffer_length=4096, force_stereo=True):
+        self.force_stereo = force_stereo
+        self.timeout_min = timeout_min
+        self.clips = {}
+        self.streams = {}
+        self.tasks = set()
+        self.buffer_length = int(buffer_length)
+        self.output_index = Stream.find_output_index(output)
+        if sample_rate is None:
+            try:
+                sample_rate = next((rate for rate in [44100, 48000] if Stream.check_rate(self.output_index, 1, rate)))
+            except StopIteration:
+                self.logger.warn('Target audio device does not claim to support common sample rates! Attempting playback at native rate of audio')

-        self.sounds = {}
-        self._buffer_length = int(buffer_length)
-        self._output_index = Clip.find_output_index(output)
+        self._cleanup_task = asyncio.create_task(self._cleanup())

-    def run(self, path, speed=1, immediate=True, **kwargs):
-        if self.sounds.get(path) is None:
-            self.sounds[path] = Clip(path,
-                                     self._output_index,
-                                     buffer_length=self._buffer_length,
-                                     speed=speed)
-        sound = self.sounds.get(path)
+    def run(self, path, speed=1, keep_pitch=False, immediate=True, poly=1, **kwargs):
+        poly = int(poly)
+        key = f'{path}@{speed}{"X" if keep_pitch else "x"}'
+
+        clip = self.clips.get(key, [None, None])[0]
+        if clip is None:
+            clip = Clip(path, speed=speed, keep_pitch=keep_pitch, force_stereo=self.force_stereo)
+            self.clips[key] = [clip, maya.now()]
+        else:
+            self.clips[key][1] = maya.now()
+
+        stream_dq = self.streams.get(key, None)
+        if stream_dq is None:
+            stream_dq = deque(maxlen=poly)
+            self.streams[key] = stream_dq
+        if stream_dq.maxlen != poly:
+            self.logger.warn('Cannot change poly while streams are active!')
+        if len(stream_dq) == stream_dq.maxlen:
+            stream_dq.rotate(1)
+            stream = stream_dq[0]
+        else:
+            stream = Stream(clip, self.output_index,
+                            buffer_length=self.buffer_length)
+            stream_dq.append(stream)
+
         if immediate:
-            asyncio.create_task(sound.aplay())
+            task = asyncio.create_task(stream.aplay())
+            task.add_done_callback(self.tasks.remove)
+            self.tasks.add(task)
         else:
-            sound.play()
+            stream.play()
+
+    def close(self):
+        self._cleanup_task.cancel()
+        for task in self.tasks:
+            task.cancel()
+        for stream_dq in self.streams.values():
+            for stream in stream_dq:
+                stream.close()
+
+    async def _cleanup(self):
+        while True:
+            await asyncio.sleep(60)
+            now = maya.now()
+            for key, [clip, last_used] in list(self.clips.items()):
+                if now >= last_used.add(minutes=self.timeout_min, seconds=clip.length):
+                    self.logger.debug(f'Dropping {key}')
+                    streams = self.streams.get(key, [])
+                    for stream in streams:
+                        stream.close()
+                    del self.streams[key]
+                    del self.clips[key]
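
The poly option caps how many overlapping voices a single clip key may use: once the deque is full, rotate(1) cycles it and slot 0 is re-triggered instead of opening another Stream. A standalone illustration of that deque behavior:

    from collections import deque

    voices = deque(['s1', 's2', 's3'], maxlen=3)  # poly=3, all voices already allocated
    voices.rotate(1)
    print(voices[0])  # 's3': an existing Stream is restarted for the new alert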

View file

@@ -35,8 +35,7 @@ owomap = {
 }

 class JailPlugin(PluginBase):
-    def __init__(self, *args, min_level='vip', persist=True, **kwargs):
-        super().__init__(*args, **kwargs)
+    def setup(self, min_level='vip', persist=True):
         self.persist = persist
         self._cache = os.path.join(CACHE_DIR, 'Jail', 'sentences')
         os.makedirs(os.path.dirname(self._cache), exist_ok=True)

View file

@@ -6,14 +6,10 @@ from ovtk_audiencekit.plugins import PluginBase


 class OBSWSPlugin(PluginBase):
-    def __init__(self, *args, password=None, uri='ws://localhost:4455', **kwargs):
-        super().__init__(*args, **kwargs)
+    async def setup(self, password=None, uri='ws://localhost:4455'):
         self.uri = uri
         self.obsws = simpleobsws.WebSocketClient(url=uri, password=password)

-        asyncio.get_event_loop().run_until_complete(self.setup())
-
-    async def setup(self):
         await self.obsws.connect()
         success = await self.obsws.wait_until_identified()
         if not success:

View file

@@ -4,8 +4,7 @@ from ovtk_audiencekit.plugins import PluginBase


 class OSCPlugin(PluginBase):
-    def __init__(self, *args, ip='localhost', port=None, **kwargs):
-        super().__init__(*args, **kwargs)
+    def setup(self, ip='localhost', port=None):
         if port is None:
             raise RuntimeError('A unique port must be specified')
         self.client = SimpleUDPClient(ip, int(port))

View file

@@ -64,8 +64,7 @@ class PhraseCounter:


 class PhraseCounterPlugin(PluginBase):
-    def __init__(self, *args, debounce_time=1, persist=False, **kwargs):
-        super().__init__(*args, **kwargs)
+    def setup(self, debounce_time=1, persist=False):
         self.debounce_time = debounce_time
         self.persist = persist

View file

@@ -9,10 +9,8 @@ from ovtk_audiencekit.chats.Twitch import Process as Twitch


 class ShoutoutPlugin(PluginBase):
-    def __init__(self, *args, command='so', min_level='vip',
-                 text='Check out {link}!~ They were last streaming {last_game}',
-                 **kwargs):
-        super().__init__(*args, **kwargs)
+    def setup(self, command='so', min_level='vip',
+              text='Check out {link}!~ They were last streaming {last_game}'):
         self.text = text
         if command:
             self.command = Command(name=command, help='Shoutout another user', required_level=min_level)

View file

@@ -8,24 +8,22 @@ from TTS.config import load_config

 from ovtk_audiencekit.plugins import PluginBase
 from ovtk_audiencekit.events import Message, SysMessage
-from ovtk_audiencekit.core import Clip
+from ovtk_audiencekit.core import Clip, Stream
 from ovtk_audiencekit.core.Data import CACHE_DIR


 class TextToSpeechPlugin(PluginBase):
-    def __init__(self, *args, output=None, cuda=None,
-                 engine="tts_models/en/ljspeech/tacotron2-DDC", speaker_wav=None,
-                 _children=None, **kwargs):
-        super().__init__(*args, _children=_children)
+    def setup(self, output=None, cuda=None,
+              engine="tts_models/en/ljspeech/tacotron2-DDC", speaker_wav=None, **kwargs):
         self.speaker_wav = speaker_wav
-        self._output_index = Clip.find_output_index(output)
+        self.output_index = Stream.find_output_index(output)
         conf_overrides = {k[2:]: v for k, v in kwargs.items() if k.startswith('o_')}

-        self._cache = os.path.join(CACHE_DIR, 'tts')
-        os.makedirs(os.path.dirname(self._cache), exist_ok=True)
+        self.cache_dir = os.path.join(CACHE_DIR, 'tts')
+        os.makedirs(os.path.dirname(self.cache_dir), exist_ok=True)

         self.cuda = cuda
@@ -38,7 +36,7 @@ class TextToSpeechPlugin(PluginBase):
             vocoder_path, vocoder_config_path = None, None

         if conf_overrides:
-            override_conf_path = os.path.join(self._cache, f'{self._name}_override.json')
+            override_conf_path = os.path.join(self.cache_dir, f'{self._name}_override.json')
             config = load_config(config_path)
             for key, value in conf_overrides.items():
@@ -57,7 +55,7 @@ class TextToSpeechPlugin(PluginBase):

     def make_tts_wav(self, text, filename=None):
         if filename is None:
-            filename = os.path.join(self._cache, f'{uuid.uuid1()}.wav')
+            filename = os.path.join(self.cache_dir, f'{uuid.uuid1()}.wav')

         if self.speaker_wav:
             wav = self.synthesizer.tts(text, None, 'en', self.speaker_wav)
@@ -74,17 +72,20 @@ class TextToSpeechPlugin(PluginBase):
                 text += '.'
             filename = self.make_tts_wav(text)
             # TODO: Play direct from memory
-            clip = Clip(filename, self._output_index, force_stereo=False)
+            clip = Clip(filename, force_stereo=True)
+            stream = Stream(clip, self.output_index)
             if wait:
                 async def play():
-                    await clip.aplay()
-                    clip.close()
+                    await stream.aplay()
+                    stream.close()
+                    os.remove(os.path.join(self.cache_dir, filename))
                 asyncio.create_task(play())
             else:
-                clip.play()
-                clip.close()
+                stream.play()
+                stream.close()
+                os.remove(os.path.join(self.cache_dir, filename))
         except Exception as e:
-            print(e)
+            self.logger.error(f"Failed to make speech from input: {e}")
             if source_event := _ctx.get('event'):
                 msg = SysMessage(self._name, 'Failed to make speech from input!!')

View file

@@ -16,4 +16,7 @@ class ChancePlugin(PluginBase):
             raise ValueError('Chance must be a string (optionally ending in %) or number')

         if random.random() < chance / 100:
-            await self.execute_kdl(_children, _ctx=_ctx)
+            await self.execute_kdl([child for child in _children if child.name != 'or'], _ctx=_ctx)
+        else:
+            if elsenode := next((child for child in _children if child.name == 'or'), None):
+                await self.execute_kdl(elsenode.nodes, _ctx=_ctx)

View file

@@ -31,7 +31,7 @@ class Cue:
     def is_obsolete(self):
         if self.repeat:
            return False
-        if self._next <= maya.now():
+        if self._next >= maya.now():
            return False
        return True
@@ -74,7 +74,7 @@ class CuePlugin(PluginBase):
         cue = Cue(repeat, **kwargs)

         async def handler():
-            # Repetion management
+            # Repetition management
             if not cue.enabled:
                 return
             if cue.repeat:
@@ -111,10 +111,16 @@
             except ValueError as e:
                 self.logger.error(f'Cannot schedule cue {name} at {at}: {e}')

+    def close(self):
+        self._cleanup_task.cancel()
+        for task in self.tasks.values():
+            self.scheduler.cancel(task)
+        self.scheduler._task.cancel()
+
     async def _cleanup(self):
         while True:
             await asyncio.sleep(60)
-            for name, (cue, _) in self.cues.items():
+            for name, (cue, _) in list(self.cues.items()):
                 if cue.is_obsolete():
                     del self.cues[name]
                     if task := self.tasks.get(name):

View file

@@ -14,6 +14,7 @@ from ovtk_audiencekit.utils import format_exception
 class Scene:
     name: str
     group: str
+    oneshot: bool
     enter: Callable
     exit: Callable
     entry_context: dict = field(default_factory=dict)
@@ -35,16 +36,16 @@ class ScenePlugin(PluginBase):
         self.blueprint.add_url_rule('/<name>/<cmd>', 'api-sceneset', self.ui_setscene)
         self.blueprint.add_url_rule('/monitor', 'monitor', self.ui_monitor_ws, is_websocket=True)

-    async def run(self, name, _children=None, _ctx={}, active=None, group=None, immediate=True, **kwargs):
+    async def run(self, name, _children=None, _ctx={}, active=None, group=None, immediate=True, oneshot=False, **kwargs):
         if _children is None and active is None:
             raise UsageError('Either define a new scene or set `--active` to true / false')

         if _children:
-            await self.define(name, group, _children, default_active=active, ctx=_ctx)
+            await self.define(name, group, _children, default_active=active, oneshot=oneshot, ctx=_ctx)
         else:
             await self.switch(name, active, is_immediate=immediate, ctx=_ctx)

-    async def define(self, name, group, children, default_active=False, ctx={}):
+    async def define(self, name, group, children, default_active=False, oneshot=False, ctx={}):
         if self.scenes.get(name) is not None:
             raise UsageError(f'Scene with name "{name}" already exists!')
@@ -63,7 +64,7 @@ class ScenePlugin(PluginBase):
         async def exit(ctx):
             await self.execute_kdl(exit_nodes, _ctx=ctx)

-        scene = Scene(name, group, enter, exit)
+        scene = Scene(name, group, oneshot, enter, exit)
         self.scenes[name] = scene

         if default_active:
@@ -74,7 +75,9 @@ class ScenePlugin(PluginBase):
         if scene is None:
             raise UsageError(f'No defined scene with name "{name}"')

-        if active:
+        if scene.oneshot:
+            await self._execute(scene, 'enter', is_immediate, ctx)
+        elif active:
             if current := self.active.get(scene.group):
                 if current == scene:
                     return

View file

@@ -24,8 +24,10 @@
   })
 })

 const toggle = async (group_name, scene_name) => {
+  if (!groups.value[group_name][scene_name].oneshot) {
   if (inflight.value.includes(scene_name)) return
   inflight.value.push(scene_name)
+  }
   const next_state = !groups.value[group_name][scene_name]
   await fetch(`${scene_name}/${next_state ? 'activate' : 'deactivate'}`, { method: 'GET' })
 }