mirror of
https://github.com/noctalia-dev/noctalia-shell.git
synced 2026-05-11 17:08:27 +08:00
feat(labwc): use native ext-workspace-v1 for workspaces and fix window tracking
This commit is contained in:
@@ -1,395 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
LabWC Workspace Helper for Noctalia Shell
|
||||
|
||||
This script connects to LabWC's ext-workspace-v1 protocol and outputs
|
||||
workspace state as JSON for the LabwcService to consume.
|
||||
|
||||
Usage:
|
||||
labwc-workspace-helper.py [--activate WORKSPACE_ID]
|
||||
|
||||
Output format (JSON lines):
|
||||
{"type": "state", "workspaces": [...], "groups": [...]}
|
||||
{"type": "error", "message": "..."}
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import argparse
|
||||
import struct
|
||||
import signal
|
||||
|
||||
# Add vendor directory to path
|
||||
# Script is at: Scripts/python/src/compositor/
|
||||
# Vendor is at: Scripts/python/vendor/
|
||||
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
VENDOR_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..', '..', 'vendor'))
|
||||
sys.path.insert(0, VENDOR_DIR)
|
||||
|
||||
from wayland import protocol as wp
|
||||
from wayland.client import MakeDisplay, ServerDisconnected, NoXDGRuntimeDir
|
||||
|
||||
# Protocol XML paths
|
||||
PROTOCOLS_DIR = os.path.join(VENDOR_DIR, 'wayland', 'protocols')
|
||||
EXT_WORKSPACE_XML = os.path.join(PROTOCOLS_DIR, 'ext-workspace-v1.xml')
|
||||
|
||||
|
||||
def find_wayland_xml():
    """Locate the core ``wayland.xml`` protocol definition.

    Builds a candidate list from every directory in ``XDG_DATA_DIRS``
    (defaulting to the standard ``/usr/local/share:/usr/share``), followed
    by two well-known absolute fallback paths, and returns the first
    candidate that exists on disk.

    Returns:
        The path to ``wayland.xml`` as a string, or ``None`` if no
        candidate exists.
    """
    data_dirs = os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share')

    # XDG directories are searched first, in order, then the fallbacks.
    candidates = [
        os.path.join(data_dir, 'wayland', 'wayland.xml')
        for data_dir in data_dirs.split(':')
    ]
    candidates += [
        '/usr/share/wayland/wayland.xml',
        '/usr/local/share/wayland/wayland.xml',
    ]

    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None


# Resolved once at import time; main() validates it before use.
WAYLAND_XML = find_wayland_xml()
|
||||
|
||||
|
||||
class WorkspaceState:
    """Mutable snapshot of the workspace/group/output state reported by
    the compositor over ext-workspace-v1."""

    def __init__(self):
        # All three maps are keyed by Wayland object id (oid).
        self.workspaces = {}
        self.groups = {}
        self.outputs = {}
        self.pending_activate = None

    def to_json(self):
        """Render the current state as a JSON-serializable dict of type 'state'."""

        def workspace_entry(oid, ws):
            # The 'state' field is a bitfield: bit 0 active, bit 1 urgent,
            # bit 2 hidden (see the ext-workspace-v1 state enum).
            bits = ws.get('state', 0)
            return {
                'id': ws.get('id', str(oid)),
                'oid': oid,
                'name': ws.get('name', ''),
                'state': bits,
                'isActive': bool(bits & 1),
                'isUrgent': bool(bits & 2),
                'isHidden': bool(bits & 4),
                'coordinates': ws.get('coordinates', []),
                'groupOid': ws.get('group_oid'),
                'capabilities': ws.get('capabilities', 0),
            }

        def group_entry(oid, grp):
            # Sets are converted to lists so they serialize as JSON arrays.
            return {
                'oid': oid,
                'outputs': list(grp.get('outputs', set())),
                'workspaces': list(grp.get('workspaces', set())),
                'capabilities': grp.get('capabilities', 0),
            }

        return {
            'type': 'state',
            'workspaces': [workspace_entry(oid, ws)
                           for oid, ws in self.workspaces.items()],
            'groups': [group_entry(oid, grp)
                       for oid, grp in self.groups.items()],
        }
|
||||
|
||||
|
||||
class LabwcWorkspaceClient:
    """Client for LabWC's ext-workspace-v1 protocol.

    Connects to the Wayland display, binds ext_workspace_manager_v1 and
    wl_output globals from the registry, tracks workspace/group state in
    a WorkspaceState, and writes JSON lines to stdout (one 'state' object
    per 'done' event, or 'error' objects on failure).
    """

    def __init__(self):
        self.state = WorkspaceState()          # accumulated workspace/group/output state
        self.display = None                    # wl_display proxy, set in connect()
        self.registry = None                   # wl_registry proxy, set in connect()
        self.workspace_manager = None          # ext_workspace_manager_v1 proxy, bound in _on_global
        self.running = True                    # cleared by SIGTERM/SIGINT or the 'finished' event
        self.ext_workspace_protocol = None     # parsed ext-workspace-v1 protocol definition

    def output_json(self, data):
        """Output JSON to stdout and flush (one line per message)."""
        print(json.dumps(data), flush=True)

    def output_state(self):
        """Output current workspace state as JSON."""
        self.output_json(self.state.to_json())

    def output_error(self, message):
        """Output error message as JSON ({'type': 'error', ...})."""
        self.output_json({'type': 'error', 'message': message})

    def connect(self):
        """Connect to Wayland display and bind to protocols.

        Returns True on success; on any failure emits an 'error' JSON line
        and returns False.
        """
        try:
            # Load protocols; the ext-workspace definition extends the base
            # protocol so it can resolve core types.
            base_protocol = wp.Protocol(WAYLAND_XML)
            self.ext_workspace_protocol = wp.Protocol(EXT_WORKSPACE_XML, base_protocol)

            # Create display and connect
            Display = MakeDisplay(base_protocol)
            self.display = Display()

            # Get registry and hook global announcements before the roundtrip
            # so we see every advertised global.
            self.registry = self.display.get_registry()
            self.registry.dispatcher['global'] = self._on_global
            self.registry.dispatcher['global_remove'] = self._on_global_remove

            # Do initial roundtrip to get globals
            self.display.roundtrip()

            if not self.workspace_manager:
                self.output_error('ext_workspace_manager_v1 not available - LabWC 0.8.3+ required')
                return False

            # Do another roundtrip to get initial workspace state
            self.display.roundtrip()

            return True

        except NoXDGRuntimeDir:
            self.output_error('XDG_RUNTIME_DIR not set')
            return False
        except FileNotFoundError as e:
            self.output_error(f'Protocol file not found: {e}')
            return False
        except Exception as e:
            self.output_error(f'Failed to connect: {e}')
            return False

    def _on_global(self, registry, name, interface, version):
        """Handle registry global event: bind the workspace manager and outputs."""
        if interface == 'ext_workspace_manager_v1':
            iface = self.ext_workspace_protocol['ext_workspace_manager_v1']
            self.workspace_manager = registry.bind(name, iface, version)
            self._setup_workspace_manager()
        elif interface == 'wl_output':
            iface = self.ext_workspace_protocol['wl_output']
            # Cap at version 4; the 'name' event handler below is all we use.
            output = registry.bind(name, iface, min(version, 4))
            self.state.outputs[output.oid] = {'name': None}
            output.dispatcher['name'] = lambda o, n: self._on_output_name(o, n)
            output.dispatcher['done'] = lambda o: None

    def _on_global_remove(self, registry, name):
        """Handle registry global_remove event (intentionally ignored)."""
        pass

    def _on_output_name(self, output, name):
        """Handle output name event: record the human-readable output name."""
        if output.oid in self.state.outputs:
            self.state.outputs[output.oid]['name'] = name

    def _setup_workspace_manager(self):
        """Setup workspace manager event handlers."""
        self.workspace_manager.dispatcher['workspace_group'] = self._on_workspace_group
        self.workspace_manager.dispatcher['workspace'] = self._on_workspace
        self.workspace_manager.dispatcher['done'] = self._on_done
        self.workspace_manager.dispatcher['finished'] = self._on_finished

    def _on_workspace_group(self, manager, group):
        """Handle new workspace group: register it and hook its events."""
        self.state.groups[group.oid] = {
            'outputs': set(),
            'workspaces': set(),
            'capabilities': 0,
        }

        group.dispatcher['capabilities'] = lambda g, c: self._on_group_capabilities(g, c)
        group.dispatcher['output_enter'] = lambda g, o: self._on_group_output_enter(g, o)
        group.dispatcher['output_leave'] = lambda g, o: self._on_group_output_leave(g, o)
        group.dispatcher['workspace_enter'] = lambda g, w: self._on_group_workspace_enter(g, w)
        group.dispatcher['workspace_leave'] = lambda g, w: self._on_group_workspace_leave(g, w)
        group.dispatcher['removed'] = lambda g: self._on_group_removed(g)

    def _on_group_capabilities(self, group, capabilities):
        """Handle group capabilities event."""
        if group.oid in self.state.groups:
            self.state.groups[group.oid]['capabilities'] = capabilities

    def _on_group_output_enter(self, group, output):
        """Handle output entering group: store output by name (oid as fallback)."""
        if group.oid in self.state.groups and output:
            output_name = self.state.outputs.get(output.oid, {}).get('name', str(output.oid))
            self.state.groups[group.oid]['outputs'].add(output_name)

    def _on_group_output_leave(self, group, output):
        """Handle output leaving group."""
        if group.oid in self.state.groups and output:
            output_name = self.state.outputs.get(output.oid, {}).get('name', str(output.oid))
            self.state.groups[group.oid]['outputs'].discard(output_name)

    def _on_group_workspace_enter(self, group, workspace):
        """Handle workspace entering group: track membership in both directions."""
        if group.oid in self.state.groups and workspace:
            self.state.groups[group.oid]['workspaces'].add(workspace.oid)
            if workspace.oid in self.state.workspaces:
                self.state.workspaces[workspace.oid]['group_oid'] = group.oid

    def _on_group_workspace_leave(self, group, workspace):
        """Handle workspace leaving group."""
        if group.oid in self.state.groups and workspace:
            self.state.groups[group.oid]['workspaces'].discard(workspace.oid)
            if workspace.oid in self.state.workspaces:
                self.state.workspaces[workspace.oid]['group_oid'] = None

    def _on_group_removed(self, group):
        """Handle group removal."""
        if group.oid in self.state.groups:
            del self.state.groups[group.oid]

    def _on_workspace(self, manager, workspace):
        """Handle new workspace: register it and hook its events.

        The proxy object is kept under 'handle' so activate_workspace()
        can issue requests on it later.
        """
        self.state.workspaces[workspace.oid] = {
            'id': None,
            'name': '',
            'state': 0,
            'coordinates': [],
            'group_oid': None,
            'capabilities': 0,
            'handle': workspace,
        }

        workspace.dispatcher['id'] = lambda w, i: self._on_workspace_id(w, i)
        workspace.dispatcher['name'] = lambda w, n: self._on_workspace_name(w, n)
        workspace.dispatcher['coordinates'] = lambda w, c: self._on_workspace_coordinates(w, c)
        workspace.dispatcher['state'] = lambda w, s: self._on_workspace_state(w, s)
        workspace.dispatcher['capabilities'] = lambda w, c: self._on_workspace_capabilities(w, c)
        workspace.dispatcher['removed'] = lambda w: self._on_workspace_removed(w)

    def _on_workspace_id(self, workspace, id_str):
        """Handle workspace id event."""
        if workspace.oid in self.state.workspaces:
            self.state.workspaces[workspace.oid]['id'] = id_str

    def _on_workspace_name(self, workspace, name):
        """Handle workspace name event."""
        if workspace.oid in self.state.workspaces:
            self.state.workspaces[workspace.oid]['name'] = name

    def _on_workspace_coordinates(self, workspace, coords_bytes):
        """Handle workspace coordinates event.

        The payload arrives as a raw byte array; it is decoded here as a
        sequence of native-endian uint32 values, ignoring any trailing
        partial word.
        """
        if workspace.oid in self.state.workspaces:
            # Parse array of uint32
            coords = []
            if coords_bytes:
                for i in range(0, len(coords_bytes), 4):
                    if i + 4 <= len(coords_bytes):
                        coords.append(struct.unpack('I', coords_bytes[i:i+4])[0])
            self.state.workspaces[workspace.oid]['coordinates'] = coords

    def _on_workspace_state(self, workspace, state):
        """Handle workspace state event (bitfield; see WorkspaceState.to_json)."""
        if workspace.oid in self.state.workspaces:
            self.state.workspaces[workspace.oid]['state'] = state

    def _on_workspace_capabilities(self, workspace, capabilities):
        """Handle workspace capabilities event."""
        if workspace.oid in self.state.workspaces:
            self.state.workspaces[workspace.oid]['capabilities'] = capabilities

    def _on_workspace_removed(self, workspace):
        """Handle workspace removal."""
        if workspace.oid in self.state.workspaces:
            del self.state.workspaces[workspace.oid]

    def _on_done(self, manager):
        """Handle done event - all state updates are complete; emit a snapshot."""
        self.output_state()

    def _on_finished(self, manager):
        """Handle finished event - manager is being destroyed; stop the loop."""
        self.running = False

    def activate_workspace(self, workspace_id):
        """Request activation of a workspace by ID or name.

        Matches against the workspace's protocol id, its name, or (as a
        last resort) its oid. Returns True if a matching workspace was
        found and the activate request was committed and flushed.
        """
        for oid, ws in self.state.workspaces.items():
            ws_id = ws.get('id') or ws.get('name') or str(oid)
            if ws_id == workspace_id or ws.get('name') == workspace_id:
                handle = ws.get('handle')
                if handle:
                    handle.activate()
                    # activate() is only a hint until commit() is sent.
                    self.workspace_manager.commit()
                    self.display.flush()
                    return True
        return False

    def run(self, activate_workspace=None):
        """Main event loop.

        If activate_workspace is given, activates it and exits; otherwise
        streams state snapshots until disconnect or a termination signal.
        Returns a process exit code (0 on success, 1 on failure).
        """
        if not self.connect():
            return 1

        # Handle activation request
        if activate_workspace:
            # Do multiple roundtrips to ensure workspace state is populated
            for _ in range(5):
                self.display.roundtrip()
                if self.state.workspaces:
                    break

            if self.activate_workspace(activate_workspace):
                self.display.roundtrip()
            else:
                self.output_error(f'Workspace not found: {activate_workspace}')
            return 0

        # Setup signal handlers so a supervisor can stop us cleanly.
        def handle_signal(signum, frame):
            self.running = False
        signal.signal(signal.SIGTERM, handle_signal)
        signal.signal(signal.SIGINT, handle_signal)

        # Output initial state
        self.output_state()

        # Event loop; dispatch() blocks until events arrive.
        try:
            while self.running:
                self.display.dispatch()
        except ServerDisconnected:
            self.output_error('Server disconnected')
            return 1
        except KeyboardInterrupt:
            pass

        return 0
|
||||
|
||||
|
||||
def main():
    """CLI entry point.

    Parses arguments, verifies that the required protocol XML files exist,
    then hands control to LabwcWorkspaceClient.run(). Returns a process
    exit code.
    """
    parser = argparse.ArgumentParser(
        description='LabWC workspace helper for Noctalia Shell'
    )
    parser.add_argument(
        '--activate', '-a',
        metavar='WORKSPACE',
        help='Activate a workspace by ID or name and exit'
    )
    args = parser.parse_args()

    def fail(message):
        # Same JSON-lines error format the client itself emits.
        print(json.dumps({'type': 'error', 'message': message}), flush=True)
        return 1

    # Check for required protocol files before attempting to connect.
    if not WAYLAND_XML or not os.path.exists(WAYLAND_XML):
        return fail('Wayland protocol file not found. Check XDG_DATA_DIRS or install wayland-devel.')

    if not os.path.exists(EXT_WORKSPACE_XML):
        return fail(f'ext-workspace protocol file not found: {EXT_WORKSPACE_XML}')

    client = LabwcWorkspaceClient()
    return client.run(activate_workspace=args.activate)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
||||
-21
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2016 Stephen Early
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
-7
@@ -1,7 +0,0 @@
|
||||
# python-wayland - Pure Python Wayland protocol implementation
|
||||
# https://github.com/sde1000/python-wayland
|
||||
# MIT License - Copyright (c) 2016 Stephen Early
|
||||
|
||||
from . import protocol
|
||||
from . import client
|
||||
from . import utils
|
||||
-279
@@ -1,279 +0,0 @@
|
||||
"""Wayland protocol client implementation
|
||||
|
||||
python-wayland - https://github.com/sde1000/python-wayland
|
||||
MIT License - Copyright (c) 2016 Stephen Early
|
||||
"""
|
||||
|
||||
from . import protocol as wayland_protocol
|
||||
import os
|
||||
import socket
|
||||
import select
|
||||
import struct
|
||||
import array
|
||||
import io
|
||||
|
||||
class ServerDisconnected(Exception):
    """The server disconnected unexpectedly.

    Raised by recv() when a zero-length read indicates the compositor
    closed the connection.
    """
    pass
|
||||
|
||||
class NoXDGRuntimeDir(Exception):
    """The XDG_RUNTIME_DIR environment variable is not set.

    Raised while connecting, since the display socket path is resolved
    relative to XDG_RUNTIME_DIR.
    """
    pass
|
||||
|
||||
class ProtocolError(Exception):
    """The server sent data that could not be decoded."""
    pass
|
||||
|
||||
class UnknownObjectError(Exception):
    """The server sent an event for an object we don't know about.

    Queued on the default event queue by _decode() and re-raised when
    the queue is dispatched; carries the offending object id.
    """

    def __init__(self, oid):
        self.oid = oid

    def __str__(self):
        return f"UnknownObjectError({self.oid})"
|
||||
|
||||
class DisplayError(Exception):
    """The server sent a fatal error event.

    This error can be raised during dispatching of the default queue.
    Carries the object the error refers to, the numeric error code, a
    symbolic code string, and the server's human-readable message.
    """

    def __init__(self, obj, code, codestr, message):
        self.obj = obj
        self.code = code
        self.codestr = codestr
        self.message = message

    def __str__(self):
        return f'DisplayError({self.obj}, {self.code} ("{self.codestr}"), {self.message})'
|
||||
|
||||
class _Display:
    """Additional methods for wl_display interface proxy

    The wl_display proxy class obtained by loading the Wayland
    protocol XML file needs to be augmented with some additional
    methods to function as a full Wayland protocol client.

    This class is mixed into the generated wl_display proxy class by
    MakeDisplay(); it owns the socket, object-id allocation, the send
    queue, and the event decode/dispatch machinery.
    """
    def __init__(self, name_or_fd=None):
        self._f = None
        # Client-allocated object ids start at 1. Ids >= 0xff000000 are
        # presumably reserved for the server (see _delete_id) — TODO confirm
        # against the Wayland wire spec.
        self._oids = iter(range(1, 0xff000000))
        self._reusable_oids = []
        self._default_queue = []
        # Cooperates with the generated proxy base class: display, oid,
        # queue, version.
        super(_Display, self).__init__(self, self._get_new_oid(),
                                       self._default_queue, 1)
        if hasattr(name_or_fd, 'fileno'):
            # Caller supplied an already-connected socket-like object.
            self._f = name_or_fd
            self.log.info("connected to existing fd %d", self._f)
        else:
            xdg_runtime_dir = os.getenv('XDG_RUNTIME_DIR')
            if not xdg_runtime_dir:
                raise NoXDGRuntimeDir()
            if not name_or_fd:
                display = os.getenv('WAYLAND_DISPLAY')
                if not display:
                    display = "wayland-0"
            else:
                display = name_or_fd
            # Absolute display names are used verbatim; otherwise resolve
            # relative to XDG_RUNTIME_DIR.
            if display.startswith('/'):
                path = display
            else:
                path = os.path.join(xdg_runtime_dir, display)
            self._f = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
            self._f.connect(path)
            self.log.info("connected to %s", path)

        # Non-blocking so flush()/recv() can detect EAGAIN (errno 11)
        # instead of stalling.
        self._f.setblocking(0)

        # Partial event left from last read
        self._read_partial_event = b''
        self._incoming_fds = []

        # oid -> proxy map for every live object on this connection.
        self.objects = {self.oid: self}
        self._send_queue = []

        # Core wl_display event handlers; delete_id is routine so it is
        # silenced in the log.
        self.dispatcher['delete_id'] = self._delete_id
        self.silence['delete_id'] = True
        self.dispatcher['error'] = self._error_event

    def __del__(self):
        self.disconnect()

    def disconnect(self):
        """Disconnect from the server.

        Closes the socket. After calling this method, all further
        calls to this proxy or any other proxies on the connection
        will fail.
        """
        if self._f:
            self._f.close()
            self._f = None

    def get_fd(self):
        """Get the file descriptor number of the server connection.

        This can be used in calls to select(), poll(), etc. to wait
        for events from the server.
        """
        return self._f.fileno()

    def _get_new_oid(self):
        # Prefer recycled ids released by the server via delete_id.
        if self._reusable_oids:
            return self._reusable_oids.pop()
        return next(self._oids)

    def _delete_id(self, display, id_):
        # Server acknowledges destruction of an object; drop our proxy and
        # recycle client-range ids.
        self.log.info("server deleted %s", self.objects.get(id_, id_))
        if id_ in self.objects:
            self.objects[id_].oid = None
            del self.objects[id_]
        if id_ < 0xff000000:
            self._reusable_oids.append(id_)

    def _error_event(self, *args):
        # NOTE(review): all leading args are collapsed into `objs` and the
        # symbolic code string is left empty — confirm against the
        # wl_display.error event signature (object, code, message).
        objs, (code, message) = args[:-2], args[-2:]
        raise DisplayError(str(objs), str(code), "", str(message))

    def _queue_request(self, r, fds=[]):
        # NOTE(review): mutable default fds=[] — appears safe here because
        # the list is never mutated, only stored and iterated.
        self.log.debug("queueing to send: %s with fds %s", r, fds)
        self._send_queue.append((r, fds))

    def flush(self):
        """Send buffered requests to the display server.

        Will send as many requests as possible to the display server.
        Will not block; if sendmsg() would block, will leave events in
        the queue.

        Returns True if the queue was emptied.
        """
        while self._send_queue:
            b, fds = self._send_queue.pop(0)
            try:
                # File descriptors travel as SCM_RIGHTS ancillary data and
                # are closed locally once handed to the kernel.
                self._f.sendmsg([b], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                                       array.array("i", fds))])
                for fd in fds:
                    os.close(fd)
            except socket.error as e:
                if e.errno == 11:
                    # Would block. Return the data to the head of the queue
                    # and try again later!
                    self.log.debug("flush would block; returning data to queue")
                    self._send_queue.insert(0, (b, fds))
                    return
                raise
        return True

    def recv(self):
        """Receive as much data as is available.

        Returns True if any data was received. Will not block.

        Raises ServerDisconnected on a zero-length read (EOF).
        """
        data = None
        try:
            fds = array.array("i")
            # Reserve ancillary space for up to 16 incoming fds per read.
            data, ancdata, msg_flags, address = self._f.recvmsg(
                4096, socket.CMSG_SPACE(16 * fds.itemsize))
            for cmsg_level, cmsg_type, cmsg_data in ancdata:
                if (cmsg_level == socket.SOL_SOCKET and
                        cmsg_type == socket.SCM_RIGHTS):
                    # Truncate any partial trailing int before decoding.
                    fds.frombytes(cmsg_data[
                        :len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
            self._incoming_fds.extend(fds)
            if data:
                self._decode(data)
                return True
            else:
                raise ServerDisconnected()
        except socket.error as e:
            if e.errno == 11:
                # No data available; would otherwise block
                return
            raise

    def dispatch(self):
        """Dispatch the default event queue.

        If the queue is empty, block until events are available and
        dispatch them.
        """
        self.flush()
        while not self._default_queue:
            select.select([self._f], [], [])
            self.recv()
        self.dispatch_pending()

    def dispatch_pending(self, queue=None):
        """Dispatch pending events in an event queue.

        If queue is None, dispatches from the default event queue.
        Will not read from the server connection.

        Exceptions queued by _decode() (e.g. UnknownObjectError) are
        re-raised here.
        """
        if not queue:
            queue = self._default_queue
        while queue:
            e = queue.pop(0)
            if isinstance(e, Exception):
                raise e
            proxy, event, args = e
            proxy.dispatch_event(event, args)

    def roundtrip(self):
        """Send a sync request to the server and wait for the reply.

        Events are read from the server and dispatched if they are on
        the default event queue. This call blocks until the "done"
        event on the wl_callback generated by the sync request has
        been dispatched.
        """
        ready = False
        def set_ready(callback, x):
            nonlocal ready
            ready = True
        l = self.sync()
        l.dispatcher['done'] = set_ready
        while not ready:
            self.dispatch()

    def _decode(self, data):
        # Split the raw byte stream into events and queue them on the
        # owning proxy's queue. Wire format per message: 32-bit object id,
        # then 16-bit opcode in the low half and 16-bit size in the high
        # half of the next word.
        # There may be partial event data already received; add to it
        # if it's there
        if self._read_partial_event:
            data = self._read_partial_event + data
        while len(data) >= 8:
            oid, sizeop = struct.unpack("II", data[0:8])

            size = sizeop >> 16
            op = sizeop & 0xffff

            if len(data) < size:
                # Wait for the rest of this event to arrive.
                self.log.debug("partial event received: %d byte event, "
                               "%d bytes available", size, len(data))
                break

            argdata = io.BytesIO(data[8:size])
            data = data[size:]

            obj = self.objects.get(oid, None)
            if obj:
                with argdata:
                    e = obj._unmarshal_event(op, argdata, self._incoming_fds)
                self.log.debug(
                    "queueing event: %s(%d) %s %s",
                    e[0].interface.name, e[0].oid, e[1].name, e[2])
                obj.queue.append(e)
            else:
                # Unknown object: surface the problem when the default
                # queue is next dispatched.
                self._default_queue.append(UnknownObjectError(oid))
        # Remember any trailing partial event for the next read.
        self._read_partial_event = data
|
||||
|
||||
def MakeDisplay(protocol):
    """Create a Display class from a Wayland protocol definition

    Args:
        protocol: a wayland.protocol.Protocol instance containing a
            core Wayland protocol definition.

    Returns:
        A Display proxy class built from the specified protocol.
    """
    # The proxy class for wl_display is generated at runtime from the XML,
    # so the connection machinery in _Display has to be mixed in here
    # rather than via a static class statement.
    class Display(_Display, protocol['wl_display'].client_proxy_class):
        pass
    return Display
|
||||
-665
@@ -1,665 +0,0 @@
|
||||
"""Wayland protocol parser and wire protocol implementation
|
||||
|
||||
python-wayland - https://github.com/sde1000/python-wayland
|
||||
MIT License - Copyright (c) 2016 Stephen Early
|
||||
"""
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
import struct
|
||||
import os
|
||||
import logging
|
||||
|
||||
def _description(d):
    # Parse a <description> element from a protocol XML file.
    # Returns a (body text, summary attribute) tuple; either may be None.
    assert d.tag == "description"
    return d.text, d.get('summary')
|
||||
|
||||
class NullArgumentException(Exception):
    """None was passed where a value was expected"""
    pass
|
||||
|
||||
class DeletedProxyException(Exception):
    """A request was made on an object that has already been deleted"""
    pass
|
||||
|
||||
class DuplicateInterfaceName(Exception):
    """A duplicate interface name was detected.

    A protocol file specified an interface name that already exists.
    """
    pass
|
||||
|
||||
class ClientProxy:
    """Abstract base class for a proxy to an interface.

    Classes are derived from this for each interface in a protocol.
    Instances of these classes correspond to objects in the Wayland
    connection. Each class has a method for each request defined in
    the interface, and deals with despatching events received for the
    object.

    Useful attributes:

    interface (class attribute): the Interface this class is a proxy
    for

    display: the wl_display this instance is connected to

    oid: the object ID of this instance

    version: the version of this object

    dispatcher: dictionary mapping event names to callback functions

    silence: dictionary of event names that will not be logged
    """

    def __init__(self, display, oid, queue, version):
        self.display = display
        self.oid = oid
        self.queue = queue
        self.version = version
        self.dispatcher = {}
        self.silence = {}
        self.destroyed = False
        # Per-interface logger, e.g. "<module>.wl_registry".
        self.log = logging.getLogger(__name__ + "." + self.interface.name)

    def _marshal_request(self, request, *args):
        """Serialize a request and queue it for sending.

        Returns the new-object proxy when the request creates one
        (via a new_id argument), otherwise None.
        """
        # args is a tuple when called; we make it a list so it's mutable,
        # because args are consumed in the 'for' loop
        args = list(args)
        al = []          # marshalled argument byte chunks
        rval = None      # proxy for a newly created object, if any
        fl = []          # file descriptors to send alongside
        for a in request.args:
            b, r, fds = a.marshal_for_request(args, self)
            al.append(b)
            fl = fl + fds
            rval = rval or r
        # Every supplied argument must have been consumed by a marshaller.
        assert len(args) == 0
        al = bytes().join(al)
        # Wire header: 32-bit object id, then size (high 16 bits, includes
        # the 8-byte header) combined with the opcode (low 16 bits).
        b = struct.pack('II', self.oid, ((len(al) + 8) << 16) | request.opcode)
        self.display._queue_request(b + al, fl)
        return rval

    def _unmarshal_event(self, opcode, argdata, fd_source):
        """Decode an incoming event's arguments.

        Returns a (proxy, event, args) tuple ready for queueing.
        """
        event = self.interface.events_by_number[opcode]
        args = []
        for arg in event.args:
            v = arg.unmarshal_from_event(argdata, fd_source, self)
            args.append(v)
        return (self, event, args)

    def set_queue(self, new_queue):
        # Sets the queue for events received from this object
        self.queue = new_queue

    def dispatch_event(self, event, args):
        """Invoke the registered callback for an event, if any.

        Events on destroyed proxies, and events with no registered
        callback, are logged (unless silenced) and dropped.
        """
        if self.destroyed:
            self.log.info("ignore event %s(%d).%s%s on destroyed proxy",
                          self.interface.name,
                          self.oid, event.name, args)
            return
        f = self.dispatcher.get(event.name, None)
        if f:
            if event.name not in self.silence:
                self.log.info("dispatch event %s(%d).%s%s",
                              self.interface.name,
                              self.oid, event.name, args)
            f(self, *args)
        else:
            if event.name not in self.silence:
                self.log.info("ignore event %s(%d).%s%s",
                              self.interface.name,
                              self.oid, event.name, args)

    def __str__(self):
        return "{}({})".format(self.interface.name, self.oid)

    def __repr__(self):
        return str(self)
|
||||
|
||||
class Arg:
|
||||
"""An argument to a request or event.
|
||||
|
||||
The request or event this argument belongs to is accessible using
|
||||
the "parent" attribute.
|
||||
|
||||
Has a name, type, optional description, and optional summary.
|
||||
|
||||
If this argument creates a new object, the interface for the new
|
||||
object is accessible as the "interface" attribute.
|
||||
|
||||
If the argument may be null (None), the "allow_null" attribute is
|
||||
True.
|
||||
"""
|
||||
def __init__(self, parent, arg):
|
||||
self.parent = parent
|
||||
|
||||
self.name = arg.get('name')
|
||||
self.type = arg.get('type')
|
||||
|
||||
self.description = None
|
||||
self.summary = arg.get('summary', None)
|
||||
self.allow_null = (arg.get('allow-null', None) == "true")
|
||||
|
||||
for c in arg:
|
||||
if c.tag == "description":
|
||||
self.description, self.summary = _description(c)
|
||||
|
||||
def marshal(self, args):
|
||||
"""Marshal the argument.
|
||||
|
||||
Implement this when marshalling for requests and events is the
|
||||
same operation.
|
||||
|
||||
args is the list of arguments still to marshal; this call
|
||||
removes the appropriate number of items from args.
|
||||
|
||||
The return value is a tuple of (bytes, optional return value,
|
||||
list of fds to send).
|
||||
"""
|
||||
raise RuntimeError
|
||||
|
||||
def unmarshal(self, argdata, fd_source):
|
||||
"""Unmarshal the argument.
|
||||
|
||||
Implement this when unmarshalling from requests and events is
|
||||
the same operation.
|
||||
|
||||
argdata is a file-like object providing access to the
|
||||
remaining marshalled arguments; this call will consume the
|
||||
appropriate number of bytes from this source
|
||||
|
||||
fd_source is an iterator object supplying fds that have been
|
||||
received over the connection
|
||||
|
||||
The return value is the value of the argument.
|
||||
"""
|
||||
raise RuntimeError
|
||||
|
||||
def marshal_for_request(self, args, proxy):
|
||||
"""Marshal the argument
|
||||
|
||||
args is the list of arguments still to marshal; this call
|
||||
removes the appropriate number of items from args
|
||||
|
||||
proxy is the interface proxy class instance being used for the
|
||||
call.
|
||||
|
||||
The return value is a tuple of (bytes, optional return value,
|
||||
list of fds to send)
|
||||
"""
|
||||
return self.marshal(args)
|
||||
|
||||
def unmarshal_from_event(self, argdata, fd_source, proxy):
    """Unmarshal the argument from an event.

    argdata is a file-like object providing access to the
    remaining marshalled arguments; this call will consume the
    appropriate number of bytes from this source.

    fd_source is an iterator object supplying fds that have been
    received over the connection.

    proxy is the interface proxy class instance being used for the
    event.

    The return value is the value of the argument.
    """
    # Default: event unmarshalling is the same as the shared
    # unmarshal(); only types that need the proxy (e.g. new_id,
    # object) override this.
    return self.unmarshal(argdata, fd_source)
|
||||
|
||||
class Arg_int(Arg):
    """Signed 32-bit integer argument"""

    def marshal(self, args):
        """Pack the next pending argument as a native-endian int32."""
        value = args.pop(0)
        return struct.pack('i', value), None, []

    def unmarshal(self, argdata, fd_source):
        """Consume four bytes and decode them as a native-endian int32."""
        raw = argdata.read(4)
        (value, ) = struct.unpack("i", raw)
        return value
|
||||
|
||||
class Arg_uint(Arg):
    """Unsigned 32-bit integer argument"""

    def marshal(self, args):
        """Pack the next pending argument as a native-endian uint32."""
        value = args.pop(0)
        return struct.pack('I', value), None, []

    def unmarshal(self, argdata, fd_source):
        """Consume four bytes and decode them as a native-endian uint32."""
        raw = argdata.read(4)
        (value, ) = struct.unpack("I", raw)
        return value
|
||||
|
||||
class Arg_new_id(Arg):
    """Newly created object argument"""

    def __init__(self, parent, arg):
        super(Arg_new_id, self).__init__(parent, arg)
        self.interface = arg.get('interface', None)
        # Events creating objects must name the interface in the XML;
        # only requests may leave it to be supplied by the caller.
        if isinstance(parent, Event):
            assert self.interface

    def marshal_for_request(self, args, proxy):
        """Allocate a new object id, build its client proxy, and emit
        the wire encoding of the new_id argument.

        Returns (wire bytes, new proxy instance, no fds)."""
        nid = proxy.display._get_new_oid()
        if self.interface:
            # The interface type is part of the argument, and the
            # version of the newly created object is the same as the
            # version of the proxy.
            npc = self.parent.interface.protocol[self.interface]\
                  .client_proxy_class
            version = proxy.version
            b = struct.pack('I', nid)
        else:
            # The interface and version are supplied by the caller,
            # and the argument is marshalled as string,uint32,uint32
            interface = args.pop(0)
            version = args.pop(0)
            npc = interface.client_proxy_class
            iname = interface.name.encode('utf-8')
            # Length word counts the terminating NUL; the pad run both
            # terminates the string and pads it to a 32-bit boundary.
            parts = (struct.pack('I',len(iname)+1),
                     iname,
                     b'\x00'*(4-(len(iname) % 4)),
                     struct.pack('II',version,nid))
            b = b''.join(parts)
        new_proxy = npc(proxy.display, nid, proxy.display._default_queue,
                        version)
        # Register the proxy so later events can resolve this object id.
        proxy.display.objects[nid] = new_proxy
        return b, new_proxy, []

    def unmarshal_from_event(self, argdata, fd_source, proxy):
        """Read a server-allocated object id and wrap it in a new
        client proxy (registered with the display)."""
        assert self.interface
        (nid, ) = struct.unpack("I", argdata.read(4))
        npc = self.parent.interface.protocol[self.interface].client_proxy_class
        # The new object inherits the version of the proxy that
        # delivered the event.
        new_proxy = npc(proxy.display, nid, proxy.display._default_queue,
                        proxy.version)
        proxy.display.objects[nid] = new_proxy
        return new_proxy
|
||||
|
||||
class Arg_string(Arg):
    """String argument"""

    def marshal(self, args):
        """Encode the next pending argument as a Wayland wire string.

        Layout: uint32 length (including the terminating NUL),
        UTF-8 bytes, then NUL padding to a 32-bit boundary. A null
        string is encoded as a zero length word (only if allow-null).
        """
        v = args.pop(0)
        if v is None:
            if self.allow_null:
                return struct.pack('I', 0), None, []
            raise NullArgumentException()
        estr = v.encode('utf-8')
        # The pad run is 1..4 NUL bytes: its first byte doubles as the
        # string's NUL terminator, the rest round up to 32 bits.
        parts = (struct.pack('I',len(estr)+1),
                 estr,
                 b'\x00'*(4-(len(estr) % 4)))
        return b''.join(parts), None, []

    def unmarshal(self, argdata, fd_source):
        """Decode a wire string; returns None for a null string."""
        # The length includes the terminating null byte
        (l, ) = struct.unpack("I", argdata.read(4))
        if l == 0:
            return None
        l = l-1
        s = argdata.read(l).decode('utf-8')
        # Skip the NUL terminator plus padding to the 32-bit boundary.
        argdata.read(4 - (l % 4))
        return s
|
||||
|
||||
class Arg_object(Arg):
    """Existing object argument"""

    def marshal(self, args):
        """Encode the next pending argument as a uint32 object id.

        A falsy argument stands for "no object" and is encoded as id 0,
        which is only legal when the protocol marks the arg allow-null.
        """
        obj = args.pop(0)
        if not obj:
            if not self.allow_null:
                raise NullArgumentException()
            object_id = 0
        else:
            object_id = obj.oid
        return struct.pack("I", object_id), None, []

    def unmarshal_from_event(self, argdata, fd_source, proxy):
        """Decode a uint32 object id back into the proxy registered on
        the display; id 0 (or an unknown id) yields None."""
        (object_id, ) = struct.unpack("I", argdata.read(4))
        if object_id == 0:
            return None
        return proxy.display.objects.get(object_id, None)
|
||||
|
||||
class Arg_fd(Arg):
    """File descriptor argument"""

    def marshal(self, args):
        """Queue the next pending argument for out-of-band transfer.

        File descriptors contribute no in-line bytes; they travel as
        ancillary data. The fd is duplicated so the caller keeps
        ownership of the original descriptor.
        """
        duplicate = os.dup(args.pop(0))
        return b'', None, [duplicate]

    def unmarshal(self, argdata, fd_source):
        """Take the next descriptor received over the connection."""
        return fd_source.pop(0)
|
||||
|
||||
class Arg_fixed(Arg):
    """Signed 24.8 decimal number argument"""

    def marshal(self, args):
        """Encode the next pending argument as a signed 24.8
        fixed-point int32."""
        v = args.pop(0)
        if isinstance(v, int):
            m = v << 8
        else:
            # Bug fix: int(v) truncates toward zero while (v % 1.0) is
            # floor-based, so negative values were mis-encoded (e.g.
            # -1.5 became the encoding of -0.5). Use a floor-based
            # integer part (v // 1.0) so it is consistent with the
            # fractional part; positive values encode exactly as before.
            m = (int(v // 1.0) << 8) + int((v % 1.0) * 256)
        return struct.pack("i",m), None, []

    def unmarshal(self, argdata, fd_source):
        """Decode a signed 24.8 fixed-point int32 to a float.

        The arithmetic right shift keeps two's-complement values
        correct for negative numbers.
        """
        b = argdata.read(4)
        (m, ) = struct.unpack("i",b)
        return float(m >> 8) + ((m & 0xff) / 256.0)
|
||||
|
||||
class Arg_array(Arg):
    """Array argument"""

    def marshal(self, args):
        """Encode the next pending argument as a Wayland wire array.

        Layout: uint32 byte length, the raw bytes, then NUL padding to
        a 32-bit boundary. A null array is a zero length word (only
        legal when allow-null).
        """
        v = args.pop(0)
        if v is None:
            if self.allow_null:
                return struct.pack('I', 0), None, []
            raise NullArgumentException()
        # v should be bytes
        # Pad to the next 32-bit boundary; an exact multiple of 4
        # (and the empty array) needs no padding.
        parts = (struct.pack('I',len(v)),
                 v,
                 b'\x00'*(3 - ((len(v) - 1) % 4)) if len(v) > 0 else b'')
        return b''.join(parts), None, []

    def unmarshal(self, argdata, fd_source):
        """Decode a wire array to bytes (empty bytes for length 0)."""
        (l, ) = struct.unpack("I", argdata.read(4))
        if l == 0:
            return b''
        v = argdata.read(l)
        # Consume the padding bytes up to the 32-bit boundary.
        pad = 3 - ((l - 1) % 4)
        if pad:
            argdata.read(pad)
        return v
|
||||
|
||||
def _make_arg(parent, tag):
    """Instantiate the Arg_* subclass matching the <arg> element's
    "type" attribute (e.g. type="int" -> Arg_int).

    Raises KeyError for a wire type with no corresponding class.
    """
    # Cleanup: the original bound tag.get("type") to an unused local
    # and then called it again; look it up once.
    return globals()["Arg_" + tag.get("type")](parent, tag)
|
||||
|
||||
class Request:
    """A request on an interface.

    Requests have a name, optional type (to indicate whether the
    request destroys the object), optional "since version of
    interface", optional description, and optional summary.

    If a request has an argument of type "new_id" then the request
    creates a new object; the Interface for this new object is
    accessible as the "creates" attribute.
    """
    def __init__(self, interface, opcode, request):
        self.interface = interface
        # The opcode is the request's index in XML declaration order.
        self.opcode = opcode
        assert request.tag == "request"

        self.name = request.get('name')
        self.type = request.get('type', None)
        self.since = int(request.get('since', 1))

        self.is_destructor = (self.type == "destructor")

        self.description = None
        self.summary = None

        # Name of the interface this request creates, if any (set
        # below when a new_id argument is found).
        self.creates = None

        self.args = []

        for c in request:
            if c.tag == "description":
                self.description, self.summary = _description(c)
            elif c.tag == "arg":
                a = _make_arg(self, c)
                if a.type == "new_id":
                    self.creates = a.interface
                self.args.append(a)

    def __str__(self):
        return "{}.{}".format(self.interface.name,self.name)

    def invoke(self, proxy, *args):
        """Invoke this request on a client proxy.

        Returns the new proxy for requests that create an object,
        otherwise whatever the proxy's marshalling returns.

        Raises DeletedProxyException if the proxy's id has been
        deleted; silently ignores calls on destroyed proxies and
        calls requiring a newer interface version than the proxy has.
        """
        if not proxy.oid:
            proxy.log.warning("request %s on deleted %s proxy",
                              self.name, proxy.interface.name)
            raise DeletedProxyException
        if proxy.destroyed:
            proxy.log.info("request %s.%s%s on destroyed object; ignoring",
                           proxy, self.name, args)
            return
        if proxy.version < self.since:
            proxy.log.error(
                "request %s.%s%s only exists from version %s, but proxy is "
                "version %s", proxy, self.name, args, self.since,
                proxy.version)
            return
        r = proxy._marshal_request(self, *args)
        if r:
            proxy.log.info(
                "request %s.%s%s -> %s", proxy, self.name, args, r)
        else:
            proxy.log.info("request %s.%s%s", proxy, self.name, args)
        # A destructor request makes the proxy unusable from the
        # client side from this point on.
        if self.is_destructor:
            proxy.destroyed = True
            proxy.log.info(
                "%s proxy destroyed by destructor request %s%s",
                proxy, self.name, args)
        return r
|
||||
|
||||
class Event:
    """An event on an interface.

    An event carries a number (its declaration order in the protocol
    XML file), a name, an optional "since version of interface", an
    optional description and summary, and a list of arguments.
    """
    def __init__(self, interface, event, number):
        assert event.tag == "event"
        self.interface = interface
        self.number = number

        self.name = event.get('name')
        self.since = int(event.get('since', 1))

        self.description = None
        self.summary = None
        self.args = []

        # Collect the event's documentation and argument definitions
        # from its child elements.
        for child in event:
            if child.tag == "description":
                self.description, self.summary = _description(child)
            elif child.tag == "arg":
                self.args.append(_make_arg(self, child))

    def __str__(self):
        return "{}::{}".format(self.interface, self.name)
|
||||
|
||||
class Entry:
    """An entry in an enumeration.

    Carries a name, an integer value, an optional description,
    optional summary, and an optional "since version of interface".
    """

    def __init__(self, enum, entry):
        assert entry.tag == "entry"
        self.enum = enum

        self.name = entry.get('name')
        # base=0 lets the XML spell values in decimal ("2") or hex ("0x2")
        self.value = int(entry.get('value'), base=0)
        self.since = int(entry.get('since', 1))
        self.description = None
        self.summary = entry.get('summary', None)

        # A nested <description> element overrides the summary attribute.
        for child in entry:
            if child.tag == "description":
                self.description, self.summary = _description(child)
|
||||
|
||||
class Enum:
    """An enumeration declared in an interface.

    An enumeration has a name, an optional "since version of
    interface", optional description and summary, and a number of
    entries, available by name through the "entries" dictionary.

    Indexing the Enum itself converts between names and values: a
    string index returns the entry's integer value, an integer index
    returns the entry's name; missing keys yield None.
    """
    def __init__(self, interface, enum):
        assert enum.tag == "enum"
        self.interface = interface

        self.name = enum.get('name')
        self.since = int(enum.get('since', 1))
        self.description = None
        self.summary = None
        self.entries = {}
        self._values = {}  # entry name -> integer value
        self._names = {}   # integer value -> entry name

        for child in enum:
            if child.tag == "description":
                self.description, self.summary = _description(child)
            elif child.tag == "entry":
                entry = Entry(self, child)
                self.entries[entry.name] = entry
                self._values[entry.name] = entry.value
                self._names[entry.value] = entry.name

    def __getitem__(self, i):
        # int -> name, anything else (normally str) -> value
        table = self._names if isinstance(i, int) else self._values
        return table.get(i)
|
||||
|
||||
class Interface:
    """A Wayland protocol interface.

    Wayland interfaces have a name and version, plus a number of
    requests, events and enumerations. Optionally they have a
    description.

    The name and version are accessible as the "name" and "version"
    attributes.

    The requests and enums are accessible as dictionaries as the
    "requests" and "enums" attributes. The events are accessible by
    name as a dictionary as the "events_by_name" attribute, and by
    number as a list as the "events_by_number" attribute.

    A client proxy class for this interface is available as the
    "client_proxy_class" attribute; instances of this class have
    methods corresponding to the requests, and deal with dispatching
    the events.
    """

    def __init__(self, protocol, interface):
        self.protocol = protocol
        assert interface.tag == "interface"

        self.name = interface.get('name')
        self.version = int(interface.get('version'))
        assert self.version > 0
        self.description = None
        self.summary = None
        self.requests = {}
        self.events_by_name = {}
        self.events_by_number = []
        self.enums = {}

        for c in interface:
            if c.tag == "description":
                self.description, self.summary = _description(c)
            elif c.tag == "request":
                # Request opcodes are assigned in declaration order.
                e = Request(self, len(self.requests), c)
                self.requests[e.name] = e
            elif c.tag == "event":
                # Event numbers are assigned in declaration order.
                e = Event(self, c, len(self.events_by_number))
                self.events_by_name[e.name] = e
                self.events_by_number.append(e)
            elif c.tag == "enum":
                e = Enum(self, c)
                self.enums[e.name] = e

        # Build the client proxy class dynamically: one method per
        # request. The factory function is needed to bind each Request
        # instance into its own closure (a bare lambda in the loop
        # would late-bind to the last request).
        def client_proxy_request(x):
            def call_request(*args):
                return x.invoke(*args)
            return call_request
        d = {
            '__doc__': self.description,
            'interface': self,
        }
        for r in self.requests.values():
            d[r.name] = client_proxy_request(r)
        self.client_proxy_class = type(
            str(self.name + '_client_proxy'), (ClientProxy,), d)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Interface('{}', {})".format(self.name, self.version)
|
||||
|
||||
class Protocol:
    """A Wayland protocol.

    A Wayland connection will often run several protocols at once:
    the core protocol plus extension protocols that add new
    functionality or extend existing interfaces. See
    https://cgit.freedesktop.org/wayland/wayland-protocols for the
    current collection.

    Each Protocol instance corresponds to one protocol XML file. The
    interfaces it declares are available through the "interfaces"
    dictionary (keyed by interface name), or directly via item access
    (e.g. protocol['wl_display']). Treat the instance as immutable
    after construction, except that "child" protocols loaded with
    this instance as an ancestor add their interfaces to the shared
    "interfaces" dictionary.

    The copyright notice from the XML file, if present, is available
    as the "copyright" attribute.
    """
    def __init__(self, file, parent=None):
        """Load a Wayland protocol file.

        Args:
            file: a filename or file object containing an XML Wayland
            protocol description

            parent: a Protocol object containing interfaces that are
            referred to by name in the XML protocol description
        """
        root = ET.parse(file).getroot()
        assert root.tag == "protocol"

        self.copyright = None
        # Child protocols share their ancestor's interface registry so
        # that cross-protocol interface references resolve by name.
        if parent:
            self.interfaces = parent.interfaces
        else:
            self.interfaces = {}

        self.name = root.get('name')

        for node in root:
            if node.tag == "copyright":
                self.copyright = node.text
            elif node.tag == "interface":
                iface = Interface(self, node)
                if iface.name in self.interfaces:
                    raise DuplicateInterfaceName(iface.name)
                self.interfaces[iface.name] = iface

    def __getitem__(self, x):
        return self.interfaces.__getitem__(x)
|
||||
@@ -1,422 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<protocol name="ext_workspace_v1">
|
||||
<copyright>
|
||||
Copyright © 2019 Christopher Billington
|
||||
Copyright © 2020 Ilia Bozhinov
|
||||
Copyright © 2022 Victoria Brekenfeld
|
||||
|
||||
Permission to use, copy, modify, distribute, and sell this
|
||||
software and its documentation for any purpose is hereby granted
|
||||
without fee, provided that the above copyright notice appear in
|
||||
all copies and that both that copyright notice and this permission
|
||||
notice appear in supporting documentation, and that the name of
|
||||
the copyright holders not be used in advertising or publicity
|
||||
pertaining to distribution of the software without specific,
|
||||
written prior permission. The copyright holders make no
|
||||
representations about the suitability of this software for any
|
||||
purpose. It is provided "as is" without express or implied
|
||||
warranty.
|
||||
|
||||
THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
|
||||
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
|
||||
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
|
||||
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
|
||||
THIS SOFTWARE.
|
||||
</copyright>
|
||||
|
||||
<interface name="ext_workspace_manager_v1" version="1">
|
||||
<description summary="list and control workspaces">
|
||||
Workspaces, also called virtual desktops, are groups of surfaces. A
|
||||
compositor with a concept of workspaces may only show some such groups of
|
||||
surfaces (those of 'active' workspaces) at a time. 'Activating' a
|
||||
workspace is a request for the compositor to display that workspace's
|
||||
surfaces as normal, whereas the compositor may hide or otherwise
|
||||
de-emphasise surfaces that are associated only with 'inactive' workspaces.
|
||||
Workspaces are grouped by which sets of outputs they correspond to, and
|
||||
may contain surfaces only from those outputs. In this way, it is possible
|
||||
for each output to have its own set of workspaces, or for all outputs (or
|
||||
any other arbitrary grouping) to share workspaces. Compositors may
|
||||
optionally conceptually arrange each group of workspaces in an
|
||||
N-dimensional grid.
|
||||
|
||||
The purpose of this protocol is to enable the creation of taskbars and
|
||||
docks by providing them with a list of workspaces and their properties,
|
||||
and allowing them to activate and deactivate workspaces.
|
||||
|
||||
After a client binds the ext_workspace_manager_v1, each workspace will be
|
||||
sent via the workspace event.
|
||||
</description>
|
||||
|
||||
<event name="workspace_group">
|
||||
<description summary="a workspace group has been created">
|
||||
This event is emitted whenever a new workspace group has been created.
|
||||
|
||||
All initial details of the workspace group (outputs) will be
|
||||
sent immediately after this event via the corresponding events in
|
||||
ext_workspace_group_handle_v1 and ext_workspace_handle_v1.
|
||||
</description>
|
||||
<arg name="workspace_group" type="new_id" interface="ext_workspace_group_handle_v1"/>
|
||||
</event>
|
||||
|
||||
<event name="workspace">
|
||||
<description summary="workspace has been created">
|
||||
This event is emitted whenever a new workspace has been created.
|
||||
|
||||
All initial details of the workspace (name, coordinates, state) will
|
||||
be sent immediately after this event via the corresponding events in
|
||||
ext_workspace_handle_v1.
|
||||
|
||||
Workspaces start off unassigned to any workspace group.
|
||||
</description>
|
||||
<arg name="workspace" type="new_id" interface="ext_workspace_handle_v1"/>
|
||||
</event>
|
||||
|
||||
<request name="commit">
|
||||
<description summary="all requests about the workspaces have been sent">
|
||||
The client must send this request after it has finished sending other
|
||||
requests. The compositor must process a series of requests preceding a
|
||||
commit request atomically.
|
||||
|
||||
This allows changes to the workspace properties to be seen as atomic,
|
||||
even if they happen via multiple events, and even if they involve
|
||||
multiple ext_workspace_handle_v1 objects, for example, deactivating one
|
||||
workspace and activating another.
|
||||
</description>
|
||||
</request>
|
||||
|
||||
<event name="done">
|
||||
<description summary="all information about the workspaces and workspace groups has been sent">
|
||||
This event is sent after all changes in all workspaces and workspace groups have been
|
||||
sent.
|
||||
|
||||
This allows changes to one or more ext_workspace_group_handle_v1
|
||||
properties and ext_workspace_handle_v1 properties
|
||||
to be seen as atomic, even if they happen via multiple events.
|
||||
In particular, an output moving from one workspace group to
|
||||
another sends an output_enter event and an output_leave event to the two
|
||||
ext_workspace_group_handle_v1 objects in question. The compositor sends
|
||||
the done event only after updating the output information in both
|
||||
workspace groups.
|
||||
</description>
|
||||
</event>
|
||||
|
||||
<event name="finished" type="destructor">
|
||||
<description summary="the compositor has finished with the workspace_manager">
|
||||
This event indicates that the compositor is done sending events to the
|
||||
ext_workspace_manager_v1. The server will destroy the object
|
||||
immediately after sending this request.
|
||||
</description>
|
||||
</event>
|
||||
|
||||
<request name="stop">
|
||||
<description summary="stop sending events">
|
||||
Indicates the client no longer wishes to receive events for new
|
||||
workspace groups. However the compositor may emit further workspace
|
||||
events, until the finished event is emitted. The compositor is expected
|
||||
to send the finished event eventually once the stop request has been processed.
|
||||
|
||||
The client must not send any requests after this one, doing so will raise a wl_display
|
||||
invalid_object error.
|
||||
</description>
|
||||
</request>
|
||||
|
||||
</interface>
|
||||
|
||||
<interface name="ext_workspace_group_handle_v1" version="1">
|
||||
<description summary="a workspace group assigned to a set of outputs">
|
||||
A ext_workspace_group_handle_v1 object represents a workspace group
|
||||
that is assigned a set of outputs and contains a number of workspaces.
|
||||
|
||||
The set of outputs assigned to the workspace group is conveyed to the client via
|
||||
output_enter and output_leave events, and its workspaces are conveyed with
|
||||
workspace events.
|
||||
|
||||
For example, a compositor which has a set of workspaces for each output may
|
||||
advertise a workspace group (and its workspaces) per output, whereas a compositor
|
||||
where a workspace spans all outputs may advertise a single workspace group for all
|
||||
outputs.
|
||||
</description>
|
||||
|
||||
<enum name="group_capabilities" bitfield="true">
|
||||
<entry name="create_workspace" value="1" summary="create_workspace request is available"/>
|
||||
</enum>
|
||||
|
||||
<event name="capabilities">
|
||||
<description summary="compositor capabilities">
|
||||
This event advertises the capabilities supported by the compositor. If
|
||||
a capability isn't supported, clients should hide or disable the UI
|
||||
elements that expose this functionality. For instance, if the
|
||||
compositor doesn't advertise support for creating workspaces, a button
|
||||
triggering the create_workspace request should not be displayed.
|
||||
|
||||
The compositor will ignore requests it doesn't support. For instance,
|
||||
a compositor which doesn't advertise support for creating workspaces will ignore
|
||||
create_workspace requests.
|
||||
|
||||
Compositors must send this event once after creation of an
|
||||
ext_workspace_group_handle_v1. When the capabilities change, compositors
|
||||
must send this event again.
|
||||
</description>
|
||||
<arg name="capabilities" type="uint" summary="capabilities" enum="group_capabilities"/>
|
||||
</event>
|
||||
|
||||
<event name="output_enter">
|
||||
<description summary="output assigned to workspace group">
|
||||
This event is emitted whenever an output is assigned to the workspace
|
||||
group or a new `wl_output` object is bound by the client, which was already
|
||||
assigned to this workspace_group.
|
||||
</description>
|
||||
<arg name="output" type="object" interface="wl_output"/>
|
||||
</event>
|
||||
|
||||
<event name="output_leave">
|
||||
<description summary="output removed from workspace group">
|
||||
This event is emitted whenever an output is removed from the workspace
|
||||
group.
|
||||
</description>
|
||||
<arg name="output" type="object" interface="wl_output"/>
|
||||
</event>
|
||||
|
||||
<event name="workspace_enter">
|
||||
<description summary="workspace added to workspace group">
|
||||
This event is emitted whenever a workspace is assigned to this group.
|
||||
A workspace may only ever be assigned to a single group at a single point
|
||||
in time, but can be re-assigned during its lifetime.
|
||||
</description>
|
||||
<arg name="workspace" type="object" interface="ext_workspace_handle_v1"/>
|
||||
</event>
|
||||
|
||||
<event name="workspace_leave">
|
||||
<description summary="workspace removed from workspace group">
|
||||
This event is emitted whenever a workspace is removed from this group.
|
||||
</description>
|
||||
<arg name="workspace" type="object" interface="ext_workspace_handle_v1"/>
|
||||
</event>
|
||||
|
||||
<event name="removed">
|
||||
<description summary="this workspace group has been removed">
|
||||
This event is sent when the group associated with the ext_workspace_group_handle_v1
|
||||
has been removed. After sending this request the compositor will immediately consider
|
||||
the object inert. Any requests will be ignored except the destroy request.
|
||||
It is guaranteed there won't be any more events referencing this
|
||||
ext_workspace_group_handle_v1.
|
||||
|
||||
The compositor must remove all workspaces belonging to a workspace group
|
||||
via a workspace_leave event before removing the workspace group.
|
||||
</description>
|
||||
</event>
|
||||
|
||||
<request name="create_workspace">
|
||||
<description summary="create a new workspace">
|
||||
Request that the compositor create a new workspace with the given name
|
||||
and assign it to this group.
|
||||
|
||||
There is no guarantee that the compositor will create a new workspace,
|
||||
or that the created workspace will have the provided name.
|
||||
</description>
|
||||
<arg name="workspace" type="string"/>
|
||||
</request>
|
||||
|
||||
<request name="destroy" type="destructor">
|
||||
<description summary="destroy the ext_workspace_group_handle_v1 object">
|
||||
Destroys the ext_workspace_group_handle_v1 object.
|
||||
|
||||
This request should be sent either when the client does not want to
|
||||
use the workspace group object any more or after the removed event to finalize
|
||||
the destruction of the object.
|
||||
</description>
|
||||
</request>
|
||||
</interface>
|
||||
|
||||
<interface name="ext_workspace_handle_v1" version="1">
|
||||
<description summary="a workspace handing a group of surfaces">
|
||||
A ext_workspace_handle_v1 object represents a workspace that handles a
|
||||
group of surfaces.
|
||||
|
||||
Each workspace has:
|
||||
- a name, conveyed to the client with the name event
|
||||
- potentially an id conveyed with the id event
|
||||
- a list of states, conveyed to the client with the state event
|
||||
- and optionally a set of coordinates, conveyed to the client with the
|
||||
coordinates event
|
||||
|
||||
The client may request that the compositor activate or deactivate the workspace.
|
||||
|
||||
Each workspace can belong to only a single workspace group.
|
||||
Depending on the compositor policy, there might be workspaces with
|
||||
the same name in different workspace groups, but these workspaces are still
|
||||
separate (e.g. one of them might be active while the other is not).
|
||||
</description>
|
||||
|
||||
<event name="id">
|
||||
<description summary="workspace id">
|
||||
If this event is emitted, it will be send immediately after the
|
||||
ext_workspace_handle_v1 is created or when an id is assigned to
|
||||
a workspace (at most once during its lifetime).
|
||||
|
||||
An id will never change during the lifetime of the `ext_workspace_handle_v1`
|
||||
and is guaranteed to be unique during its lifetime.
|
||||
|
||||
Ids are not human-readable and shouldn't be displayed, use `name` for that purpose.
|
||||
|
||||
Compositors are expected to only send ids for workspaces likely stable across multiple
|
||||
sessions and can be used by clients to store preferences for workspaces. Workspaces without
|
||||
ids should be considered temporary and any data associated with them should be deleted once
|
||||
the respective object is lost.
|
||||
</description>
|
||||
<arg name="id" type="string"/>
|
||||
</event>
|
||||
|
||||
<event name="name">
|
||||
<description summary="workspace name changed">
|
||||
This event is emitted immediately after the ext_workspace_handle_v1 is
|
||||
created and whenever the name of the workspace changes.
|
||||
|
||||
A name is meant to be human-readable and can be displayed to a user.
|
||||
Unlike the id it is neither stable nor unique.
|
||||
</description>
|
||||
<arg name="name" type="string"/>
|
||||
</event>
|
||||
|
||||
<event name="coordinates">
|
||||
<description summary="workspace coordinates changed">
|
||||
This event is used to organize workspaces into an N-dimensional grid
|
||||
within a workspace group, and if supported, is emitted immediately after
|
||||
the ext_workspace_handle_v1 is created and whenever the coordinates of
|
||||
the workspace change. Compositors may not send this event if they do not
|
||||
conceptually arrange workspaces in this way. If compositors simply
|
||||
number workspaces, without any geometric interpretation, they may send
|
||||
1D coordinates, which clients should not interpret as implying any
|
||||
geometry. Sending an empty array means that the compositor no longer
|
||||
orders the workspace geometrically.
|
||||
|
||||
Coordinates have an arbitrary number of dimensions N with an uint32
|
||||
position along each dimension. By convention if N > 1, the first
|
||||
dimension is X, the second Y, the third Z, and so on. The compositor may
|
||||
chose to utilize these events for a more novel workspace layout
|
||||
convention, however. No guarantee is made about the grid being filled or
|
||||
bounded; there may be a workspace at coordinate 1 and another at
|
||||
coordinate 1000 and none in between. Within a workspace group, however,
|
||||
workspaces must have unique coordinates of equal dimensionality.
|
||||
</description>
|
||||
<arg name="coordinates" type="array"/>
|
||||
</event>
|
||||
|
||||
<enum name="state" bitfield="true">
|
||||
<description summary="types of states on the workspace">
|
||||
The different states that a workspace can have.
|
||||
</description>
|
||||
|
||||
<entry name="active" value="1" summary="the workspace is active"/>
|
||||
<entry name="urgent" value="2" summary="the workspace requests attention"/>
|
||||
<entry name="hidden" value="4">
|
||||
<description summary="the workspace is not visible">
|
||||
The workspace is not visible in its workspace group, and clients
|
||||
attempting to visualize the compositor workspace state should not
|
||||
display such workspaces.
|
||||
</description>
|
||||
</entry>
|
||||
</enum>
|
||||
|
||||
<event name="state">
|
||||
<description summary="the state of the workspace changed">
|
||||
This event is emitted immediately after the ext_workspace_handle_v1 is
|
||||
created and each time the workspace state changes, either because of a
|
||||
compositor action or because of a request in this protocol.
|
||||
|
||||
Missing states convey the opposite meaning, e.g. an unset active bit
|
||||
means the workspace is currently inactive.
|
||||
</description>
|
||||
<arg name="state" type="uint" enum="state"/>
|
||||
</event>
|
||||
|
||||
<enum name="workspace_capabilities" bitfield="true">
|
||||
<entry name="activate" value="1" summary="activate request is available"/>
|
||||
<entry name="deactivate" value="2" summary="deactivate request is available"/>
|
||||
<entry name="remove" value="4" summary="remove request is available"/>
|
||||
<entry name="assign" value="8" summary="assign request is available"/>
|
||||
</enum>
|
||||
|
||||
<event name="capabilities">
|
||||
<description summary="compositor capabilities">
|
||||
This event advertises the capabilities supported by the compositor. If
|
||||
a capability isn't supported, clients should hide or disable the UI
|
||||
elements that expose this functionality. For instance, if the
|
||||
compositor doesn't advertise support for removing workspaces, a button
|
||||
triggering the remove request should not be displayed.
|
||||
|
||||
The compositor will ignore requests it doesn't support. For instance,
|
||||
a compositor which doesn't advertise support for remove will ignore
|
||||
remove requests.
|
||||
|
||||
Compositors must send this event once after creation of an
|
||||
ext_workspace_handle_v1. When the capabilities change, compositors
|
||||
must send this event again.
|
||||
</description>
|
||||
<arg name="capabilities" type="uint" summary="capabilities" enum="workspace_capabilities"/>
|
||||
</event>
|
||||
|
||||
<event name="removed">
|
||||
<description summary="this workspace has been removed">
|
||||
This event is sent when the workspace associated with the ext_workspace_handle_v1
|
||||
has been removed. After sending this request, the compositor will immediately consider
|
||||
the object inert. Any requests will be ignored except the destroy request.
|
||||
|
||||
It is guaranteed there won't be any more events referencing this
|
||||
ext_workspace_handle_v1.
|
||||
|
||||
The compositor must only remove a workspace not currently belonging to any
|
||||
workspace_group.
|
||||
</description>
|
||||
</event>
|
||||
|
||||
<request name="destroy" type="destructor">
|
||||
<description summary="destroy the ext_workspace_handle_v1 object">
|
||||
Destroys the ext_workspace_handle_v1 object.
|
||||
|
||||
This request should be made either when the client does not want to
|
||||
use the workspace object any more or after the remove event to finalize
|
||||
the destruction of the object.
|
||||
</description>
|
||||
</request>
|
||||
|
||||
<request name="activate">
|
||||
<description summary="activate the workspace">
|
||||
Request that this workspace be activated.
|
||||
|
||||
There is no guarantee the workspace will be actually activated, and
|
||||
behaviour may be compositor-dependent. For example, activating a
|
||||
workspace may or may not deactivate all other workspaces in the same
|
||||
group.
|
||||
</description>
|
||||
</request>
|
||||
|
||||
<request name="deactivate">
|
||||
<description summary="deactivate the workspace">
|
||||
Request that this workspace be deactivated.
|
||||
|
||||
There is no guarantee the workspace will be actually deactivated.
|
||||
</description>
|
||||
</request>
|
||||
|
||||
<request name="assign">
|
||||
<description summary="assign workspace to group">
|
||||
Requests that this workspace is assigned to the given workspace group.
|
||||
|
||||
There is no guarantee the workspace will be assigned.
|
||||
</description>
|
||||
<arg name="workspace_group" type="object" interface="ext_workspace_group_handle_v1"/>
|
||||
</request>
|
||||
|
||||
<request name="remove">
|
||||
<description summary="remove the workspace">
|
||||
Request that this workspace be removed.
|
||||
|
||||
There is no guarantee the workspace will be actually removed.
|
||||
</description>
|
||||
</request>
|
||||
</interface>
|
||||
</protocol>
|
||||
-36
@@ -1,36 +0,0 @@
|
||||
"""Wayland utility functions
|
||||
|
||||
python-wayland - https://github.com/sde1000/python-wayland
|
||||
MIT License - Copyright (c) 2016 Stephen Early
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from .client import NoXDGRuntimeDir
|
||||
|
||||
class AnonymousFile(object):
|
||||
"""Create an anonymous file in XDG_RUNTIME_DIR"""
|
||||
def __init__(self, size):
|
||||
xdg_runtime_dir = os.getenv('XDG_RUNTIME_DIR')
|
||||
if not xdg_runtime_dir:
|
||||
raise NoXDGRuntimeDir()
|
||||
self._fd, name = tempfile.mkstemp(dir=xdg_runtime_dir)
|
||||
os.unlink(name)
|
||||
os.ftruncate(self._fd, size)
|
||||
|
||||
def fileno(self):
|
||||
if self._fd:
|
||||
return self._fd
|
||||
raise OSError
|
||||
|
||||
def close(self):
|
||||
if self._fd:
|
||||
os.close(self._fd)
|
||||
self._fd = None
|
||||
|
||||
def __enter__(self):
|
||||
return self._fd
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
if self._fd:
|
||||
self.close()
|
||||
@@ -2,6 +2,7 @@ import QtQuick
|
||||
import Quickshell
|
||||
import Quickshell.Io
|
||||
import Quickshell.Wayland
|
||||
import Quickshell.WindowManager
|
||||
import qs.Commons
|
||||
|
||||
Item {
|
||||
@@ -10,167 +11,133 @@ Item {
|
||||
property ListModel workspaces: ListModel {}
|
||||
property var windows: []
|
||||
property int focusedWindowIndex: -1
|
||||
property var trackedToplevels: ({})
|
||||
property var trackedToplevels: new Set()
|
||||
|
||||
// LabWC typically has global workspaces (shared across all outputs)
|
||||
property bool globalWorkspaces: true
|
||||
|
||||
// Internal workspace state from helper
|
||||
property var workspaceData: ({})
|
||||
property int activeWorkspaceOid: -1
|
||||
// Map from native workspace id to the native Workspace object for activation
|
||||
property var nativeWorkspaceMap: ({})
|
||||
|
||||
// Set of workspace objects we've already connected signals to
|
||||
property var connectedWorkspaces: ({})
|
||||
|
||||
signal workspaceChanged
|
||||
signal activeWindowChanged
|
||||
signal windowListChanged
|
||||
signal displayScalesChanged
|
||||
|
||||
// Path to the helper script
|
||||
readonly property string helperScript: Qt.resolvedUrl("../../Scripts/python/src/compositor/labwc-workspace-helper.py").toString().replace("file://", "")
|
||||
|
||||
function initialize() {
|
||||
updateWindows();
|
||||
startWorkspaceHelper();
|
||||
Logger.i("LabwcService", "Service started");
|
||||
connectWorkspaceSignals();
|
||||
syncWorkspaces();
|
||||
Logger.i("LabwcService", "Service started (native ext-workspace-v1)");
|
||||
}
|
||||
|
||||
// Workspace helper process
|
||||
Process {
|
||||
id: workspaceHelper
|
||||
running: false
|
||||
command: ["python3", root.helperScript]
|
||||
// Watch for workspaces being added/removed from the model
|
||||
Connections {
|
||||
target: WindowManager.workspaces
|
||||
|
||||
stdout: SplitParser {
|
||||
onRead: function (line) {
|
||||
root.parseHelperOutput(line);
|
||||
}
|
||||
}
|
||||
|
||||
stderr: SplitParser {
|
||||
onRead: function (line) {
|
||||
Logger.w("LabwcService", "Helper stderr: " + line);
|
||||
}
|
||||
}
|
||||
|
||||
onExited: function (exitCode, exitStatus) {
|
||||
if (exitCode !== 0) {
|
||||
Logger.e("LabwcService", "Workspace helper exited with code: " + exitCode);
|
||||
}
|
||||
// Restart helper after a delay if it crashes
|
||||
if (root.visible !== false) {
|
||||
restartTimer.start();
|
||||
}
|
||||
function onValuesChanged() {
|
||||
root.connectWorkspaceSignals();
|
||||
Qt.callLater(root.syncWorkspaces);
|
||||
}
|
||||
}
|
||||
|
||||
Timer {
|
||||
id: restartTimer
|
||||
interval: 2000
|
||||
repeat: false
|
||||
onTriggered: {
|
||||
if (!workspaceHelper.running) {
|
||||
Logger.i("LabwcService", "Restarting workspace helper...");
|
||||
startWorkspaceHelper();
|
||||
}
|
||||
// Connect to property change signals on each native workspace object
|
||||
function connectWorkspaceSignals() {
|
||||
const nativeWs = WindowManager.workspaces.values;
|
||||
const newConnected = {};
|
||||
|
||||
for (const ws of nativeWs) {
|
||||
const key = ws.id || ws.toString();
|
||||
newConnected[key] = true;
|
||||
|
||||
if (connectedWorkspaces[key])
|
||||
continue;
|
||||
|
||||
ws.activeChanged.connect(() => {
|
||||
Qt.callLater(root.syncWorkspaces);
|
||||
});
|
||||
|
||||
ws.urgentChanged.connect(() => {
|
||||
Qt.callLater(root.syncWorkspaces);
|
||||
});
|
||||
|
||||
ws.shouldDisplayChanged.connect(() => {
|
||||
Qt.callLater(root.syncWorkspaces);
|
||||
});
|
||||
|
||||
ws.nameChanged.connect(() => {
|
||||
Qt.callLater(root.syncWorkspaces);
|
||||
});
|
||||
}
|
||||
|
||||
connectedWorkspaces = newConnected;
|
||||
}
|
||||
|
||||
function startWorkspaceHelper() {
|
||||
if (!workspaceHelper.running) {
|
||||
workspaceHelper.running = true;
|
||||
Logger.d("LabwcService", "Starting workspace helper: " + helperScript);
|
||||
}
|
||||
}
|
||||
function syncWorkspaces() {
|
||||
const nativeWs = WindowManager.workspaces.values;
|
||||
const groups = WindowManager.workspaceGroups.values;
|
||||
|
||||
function stopWorkspaceHelper() {
|
||||
if (workspaceHelper.running) {
|
||||
workspaceHelper.running = false;
|
||||
}
|
||||
}
|
||||
|
||||
function parseHelperOutput(line) {
|
||||
try {
|
||||
const data = JSON.parse(line);
|
||||
|
||||
if (data.type === "state") {
|
||||
processWorkspaceState(data);
|
||||
} else if (data.type === "error") {
|
||||
Logger.e("LabwcService", "Helper error: " + data.message);
|
||||
}
|
||||
} catch (e) {
|
||||
Logger.e("LabwcService", "Failed to parse helper output: " + e + " - " + line);
|
||||
}
|
||||
}
|
||||
|
||||
function processWorkspaceState(data) {
|
||||
const wsList = data.workspaces || [];
|
||||
const groups = data.groups || [];
|
||||
const oldActiveOid = activeWorkspaceOid;
|
||||
|
||||
// Clear and rebuild workspaces
|
||||
workspaces.clear();
|
||||
workspaceData = {};
|
||||
nativeWorkspaceMap = {};
|
||||
|
||||
let newActiveOid = -1;
|
||||
let idx = 1;
|
||||
|
||||
for (const ws of wsList) {
|
||||
// Skip hidden workspaces
|
||||
if (ws.isHidden) {
|
||||
for (const ws of nativeWs) {
|
||||
// Skip hidden workspaces (shouldDisplay = false means hidden)
|
||||
if (!ws.shouldDisplay) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Find which outputs this workspace's group spans
|
||||
let groupOutputs = [];
|
||||
if (ws.groupOid !== undefined && ws.groupOid !== null) {
|
||||
for (const grp of groups) {
|
||||
if (grp.oid === ws.groupOid && grp.outputs && grp.outputs.length > 0) {
|
||||
groupOutputs = grp.outputs;
|
||||
break;
|
||||
}
|
||||
let outputName = "";
|
||||
if (ws.group) {
|
||||
const groupScreens = ws.group.screens;
|
||||
if (groupScreens && groupScreens.length > 0) {
|
||||
outputName = groupScreens[0].name || "";
|
||||
}
|
||||
}
|
||||
|
||||
const wsEntry = {
|
||||
"id": ws.oid,
|
||||
"idx": ws.coordinates && ws.coordinates.length > 0 ? ws.coordinates[0] + 1 : idx,
|
||||
"id": ws.id || idx.toString(),
|
||||
"idx": idx,
|
||||
"name": ws.name || ("Workspace " + idx),
|
||||
"output": groupOutputs.length > 0 ? groupOutputs[0] : "",
|
||||
"isFocused": ws.isActive,
|
||||
"output": outputName,
|
||||
"isFocused": ws.active,
|
||||
"isActive": true,
|
||||
"isUrgent": ws.isUrgent,
|
||||
"isUrgent": ws.urgent,
|
||||
"isOccupied": false,
|
||||
"oid": ws.oid
|
||||
"oid": ws.id || idx.toString()
|
||||
};
|
||||
|
||||
workspaces.append(wsEntry);
|
||||
workspaceData[ws.oid] = wsEntry;
|
||||
|
||||
if (ws.isActive) {
|
||||
newActiveOid = ws.oid;
|
||||
}
|
||||
nativeWorkspaceMap[wsEntry.id] = ws;
|
||||
|
||||
idx++;
|
||||
}
|
||||
|
||||
activeWorkspaceOid = newActiveOid;
|
||||
|
||||
// Update windows with workspace info
|
||||
updateWindowWorkspaces();
|
||||
|
||||
// Emit signal if workspace changed
|
||||
if (oldActiveOid !== newActiveOid || workspaces.count > 0) {
|
||||
workspaceChanged();
|
||||
}
|
||||
workspaceChanged();
|
||||
}
|
||||
|
||||
function updateWindowWorkspaces() {
|
||||
// Update windows with active workspace ID
|
||||
// Note: ext-workspace-v1 doesn't provide window-to-workspace mapping
|
||||
// This requires ext-toplevel-workspace protocol which LabWC may not support yet
|
||||
// For now, assign all windows to the active workspace
|
||||
// ext-workspace-v1 doesn't provide window-to-workspace mapping
|
||||
// Assign all windows to the active workspace
|
||||
let activeId = "";
|
||||
for (let i = 0; i < workspaces.count; i++) {
|
||||
const ws = workspaces.get(i);
|
||||
if (ws.isFocused) {
|
||||
activeId = ws.id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (let i = 0; i < windows.length; i++) {
|
||||
if (activeWorkspaceOid > 0) {
|
||||
windows[i].workspaceId = activeWorkspaceOid;
|
||||
if (activeId) {
|
||||
windows[i].workspaceId = activeId;
|
||||
}
|
||||
}
|
||||
windowListChanged();
|
||||
@@ -184,7 +151,7 @@ Item {
|
||||
}
|
||||
|
||||
function connectToToplevel(toplevel) {
|
||||
if (!toplevel || !toplevel.address)
|
||||
if (!toplevel)
|
||||
return;
|
||||
|
||||
toplevel.activatedChanged.connect(() => {
|
||||
@@ -204,28 +171,41 @@ Item {
|
||||
function updateWindows() {
|
||||
const newWindows = [];
|
||||
const toplevels = ToplevelManager.toplevels?.values || [];
|
||||
const newTracked = {};
|
||||
|
||||
let focusedIdx = -1;
|
||||
let idx = 0;
|
||||
|
||||
// Find active workspace id
|
||||
let activeId = "";
|
||||
for (let i = 0; i < workspaces.count; i++) {
|
||||
const ws = workspaces.get(i);
|
||||
if (ws.isFocused) {
|
||||
activeId = ws.id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (const toplevel of toplevels) {
|
||||
if (!toplevel)
|
||||
continue;
|
||||
|
||||
const addr = toplevel.address || "";
|
||||
if (addr && !trackedToplevels[addr]) {
|
||||
if (!trackedToplevels.has(toplevel)) {
|
||||
connectToToplevel(toplevel);
|
||||
}
|
||||
if (addr) {
|
||||
newTracked[addr] = true;
|
||||
trackedToplevels.add(toplevel);
|
||||
}
|
||||
|
||||
// Get output name from toplevel's screen list
|
||||
const output = (toplevel.screens && toplevel.screens.length > 0) ? (toplevel.screens[0].name || "") : "";
|
||||
|
||||
// Use appId + title as a stable id since Toplevel has no address property
|
||||
const windowId = (toplevel.appId || "") + ":" + idx;
|
||||
|
||||
newWindows.push({
|
||||
"id": addr,
|
||||
"id": windowId,
|
||||
"appId": toplevel.appId || "",
|
||||
"title": toplevel.title || "",
|
||||
"workspaceId": activeWorkspaceOid > 0 ? activeWorkspaceOid : 1,
|
||||
"output": output,
|
||||
"workspaceId": activeId || "1",
|
||||
"isFocused": toplevel.activated || false,
|
||||
"toplevel": toplevel
|
||||
});
|
||||
@@ -235,8 +215,6 @@ Item {
|
||||
}
|
||||
idx++;
|
||||
}
|
||||
|
||||
trackedToplevels = newTracked;
|
||||
windows = newWindows;
|
||||
focusedWindowIndex = focusedIdx;
|
||||
|
||||
@@ -256,15 +234,12 @@ Item {
|
||||
}
|
||||
|
||||
function switchToWorkspace(workspace) {
|
||||
try {
|
||||
// Use the workspace name for activation via ext-workspace protocol
|
||||
// Names are "1", "2", "3", "4" as configured in labwc rc.xml
|
||||
const wsName = workspace.name || workspace.idx?.toString() || "1";
|
||||
|
||||
// Activate via ext-workspace protocol using workspace name
|
||||
Quickshell.execDetached(["python3", helperScript, "--activate", wsName]);
|
||||
} catch (e) {
|
||||
Logger.e("LabwcService", "Failed to switch workspace:", e);
|
||||
// Find the native Workspace object and activate it directly
|
||||
const nativeWs = nativeWorkspaceMap[workspace.id] || nativeWorkspaceMap[workspace.oid];
|
||||
if (nativeWs && nativeWs.canActivate) {
|
||||
nativeWs.activate();
|
||||
} else {
|
||||
Logger.w("LabwcService", "Cannot activate workspace: " + (workspace.name || workspace.id));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,7 +260,6 @@ Item {
|
||||
}
|
||||
|
||||
function logout() {
|
||||
stopWorkspaceHelper();
|
||||
try {
|
||||
// Exit labwc by sending SIGTERM to $LABWC_PID or using --exit flag
|
||||
Quickshell.execDetached(["sh", "-c", "labwc --exit || kill -s SIGTERM $LABWC_PID"]);
|
||||
@@ -306,8 +280,4 @@ Item {
|
||||
// de-activated until proper testing
|
||||
return null;
|
||||
}
|
||||
|
||||
Component.onDestruction: {
|
||||
stopWorkspaceHelper();
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user