diff --git a/.gitignore b/.gitignore
index 6e75287..910b007 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 __pycache__
 *.dot
+*.mp4
diff --git a/README.md b/README.md
index b430030..3a93733 100644
--- a/README.md
+++ b/README.md
@@ -2,41 +2,106 @@

 A framework to mix and distribute live video feeds from interactive surfaces via WebRTC.

-[add picture here]
+![shared table surfaces with real and projected objects](assets/teaser.jpg)
+Image credit: (c) Stadt Regensburg, (c) Stefan Effenhauser (see also the [VIGITIA](https://vigitia.de/) project)

 SurfaceStreams consists of a mixing server and one or more clients. Each client sends one audio stream and two video streams: a plain old webcam feed of the user called the _front stream_, and a second feed of a rectified interactive surface called the _surface stream_. The surface stream is expected to have its background removed and replaced with 100% bright green for chroma keying.

 The mixing server then composes a new surface stream for each client, consisting of the layered surface streams of the _other_ clients, and streams that back to each client (along with a single combined front stream of all individual front streams arranged side by side).

+![diagram of system architecture](assets/diagram.png)
+
+## HowTo
+
+Here's an example walkthrough of how to connect an interactive surface with a browser client:
+
+ * on a server host with sufficient CPU resources, run the mixing backend: `./webrtc_server.py`
+ * start the browser client:
+   * with Chrome or Firefox, go to `https://${SERVER_HOST}:8080/stream.html`
+   * allow access to camera/microphone
+   * after a few seconds, you should see your own webcam stream and a black canvas
+   * try doodling on the black canvas (left mouse button draws, right button erases)
+ * start the interactive surface:
+   * set up and calibrate [SurfaceCast](https://github.com/floe/surfacecast) to stream the surface on virtual camera `/dev/video20` (see Usage - Example 2)
+   * run the Python client: `./webrtc_client.py -t ${SERVER_HOST} -s /dev/video20 -f /dev/video0` (or whatever device your plain webcam is)
+   * make the `surface` window fullscreen on the surface display, and the `front` window fullscreen on the front display
+ * connect additional browser and/or surface clients (up to 4 in total)
+
 ## Clients

-* standalone Python client
-  * any two V4L2 video sources (also virtual ones, e.g. from https://github.com/floe/surfacecast)
-* HTML5 client
-  * virtual drawing board surface
-* VR client
-  * tbd
+### Python command-line client parameters
+
+```
+  --fake                use fake sources (desc. from -f/-s)
+```
+Mostly useful for testing; creates default outgoing streams with fake test data (TV test image, moving ball, tick sounds).
+If any of `-f/-s/-a` are also given, the string will be interpreted as a GStreamer bin description.
+
+```
+  -m, --main            flag this client as main (lowest z)
+```
+If a client does not have background filtering (i.e. a plain webcam), you can use this flag to make sure that the surface stream from this client is always placed below all others. Note that you can only have one "main" client per session, otherwise the surface mixing will get messed up.
+
+```
+  -a AUDIO, --audio AUDIO
+  -f FRONT, --front FRONT
+  -s SURFACE, --surface SURFACE
+                        audio/front/surface source (device name or pipeline)
+```
+If any of these are given without `--fake`, they will be interpreted as a device name (e.g. `/dev/video0`). Otherwise, they will be interpreted as a GStreamer bin description (e.g. `"videotestsrc ! timeoverlay"`). Note that in the second case the whole string needs to be quoted (a mixed example is shown below).
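+For example, both styles can be combined in one invocation (a sketch; the device path and the test pipeline are placeholders):
+
+```
+# front stream from a real webcam device, surface stream from a quoted GStreamer bin description
+./webrtc_client.py -t 127.0.0.1 -f /dev/video0 -s "videotestsrc is-live=true pattern=ball ! timeoverlay"
+```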
+
+```
+  -p PORT, --port PORT  server HTTPS listening port (8080)
+  -t TARGET, --target TARGET
+                        server to connect to (127.0.0.1)
+```
+Used to give the hostname or IP address of the server, and optionally a non-default port to connect to.
+
+```
+  -u STUN, --stun STUN  STUN server
+```
+If you want to use a different STUN server than the default (stun://stun.l.google.com:19302), specify it here.
+
+```
+  -n NICK, --nick NICK  client nickname
+```
+Can be used to attach a label (e.g. "Alice" or "Bob") to the front stream.
+
+## Server
+
+```
+  -s, --sink            save all streams to MP4 file (default: False)
+  -o OUT, --out OUT     MP4 output filename (default: surfacestreams-20220327-125732.mp4)
+```
+If `-s/--sink` is given, the combined front, surface, and audio streams are written to an MP4 file. An optional target filename can be set via `-o/--out`. Note that the file contains Opus audio inside an MP4 container, which not all players support. If necessary, use `scripts/playback.sh` to recode the audio to MP3 and play all streams simultaneously in VLC.
+
+```
+  -p PORT, --port PORT  server HTTPS listening port (default: 8080)
+  -u STUN, --stun STUN  STUN server (default: stun://stun.l.google.com:19302)
+```
+If you want to use a different STUN server than the default, or a different listening port, specify them here.

 ## Requirements

 * Mixing server & standalone client
   * Ubuntu 20.04 LTS (Python 3.8, GStreamer 1.16)
   * Debian 11 "Bullseye" (Python 3.9, GStreamer 1.18)
-  * Install dependencies: `sudo apt install gstreamer1.0-libav gir1.2-soup-2.4 gir1.2-gstreamer-1.0 gir1.2-gst-plugins-bad-1.0 gir1.2-gst-plugins-base-1.0 gir1.2-nice-0.1 libnice10 gstreamer1.0-nice gstreamer1.0-plugins-bad`
+  * Install dependencies: `sudo apt install gstreamer1.0-libav gir1.2-soup-2.4 gir1.2-gstreamer-1.0 gir1.2-gst-plugins-bad-1.0 gir1.2-gst-plugins-base-1.0 gir1.2-nice-0.1 libnice10 gstreamer1.0-nice gstreamer1.0-plugins-bad gstreamer1.0-plugins-good gstreamer1.0-plugins-ugly`
 * HTML5 client
   * Firefox 78 ESR (Note: remember to enable the OpenH264 plugin in `about:plugins`)
-  * Firefox 94/95
-  * Chrome 92
+  * Firefox 94-96
+  * Chrome 92-97

 ## Known issues

 * Server
   * The server will repeatedly issue the warning `[...]: loop detected in the graph of bin 'pipeline0'!!`, which can be safely ignored.
-  * Some race conditions when setting up the mixers still seem to be present, but hard to pin down.
+  * Some race conditions when setting up the mixers still seem to be present, but are hard to pin down. They happen particularly when a client connects within a few seconds of the previous one, before negotiation has completed, and usually show up as a black surface stream; restart the affected client in this case.
 * Python client
-  * Using webcams as live sources (e.g. for the front stream) is somewhat hit-and-miss and depends on the pixel formats the webcam can deliver. Reliable results so far only with 24-bit RGB or 16-bit YUYV/YUV2 (see also [issue #4](https://github.com/floe/surfacestreams/issues/4)). The front/face cam needs to support 640x360 natively, the surface cam needs to support 1280x720 natively. Good results with Logitech C270 (front) and C920 (surface).
+  * Using webcams as live sources (e.g. for the front stream) is somewhat hit-and-miss and depends on the pixel formats the webcam can deliver. Reliable results so far only with 24-bit RGB or 16-bit YUYV/YUV2 (see also [issue #4](https://github.com/floe/surfacestreams/issues/4)). The front/face cam needs to support 640x360 natively, and the surface cam needs to support 1280x720 natively; a quick way to check is sketched after this list. Good results with Logitech C270 (front/face) and C920 (surface). Note: the environment variable `GST_V4L2_USE_LIBV4L2=1` can sometimes be used to fix format mismatch issues.
   * The Python client has a noticeable delay (sometimes on the order of 30 seconds) before the surface stream finally starts running, unlike e.g. the browser client (see also [issue #2](https://github.com/floe/surfacestreams/issues/2)). Once it runs, the delay is negligible, but the waiting time until things synchronize is iffy.
+  * A Raspberry Pi 4 is just barely fast enough to handle the incoming and outgoing streams _plus_ the SurfaceCast perspective transform. Overclocking to 1800 MHz (core) / 600 MHz (GPU) is recommended.
 * HTML5 client
-  * not working on Chromium (probably a codec issue, see [#8](https://github.com/floe/surfacestreams/issues/8))
-  * not working on Safari (reason unknown, see [#6](https://github.com/floe/surfacestreams/issues/6))
+  * not working on Chromium (non-free codec problem, see [issue #8](https://github.com/floe/surfacestreams/issues/8))
+  * not working on Safari (reason unknown, see [issue #6](https://github.com/floe/surfacestreams/issues/6))
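+As a quick sanity check of native webcam modes (assuming `v4l2-ctl` from the `v4l-utils` package and the GStreamer command-line tools are installed):
+
+```
+# list the pixel formats and frame sizes the camera can deliver natively
+v4l2-ctl --device=/dev/video0 --list-formats-ext
+
+# dry-run the caps the front stream needs (YUY2 is GStreamer's name for 16-bit YUYV)
+gst-launch-1.0 v4l2src device=/dev/video0 ! video/x-raw,format=YUY2,width=640,height=360 ! videoconvert ! autovideosink
+```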
diff --git a/count.wav b/assets/count.wav
similarity index 100%
rename from count.wav
rename to assets/count.wav
diff --git a/assets/diagram.png b/assets/diagram.png
new file mode 100644
index 0000000..69e4edd
Binary files /dev/null and b/assets/diagram.png differ
diff --git a/assets/diagram.svg b/assets/diagram.svg
new file mode 100644
index 0000000..fd4bcc6
--- /dev/null
+++ b/assets/diagram.svg
@@ -0,0 +1,2408 @@
[2408 added lines of SVG markup were mangled during extraction; the recoverable text labels are "surface"/"front" for each of three clients plus "server", "incoming", and "outgoing", matching the architecture diagram referenced in the README]
diff --git a/assets/front.png b/assets/front.png
new file mode 100644
index 0000000..0606e0a
Binary files /dev/null and b/assets/front.png differ
diff --git a/assets/front.svg b/assets/front.svg
new file mode 100644
index 0000000..e8e586b
--- /dev/null
+++ b/assets/front.svg
@@ -0,0 +1,99 @@
[99 added lines of SVG markup were mangled during extraction; the recoverable text reads: "Note: this video stream is being recorded for research purposes. Contact floech@cs.aau.dk for any questions."]
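Aside: `assets/front.png` is this recording notice rendered to a bitmap; the server uses it as a static placeholder front stream (`filesrc location=assets/front.png ! pngdec ! videoconvert ! imagefreeze`, see `webrtc_server.py` below). To preview that pipeline standalone, assuming the GStreamer command-line tools are installed:

```
gst-launch-1.0 filesrc location=assets/front.png ! pngdec ! videoconvert ! imagefreeze ! autovideosink
```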
diff --git a/assets/teaser.jpg b/assets/teaser.jpg
new file mode 100644
index 0000000..dc2dc38
Binary files /dev/null and b/assets/teaser.jpg differ
diff --git a/cert.pem b/assets/tls-cert.pem
similarity index 100%
rename from cert.pem
rename to assets/tls-cert.pem
diff --git a/key.pem b/assets/tls-key.pem
similarity index 100%
rename from key.pem
rename to assets/tls-key.pem
diff --git a/assets/webrtc_test.html b/assets/webrtc_test.html
new file mode 100644
index 0000000..2c2907f
--- /dev/null
+++ b/assets/webrtc_test.html
@@ -0,0 +1,36 @@
[36 added lines of HTML markup were mangled during extraction; nothing is recoverable beyond the diff header]
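Aside: the server now loads its TLS key pair from the renamed `assets/` paths (`assets/tls-cert.pem`, `assets/tls-key.pem`; see `webrtc_server.py` below). A minimal sketch for regenerating a self-signed pair at those paths, assuming `openssl` is available:

```
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -subj "/CN=localhost" \
  -keyout assets/tls-key.pem -out assets/tls-cert.pem
```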
+ diff --git a/client.py b/client.py index 952d00f..034deb0 100644 --- a/client.py +++ b/client.py @@ -9,90 +9,189 @@ from gi.repository import GLib, Gst, Soup, GstWebRTC, GstSdp from gst_helpers import * +from webrtc_peer import WebRTCPeer # client object pool clients = {} -frontmixer = None -frontstream = None - -# links between individual client tee/mixer pairs -mixer_links = [] - # position offsets for 4 front streams # FIXME: how to handle > 4 clients? offsets = [ - (640,360), - ( 0, 0), - (640, 0), - ( 0,360) + (640,360), # bottom right + ( 0, 0), # top left + (640, 0), # top right + ( 0,360) # bottom left ] -class Client: - def __init__(self,name): +class BaseClient: + + def __init__(self,name,wrb): - self.wrb = None + self.wrb = wrb self.name = name - self.flags = {} - self.inputs = {} + self.queues = [] + self.reqpads = [] + + # link to sources + for name in ["surface","front","audio"]: + self.link_request_pads(get_by_name(name+"testsource"),"src_%u",self.wrb.bin,"sink_"+name,do_queue=False) + + # get a (new) pad from an element + def get_pad(self, el, tpl): + + pad = el.get_static_pad(tpl) + + # pad doesn't exist yet, so request a new one (and store it) + if pad == None: + pad = get_request_pad(el,tpl) + self.reqpads.append(pad) + # we have a static pad, check if it's already linked + else: + peer = pad.get_peer() + if peer: + peer.unlink(pad) + + return pad + + # convenience function to link request pads (and keep track of pads/queues) + def link_request_pads(self, el1, tpl1, el2, tpl2, do_queue=True, qp={}): + + pad1 = self.get_pad(el1,tpl1) + pad2 = self.get_pad(el2,tpl2) + + if do_queue: + queue = new_element("queue",qp) + add_and_link([queue]) + pad1.link(queue.get_static_pad("sink")) + queue.get_static_pad("src").link(pad2) + self.queues.append(queue) + else: + pad1.link(pad2) + + return pad2 + + +class Client(BaseClient): + + def __init__(self,name,wrb): + + super().__init__(name,wrb) self.outputs = {} self.mixers = {} clients[name] = self - def process(self, msg): - self.flags[msg] = True - logging.info("Setting flags for "+self.name+": "+str(self.flags)) + def remove(self): + logging.info("Removing client: "+self.name) + clients.pop(self.name) + + # pause, unlink, and remove the mixers + logging.debug(" Removing mixers...") + for i in self.mixers: + mixer = self.mixers[i] + mixer.set_state(Gst.State.NULL) + for p in mixer.sinkpads: + peer = p.get_peer() + if peer: + peer.unlink(p) + remove_element(mixer) + + # pause the bin + self.wrb.bin.set_state(Gst.State.NULL) + + # pause, unlink, and remove the output buffers + logging.debug(" Removing outputs...") + for i in self.outputs: + out_tee = self.outputs[i] + out_tee.set_state(Gst.State.NULL) + for p in out_tee.srcpads: + p.unlink(p.get_peer()) + remove_element(out_tee) + + # remove the bin + logging.debug(" Removing main bin...") + remove_element(self.wrb.bin) + + # remove the alphafilter, if exists + alpha = get_by_name("alpha_"+self.name) + if alpha: + alpha.set_state(Gst.State.NULL) + remove_element(alpha) + + # remove the textoverlay, if exists + text = get_by_name("text_"+self.name) + if text: + text.set_state(Gst.State.NULL) + remove_element(text) + + # remove queues from link_request_pad + logging.debug(" Removing queues...") + for q in self.queues: + q.set_state(Gst.State.NULL) + remove_element(q) + + # remove request pads + logging.debug(" Removing request pads...") + for p in self.reqpads: + el = p.get_parent_element() + if el != None: + el.release_request_pad(p) + + logging.info("Client "+self.name+" 
unlinked.") # create mixer & converter - def create_mixer(self,mtype,mixer,convert,caps): + def create_mixer(self,mtype,mixer,caps): if mtype in self.mixers: return logging.info(" creating "+mtype+" mixer for client "+self.name) + self.mixers[mtype] = mixer - add_and_link([mixer,convert,caps]) - link_to_inputselector(caps,"src",self.inputs[mtype]) + self.mixers[mtype+"_caps"] = caps + add_and_link([mixer,caps]) + + self.link_request_pads(caps,"src",self.wrb.bin,"sink_"+mtype,do_queue=False) + self.link_request_pads(get_by_name(mtype+"testsource"),"src_%u",mixer,"sink_%u") # link client to frontmixer def link_to_front(self): # FIXME: frontstream is separately encoded for each client ATM, should be one single encoder - if not "front" in self.inputs or not "front" in self.outputs: - return - logging.info(" linking client "+self.name+" to frontmixer") - # link frontstream tee to client-specific muxer - link_to_inputselector(frontstream,"src_%u",self.inputs["front"]) + # link frontstream tee to client + self.link_request_pads(get_by_name("frontstream"),"src_%u",self.wrb.bin,"sink_front",do_queue=False) + + # sanity check (important for sink client) + if not "front" in self.outputs: + return # request and link pads from tee and frontmixer - sinkpad = link_request_pads(self.outputs["front"],"src_%u",frontmixer,"sink_%u") + sinkpad = self.link_request_pads(self.outputs["front"],"src_%u",get_by_name("frontmixer"),"sink_%u") # set xpos/ypos properties on pad according to sequence number - padnum = int(sinkpad.get_name().split("_")[1]) + padnum = int(sinkpad.get_name().split("_")[1]) % len(offsets) sinkpad.set_property("xpos",offsets[padnum][0]) sinkpad.set_property("ypos",offsets[padnum][1]) # helper function to link source tees to destination mixers def link_streams_oneway(self,dest,prefix,qparams): - linkname = prefix+"_"+self.name+"_"+dest.name - if not linkname in mixer_links: + # sanity check (important for sink client) + if not prefix in self.outputs: + return - logging.info(" linking client "+self.name+" to "+prefix+"mixer "+dest.name) - # TODO: figure out the queue parameters - sinkpad = link_request_pads(self.outputs[prefix],"src_%u",dest.mixers[prefix],"sink_%u",qp=qparams) - mixer_links.append(linkname) + logging.info(" linking client "+self.name+" to "+prefix+"mixer "+dest.name) + sinkpad = self.link_request_pads(self.outputs[prefix],"src_%u",dest.mixers[prefix],"sink_%u",qp=qparams) - # for the "main" surface, destination mixer pad needs zorder = 0 - if prefix == "surface" and "main" in self.flags: - logging.info(" fixing zorder for main client") - sinkpad.set_property("zorder",0) + # for the "main" surface, destination mixer pad needs zorder = 0 + if prefix == "surface" and "main" in self.wrb.flags: + logging.info(" fixing zorder for main client") + sinkpad.set_property("zorder",0) # link all other clients to this mixer, this client to other mixers - def link_streams(self,clients,prefix,qparams): + def link_streams(self,prefix,qparams): for c in clients: @@ -107,58 +206,27 @@ def link_streams(self,clients,prefix,qparams): # for every _other_ tee, link that tee to my mixer other.link_streams_oneway(self,prefix,qparams) - # link all other clients to local mixer, this client to other mixers - def link_all_streams(self,clients): - self.link_streams(clients,"surface",{"max-size-buffers":1}) - self.link_streams(clients,"audio",{"max-size-time":100000000}) - - -# create single mixer for front stream -def create_frontmixer_queue(): - - global frontmixer - global frontstream - - if frontmixer 
!= None or frontstream != None: - return - - logging.info(" creating frontmixer subqueue") - - frontmixer = new_element("compositor",myname="frontmixer") - frontstream = new_element("tee",{"allow-not-linked":True},myname="frontstream") - - add_and_link([ frontmixer, frontstream ]) + # link new client to mixers + def link_new_client(self): -# link new client to mixers -def link_new_client(client): + logging.info(" setting up mixers for new client "+self.name) - create_frontmixer_queue() + # create surface/audio mixers + self.create_mixer("surface", new_element("compositor",{"background":"black"}), new_element("capsfilter",{"caps":Gst.Caps.from_string("video/x-raw,format=AYUV,width=1280,height=720,framerate=15/1")})) + self.create_mixer( "audio", new_element("audiomixer" ), new_element("capsfilter",{"caps":Gst.Caps.from_string("audio/x-raw,format=S16LE,rate=48000,channels=1")})) - logging.info(" setting up mixers for new client "+client.name) - - # create surface/audio mixers for _all_ clients that don't have one yet - # needs to loop through all clients for the case where 2 or more clients - # appear simultaneously, otherwise there are no mixers to link to - if len(clients) >= 2: - for c in clients: - clients[c].create_mixer("surface", new_element("compositor",{"background":"black"}), new_element("videoconvert"), - new_element("capsfilter",{"caps":Gst.Caps.from_string("video/x-raw,format=YV12,width=1280,height=720,framerate=15/1")})) - clients[c].create_mixer( "audio", new_element("audiomixer"), new_element("audioconvert"), - new_element("capsfilter",{"caps":Gst.Caps.from_string("audio/x-raw,format=S16LE,rate=48000,channels=1")})) + # add missing frontmixer links + self.link_to_front() - # add missing frontmixer links - client.link_to_front() - - # add missing surface/audio mixer links - client.link_all_streams(clients) - - dump_debug("final") + # add missing surface/audio mixer links + # TODO: figure out the queue parameters (if any?) + self.link_streams("surface",{}) # {"max-size-buffers":1}) + self.link_streams("audio",{}) # {"max-size-time":100000000}) # new top-level element added to pipeline def on_element_added(thebin, element): # check format: {input,output}_IPADDR_PORT_{surface,front,audio} - # FIXME: fails if source name is not IPADDR_PORT elname = element.get_name().split("_") if len(elname) != 4: return @@ -166,17 +234,14 @@ def on_element_added(thebin, element): direction = elname[0] source = elname[1]+"_"+elname[2] stype = elname[3] - #logging.debug("New element:",direction,source,stype) + #logging.debug("New element: "+direction+" "+source+" "+stype) client = clients[source] - + # TODO: perhaps store the alpha element in outputs as well? if direction == "output": client.outputs[stype] = element - if direction == "input": - client.inputs[stype] = element # are all outputs in place? 
if len(client.outputs) == 3: logging.info("Client "+source+": all input/output elements complete.") - link_new_client(client) - + client.link_new_client() diff --git a/firewall.md b/firewall.md deleted file mode 100644 index a413bbc..0000000 --- a/firewall.md +++ /dev/null @@ -1,17 +0,0 @@ -# Firewall settings - -``` -Chain INPUT (policy DROP 405 packets, 44719 bytes) - pkts bytes target prot opt in out source destination - 26M 20G ACCEPT all -- lo any anywhere anywhere - 38M 13G ACCEPT all -- any any anywhere anywhere state RELATED,ESTABLISHED - 3036 135K ACCEPT tcp -- any any anywhere anywhere tcp dpt:http-alt - 730 108K ACCEPT udp -- any any anywhere anywhere udp dpts:50000:65535 - -Chain OUTPUT (policy DROP 449 packets, 55760 bytes) - pkts bytes target prot opt in out source destination - 26M 20G ACCEPT all -- any lo anywhere anywhere - 251M 351G ACCEPT all -- any any anywhere anywhere state RELATED,ESTABLISHED - 1841 292K ACCEPT udp -- any any anywhere anywhere udp dpts:50000:65535 - 24 5244 ACCEPT udp -- any any anywhere anywhere udp dpt:1900 -``` diff --git a/gst_helpers.py b/gst_helpers.py index 95e2079..6f20869 100644 --- a/gst_helpers.py +++ b/gst_helpers.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -import gi,logging,os +import gi,logging,os,signal gi.require_version('Gst', '1.0') gi.require_version('GLib', '2.0') from gi.repository import Gst, GLib @@ -55,43 +55,39 @@ def bus_call(bus, message, loop): logging.warning("Pipeline clock lost!") return True -# convenience function to link request pads -def link_request_pads(el1, tpl1, el2, tpl2, do_queue=True, qp={}): +# shortcut to request pad +def get_request_pad(el,tpl): + return el.request_pad(el.get_pad_template(tpl), None, None) - pad1 = el1.get_static_pad(tpl1) - if pad1 == None: - pad1 = el1.request_pad(el1.get_pad_template(tpl1), None, None) +# create single mixer for front stream +def create_frontmixer_queue(): - pad2 = el2.get_static_pad(tpl2) - if pad2 == None: - pad2 = el2.request_pad(el2.get_pad_template(tpl2), None, None) + logging.info("Creating frontmixer subqueue...") - if do_queue: - queue = new_element("queue",qp) - pipeline.add(queue) - queue.sync_state_with_parent() - pad1.link(queue.get_static_pad("sink")) - queue.get_static_pad("src").link(pad2) - else: - pad1.link(pad2) - return pad2 + frontmixer = new_element("compositor",myname="frontmixer") + capsfilter = new_element("capsfilter",{"caps":Gst.Caps.from_string("video/x-raw,format=YV12,width=1280,height=720,framerate=15/1")}) + frontstream = new_element("tee",{"allow-not-linked":True},myname="frontstream") + + add_and_link([ frontmixer, capsfilter, frontstream ]) -# link to input-selector and activate new link -def link_to_inputselector(el1, tpl1, el2): - pad = link_request_pads(el1,tpl1,el2,"sink_%u",do_queue=False) - el2.set_property("active-pad", pad) - return pad + frontsource = get_by_name("fronttestsource") + pad1 = get_request_pad(frontsource,"src_%u") + pad2 = get_request_pad(frontmixer,"sink_%u") + pad1.link(pad2) -def dump_debug(name="debug"): +# write out debug dot file (needs envvar GST_DEBUG_DUMP_DOT_DIR set) +def dump_debug(name="surfacestreams"): if os.getenv("GST_DEBUG_DUMP_DOT_DIR") == None: return logging.info("Writing graph snapshot to "+name+".dot") - # write out debug dot file (needs envvar GST_DEBUG_DUMP_DOT_DIR set) - Gst.debug_bin_to_dot_file(pipeline,Gst.DebugGraphDetails(15),name) + Gst.debug_bin_to_dot_file(pipeline,Gst.DebugGraphDetails.ALL,name) def get_by_name(name): return pipeline.get_by_name(name) +def remove_element(item): + 
return pipeline.remove(item) + # initialize pipeline and mainloop def init_pipeline(callback,mylevel=0): @@ -105,7 +101,10 @@ def init_pipeline(callback,mylevel=0): # configure the logger loglevels = { 0: logging.INFO, 1: logging.DEBUG, 2: logging.TRACE } - logging.basicConfig(format="%(levelname)s:: %(message)s",level=loglevels[mylevel]) + logging.basicConfig(format="[%(levelname)-5s] %(message)s",level=loglevels[mylevel]) + + # signal handler to dump graph dot file on SIGUSR1 + signal.signal(signal.SIGUSR1, lambda a,b: dump_debug()) Gst.init(None) pipeline = Gst.Pipeline() @@ -123,22 +122,18 @@ def connect_bus(msgtype, callback, *args): bus.connect(msgtype, callback, *args) # test sources as stream placeholders -def add_test_sources(frontdev="",surfdev="",fake=False,bgcol=0xFF00FF00,wave="ticks",perspective=None): +def add_test_sources(frontdev="",surfdev="",audiodev="",fake=False,bgcol=0xFF00FF00,wave="ticks"): if fake: - frontsrc = "videotestsrc is-live=true pattern=smpte ! timeoverlay" if frontdev == "" else frontdev + frontsrc = "videotestsrc is-live=true pattern=smpte ! timeoverlay text="+wave if frontdev == "" else frontdev surfsrc = "videotestsrc is-live=true pattern=ball background-color="+str(bgcol)+" ! timeoverlay" if surfdev == "" else surfdev - audiosrc = "audiotestsrc is-live=true wave="+wave - # audiosrc = "multifilesrc do-timestamp=true loop=true location=count.wav ! wavparse ignore-length=1 ! identity sync=true" + audiosrc = "audiotestsrc is-live=true wave="+wave if audiodev == "" else audiodev else: # FIXME: if a virtual device (e.g. v4l2loopback is used here, then it needs to use RGB pixel format, otherwise caps negotiation fails - frontsrc = "v4l2src do-timestamp=true device="+frontdev+" ! videorate ! videoconvert" - surfsrc = "v4l2src do-timestamp=true device="+surfdev+" ! videorate ! videoconvert" + frontsrc = "v4l2src do-timestamp=true device="+frontdev+" ! videorate ! videoconvert ! videocrop top=-1 bottom=-1 left=-1 right=-1" + surfsrc = "v4l2src do-timestamp=true device="+surfdev+" ! videorate ! videoconvert ! videocrop top=-1 bottom=-1 left=-1 right=-1" audiosrc = "alsasrc do-timestamp=true" # "audiorate ! audioconvert" - # FIXME still a bit hackish, maybe solveable without double videoconvert? - vc = None if perspective == None else new_element("videoconvert") - logging.debug(" Front Source: "+frontsrc) logging.debug("Surface Source: "+surfsrc) logging.debug(" Audio Source: "+audiosrc) @@ -148,8 +143,8 @@ def add_test_sources(frontdev="",surfdev="",fake=False,bgcol=0xFF00FF00,wave="ti new_element("tee",{"allow-not-linked":True},"fronttestsource") ]) - add_and_link([ Gst.parse_bin_from_description( surfsrc, True ), perspective, vc, # <-- NOTE - new_element("capsfilter",{"caps":Gst.Caps.from_string("video/x-raw,format=YV12,width=1280,height=720,framerate=15/1")}), + add_and_link([ Gst.parse_bin_from_description( surfsrc, True ), + new_element("capsfilter",{"caps":Gst.Caps.from_string("video/x-raw,format=AYUV,width=1280,height=720,framerate=15/1")}), new_element("tee",{"allow-not-linked":True},"surfacetestsource") ]) diff --git a/scripts/client.sh b/scripts/client.sh new file mode 100755 index 0000000..1748d49 --- /dev/null +++ b/scripts/client.sh @@ -0,0 +1,8 @@ +#!/bin/bash +while true ; do + (sleep 10 && wmctrl -F -r surface -e 0,0,1100,-1,-1 && sleep 1 && wmctrl -F -r surface -b add,fullscreen && wmctrl -F -r front -b add,fullscreen) & + ./webrtc_client.py -t butterbrot.org -f /dev/video-surf -s /dev/video20 + clear + echo Restarting, please wait... 
+ sleep $((2+$RANDOM/2000)) +done diff --git a/scripts/playback.sh b/scripts/playback.sh new file mode 100755 index 0000000..851fd7e --- /dev/null +++ b/scripts/playback.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# VLC can't play Opus inside MP4 container +if ffmpeg -i "$1" |& grep Opus ; then + # in that case, recode Opus as MP3... + OUTPUT="${1/%.mp4/-fixed.mp4}" + ffmpeg -i "$1" -acodec mp3 -vcodec copy -map 0:0 -map 0:1 -map 0:2 "$OUTPUT" + # ... and replace original file + mv "$OUTPUT" "$1" +fi + +# play all video streams in parallel +vlc --sout-all --sout '#display' "$1" diff --git a/scripts/server.sh b/scripts/server.sh new file mode 100755 index 0000000..f52ecd1 --- /dev/null +++ b/scripts/server.sh @@ -0,0 +1,6 @@ +#!/bin/bash +export GST_DEBUG_DUMP_DOT_DIR=. +while true ; do + ./webrtc_server.py --sink + sleep 1 +done diff --git a/scripts/test.sh b/scripts/test.sh new file mode 100755 index 0000000..79860c9 --- /dev/null +++ b/scripts/test.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# on exit, kill the whole process group +trap 'trap - SIGTERM && kill 0' SIGINT SIGTERM EXIT + +URL="https://localhost:8080/stream.html" +LOG="$(date +%Y%m%d-%H%M%S).log" + +if [ "$1" = "" ] ; then + echo Usage: $0 default\|firefox\|chrome\|live-front\|live-surf\|live-both\|perspective + exit 1 +fi + +xterm -e "./webrtc_server.py |& tee $1-server-$LOG" & +[ $(jobs -p | wc -l) = 1 ] || { echo "Server failed to start." ; exit 1 ; } +sleep 2.5 + +xterm -e ./webrtc_client.py --fake -a "multifilesrc do-timestamp=true loop=true location=count.wav ! wavparse ignore-length=1 ! identity sync=true" & +[ $(jobs -p | wc -l) = 2 ] || { echo "Client failed to start." ; exit 1 ; } +sleep 2.5 + +case "$1" in + + default) + xterm -e ./webrtc_client.py --fake & + sleep 20 + ;; + + firefox) + firefox "$URL" + sleep 40 + ;; + + chrome) + /opt/google/chrome/chrome "$URL" + sleep 40 + ;; + + live-front) + xterm -e ./webrtc_client.py --fake \ + -f "v4l2src device=/dev/video0 do-timestamp=true ! videorate ! videoconvert" \ + -s "videotestsrc is-live=true pattern=ball background-color=0x00FF00FF ! timeoverlay" & + sleep 40 + ;; + + live-surf) + xterm -e ./webrtc_client.py --fake \ + -s "v4l2src device=/dev/video0 do-timestamp=true ! jpegdec ! videorate ! videoconvert" \ + -f "videotestsrc is-live=true pattern=smpte ! timeoverlay" & + sleep 40 + ;; + + live-both) + xterm -e ./webrtc_client.py -s /dev/video-surf -f /dev/video-face & + sleep 40 + ;; + + persp*) + xterm -e ./webrtc_client.py --fake -p 0.276051,-0.141577,408.683,-0.0140174,0.382771,129.458,-5.60278e-05,-0.00014859,0.846134 \ + -s "v4l2src device=/dev/video0 do-timestamp=true ! jpegdec ! videorate ! videoconvert" \ + -f "videotestsrc is-live=true pattern=smpte ! 
timeoverlay" & + sleep 40 + ;; + +esac + diff --git a/server.sh b/server.sh deleted file mode 100755 index 0d6b175..0000000 --- a/server.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -while true ; do - ./webrtc_server.py - sleep 1 -done diff --git a/adapter-8.1.0.js b/webclient/adapter-8.1.0.js similarity index 100% rename from adapter-8.1.0.js rename to webclient/adapter-8.1.0.js diff --git a/stream.html b/webclient/stream.html similarity index 100% rename from stream.html rename to webclient/stream.html diff --git a/stream.js b/webclient/stream.js similarity index 97% rename from stream.js rename to webclient/stream.js index 01722ee..9492f22 100644 --- a/stream.js +++ b/webclient/stream.js @@ -157,7 +157,7 @@ function playStream(videoElement, hostname, port, path, configuration, reportErr webrtcPeerConnection.onicecandidate = onIceCandidate; datastream = webrtcPeerConnection.createDataChannel("events"); - datastream.onopen = function(event) { datastream.send("Hi!"); console.log("Hi!"); } + datastream.onopen = function(event) { datastream.send("Hi from "+navigator.userAgent); } // datastream.onmessage = ... audiotrack = stream.getAudioTracks()[0]; @@ -179,6 +179,7 @@ function playStream(videoElement, hostname, port, path, configuration, reportErr websocketConnection = new WebSocket(wsUrl); websocketConnection.addEventListener("message", onServerMessage); //websocketConnection.onopen = function(event) { websocketConnection.send("Hoi!"); }; + console.log("Capture setup complete."); } ); } } diff --git a/vr.html b/webclient/vr.html similarity index 100% rename from vr.html rename to webclient/vr.html diff --git a/webrtc_client.py b/webrtc_client.py index b40a0a0..8bc4d39 100755 --- a/webrtc_client.py +++ b/webrtc_client.py @@ -10,6 +10,7 @@ from gst_helpers import * from webrtc_peer import WebRTCPeer +from client import BaseClient args = None sink = "" @@ -22,7 +23,8 @@ def ws_close_handler(connection, wrb): # outgoing Websocket connection def ws_conn_handler(session, result): connection = session.websocket_connect_finish(result) - wrb = WebRTCPeer(connection,"client",is_client=True,is_main=args.main) + wrb = WebRTCPeer(connection,"client",args.stun,is_client=True,is_main=args.main,nick=args.nick) + client = BaseClient("client",wrb) connection.connect("closed",ws_close_handler,wrb) # element message was posted on bus @@ -51,10 +53,8 @@ def on_element_added(thebin, element): logging.info("Starting audio output") add_and_link([ element, new_element("audioconvert"), new_element("autoaudiosink") ]) - dump_debug("client") - # "main" -print("SurfaceStreams frontend client v0.1\n") +print("\nSurfaceStreams frontend client v0.1.0 - https://github.com/floe/surfacestreams\n") parser = argparse.ArgumentParser() @@ -62,9 +62,12 @@ def on_element_added(thebin, element): parser.add_argument("-m","--main", help="flag this client as main (lowest z)",action="store_true") parser.add_argument("-d","--debug", help="more debug output (-dd=max)",action="count",default=0 ) parser.add_argument("-t","--target", help="server to connect to (%(default)s)", default="127.0.0.1") +parser.add_argument("-a","--audio", help="audio source (device name or pipeline)", default="" ) parser.add_argument("-f","--front", help="front image source (device or pipeline)",default="" ) parser.add_argument("-s","--surface",help="surface image source (device or pipeline)",default="" ) -parser.add_argument("-p","--perspective",help="perspective (9 floats: \"1,0,0,...\")",default="" ) +parser.add_argument("-u","--stun", help="STUN server", 
default="stun://stun.l.google.com:19302" ) +parser.add_argument("-p","--port", help="server HTTPS listening port", default=8080 ) +parser.add_argument("-n","--nick", help="client nickname", default="" ) args = parser.parse_args() print("Option",args,"\n") @@ -75,22 +78,11 @@ def on_element_added(thebin, element): if not args.fake and (args.front == "" or args.surface == ""): logging.warning("Need to either specify --fake for test sources, or -f/-s for source devices/pipelines.") -if args.perspective != "": - params = [ float(f) for f in args.perspective.split(",") ] - pt = new_element("perspective",{"matrix":params}) -else: - pt = None - -add_test_sources(args.front,args.surface,args.fake,perspective=pt) +add_test_sources(args.front,args.surface,args.audio,args.fake) session = Soup.Session() session.set_property("ssl-strict", False) -msg = Soup.Message.new("GET", "wss://"+args.target+":8080/ws") +msg = Soup.Message.new("GET", "wss://"+args.target+":"+str(args.port)+"/ws") session.websocket_connect_async(msg, None, None, None, ws_conn_handler) -#msg = Soup.Message.new("GET", "https://127.0.0.1:8080/stream.html") -#session.add_feature(Soup.Logger.new(Soup.LoggerLogLevel.BODY, -1)) -#session.queue_message(msg,ws_conn_handler,None) - run_mainloop() - diff --git a/webrtc_peer.py b/webrtc_peer.py index 5912779..1401136 100644 --- a/webrtc_peer.py +++ b/webrtc_peer.py @@ -9,16 +9,25 @@ from gst_helpers import * -VENCODER="queue max-size-buffers=1 ! x264enc bitrate=1500 speed-preset=ultrafast tune=zerolatency key-int-max=15 ! video/x-h264,profile=constrained-baseline ! queue max-size-time=100000000 ! h264parse ! rtph264pay config-interval=-1 ! application/x-rtp,media=video,encoding-name=H264," +VENCODER="queue ! x264enc bitrate=1500 speed-preset=ultrafast tune=zerolatency key-int-max=15 ! video/x-h264,profile=constrained-baseline ! queue ! h264parse ! " # TODO: vp8 would be better in terms of compatibility, but the quality is horrific? #VENCODER="queue max-size-buffers=1 ! vp8enc threads=2 deadline=2000 target-bitrate=600000 ! queue max-size-time=100000000 ! rtpvp8pay ! application/x-rtp,media=video,encoding-name=VP8," -AENCODER="queue ! opusenc ! rtpopuspay ! queue max-size-time=100000000 ! application/x-rtp,media=audio,encoding-name=OPUS," +# TODO: any other sensible audiocodec that can also be put into MP4 containers? +AENCODER="queue ! opusenc ! queue ! opusparse ! " -# TODO make stun server configurable? maybe print firewall info? -bindesc="webrtcbin name=webrtcbin stun-server=stun://stun.l.google.com:19302 "+\ - "videoconvert name=front ! "+VENCODER+"payload=96 ! webrtcbin. "+\ - "audioconvert name=audio ! "+AENCODER+"payload=97 ! webrtcbin. "+\ - "videoconvert name=surface ! "+VENCODER+"payload=98 ! webrtcbin. " +RTPVIDEO="rtph264pay config-interval=1 ! application/x-rtp,media=video,encoding-name=H264," +RTPAUDIO="rtpopuspay ! application/x-rtp,media=audio,encoding-name=OPUS," +FILESINK="mp4mux name=mux fragment-duration=1000 ! filesink sync=true location=" + +bindesc="webrtcbin name=webrtcbin stun-server=%s "+\ + "videoconvert name=front ! "+VENCODER+RTPVIDEO+"payload=96 ! webrtcbin. "+\ + "audioconvert name=audio ! "+AENCODER+RTPAUDIO+"payload=97 ! webrtcbin. "+\ + "videoconvert name=surface ! "+VENCODER+RTPVIDEO+"payload=98 ! webrtcbin. " + +filebin=FILESINK+"%s "+\ + "videoconvert name=front ! "+VENCODER+" mux. "+\ + "audioconvert name=audio ! "+AENCODER+" mux. "+\ + "videoconvert name=surface ! "+VENCODER+" mux. 
" response_type = { "offer": GstWebRTC.WebRTCSDPType.OFFER, @@ -61,40 +70,49 @@ def get_mids_from_sdp(sdptext): return result -class WebRTCPeer: +# base class: bin with 3 sink ghostpads +class StreamSink: - def __init__(self, connection, address, msghandler = None, is_client=False, is_main=False): + def __init__(self, name, param, bin_desc=filebin): - self.connection = connection - self.is_client = is_client - self.data_channel = None - self.address = address - self.mapping = None - self.msghandler = msghandler + self.name = name + bin_desc = bin_desc % param - self.bin = Gst.parse_bin_from_description(bindesc,False) - self.bin.set_name("bin_"+address) - add_and_link([self.bin]) + logging.info("Setting up stream handler for "+name) + logging.trace("Bin contents: "+bin_desc) - self.wrb = self.bin.get_by_name("webrtcbin") + self.bin = Gst.parse_bin_from_description(bin_desc,False) + self.bin.set_name("bin_"+name) + add_and_link([self.bin]) - # add ghostpads (proxy-pads) and input-selectors for the converters + # add ghostpads (proxy-pads) for name in ["surface","front","audio"]: + logging.debug("Creating "+name+" ghostpad for "+self.name) + element = self.bin.get_by_name(name) realpad = element.get_static_pad("sink") + ghostpad = Gst.GhostPad.new("sink_"+name,realpad) ghostpad.set_active(True) self.bin.add_pad(ghostpad) - selector = new_element("input-selector",myname="input_"+self.address+"_"+name) - add_and_link([selector]) - selector.get_static_pad("src").link(ghostpad) - # TODO: source name should be configurable - link_request_pads(get_by_name(name+"testsource"),"src_%u",selector,"sink_%u") +# specialization: containing WebRTCBin and _lots_ of plumbing +class WebRTCPeer(StreamSink): + + def __init__(self, connection, name, stun, is_client=False, is_main=False, nick=""): + + super().__init__(name,stun,bindesc) + + self.connection = connection + self.is_client = is_client + self.data_channel = None + self.mapping = None + self.flags = {} self.connection.connect("message",self.on_ws_message) + self.wrb = self.bin.get_by_name("webrtcbin") # connect signals (note: negotiation-needed will initially be empty on client side) self.wrb.connect("on-negotiation-needed", self.on_negotiation_needed) @@ -107,9 +125,19 @@ def __init__(self, connection, address, msghandler = None, is_client=False, is_m # send message to server if main client if is_main: - message = json.dumps({"type":"msg","data":"main"}) + message = json.dumps({"type":"msg","data":{"main":True}}) + self.connection.send_text(message) + + # send nickname to server if given + if nick != "": + message = json.dumps({"type":"msg","data":{"nick":nick}}) self.connection.send_text(message) + # application-level message + def process(self, msg): + self.flags.update(msg) + logging.debug("Setting flags for "+self.name+": "+str(self.flags)) + # message on WebRTC data channel def on_dc_message(self, wrb, message): logging.debug("New data channel message: "+message) @@ -121,8 +149,9 @@ def on_data_channel(self, wrb, data_channel): self.data_channel.connect("on-message-string", self.on_dc_message) self.data_channel.connect("on-message-data", self.on_dc_message) # FIXME: doesn't seem to send anything? 
- self.data_channel.emit("send-data",GLib.Bytes.new("Hi!".encode("utf-8"))) - self.data_channel.emit("send-string","Hi!") + hello = "Hi from "+self.name + self.data_channel.emit("send-data",GLib.Bytes.new(hello.encode("utf-8"))) + self.data_channel.emit("send-string",hello) # ICE connection candidate received, forward to peer def on_ice_candidate(self, wrb, index, candidate): @@ -174,7 +203,7 @@ def on_negotiation_created(self, promise, kind): # ... and send to peer. message = json.dumps({"type":"sdp","data":{"type":kind,"sdp":text},"mapping":mapping}) - logging.debug("Outgoing SDP: " + message) + logging.trace("Outgoing SDP: " + message) self.connection.send_text(message) # new pad appears on WebRTCBin element @@ -188,10 +217,11 @@ def on_pad_added(self, wrb, pad): return logging.info("New incoming stream, linking to decodebin...") + logging.trace("Stream caps: "+caps.to_string()) decodebin = new_element("decodebin",myname="decodebin_"+self.mapping[str(ssrc)]) decodebin.connect("pad-added", self.on_decodebin_pad) - self.wrb.parent.add(decodebin) # TODO or self.bin.add(...)? + self.wrb.parent.add(decodebin) decodebin.sync_state_with_parent() pad.link(decodebin.get_static_pad("sink")) @@ -202,6 +232,7 @@ def on_decodebin_pad(self, decodebin, pad): name = decodebin.get_name().split("_")[1] logging.info("Handling new decodebin pad of type: "+name) + logging.trace("Stream caps: "+pad.get_current_caps().to_string()) # add named ghostpads ("src_front" etc.) ghostpad = Gst.GhostPad.new("src_"+name,pad) @@ -209,14 +240,24 @@ def on_decodebin_pad(self, decodebin, pad): decodebin.parent.add_pad(ghostpad) alpha = None - # TODO: not sure, should we disable alpha filtering for main client or not? - if name == "surface" and self.msghandler and not "main" in self.msghandler.flags: - alpha = new_element("alpha", { "method": "green" } ) - - tee = new_element("tee",{"allow-not-linked":True},myname="output_"+self.address+"_"+name) - add_and_link([alpha,tee]) - last = tee if alpha == None else alpha - ghostpad.link(last.get_static_pad("sink")) + # disable alpha filtering for main client + if name == "surface" and not self.is_client and not "main" in self.flags: + logging.info("Adding alpha filter for "+self.name+" surface output") + alpha = new_element("alpha", { "method": "green" }, myname="alpha_"+self.name ) + + text = None + # add a nickname text overlay if given + if name == "front" and not self.is_client and "nick" in self.flags: + text = new_element("textoverlay",{"halignment":"left","valignment":"bottom","text":self.flags["nick"]},myname="text_"+self.name) + + tee = new_element("tee",{"allow-not-linked":True},myname="output_"+self.name+"_"+name) + add_and_link([text,alpha,tee]) + if text != None: + ghostpad.link(text.get_static_pad("video_sink")) + elif alpha != None: + ghostpad.link(alpha.get_static_pad("sink")) + else: + ghostpad.link(tee.get_static_pad("sink")) # incoming Websocket message def on_ws_message(self, connection, mtype, data): @@ -235,7 +276,7 @@ def on_ws_message(self, connection, mtype, data): return logging.info("Received SDP " + stype + ", parsing...") - logging.debug("Incoming SDP: " + json.dumps(msg)) + logging.trace("Incoming SDP: " + json.dumps(msg)) res, sdpmsg = GstSdp.sdp_message_new_from_text(sdp) # as client, we need to parse an OFFER, as server, we need to parse an ANSWER @@ -270,6 +311,5 @@ def on_ws_message(self, connection, mtype, data): self.wrb.emit("add-ice-candidate", sdpmlineindex, candidate) logging.trace("Incoming ICE candidate: " + json.dumps(msg)) - if 
msg["type"] == "msg" and self.msghandler: - self.msghandler.process(msg["data"]) - logging.debug("Incoming websocket message: "+msg["data"]) + if msg["type"] == "msg": + self.process(msg["data"]) diff --git a/webrtc_server.py b/webrtc_server.py index 0192c6b..b3a1d6d 100755 --- a/webrtc_server.py +++ b/webrtc_server.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 -import sys,gi,json +import sys,gi,json,argparse,datetime gi.require_version('GLib', '2.0') gi.require_version('Gst', '1.0') gi.require_version('Soup', '2.4') @@ -9,8 +9,10 @@ from gi.repository import GLib, Gst, Soup, GstWebRTC, GstSdp from gst_helpers import * +from webrtc_peer import * from client import * -from webrtc_peer import WebRTCPeer + +args = None # get address and port from client def get_client_address(client): @@ -23,24 +25,26 @@ def http_handler(server,msg,path,query,client,user_data): #flags[get_client_address(client)] = query content_type = "text/html" try: - data = open(path[1:],"r").read() + data = open("webclient"+path,"r").read() if path.endswith(".js"): content_type = "text/javascript" + msg.set_status(Soup.Status.OK) except: msg.set_status(Soup.Status.NOT_FOUND) + data=path+" not found" if path == "/quit": logging.info("Well... bye.") - quit_mainloop() - return + GLib.timeout_add(100,quit_mainloop) + data="Server exiting/restarting..." + msg.response_headers.append("Content-Type",content_type) msg.response_headers.append("Cache-Control","no-store") msg.response_body.append(data.encode("utf-8")) - msg.set_status(Soup.Status.OK) # Websocket connection was closed by remote -def ws_close_handler(connection, wrb): - # TODO actually handle closing (might be tricky, needs to rewire pipeline) +def ws_close_handler(connection, client): logging.info("WebSocket closed by remote.") + client.remove() # incoming Websocket connection def ws_conn_handler(server, connection, path, client, user_data): @@ -48,22 +52,45 @@ def ws_conn_handler(server, connection, path, client, user_data): source = get_client_address(client) logging.info("New WebSocket connection from: "+source) - new_client = Client(source) - wrb = WebRTCPeer(connection,source,new_client) - connection.connect("closed",ws_close_handler,wrb) + wrb = WebRTCPeer(connection,source,args.stun) + new_client = Client(source,wrb) + connection.connect("closed",ws_close_handler,new_client) # "main" -print("SurfaceStreams backend mixer v0.1\n") +print("\nSurfaceStreams backend mixing server v0.1.0 - https://github.com/floe/surfacestreams\n") print("Note: any GStreamer-WARNINGs about pipeline loops can be safely ignored.\n") -init_pipeline(on_element_added,mylevel=1) +parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) +outfile = datetime.datetime.now().strftime("surfacestreams-%Y%m%d-%H%M%S.mp4") + +parser.add_argument("-d","--debug", help="more debug output (-dd=max)", action="count",default=1 ) +parser.add_argument("-s","--sink", help="save all streams to MP4 file", action="store_true" ) +parser.add_argument("-p","--port", help="server HTTPS listening port", default=8080 ) +parser.add_argument("-o","--out", help="MP4 output filename", default=outfile ) +parser.add_argument("-u","--stun", help="STUN server", default="stun://stun.l.google.com:19302" ) + +args = parser.parse_args() +print("Option",args,"\n") -add_test_sources(fake=True,bgcol=0xFFFF00FF,wave="sine") +init_pipeline(on_element_added,args.debug) + +frontsrc = "filesrc location=assets/front.png ! pngdec ! videoconvert ! imagefreeze ! 
identity sync=true" +surfacesrc = "videotestsrc is-live=true pattern=solid-color foreground-color=0" #ball motion=sweep background-color=0 +audiosrc = "audiotestsrc is-live=true wave=silence" + +add_test_sources(frontsrc,surfacesrc,audiosrc,fake=True,bgcol=0xFFFF00FF,wave="sine") +create_frontmixer_queue() server = Soup.Server() server.add_handler("/",http_handler,None) server.add_websocket_handler("/ws",None,None,ws_conn_handler,None) -server.set_ssl_cert_file("cert.pem","key.pem") -server.listen_all(8080,Soup.ServerListenOptions.HTTPS) +server.set_ssl_cert_file("assets/tls-cert.pem","assets/tls-key.pem") +server.listen_all(int(args.port),Soup.ServerListenOptions.HTTPS) + +if args.sink: + logging.info("Adding file sink client...") + sink = StreamSink("file_sink",args.out) + client = Client("file_sink",sink) + client.link_new_client() run_mainloop()