Can't receive video from aiortc without offering webcam video on webpage
See original GitHub issue. Alright, I’m pretty new to WebRTC, so it’s entirely possible that I’m doing something very wrong here.
I’ve got a web client, a Python client, and a signaling server. The Python client is generating a video from a series of images from OpenAI Gym’s Atari environment while an AI is playing it. I basically want to be able to stream that video to a web client in order to watch the AI play it, but I don’t actually need to send any media/data to the Python client while it’s running. I figured I could just send an offer to the python client without specifying any user media, but I get the following error when I try to generate an answer and start streaming:
Traceback (most recent call last):
File "./client.py", line 339, in <module>
loop.run_until_complete(run(
File "/usr/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "./client.py", line 303, in run
await pc.setLocalDescription(await pc.createAnswer())
File "/usr/local/lib/python3.8/dist-packages/aiortc/rtcpeerconnection.py", line 763, in setLocalDescription
t._currentDirection = and_direction(t.direction, t._offerDirection)
File "/usr/local/lib/python3.8/dist-packages/aiortc/rtcpeerconnection.py", line 242, in and_direction
return sdp.DIRECTIONS[sdp.DIRECTIONS.index(a) & sdp.DIRECTIONS.index(b)]
ValueError: None is not in list
The strange thing is that I can actually start the stream with no problems if I offer to stream my webcam video from the web client. I tried offering my microphone audio instead, but I got the same error as before. I guess I’m kinda confused about what this error means and why I can only seem to get this to work when I offer a video stream.
Here’s what my python client looks like (OpenAI Gym video generation and signaling code omitted):
async def run(url, pc, video_track, recorder):
    """Drive one signaling session: register a room, then answer browser offers.

    Opens a websocket to the signaling server at ``url``, posts the room
    name, and loops over incoming signaling messages until BYE arrives.

    :param url: websocket URL of the signaling server.
    :param pc: RTCPeerConnection used to answer incoming offers.
    :param video_track: outgoing video track attached when an offer arrives.
    :param recorder: sink (e.g. MediaBlackhole) for any inbound media.
    """
    async with websockets.connect(url) as websocket:
        signaling = Signaling(websocket)

        @pc.on("track")
        async def on_track(track):
            # Inbound media is not needed here; route it into the sink.
            recorder.addTrack(track)

        NAME = "Breakout"

        # Wait until the server says it is ready, then register the room.
        ready = await signaling.recv()
        assert isinstance(ready, Ready)
        await signaling.send(Post(name=NAME))

        while True:
            msg = await signaling.recv()
            if isinstance(msg, Offer):
                print("got offer", msg.desc.sdp)
                assert msg.name == NAME
                await pc.setRemoteDescription(msg.desc)
                await recorder.start()
                print("Setting local description")
                pc.addTrack(video_track)
                # Error occurs during this call to pc.setLocalDescription
                await pc.setLocalDescription(await pc.createAnswer())
                print("Sending answer")
                answer = Answer(msg.endpoint, msg.name, pc.localDescription)
                await signaling.send(answer)
                print("Sent answer")
            elif isinstance(msg, Ice):
                assert msg.name == NAME
                await pc.addIceCandidate(msg.candidate)
            elif msg is BYE:
                print("Exiting")
                break
            else:
                print("unknown msg {}".format(msg))
if __name__ == "__main__":
    # Command-line options for locating the signaling server.
    parser = argparse.ArgumentParser()
    parser.add_argument("--sighost", default="localhost")
    parser.add_argument("--sigport", default=7000)
    args = parser.parse_args()

    video_track = GymVideoStreamTrack()
    recorder = MediaBlackhole()
    pc = RTCPeerConnection()

    loop = asyncio.get_event_loop()
    try:
        # Schedule the Gym frame producer alongside the signaling loop.
        # NOTE(review): run_coroutine_threadsafe is meant for calls from
        # other threads; from this thread loop.create_task would be the
        # usual choice — confirm before changing.
        asyncio.run_coroutine_threadsafe(run_gym_env(video_track), loop)
        signaling_url = "ws://{}:{}".format(args.sighost, args.sigport)
        loop.run_until_complete(run(signaling_url, pc, video_track, recorder))
    except KeyboardInterrupt:
        pass
    finally:
        # Tear down media and the peer connection even on interrupt.
        print("stopping recorder")
        loop.run_until_complete(recorder.stop())
        print("closing peer connection")
        loop.run_until_complete(pc.close())
        print("done")
Web Client HTML
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>WebRTC working example</title>
</head>
<body>
<!-- Signaling parameters: server host, endpoint id, and room name -->
<div>
<input id="sigHost" type="text" value="localhost" />
<input id="sigEndpoint" type="text" value="" placeholder="Endpoint" />
<input id="sigRoom" type="text" value="Breakout" />
</div>
<!-- Opens the websocket to the signaling server (see main.js connect()) -->
<button id="startReceive">Start Receive</button>
<div>
<!-- Remote video lands here; autoplay/playsinline so it starts without user gesture -->
<video id="remoteStream" autoplay playsinline></video>
</div>
<script src="main.js"></script>
</body>
</html>
Web Client main.js
// Peer connection configuration (no STUN/TURN servers — LAN use only).
const PC_CONFIG = {};
// Signaling websocket, assigned by connect().
let socket;
// Default the signaling host field to wherever this page was served from.
// (Removed the unused `dc` declaration — no data channel is ever created.)
document.getElementById("sigHost").value = window.location.hostname;
/**
 * Open the signaling websocket; once open, create the peer connection,
 * capture local video, and send the initial offer.
 * @param {string} url - websocket URL of the signaling server.
 */
let connect = (url) => {
  socket = new WebSocket(url);
  socket.onopen = () => {
    console.log("Ready");
    createPeerConnection();
    // sending an offer with no media produces the error
    // sendOffer();
    // sending an offer with audio and no video produces the error as well
    // navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
    //   stream.getTracks().forEach((track) => {
    //     pc.addTrack(track, stream);
    //   });
    //
    //   sendOffer();
    // });
    navigator.mediaDevices.getUserMedia({ video: true })
      .then((stream) => {
        stream.getTracks().forEach((track) => {
          pc.addTrack(track, stream);
        });
        sendOffer();
      })
      .catch((error) => {
        // Previously a rejected getUserMedia (no camera, permission
        // denied) was swallowed silently and the page just hung.
        console.error("getUserMedia failed: ", error);
      });
  };
  socket.onmessage = (ev) => {
    let msg = JSON.parse(ev.data);
    handleSignalingData(msg);
  };
  // Surface socket failures instead of failing silently.
  socket.onerror = (ev) => {
    console.error("WebSocket error: ", ev);
  };
};
/** Serialize a message and push it over the signaling socket. */
let sendData = (data) => {
  // Everything on the signaling channel travels as a JSON string.
  const payload = JSON.stringify(data);
  socket.send(payload);
};
// The RTCPeerConnection, created by createPeerConnection().
let pc;
// Video element that displays the remote stream.
// (Removed the unused `localStream` declaration — it was never assigned.)
let remoteStreamElement = document.querySelector("#remoteStream");
// Kick everything off from the button: connect to the signaling server
// on the configured host, fixed port 7000.
document.getElementById("startReceive").onclick = () => {
  connect("ws://" + document.getElementById("sigHost").value + ":7000");
};
/** Build the RTCPeerConnection and wire up its event handlers. */
let createPeerConnection = () => {
  // NOTE(review): `onaddstream` is deprecated in favour of `ontrack`
  // (`event.streams[0]`) — confirm against the target browsers.
  try {
    const connection = new RTCPeerConnection(PC_CONFIG);
    connection.onicecandidate = onIceCandidate;
    connection.onaddstream = onAddStream;
    pc = connection;
    console.log("PeerConnection created");
  } catch (error) {
    console.error("PeerConnection failed: ", error);
  }
};
/** Ask the browser for an offer SDP, then hand it to the signaling path. */
let sendOffer = () => {
  console.log("Send offer");
  const onFailure = (error) => { console.error("Send offer failed: ", error); };
  pc.createOffer().then(setAndSendLocalDescription, onFailure);
};
/**
 * Apply the freshly created description locally, then relay it to the
 * signaling server.
 *
 * Fix: setLocalDescription is asynchronous; the original logged success
 * and called sendData before it resolved, and leaked any rejection.
 * The offer is now only signaled once the local description is set.
 * Returns the promise so callers may await it (previously undefined).
 * @param {RTCSessionDescriptionInit} sessionDescription
 */
let setAndSendLocalDescription = (sessionDescription) => {
  return pc.setLocalDescription(sessionDescription)
    .then(() => {
      console.log("Local description set");
      sendData({
        typ: "offer",
        dat: {
          endpoint: document.getElementById("sigEndpoint").value,
          name: document.getElementById("sigRoom").value,
          sdp: sessionDescription
        }
      });
    })
    .catch((error) => {
      console.error("setLocalDescription failed: ", error);
    });
};
/** Relay each gathered ICE candidate to the signaling server. */
let onIceCandidate = (event) => {
  // A null candidate marks the end of gathering; nothing to relay then.
  if (!event.candidate) {
    return;
  }
  console.log("ICE candidate");
  const { candidate, sdpMid, sdpMLineIndex } = event.candidate;
  sendData({
    typ: "ice",
    dat: {
      endpoint: document.getElementById("sigEndpoint").value,
      name: document.getElementById("sigRoom").value,
      sdp: candidate,
      sdp_mid: sdpMid,
      sdp_mline_index: sdpMLineIndex,
    }
  });
};
/**
 * Attach the remote media stream to the <video> element.
 *
 * Fix: the handler only understood the deprecated `addstream` event
 * shape (`event.stream`). It now also accepts the standard `track`
 * event (`event.streams[0]`), so it works when wired via `pc.ontrack`.
 * @param {{stream?: MediaStream, streams?: MediaStream[]}} event
 */
let onAddStream = (event) => {
  console.log("Add track", event);
  const stream = (event.streams && event.streams[0]) || event.stream;
  remoteStreamElement.srcObject = stream;
};
/**
 * Dispatch a decoded signaling message.
 *
 * Fixes: setRemoteDescription/addIceCandidate return promises that were
 * left floating (rejections vanished); unknown message types were
 * silently dropped. Both are now logged.
 * @param {{typ: string, dat: object}} msg
 */
let handleSignalingData = (msg) => {
  switch (msg.typ) {
    case "answer":
      console.log("Got Answer", msg);
      pc.setRemoteDescription(new RTCSessionDescription({
        type: "answer",
        sdp: msg.dat.sdp
      })).catch((error) => {
        console.error("setRemoteDescription failed: ", error);
      });
      break;
    case "ice":
      pc.addIceCandidate(new RTCIceCandidate(msg.dat.sdp)).catch((error) => {
        console.error("addIceCandidate failed: ", error);
      });
      break;
    default:
      console.warn("Unhandled signaling message: ", msg);
      break;
  }
};
Apologies for the long code snippets. Most of this code is adapted from examples from this repo and signaling tutorials online. I can provide the signaling code if necessary.
Tried on: Ubuntu 20.04
- Firefox 86.0
- Chrome 88.0.4324.182
Issue Analytics
- State:
- Created 3 years ago
- Comments:6

Top Related StackOverflow Question
If that’s ok with you I will close this issue and you can track the Firefox-specific behaviour in #481
Awesome, thanks! I had a roughly similar implementation. I see you used
`pts, time_base = await self.next_timestamp()` to determine the frame time; just noting that these values are hardcoded in https://github.com/aiortc/aiortc/blob/01ff209cc38e887edeb05cba1845cf458b31a0ac/src/aiortc/mediastreams.py#L119, and you could instead set them to match the rate at which you generate the frames.