diff --git a/assignment-client/CMakeLists.txt b/assignment-client/CMakeLists.txt
index 5b92dfba99..830164eb60 100644
--- a/assignment-client/CMakeLists.txt
+++ b/assignment-client/CMakeLists.txt
@@ -5,7 +5,7 @@ setup_hifi_project(Core Gui Network Script Quick Widgets WebSockets)
 # link in the shared libraries
 link_hifi_libraries( 
   audio avatars octree environment gpu model fbx entities 
-  networking animation shared script-engine embedded-webserver
+  networking animation recording shared script-engine embedded-webserver
   controllers physics
 )
 
diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp
index 6f60ad179c..852e1d1389 100644
--- a/interface/src/avatar/MyAvatar.cpp
+++ b/interface/src/avatar/MyAvatar.cpp
@@ -35,7 +35,10 @@
 #include <TextRenderer3D.h>
 #include <UserActivityLogger.h>
 #include <AnimDebugDraw.h>
-
+#include <recording/Deck.h>
+#include <recording/Recorder.h>
+#include <recording/Clip.h>
+#include <recording/Frame.h>
 #include "devices/Faceshift.h"
 
 
@@ -77,6 +80,10 @@ const QString& DEFAULT_AVATAR_COLLISION_SOUND_URL = "https://hifi-public.s3.amaz
 const float MyAvatar::ZOOM_MIN = 0.5f;
 const float MyAvatar::ZOOM_MAX = 25.0f;
 const float MyAvatar::ZOOM_DEFAULT = 1.5f;
+static const QString HEADER_NAME = "com.highfidelity.recording.AvatarData";
+static recording::FrameType AVATAR_FRAME_TYPE = recording::Frame::TYPE_INVALID;
+static std::once_flag frameTypeRegistration;
+
 
 MyAvatar::MyAvatar(RigPointer rig) :
     Avatar(rig),
@@ -112,6 +119,19 @@ MyAvatar::MyAvatar(RigPointer rig) :
     _audioListenerMode(FROM_HEAD),
     _hmdAtRestDetector(glm::vec3(0), glm::quat())
 {
+    using namespace recording;
+
+    std::call_once(frameTypeRegistration, [] {
+        AVATAR_FRAME_TYPE = recording::Frame::registerFrameType(HEADER_NAME);
+    });
+
+    // FIXME how to deal with driving multiple avatars locally?
+    // Declare before first use: avatarStateFromFrame is defined in AvatarData.cpp
+    extern void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
+    Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this](Frame::Pointer frame) {
+        qDebug() << "Playback of avatar frame length: " << frame->data.size();
+        avatarStateFromFrame(frame->data, this);
+    });
     for (int i = 0; i < MAX_DRIVE_KEYS; i++) {
         _driveKeys[i] = 0.0f;
     }
@@ -235,14 +255,12 @@ void MyAvatar::update(float deltaTime) {
     simulate(deltaTime);
 }
 
+extern QByteArray avatarStateToFrame(const AvatarData* _avatar);
+extern void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
+
 void MyAvatar::simulate(float deltaTime) {
     PerformanceTimer perfTimer("simulate");
 
-    // Play back recording
-    if (_player && _player->isPlaying()) {
-        _player->play();
-    }
-
     if (_scale != _targetScale) {
         float scale = (1.0f - SMOOTHING_RATIO) * _scale + SMOOTHING_RATIO * _targetScale;
         setScale(scale);
@@ -310,7 +328,7 @@ void MyAvatar::simulate(float deltaTime) {
 
     // Record avatars movements.
     if (_recorder && _recorder->isRecording()) {
-        _recorder->record();
+        _recorder->recordFrame(AVATAR_FRAME_TYPE, avatarStateToFrame(this));
     }
 
     // consider updating our billboard
@@ -580,33 +598,35 @@ bool MyAvatar::isRecording() {
     return _recorder && _recorder->isRecording();
 }
 
-qint64 MyAvatar::recorderElapsed() {
+float MyAvatar::recorderElapsed() {
+    if (QThread::currentThread() != thread()) {
+        float result;
+        QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
+                                  Q_RETURN_ARG(float, result));
+        return result;
+    }
     if (!_recorder) {
         return 0;
     }
-    if (QThread::currentThread() != thread()) {
-        qint64 result;
-        QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
-                                  Q_RETURN_ARG(qint64, result));
-        return result;
-    }
-    return _recorder->elapsed();
+    return (float)_recorder->position() / MSECS_PER_SECOND;
 }
 
+QMetaObject::Connection _audioClientRecorderConnection;
+
 void MyAvatar::startRecording() {
     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection);
         return;
     }
-    if (!_recorder) {
-        _recorder = QSharedPointer<Recorder>::create(this);
-    }
+
+    _recorder = std::make_shared<recording::Recorder>();
     // connect to AudioClient's signal so we get input audio
     auto audioClient = DependencyManager::get<AudioClient>();
-    connect(audioClient.data(), &AudioClient::inputReceived, _recorder.data(),
-            &Recorder::recordAudio, Qt::QueuedConnection);
-
-    _recorder->startRecording();
+    _audioClientRecorderConnection = connect(audioClient.data(), &AudioClient::inputReceived, [] {
+        // FIXME, missing audio data handling
+    });
+    setRecordingBasis();
+    _recorder->start();
 }
 
 void MyAvatar::stopRecording() {
@@ -618,15 +638,14 @@ void MyAvatar::stopRecording() {
         return;
     }
     if (_recorder) {
-        // stop grabbing audio from the AudioClient
-        auto audioClient = DependencyManager::get<AudioClient>();
-        disconnect(audioClient.data(), 0, _recorder.data(), 0);
-
-        _recorder->stopRecording();
+        QObject::disconnect(_audioClientRecorderConnection);
+        _audioClientRecorderConnection = QMetaObject::Connection();
+        _recorder->stop();
+        clearRecordingBasis();
     }
 }
 
-void MyAvatar::saveRecording(QString filename) {
+void MyAvatar::saveRecording(const QString& filename) {
     if (!_recorder) {
         qCDebug(interfaceapp) << "There is no recording to save";
         return;
@@ -636,8 +655,10 @@ void MyAvatar::saveRecording(QString filename) {
                                   Q_ARG(QString, filename));
         return;
     }
+
     if (_recorder) {
-        _recorder->saveToFile(filename);
+        auto clip = _recorder->getClip();
+        recording::Clip::toFile(filename, clip);
     }
 }
 
@@ -646,15 +667,18 @@ void MyAvatar::loadLastRecording() {
         QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection);
         return;
     }
-    if (!_recorder) {
+
+    if (!_recorder || !_recorder->getClip()) {
         qCDebug(interfaceapp) << "There is no recording to load";
         return;
     }
+
     if (!_player) {
-        _player = QSharedPointer<Player>::create(this);
+        _player = std::make_shared<recording::Deck>();
     }
 
-    _player->loadRecording(_recorder->getRecording());
+    _player->queueClip(_recorder->getClip());
+    _player->play();
 }
 
 void MyAvatar::startAnimation(const QString& url, float fps, float priority,
diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h
index da836b7f15..52f1ffce3f 100644
--- a/interface/src/avatar/MyAvatar.h
+++ b/interface/src/avatar/MyAvatar.h
@@ -257,10 +257,10 @@ public slots:
     bool setJointReferential(const QUuid& id, int jointIndex);
 
     bool isRecording();
-    qint64 recorderElapsed();
+    float recorderElapsed();
     void startRecording();
     void stopRecording();
-    void saveRecording(QString filename);
+    void saveRecording(const QString& filename);
     void loadLastRecording();
 
     virtual void rebuildSkeletonBody() override;
@@ -311,8 +311,8 @@ private:
                         const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f,
                         bool allowDuplicates = false, bool useSaved = true) override;
 
-    const RecorderPointer getRecorder() const { return _recorder; }
-    const PlayerPointer getPlayer() const { return _player; }
+    const recording::RecorderPointer getRecorder() const { return _recorder; }
+    const recording::DeckPointer getPlayer() const { return _player; }
 
     //void beginFollowingHMD();
     //bool shouldFollowHMD() const;
@@ -360,7 +360,7 @@ private:
 
     eyeContactTarget _eyeContactTarget;
 
-    RecorderPointer _recorder;
+    recording::RecorderPointer _recorder;
 
     glm::vec3 _trackedHeadPosition;
 
diff --git a/libraries/avatars/CMakeLists.txt b/libraries/avatars/CMakeLists.txt
index 849828bbf6..6d4d9cc341 100644
--- a/libraries/avatars/CMakeLists.txt
+++ b/libraries/avatars/CMakeLists.txt
@@ -1,3 +1,3 @@
 set(TARGET_NAME avatars)
 setup_hifi_library(Network Script)
-link_hifi_libraries(audio shared networking)
+link_hifi_libraries(audio shared networking recording)
diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp
index a698c6f374..c4e356d402 100644
--- a/libraries/avatars/src/AvatarData.cpp
+++ b/libraries/avatars/src/AvatarData.cpp
@@ -16,6 +16,9 @@
 #include <QtCore/QDataStream>
 #include <QtCore/QThread>
 #include <QtCore/QUuid>
+#include <QtCore/QJsonDocument>
+#include <QtCore/QJsonArray>
+#include <QtCore/QJsonObject>
 #include <QtNetwork/QNetworkReply>
 #include <QtNetwork/QNetworkRequest>
 
@@ -25,6 +28,10 @@
 #include <GLMHelpers.h>
 #include <StreamUtils.h>
 #include <UUID.h>
+#include <shared/JSONHelpers.h>
+#include <shared/UniformTransform.h>
+#include <recording/Deck.h>
+#include <recording/Clip.h>
 
 #include "AvatarLogging.h"
 #include "AvatarData.h"
@@ -62,7 +69,6 @@ AvatarData::AvatarData() :
     _targetVelocity(0.0f),
     _localAABox(DEFAULT_LOCAL_AABOX_CORNER, DEFAULT_LOCAL_AABOX_SCALE)
 {
-
 }
 
 AvatarData::~AvatarData() {
@@ -791,7 +797,7 @@ bool AvatarData::isPaused() {
     return _player && _player->isPaused();
 }
 
-qint64 AvatarData::playerElapsed() {
+float AvatarData::playerElapsed() {
     if (!_player) {
         return 0;
     }
@@ -801,10 +807,10 @@ qint64 AvatarData::playerElapsed() {
                                   Q_RETURN_ARG(qint64, result));
         return result;
     }
-    return _player->elapsed();
+    return (float)_player->position() / MSECS_PER_SECOND;
 }
 
-qint64 AvatarData::playerLength() {
+float AvatarData::playerLength() {
     if (!_player) {
         return 0;
     }
@@ -814,28 +820,24 @@ qint64 AvatarData::playerLength() {
                                   Q_RETURN_ARG(qint64, result));
         return result;
     }
-    return _player->getRecording()->getLength();
+    return (float)_player->length() / MSECS_PER_SECOND;
 }
 
-int AvatarData::playerCurrentFrame() {
-    return (_player) ? _player->getCurrentFrame() : 0;
-}
-
-int AvatarData::playerFrameNumber() {
-    return (_player && _player->getRecording()) ? _player->getRecording()->getFrameNumber() : 0;
-}
-
-void AvatarData::loadRecording(QString filename) {
+void AvatarData::loadRecording(const QString& filename) {
     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection,
                                   Q_ARG(QString, filename));
         return;
     }
-    if (!_player) {
-        _player = QSharedPointer<Player>::create(this);
+    using namespace recording;
+
+    ClipPointer clip = Clip::fromFile(filename);
+    if (!clip) {
+        qWarning() << "Unable to load clip data from " << filename; return;
     }
 
-    _player->loadFromFile(filename);
+    _player = std::make_shared<Deck>();
+    _player->queueClip(clip);
 }
 
 void AvatarData::startPlaying() {
@@ -843,70 +845,56 @@ void AvatarData::startPlaying() {
         QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
         return;
     }
+
     if (!_player) {
-        _player = QSharedPointer<Player>::create(this);
+        qWarning() << "No clip loaded for playback";
+        return;
     }
-    _player->startPlaying();
+    setRecordingBasis();
+    _player->play();
 }
 
 void AvatarData::setPlayerVolume(float volume) {
-    if (_player) {
-        _player->setVolume(volume);
-    }
+    // FIXME 
 }
 
-void AvatarData::setPlayerAudioOffset(int audioOffset) {
-    if (_player) {
-        _player->setAudioOffset(audioOffset);
-    }
+void AvatarData::setPlayerAudioOffset(float audioOffset) {
+    // FIXME 
 }
 
-void AvatarData::setPlayerFrame(unsigned int frame) {
-    if (_player) {
-        _player->setCurrentFrame(frame);
-    }
-}
+void AvatarData::setPlayerTime(float time) {
+    if (!_player) {
+        qWarning() << "No player active";
+        return;
+    } 
 
-void AvatarData::setPlayerTime(unsigned int time) {
-    if (_player) {
-        _player->setCurrentTime(time);
-    }
+    _player->seek(time * MSECS_PER_SECOND);
 }
 
 void AvatarData::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
-    if (_player) {
-        _player->setPlayFromCurrentLocation(playFromCurrentLocation);
-    }
+    // FIXME 
 }
 
 void AvatarData::setPlayerLoop(bool loop) {
     if (_player) {
-        _player->setLoop(loop);
+        _player->loop(loop);
     }
 }
 
 void AvatarData::setPlayerUseDisplayName(bool useDisplayName) {
-    if(_player) {
-        _player->useDisplayName(useDisplayName);
-    }
+    // FIXME
 }
 
 void AvatarData::setPlayerUseAttachments(bool useAttachments) {
-    if(_player) {
-        _player->useAttachements(useAttachments);
-    }
+    // FIXME
 }
 
 void AvatarData::setPlayerUseHeadModel(bool useHeadModel) {
-    if(_player) {
-        _player->useHeadModel(useHeadModel);
-    }
+    // FIXME
 }
 
 void AvatarData::setPlayerUseSkeletonModel(bool useSkeletonModel) {
-    if(_player) {
-        _player->useSkeletonModel(useSkeletonModel);
-    }
+    // FIXME
 }
 
 void AvatarData::play() {
@@ -920,6 +908,10 @@ void AvatarData::play() {
     }
 }
 
+std::shared_ptr<UniformTransform> AvatarData::getRecordingBasis() const {
+    return _recordingBasis;
+}
+
 void AvatarData::pausePlayer() {
     if (!_player) {
         return;
@@ -929,7 +921,7 @@ void AvatarData::pausePlayer() {
         return;
     }
     if (_player) {
-        _player->pausePlayer();
+        _player->pause();
     }
 }
 
@@ -942,7 +934,7 @@ void AvatarData::stopPlaying() {
         return;
     }
     if (_player) {
-        _player->stopPlaying();
+        _player->stop();
     }
 }
 
@@ -1514,3 +1506,177 @@ void registerAvatarTypes(QScriptEngine* engine) {
         new AttachmentDataObject(), QScriptEngine::ScriptOwnership));
 }
 
+void AvatarData::setRecordingBasis(std::shared_ptr<UniformTransform> recordingBasis) {
+    if (!recordingBasis) {
+        recordingBasis = std::make_shared<UniformTransform>();
+        recordingBasis->rotation = getOrientation();
+        recordingBasis->translation = getPosition();
+        recordingBasis->scale = getTargetScale();
+    }
+    _recordingBasis = recordingBasis;
+}
+
+void AvatarData::clearRecordingBasis() {
+    _recordingBasis.reset();
+}
+
+static const QString JSON_AVATAR_BASIS = QStringLiteral("basisTransform");
+static const QString JSON_AVATAR_RELATIVE = QStringLiteral("relativeTransform");
+static const QString JSON_AVATAR_JOINT_ROTATIONS = QStringLiteral("jointRotations");
+static const QString JSON_AVATAR_HEAD = QStringLiteral("head");
+static const QString JSON_AVATAR_HEAD_ROTATION = QStringLiteral("rotation");
+static const QString JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS = QStringLiteral("blendShapes");
+static const QString JSON_AVATAR_HEAD_LEAN_FORWARD = QStringLiteral("leanForward");
+static const QString JSON_AVATAR_HEAD_LEAN_SIDEWAYS = QStringLiteral("leanSideways");
+static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
+static const QString JSON_AVATAR_HEAD_MODEL = QStringLiteral("headModel");
+static const QString JSON_AVATAR_BODY_MODEL = QStringLiteral("bodyModel");
+static const QString JSON_AVATAR_DISPLAY_NAME = QStringLiteral("displayName");
+static const QString JSON_AVATAR_ATTACHEMENTS = QStringLiteral("attachments");
+
+
+// Every frame will store both a basis for the recording and a relative transform
+// This allows the application to decide whether playback should be relative to an avatar's 
+// transform at the start of playback, or relative to the transform of the recorded 
+// avatar
+QByteArray avatarStateToFrame(const AvatarData* _avatar) {
+    QJsonObject root;
+
+    if (!_avatar->getFaceModelURL().isEmpty()) {
+        root[JSON_AVATAR_HEAD_MODEL] = _avatar->getFaceModelURL().toString();
+    }
+    if (!_avatar->getSkeletonModelURL().isEmpty()) {
+        root[JSON_AVATAR_BODY_MODEL] = _avatar->getSkeletonModelURL().toString();
+    }
+    if (!_avatar->getDisplayName().isEmpty()) {
+        root[JSON_AVATAR_DISPLAY_NAME] = _avatar->getDisplayName();
+    }
+    if (!_avatar->getAttachmentData().isEmpty()) {
+        // FIXME serialize attachment data
+    }
+
+    auto recordingBasis = _avatar->getRecordingBasis();
+    if (recordingBasis) {
+        // FIXME if the resulting relative basis is identity, we shouldn't record anything
+        // Record the transformation basis
+        root[JSON_AVATAR_BASIS] = recordingBasis->toJson();
+
+        // Record the relative transform
+        auto relativeTransform = recordingBasis->relativeTransform(
+            UniformTransform(_avatar->getPosition(), _avatar->getOrientation(), _avatar->getTargetScale()));
+
+        root[JSON_AVATAR_RELATIVE] = relativeTransform.toJson();
+    }
+
+    QJsonArray jointRotations;
+    for (const auto& jointRotation : _avatar->getJointRotations()) {
+        jointRotations.push_back(toJsonValue(jointRotation));
+    }
+    root[JSON_AVATAR_JOINT_ROTATIONS] = jointRotations;
+
+    const HeadData* head = _avatar->getHeadData();
+    if (head) {
+        QJsonObject headJson;
+        QJsonArray blendshapeCoefficients;
+        for (const auto& blendshapeCoefficient : head->getBlendshapeCoefficients()) {
+            blendshapeCoefficients.push_back(blendshapeCoefficient);
+        }
+        headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS] = blendshapeCoefficients;
+        headJson[JSON_AVATAR_HEAD_ROTATION] = toJsonValue(head->getRawOrientation());
+        headJson[JSON_AVATAR_HEAD_LEAN_FORWARD] = QJsonValue(head->getLeanForward());
+        headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS] = QJsonValue(head->getLeanSideways());
+        vec3 relativeLookAt = glm::inverse(_avatar->getOrientation()) * 
+            (head->getLookAtPosition() - _avatar->getPosition());
+        headJson[JSON_AVATAR_HEAD_LOOKAT] = toJsonValue(relativeLookAt);
+        root[JSON_AVATAR_HEAD] = headJson;
+    }
+
+    return QJsonDocument(root).toBinaryData();
+}
+
+void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar) {
+    QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
+    QJsonObject root = doc.object();
+
+    if (root.contains(JSON_AVATAR_HEAD_MODEL)) {
+        auto faceModelURL = root[JSON_AVATAR_HEAD_MODEL].toString();
+        if (faceModelURL != _avatar->getFaceModelURL().toString()) {
+            _avatar->setFaceModelURL(faceModelURL);
+        }
+    }
+    if (root.contains(JSON_AVATAR_BODY_MODEL)) {
+        auto bodyModelURL = root[JSON_AVATAR_BODY_MODEL].toString();
+        if (bodyModelURL != _avatar->getSkeletonModelURL().toString()) {
+            _avatar->setSkeletonModelURL(bodyModelURL);
+        }
+    }
+    if (root.contains(JSON_AVATAR_DISPLAY_NAME)) {
+        auto newDisplayName = root[JSON_AVATAR_DISPLAY_NAME].toString();
+        if (newDisplayName != _avatar->getDisplayName()) {
+            _avatar->setDisplayName(newDisplayName);
+        }
+    } 
+
+    // During playback you can either have the recording basis set to the avatar current state
+    // meaning that all playback is relative to this avatars starting position, or
+    // the basis can be loaded from the recording, meaning the playback is relative to the 
+    // original avatar location
+    // The first is more useful for playing back recordings on your own avatar, while
+    // the latter is more useful for playing back other avatars within your scene.
+    auto currentBasis = _avatar->getRecordingBasis();
+    if (!currentBasis) {
+        currentBasis = UniformTransform::parseJson(root[JSON_AVATAR_BASIS]);
+    }
+
+    auto relativeTransform = UniformTransform::parseJson(root[JSON_AVATAR_RELATIVE]);
+    auto worldTransform = currentBasis->worldTransform(*relativeTransform);
+    _avatar->setPosition(worldTransform.translation);
+    _avatar->setOrientation(worldTransform.rotation);
+    _avatar->setTargetScale(worldTransform.scale);
+
+#if 0
+    if (root.contains(JSON_AVATAR_ATTACHEMENTS)) {
+        // FIXME de-serialize attachment data
+    }
+
+    // Joint rotations are relative to the avatar, so they require no basis correction
+    if (root.contains(JSON_AVATAR_JOINT_ROTATIONS)) {
+        QVector<quat> jointRotations;
+        QJsonArray jointRotationsJson = root[JSON_AVATAR_JOINT_ROTATIONS].toArray();
+        jointRotations.reserve(jointRotationsJson.size());
+        for (const auto& jointRotationJson : jointRotationsJson) {
+            jointRotations.push_back(quatFromJsonValue(jointRotationJson));
+        }
+    }
+
+    // Most head data is relative to the avatar, and needs no basis correction,
+    // but the lookat vector does need correction
+    HeadData* head = _avatar->_headData;
+    if (head && root.contains(JSON_AVATAR_HEAD)) {
+        QJsonObject headJson = root[JSON_AVATAR_HEAD].toObject();
+        if (headJson.contains(JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS)) {
+            QVector<float> blendshapeCoefficients;
+            QJsonArray blendshapeCoefficientsJson = headJson[JSON_AVATAR_HEAD_BLENDSHAPE_COEFFICIENTS].toArray();
+            for (const auto& blendshapeCoefficient : blendshapeCoefficientsJson) {
+                blendshapeCoefficients.push_back((float)blendshapeCoefficient.toDouble());
+            }
+            head->setBlendshapeCoefficients(blendshapeCoefficients);
+        }
+        if (headJson.contains(JSON_AVATAR_HEAD_ROTATION)) {
+            head->setOrientation(quatFromJsonValue(headJson[JSON_AVATAR_HEAD_ROTATION]));
+        }
+        if (headJson.contains(JSON_AVATAR_HEAD_LEAN_FORWARD)) {
+            head->setLeanForward((float)headJson[JSON_AVATAR_HEAD_LEAN_FORWARD].toDouble());
+        }
+        if (headJson.contains(JSON_AVATAR_HEAD_LEAN_SIDEWAYS)) {
+            head->setLeanSideways((float)headJson[JSON_AVATAR_HEAD_LEAN_SIDEWAYS].toDouble());
+        }
+        if (headJson.contains(JSON_AVATAR_HEAD_LOOKAT)) {
+            auto relativeLookAt = vec3FromJsonValue(headJson[JSON_AVATAR_HEAD_LOOKAT]);
+            if (glm::length2(relativeLookAt) > 0.01) {
+                head->setLookAtPosition((_avatar->getOrientation() * relativeLookAt) + _avatar->getPosition());
+            }
+        }
+    }
+#endif
+}
diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h
index 9079f15f53..ebb6e1a78f 100644
--- a/libraries/avatars/src/AvatarData.h
+++ b/libraries/avatars/src/AvatarData.h
@@ -134,6 +134,7 @@ class QDataStream;
 
 class AttachmentData;
 class JointData;
+struct UniformTransform;
 
 class AvatarData : public QObject {
     Q_OBJECT
@@ -332,6 +333,11 @@ public:
 
     bool shouldDie() const { return _owningAvatarMixer.isNull() || getUsecsSinceLastUpdate() > AVATAR_SILENCE_THRESHOLD_USECS; }
 
+    void clearRecordingBasis();
+    std::shared_ptr<UniformTransform> getRecordingBasis() const;
+    void setRecordingBasis(std::shared_ptr<UniformTransform> recordingBasis = std::shared_ptr<UniformTransform>());
+
+
 public slots:
     void sendAvatarDataPacket();
     void sendIdentityPacket();
@@ -344,17 +350,13 @@ public slots:
     
     bool isPlaying();
     bool isPaused();
-    qint64 playerElapsed();
-    qint64 playerLength();
-    int playerCurrentFrame();
-    int playerFrameNumber();
-    
-    void loadRecording(QString filename);
+    float playerElapsed();
+    float playerLength();
+    void loadRecording(const QString& filename);
     void startPlaying();
     void setPlayerVolume(float volume);
-    void setPlayerAudioOffset(int audioOffset);
-    void setPlayerFrame(unsigned int frame);
-    void setPlayerTime(unsigned int time);
+    void setPlayerAudioOffset(float audioOffset);
+    void setPlayerTime(float time);
     void setPlayFromCurrentLocation(bool playFromCurrentLocation);
     void setPlayerLoop(bool loop);
     void setPlayerUseDisplayName(bool useDisplayName);
@@ -364,7 +366,7 @@ public slots:
     void play();
     void pausePlayer();
     void stopPlaying();
-    
+
 protected:
     QUuid _sessionUUID;
     glm::vec3 _position = START_LOCATION;
@@ -418,7 +420,7 @@ protected:
     
     QWeakPointer<Node> _owningAvatarMixer;
     
-    PlayerPointer _player;
+    recording::DeckPointer _player;
     
     /// Loads the joint indices, names from the FST file (if any)
     virtual void updateJointMappings();
@@ -432,8 +434,13 @@ protected:
     SimpleMovingAverage _averageBytesReceived;
 
     QMutex avatarLock; // Name is redundant, but it aids searches.
+    
+    // During recording, this holds the starting position, orientation & scale of the recorded avatar
+    // During playback, it holds the 
+    std::shared_ptr<UniformTransform> _recordingBasis;
 
 private:
+    friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
     static QUrl _defaultFullAvatarModelUrl;
     // privatize the copy constructor and assignment operator so they cannot be called
     AvatarData(const AvatarData&);
diff --git a/libraries/avatars/src/HeadData.cpp b/libraries/avatars/src/HeadData.cpp
index e853a3c57e..e971b184c8 100644
--- a/libraries/avatars/src/HeadData.cpp
+++ b/libraries/avatars/src/HeadData.cpp
@@ -42,8 +42,20 @@ HeadData::HeadData(AvatarData* owningAvatar) :
     
 }
 
+glm::quat HeadData::getRawOrientation() const {
+    return glm::quat(glm::radians(glm::vec3(_basePitch, _baseYaw, _baseRoll)));
+}
+
+void HeadData::setRawOrientation(const glm::quat& q) {
+    auto euler = glm::eulerAngles(q);
+    _basePitch = euler.x;
+    _baseYaw = euler.y;
+    _baseRoll = euler.z;
+}
+
+
 glm::quat HeadData::getOrientation() const {
-    return _owningAvatar->getOrientation() * glm::quat(glm::radians(glm::vec3(_basePitch, _baseYaw, _baseRoll)));
+    return _owningAvatar->getOrientation() * getRawOrientation();
 }
 
 void HeadData::setOrientation(const glm::quat& orientation) {
diff --git a/libraries/avatars/src/HeadData.h b/libraries/avatars/src/HeadData.h
index e2c3f69c39..38503f6e1e 100644
--- a/libraries/avatars/src/HeadData.h
+++ b/libraries/avatars/src/HeadData.h
@@ -48,6 +48,8 @@ public:
     virtual float getFinalYaw() const { return _baseYaw; }
     virtual float getFinalPitch() const { return _basePitch; }
     virtual float getFinalRoll() const { return _baseRoll; }
+    virtual glm::quat getRawOrientation() const;
+    virtual void setRawOrientation(const glm::quat& orientation);
 
     glm::quat getOrientation() const;
     void setOrientation(const glm::quat& orientation);
diff --git a/libraries/avatars/src/Player.cpp b/libraries/avatars/src/Player.cpp
index 47fc1390d9..31efb4cd9c 100644
--- a/libraries/avatars/src/Player.cpp
+++ b/libraries/avatars/src/Player.cpp
@@ -9,6 +9,8 @@
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
+
+#if 0
 #include <AudioConstants.h>
 #include <GLMHelpers.h>
 #include <NodeList.h>
@@ -438,3 +440,4 @@ bool Player::computeCurrentFrame() {
     return true;
 }
 
+#endif
diff --git a/libraries/avatars/src/Player.h b/libraries/avatars/src/Player.h
index 96f3cbc268..558ff309e6 100644
--- a/libraries/avatars/src/Player.h
+++ b/libraries/avatars/src/Player.h
@@ -12,6 +12,9 @@
 #ifndef hifi_Player_h
 #define hifi_Player_h
 
+#include <recording/Forward.h>
+
+#if 0
 #include <AudioInjector.h>
 
 #include <QElapsedTimer>
@@ -86,5 +89,6 @@ private:
     bool _useHeadURL;
     bool _useSkeletonURL;
 };
+#endif
 
 #endif // hifi_Player_h
diff --git a/libraries/avatars/src/Recorder.cpp b/libraries/avatars/src/Recorder.cpp
index 5e47c296eb..343302d472 100644
--- a/libraries/avatars/src/Recorder.cpp
+++ b/libraries/avatars/src/Recorder.cpp
@@ -10,6 +10,7 @@
 //
 
 
+#if 0
 #include <GLMHelpers.h>
 #include <NodeList.h>
 #include <StreamUtils.h>
@@ -143,3 +144,4 @@ void Recorder::record() {
 void Recorder::recordAudio(const QByteArray& audioByteArray) {
     _recording->addAudioPacket(audioByteArray);
 }
+#endif
diff --git a/libraries/avatars/src/Recorder.h b/libraries/avatars/src/Recorder.h
index f81539a417..15bffcec8b 100644
--- a/libraries/avatars/src/Recorder.h
+++ b/libraries/avatars/src/Recorder.h
@@ -12,6 +12,9 @@
 #ifndef hifi_Recorder_h
 #define hifi_Recorder_h
 
+#include <recording/Forward.h>
+
+#if 0
 #include "Recording.h"
 
 template<class C>
@@ -49,6 +52,6 @@ private:
 
     AvatarData* _avatar;
 };
-
+#endif
 
 #endif // hifi_Recorder_h
diff --git a/libraries/avatars/src/Recording.cpp b/libraries/avatars/src/Recording.cpp
index 26c5ab66dd..884ed495be 100644
--- a/libraries/avatars/src/Recording.cpp
+++ b/libraries/avatars/src/Recording.cpp
@@ -9,6 +9,7 @@
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
+#if 0
 #include <AudioConstants.h>
 #include <GLMHelpers.h>
 #include <NetworkAccessManager.h>
@@ -659,3 +660,4 @@ RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString
     return recording;
 }
 
+#endif
diff --git a/libraries/avatars/src/Recording.h b/libraries/avatars/src/Recording.h
index 7657a12b46..a5829b1e2f 100644
--- a/libraries/avatars/src/Recording.h
+++ b/libraries/avatars/src/Recording.h
@@ -12,6 +12,8 @@
 #ifndef hifi_Recording_h
 #define hifi_Recording_h
 
+#if 0
+
 #include <QString>
 #include <QVector>
 
@@ -124,5 +126,6 @@ private:
 
 void writeRecordingToFile(RecordingPointer recording, const QString& filename);
 RecordingPointer readRecordingFromFile(RecordingPointer recording, const QString& filename);
-
+RecordingPointer readRecordingFromRecFile(RecordingPointer recording, const QString& filename, const QByteArray& byteArray);
+#endif
 #endif // hifi_Recording_h
diff --git a/libraries/recording/src/recording/Clip.cpp b/libraries/recording/src/recording/Clip.cpp
index 09acf0579f..ba13b359f0 100644
--- a/libraries/recording/src/recording/Clip.cpp
+++ b/libraries/recording/src/recording/Clip.cpp
@@ -35,7 +35,7 @@ Clip::Pointer Clip::duplicate() {
     Clip::Pointer result = std::make_shared<BufferClip>();
 
     Locker lock(_mutex);
-    float currentPosition = position();
+    Time currentPosition = position();
     seek(0);
 
     Frame::Pointer frame = nextFrame();
diff --git a/libraries/recording/src/recording/Clip.h b/libraries/recording/src/recording/Clip.h
index e7034ef077..70fc4b3f7f 100644
--- a/libraries/recording/src/recording/Clip.h
+++ b/libraries/recording/src/recording/Clip.h
@@ -28,11 +28,11 @@ public:
 
     Pointer duplicate();
 
-    virtual float duration() const = 0;
+    virtual Time duration() const = 0;
     virtual size_t frameCount() const = 0;
 
-    virtual void seek(float offset) = 0;
-    virtual float position() const = 0;
+    virtual void seek(Time offset) = 0;
+    virtual Time position() const = 0;
 
     virtual FramePointer peekFrame() const = 0;
     virtual FramePointer nextFrame() = 0;
diff --git a/libraries/recording/src/recording/Deck.cpp b/libraries/recording/src/recording/Deck.cpp
index f0db37078b..4349a39732 100644
--- a/libraries/recording/src/recording/Deck.cpp
+++ b/libraries/recording/src/recording/Deck.cpp
@@ -6,7 +6,116 @@
 //  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 
+#include "Deck.h"
+ 
+#include <NumericalConstants.h>
+#include <SharedUtil.h>
 
-// FIXME -- DO NOT include headers in empty CPP files, it produces warnings. Once we define new symbols
-// and some actual code here, we can uncomment this include.
-//#include "Deck.h"
+#include "Clip.h"
+#include "Frame.h"
+#include "Logging.h"
+
+using namespace recording;
+
+void Deck::queueClip(ClipPointer clip, Time timeOffset) {
+    if (!clip) {
+        qCWarning(recordingLog) << "Clip invalid, ignoring";
+        return;
+    }
+
+    // FIXME if the time offset is not zero, wrap the clip in a OffsetClip wrapper
+    _clips.push_back(clip);
+}
+
+void Deck::play() { 
+    if (_pause) {
+        _pause = false;
+        _startEpoch = usecTimestampNow() - (_position * USECS_PER_MSEC);
+        emit playbackStateChanged();
+        processFrames();
+    }
+}
+
+void Deck::pause() { 
+    if (!_pause) {
+        _pause = true;
+        emit playbackStateChanged();
+    }
+}
+
+Clip::Pointer Deck::getNextClip() {
+    Clip::Pointer result;
+    Time soonestFramePosition = INVALID_TIME;
+    for (const auto& clip : _clips) {
+        Time nextFramePosition = clip->position();
+        if (nextFramePosition < soonestFramePosition) {
+            result = clip;
+            soonestFramePosition = nextFramePosition;
+        }
+    }
+    return result;
+}
+
+void Deck::seek(Time position) {
+    _position = position;
+    // FIXME reset the frames to the appropriate spot
+    for (auto& clip : _clips) {
+        clip->seek(position);
+    }
+
+    if (!_pause) {
+        // FIXME what if the timer is already running?
+        processFrames();
+    }
+}
+
+Time Deck::position() const {
+    if (_pause) {
+        return _position;
+    }
+    return (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC;
+}
+
+static const Time MIN_FRAME_WAIT_INTERVAL_MS = 1;
+
+void Deck::processFrames() {
+    if (_pause) {
+        return;
+    }
+
+    _position = position();
+    auto triggerPosition = _position + MIN_FRAME_WAIT_INTERVAL_MS;
+    Clip::Pointer nextClip;
+    for (nextClip = getNextClip(); nextClip; nextClip = getNextClip()) {
+        // If the clip is too far in the future, just break out of the handling loop
+        Time framePosition = nextClip->position();
+        if (framePosition > triggerPosition) {
+            break;
+        }
+
+        // Handle the frame and advance the clip
+        Frame::handleFrame(nextClip->nextFrame());
+    }
+
+
+    if (!nextClip) {
+        qCDebug(recordingLog) << "No more frames available";
+        // No more frames available, so handle the end of playback
+        if (_loop) {
+            qCDebug(recordingLog) << "Looping enabled, seeking back to beginning";
+            // If we have looping enabled, start the playback over
+            seek(0);
+        } else {
+            // otherwise pause playback
+            pause();
+        }
+        return;
+    } 
+
+    // If we have more clip frames available, set the timer for the next one
+    Time nextClipPosition = nextClip->position();
+    Time interval = nextClipPosition - _position;
+    QTimer::singleShot(interval, this, [this] {
+        processFrames();
+    });
+}
diff --git a/libraries/recording/src/recording/Deck.h b/libraries/recording/src/recording/Deck.h
index 2ae8d6a7be..a3b4405210 100644
--- a/libraries/recording/src/recording/Deck.h
+++ b/libraries/recording/src/recording/Deck.h
@@ -10,26 +10,62 @@
 #ifndef hifi_Recording_Deck_h
 #define hifi_Recording_Deck_h
 
-#include "Forward.h"
+#include <utility>
+#include <list>
 
 #include <QtCore/QObject>
+#include <QtCore/QTimer>
+
+#include "Forward.h"
 
-class QIODevice;
 
 namespace recording {
 
 class Deck : public QObject {
+    Q_OBJECT
 public:
     using Pointer = std::shared_ptr<Deck>;
-
     Deck(QObject* parent = nullptr) : QObject(parent) {}
-    virtual ~Deck();
 
     // Place a clip on the deck for recording or playback
-    void queueClip(ClipPointer clip, float timeOffset = 0.0f);
-    void play(float timeOffset = 0.0f);
-    void reposition(float timeOffsetDelta);
-    void setPlaybackSpeed(float rate);
+    void queueClip(ClipPointer clip, Time timeOffset = 0);
+
+    void play();
+    bool isPlaying() const { return !_pause; }
+
+    void pause();
+    bool isPaused() const { return _pause; }
+
+    void stop() { pause(); seek(0); }
+
+    Time length() const { return _length; }
+
+    void loop(bool enable = true) { _loop = enable; }
+    bool isLooping() const { return _loop; }
+
+    Time position() const;
+    void seek(Time position);
+
+    void setPlaybackSpeed(float factor) { _playbackSpeed = factor; }
+    float getPlaybackSpeed() const { return _playbackSpeed; }
+
+signals:
+    void playbackStateChanged();
+
+private:
+    using Clips = std::list<ClipPointer>;
+
+    ClipPointer getNextClip();
+    void processFrames();
+
+    QTimer _timer;
+    Clips _clips;
+    quint64 _startEpoch { 0 };
+    Time _position { 0 };
+    float _playbackSpeed { 1.0f };
+    bool _pause { true };
+    bool _loop { false };
+    Time _length { 0 };
 };
 
 }
diff --git a/libraries/recording/src/recording/Forward.h b/libraries/recording/src/recording/Forward.h
index 5bd6dd917f..31aa40521c 100644
--- a/libraries/recording/src/recording/Forward.h
+++ b/libraries/recording/src/recording/Forward.h
@@ -12,11 +12,18 @@
 
 #include <memory>
 #include <list>
+#include <limits>
 
 namespace recording {
 
+using Time = uint32_t;
+
+static const Time INVALID_TIME = std::numeric_limits<uint32_t>::max();
+
 using FrameType = uint16_t;
 
+using FrameSize = uint16_t;
+
 struct Frame;
 
 using FramePointer = std::shared_ptr<Frame>;
diff --git a/libraries/recording/src/recording/Frame.cpp b/libraries/recording/src/recording/Frame.cpp
index aac8a4d9c3..ad2a583424 100644
--- a/libraries/recording/src/recording/Frame.cpp
+++ b/libraries/recording/src/recording/Frame.cpp
@@ -82,7 +82,8 @@ FrameType Frame::registerFrameType(const QString& frameTypeName) {
         Q_ASSERT(headerType == Frame::TYPE_HEADER);
         Q_UNUSED(headerType); // FIXME - build system on unix still not upgraded to Qt 5.5.1 so Q_ASSERT still produces warnings
     });
-    return frameTypes.registerValue(frameTypeName);
+    auto result = frameTypes.registerValue(frameTypeName);
+    return result;
 }
 
 QMap<QString, FrameType> Frame::getFrameTypes() {
@@ -102,3 +103,16 @@ Frame::Handler Frame::registerFrameHandler(FrameType type, Handler handler) {
     handlerMap[type] = handler;
     return result;
 }
+
+void Frame::handleFrame(const Frame::Pointer& frame) {
+    Handler handler; 
+    {
+        Locker lock(mutex);
+        auto iterator = handlerMap.find(frame->type);
+        if (iterator == handlerMap.end()) {
+            return;
+        }
+        handler = *iterator;
+    }
+    handler(frame);
+}
diff --git a/libraries/recording/src/recording/Frame.h b/libraries/recording/src/recording/Frame.h
index 0fb95c4b2e..563b042656 100644
--- a/libraries/recording/src/recording/Frame.h
+++ b/libraries/recording/src/recording/Frame.h
@@ -26,7 +26,7 @@ public:
     static const FrameType TYPE_INVALID = 0xFFFF;
     static const FrameType TYPE_HEADER = 0x0;
     FrameType type { TYPE_INVALID };
-    float timeOffset { 0 };
+    Time timeOffset { 0 };
     QByteArray data;
 
     Frame() {}
@@ -37,6 +37,7 @@ public:
     static QMap<QString, FrameType> getFrameTypes();
     static QMap<FrameType, QString> getFrameTypeNames();
     static Handler registerFrameHandler(FrameType type, Handler handler);
+    static void handleFrame(const Pointer& frame);
 };
 
 }
diff --git a/libraries/recording/src/recording/Recorder.cpp b/libraries/recording/src/recording/Recorder.cpp
index b2e7399cd4..f007367cae 100644
--- a/libraries/recording/src/recording/Recorder.cpp
+++ b/libraries/recording/src/recording/Recorder.cpp
@@ -9,25 +9,35 @@
 #include "Recorder.h"
 
 #include <NumericalConstants.h>
+#include <SharedUtil.h>
 
 #include "impl/BufferClip.h"
 #include "Frame.h"
 
 using namespace recording;
 
+Recorder::~Recorder() {
+
+}
+
+Time Recorder::position() {
+    return 0;
+}
+
 void Recorder::start() {
     if (!_recording) {
         _recording = true;
         if (!_clip) {
             _clip = std::make_shared<BufferClip>();
         }
+        _startEpoch = usecTimestampNow();
         _timer.start();
         emit recordingStateChanged();
     }
 }
 
 void Recorder::stop() {
-    if (!_recording) {
+    if (_recording) {
         _recording = false;
         _elapsed = _timer.elapsed();
         emit recordingStateChanged();
@@ -50,13 +60,11 @@ void Recorder::recordFrame(FrameType type, QByteArray frameData) {
     Frame::Pointer frame = std::make_shared<Frame>();
     frame->type = type;
     frame->data = frameData;
-    frame->timeOffset = (float)(_elapsed + _timer.elapsed()) / MSECS_PER_SECOND;
+    frame->timeOffset = (usecTimestampNow() - _startEpoch) / USECS_PER_MSEC;
     _clip->addFrame(frame);
 }
 
 ClipPointer Recorder::getClip() {
-    auto result = _clip;
-    _clip.reset();
-    return result;
+    return _clip;
 }
 
diff --git a/libraries/recording/src/recording/Recorder.h b/libraries/recording/src/recording/Recorder.h
index deae543bb0..f8346456d4 100644
--- a/libraries/recording/src/recording/Recorder.h
+++ b/libraries/recording/src/recording/Recorder.h
@@ -20,18 +20,23 @@ namespace recording {
 // An interface for interacting with clips, creating them by recording or
 // playing them back.  Also serialization to and from files / network sources
 class Recorder : public QObject {
+    Q_OBJECT
 public:
     using Pointer = std::shared_ptr<Recorder>;
 
     Recorder(QObject* parent = nullptr) : QObject(parent) {}
     virtual ~Recorder();
 
+    Time position();
+
     // Start recording frames
     void start();
     // Stop recording
     void stop();
+
     // Test if recording is active
     bool isRecording();
+
     // Erase the currently recorded content
     void clear();
 
@@ -46,7 +51,8 @@ signals:
 private:
     QElapsedTimer _timer;
     ClipPointer _clip;
-    quint64 _elapsed;
+    quint64 _elapsed { 0 };
+    quint64 _startEpoch { 0 };
     bool _recording { false };
 };
 
diff --git a/libraries/recording/src/recording/impl/BufferClip.cpp b/libraries/recording/src/recording/impl/BufferClip.cpp
index 4d5a910d42..5dc75bbce2 100644
--- a/libraries/recording/src/recording/impl/BufferClip.cpp
+++ b/libraries/recording/src/recording/impl/BufferClip.cpp
@@ -8,24 +8,26 @@
 
 #include "BufferClip.h"
 
+#include <NumericalConstants.h>
+
 #include "../Frame.h"
 
 using namespace recording;
 
 
-void BufferClip::seek(float offset) {
+void BufferClip::seek(Time offset) {
     Locker lock(_mutex);
-    auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset, 
-        [](Frame::Pointer a, float b)->bool{
+    auto itr = std::lower_bound(_frames.begin(), _frames.end(), offset,
+        [](Frame::Pointer a, Time b)->bool {
             return a->timeOffset < b;
         }
     );
     _frameIndex = itr - _frames.begin();
 }
 
-float BufferClip::position() const {
+Time BufferClip::position() const {
     Locker lock(_mutex);
-    float result = std::numeric_limits<float>::max();
+    Time result = INVALID_TIME;
     if (_frameIndex < _frames.size()) {
         result = _frames[_frameIndex]->timeOffset;
     }
@@ -77,7 +79,7 @@ void BufferClip::reset() {
     _frameIndex = 0;
 }
 
-float BufferClip::duration() const {
+Time BufferClip::duration() const {
     if (_frames.empty()) {
         return 0;
     }
diff --git a/libraries/recording/src/recording/impl/BufferClip.h b/libraries/recording/src/recording/impl/BufferClip.h
index b40687a4ec..bfb0234600 100644
--- a/libraries/recording/src/recording/impl/BufferClip.h
+++ b/libraries/recording/src/recording/impl/BufferClip.h
@@ -22,11 +22,11 @@ public:
 
     virtual ~BufferClip() {}
 
-    virtual float duration() const override;
+    virtual Time duration() const override;
     virtual size_t frameCount() const override;
 
-    virtual void seek(float offset) override;
-    virtual float position() const override;
+    virtual void seek(Time offset) override;
+    virtual Time position() const override;
 
     virtual FramePointer peekFrame() const override;
     virtual FramePointer nextFrame() override;
diff --git a/libraries/recording/src/recording/impl/FileClip.cpp b/libraries/recording/src/recording/impl/FileClip.cpp
index be7230e3f8..e64085517a 100644
--- a/libraries/recording/src/recording/impl/FileClip.cpp
+++ b/libraries/recording/src/recording/impl/FileClip.cpp
@@ -22,7 +22,7 @@
 
 using namespace recording;
 
-static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(float) + sizeof(uint16_t);
+static const qint64 MINIMUM_FRAME_SIZE = sizeof(FrameType) + sizeof(Time) + sizeof(FrameSize);
 
 static const QString FRAME_TYPE_MAP = QStringLiteral("frameTypes");
 
@@ -60,10 +60,10 @@ FrameHeaderList parseFrameHeaders(uchar* const start, const qint64& size) {
         FrameHeader header;
         memcpy(&(header.type), current, sizeof(FrameType));
         current += sizeof(FrameType);
-        memcpy(&(header.timeOffset), current, sizeof(float));
-        current += sizeof(float);
-        memcpy(&(header.size), current, sizeof(uint16_t));
-        current += sizeof(uint16_t);
+        memcpy(&(header.timeOffset), current, sizeof(Time));
+        current += sizeof(Time);
+        memcpy(&(header.size), current, sizeof(FrameSize));
+        current += sizeof(FrameSize);
         header.fileOffset = current - start;
         if (end - current < header.size) {
             current = end;
@@ -117,6 +117,7 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
             qWarning() << "Header missing frame type map, invalid file";
             return;
         }
+        qDebug() << "Recording file frame type map:" << translationMap;
 
         // Update the loaded headers with the frame data
         _frameHeaders.reserve(parsedFrameHeaders.size());
@@ -132,16 +133,21 @@ FileClip::FileClip(const QString& fileName) : _file(fileName) {
 
 // FIXME move to frame?
 bool writeFrame(QIODevice& output, const Frame& frame) {
+    if (frame.type == Frame::TYPE_INVALID) {
+        qWarning() << "Attempting to write invalid frame";
+        return true;
+    }
+
     auto written = output.write((char*)&(frame.type), sizeof(FrameType));
     if (written != sizeof(FrameType)) {
         return false;
     }
-    written = output.write((char*)&(frame.timeOffset), sizeof(float));
-    if (written != sizeof(float)) {
+    written = output.write((char*)&(frame.timeOffset), sizeof(Time));
+    if (written != sizeof(Time)) {
         return false;
     }
     uint16_t dataSize = frame.data.size();
-    written = output.write((char*)&dataSize, sizeof(uint16_t));
+    written = output.write((char*)&dataSize, sizeof(FrameSize));
     if (written != sizeof(uint16_t)) {
         return false;
     }
@@ -201,19 +207,19 @@ FileClip::~FileClip() {
     }
 }
 
-void FileClip::seek(float offset) {
+void FileClip::seek(Time offset) {
     Locker lock(_mutex);
     auto itr = std::lower_bound(_frameHeaders.begin(), _frameHeaders.end(), offset,
-        [](const FrameHeader& a, float b)->bool {
+        [](const FrameHeader& a, Time b)->bool {
             return a.timeOffset < b;
         }
     );
     _frameIndex = itr - _frameHeaders.begin();
 }
 
-float FileClip::position() const {
+Time FileClip::position() const {
     Locker lock(_mutex);
-    float result = std::numeric_limits<float>::max();
+    Time result = INVALID_TIME;
     if (_frameIndex < _frameHeaders.size()) {
         result = _frameHeaders[_frameIndex].timeOffset;
     }
@@ -260,7 +266,7 @@ void FileClip::addFrame(FramePointer) {
     throw std::runtime_error("File clips are read only");
 }
 
-float FileClip::duration() const {
+Time FileClip::duration() const {
     if (_frameHeaders.empty()) {
         return 0;
     }
diff --git a/libraries/recording/src/recording/impl/FileClip.h b/libraries/recording/src/recording/impl/FileClip.h
index 08eacd8337..78256dcf23 100644
--- a/libraries/recording/src/recording/impl/FileClip.h
+++ b/libraries/recording/src/recording/impl/FileClip.h
@@ -26,11 +26,11 @@ public:
     FileClip(const QString& file);
     virtual ~FileClip();
 
-    virtual float duration() const override;
+    virtual Time duration() const override;
     virtual size_t frameCount() const override;
 
-    virtual void seek(float offset) override;
-    virtual float position() const override;
+    virtual void seek(Time offset) override;
+    virtual Time position() const override;
 
     virtual FramePointer peekFrame() const override;
     virtual FramePointer nextFrame() override;
@@ -45,7 +45,7 @@ public:
 
     struct FrameHeader {
         FrameType type;
-        float timeOffset;
+        Time timeOffset;
         uint16_t size;
         quint64 fileOffset;
     };
diff --git a/libraries/script-engine/CMakeLists.txt b/libraries/script-engine/CMakeLists.txt
index 5e3d135034..3796abd92a 100644
--- a/libraries/script-engine/CMakeLists.txt
+++ b/libraries/script-engine/CMakeLists.txt
@@ -1,3 +1,3 @@
 set(TARGET_NAME script-engine)
 setup_hifi_library(Gui Network Script WebSockets Widgets)
-link_hifi_libraries(shared networking octree gpu procedural model model-networking fbx entities controllers animation audio physics)
+link_hifi_libraries(shared networking octree gpu procedural model model-networking recording avatars fbx entities controllers animation audio physics)
diff --git a/libraries/shared/src/shared/JSONHelpers.cpp b/libraries/shared/src/shared/JSONHelpers.cpp
new file mode 100644
index 0000000000..dc872574fe
--- /dev/null
+++ b/libraries/shared/src/shared/JSONHelpers.cpp
@@ -0,0 +1,57 @@
+//
+//  Created by Bradley Austin Davis on 2015/11/09
+//  Copyright 2013-2015 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "JSONHelpers.h"
+
+#include <QtCore/QJsonValue>
+#include <QtCore/QJsonObject>
+#include <QtCore/QJsonArray>
+
+template <typename T> 
+QJsonValue glmToJson(const T& t) {
+    static const T DEFAULT_VALUE = T();
+    if (t == DEFAULT_VALUE) {
+        return QJsonValue();
+    }
+    QJsonArray result;
+    for (auto i = 0; i < t.length(); ++i) {
+        result.push_back(t[i]);
+    }
+    return result;
+}
+
+template <typename T> 
+T glmFromJson(const QJsonValue& json) {
+    static const T DEFAULT_VALUE = T();
+    T result;
+    if (json.isArray()) {
+        QJsonArray array = json.toArray();
+        size_t length = std::min(array.size(), result.length());
+        for (size_t i = 0; i < length; ++i) {
+            result[i] = (float)array[i].toDouble();
+        }
+    }
+    return result;
+}
+
+QJsonValue toJsonValue(const quat& q) {
+    return glmToJson(q);
+}
+
+QJsonValue toJsonValue(const vec3& v) {
+    return glmToJson(v);
+}
+
+quat quatFromJsonValue(const QJsonValue& q) {
+    return glmFromJson<quat>(q);
+}
+
+vec3 vec3FromJsonValue(const QJsonValue& v) {
+    return glmFromJson<vec3>(v);
+}
+
diff --git a/libraries/shared/src/shared/JSONHelpers.h b/libraries/shared/src/shared/JSONHelpers.h
new file mode 100644
index 0000000000..0eda8ac94a
--- /dev/null
+++ b/libraries/shared/src/shared/JSONHelpers.h
@@ -0,0 +1,23 @@
+//
+//  Created by Bradley Austin Davis on 2015/11/09
+//  Copyright 2013-2015 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#pragma once
+#ifndef hifi_Shared_JSONHelpers_h
+#define hifi_Shared_JSONHelpers_h
+
+#include "../GLMHelpers.h"
+
+QJsonValue toJsonValue(const quat& q);
+
+QJsonValue toJsonValue(const vec3& q);
+
+quat quatFromJsonValue(const QJsonValue& q);
+
+vec3 vec3FromJsonValue(const QJsonValue& q);
+
+#endif
diff --git a/libraries/shared/src/shared/UniformTransform.cpp b/libraries/shared/src/shared/UniformTransform.cpp
new file mode 100644
index 0000000000..fdcf489154
--- /dev/null
+++ b/libraries/shared/src/shared/UniformTransform.cpp
@@ -0,0 +1,84 @@
+//
+//  Created by Bradley Austin Davis on 2015/11/09
+//  Copyright 2013-2015 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "UniformTransform.h"
+
+#include "JSONHelpers.h"
+
+#include <QtCore/QJsonValue>
+#include <QtCore/QJsonObject>
+#include <QtCore/QJsonArray>
+
+#include <glm/gtc/matrix_transform.hpp>
+
+const float UniformTransform::DEFAULT_SCALE = 1.0f;
+
+std::shared_ptr<UniformTransform> UniformTransform::parseJson(const QJsonValue& basis) {
+    std::shared_ptr<UniformTransform> result = std::make_shared<UniformTransform>();
+    result->fromJson(basis);
+    return result;
+}
+
+static const QString JSON_TRANSLATION = QStringLiteral("translation");
+static const QString JSON_ROTATION = QStringLiteral("rotation");
+static const QString JSON_SCALE = QStringLiteral("scale");
+
+void UniformTransform::fromJson(const QJsonValue& basisValue) {
+    if (!basisValue.isObject()) {
+        return;
+    }
+    QJsonObject basis = basisValue.toObject();
+    if (basis.contains(JSON_ROTATION)) {
+        rotation = quatFromJsonValue(basis[JSON_ROTATION]);
+    }
+    if (basis.contains(JSON_TRANSLATION)) {
+        translation = vec3FromJsonValue(basis[JSON_TRANSLATION]);
+    }
+    if (basis.contains(JSON_SCALE)) {
+        scale = (float)basis[JSON_SCALE].toDouble();
+    }
+}
+
+glm::mat4 toMat4(const UniformTransform& transform) {
+    return glm::translate(glm::mat4(), transform.translation) * glm::mat4_cast(transform.rotation);
+}
+
+UniformTransform fromMat4(const glm::mat4& m) {
+    UniformTransform result;
+    result.translation = vec3(m[3]);
+    result.rotation = glm::quat_cast(m);
+    return result;
+}
+
+UniformTransform UniformTransform::relativeTransform(const UniformTransform& worldTransform) const {
+    UniformTransform result = fromMat4(glm::inverse(toMat4(*this)) * toMat4(worldTransform));
+    result.scale = scale / worldTransform.scale;
+    return result;
+}
+
+UniformTransform UniformTransform::worldTransform(const UniformTransform& relativeTransform) const {
+    UniformTransform result = fromMat4(toMat4(*this) * toMat4(relativeTransform));
+    result.scale = relativeTransform.scale * scale;
+    return result;
+}
+
+QJsonObject UniformTransform::toJson() const {
+    QJsonObject result;
+    auto json = toJsonValue(translation);
+    if (!json.isNull()) {
+        result[JSON_TRANSLATION] = json;
+    }
+    json = toJsonValue(rotation);
+    if (!json.isNull()) {
+        result[JSON_ROTATION] = json;
+    }
+    if (scale != DEFAULT_SCALE) {
+        result[JSON_SCALE] = scale;
+    }
+    return result;
+}
diff --git a/libraries/shared/src/shared/UniformTransform.h b/libraries/shared/src/shared/UniformTransform.h
new file mode 100644
index 0000000000..5b46de531e
--- /dev/null
+++ b/libraries/shared/src/shared/UniformTransform.h
@@ -0,0 +1,40 @@
+//
+//  Created by Bradley Austin Davis on 2015/11/09
+//  Copyright 2013-2015 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#pragma once
+#ifndef hifi_Shared_UniformTransform_h
+#define hifi_Shared_UniformTransform_h
+
+#include "../GLMHelpers.h"
+
+class QJsonValue;
+
+struct UniformTransform {
+    static const float DEFAULT_SCALE;
+    glm::vec3 translation;
+    glm::quat rotation;
+    float scale { DEFAULT_SCALE };
+
+    UniformTransform() {}
+
+    UniformTransform(const glm::vec3& translation, const glm::quat& rotation, const float& scale) 
+        : translation(translation), rotation(rotation), scale(scale) {}
+
+    UniformTransform relativeTransform(const UniformTransform& worldTransform) const;
+    glm::vec3 relativeVector(const UniformTransform& worldTransform) const;
+
+    UniformTransform worldTransform(const UniformTransform& relativeTransform) const;
+    glm::vec3 worldVector(const UniformTransform& relativeTransform) const;
+
+    QJsonObject toJson() const;
+    void fromJson(const QJsonValue& json);
+
+    static std::shared_ptr<UniformTransform> parseJson(const QJsonValue& json);
+};
+
+#endif