Device initialization and destruction is as simple as this:
void Scene::initializeOVR()
{
    OVR::HMDInfo Info;
    bool InfoLoaded = false;

    // Initialize the OVR system and create the device manager
    OVR::System::Init();
    m_manager = *OVR::DeviceManager::Create();

    // Try to enumerate an HMD first; fall back to a bare sensor if none is found
    m_HMD = *m_manager->EnumerateDevices<OVR::HMDDevice>().CreateDevice();
    if (m_HMD)
    {
        InfoLoaded = m_HMD->GetDeviceInfo(&Info);
        m_sensor   = *m_HMD->GetSensor();
    }
    else
    {
        m_sensor = *m_manager->EnumerateDevices<OVR::SensorDevice>().CreateDevice();
    }

    // Feed sensor data into the fusion filter to get orientation tracking
    if (m_sensor)
    {
        m_sensorFusion.AttachToSensor(m_sensor);
    }
}
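The HMDInfo structure filled in by GetDeviceInfo carries the display characteristics needed to set up per-eye rendering. A minimal sketch of reading a few of its fields (the member names follow the 0.2-era SDK headers; InfoLoaded and Info are the locals from initializeOVR above):

// Sketch: reading display parameters from HMDInfo (0.2-era SDK field names)
if (InfoLoaded)
{
    unsigned width  = Info.HResolution;            // full panel width in pixels (both eyes)
    unsigned height = Info.VResolution;            // panel height in pixels
    float    ipd    = Info.InterpupillaryDistance; // distance between the eyes, in meters
    float    lensSeparation = Info.LensSeparationDistance;
    // These values drive the per-eye projection and the distortion correction.
}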
void Scene::finalizeOVR()
{
    // Release devices in reverse order of creation, then shut the OVR system down
    m_sensor.Clear();
    m_HMD.Clear();
    m_manager.Clear();
    OVR::System::Destroy();
}
And getting the orientation of the Rift is just as easy:
if (m_sensorFusion.IsAttachedToSensor())
{
    // Read the current head orientation from the fusion filter;
    // the quaternion's vector part is copied straight into the view angles.
    OVR::Quatf orientation = m_sensorFusion.GetOrientation();
    m_viewAngles.x = orientation.x;
    m_viewAngles.y = orientation.y;
    m_viewAngles.z = orientation.z;
}
m_gpuKernel->setCamera( m_viewPos, m_viewDir, m_viewAngles );
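Note that copying the raw quaternion components into Euler-style view angles only approximates the rotation for small head movements. If the camera expects real yaw/pitch/roll, the SDK's quaternion class can produce them directly; a minimal sketch, assuming the 0.2-era Quatf::GetEulerAngles API and the Axis_* rotation order from the SDK samples:

// Sketch: converting the fused orientation to yaw/pitch/roll (radians)
if (m_sensorFusion.IsAttachedToSensor())
{
    OVR::Quatf orientation = m_sensorFusion.GetOrientation();
    float yaw, pitch, roll;
    orientation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&yaw, &pitch, &roll);
    m_viewAngles.x = pitch;
    m_viewAngles.y = yaw;
    m_viewAngles.z = roll;
}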
Splitting the screen for both eyes was already implemented in my engine, originally for 3D Vision, but I have to admit that the Oculus is far more convincing.
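For reference, the split itself boils down to rendering the scene twice into two half-width viewports with laterally offset cameras. A rough OpenGL-style sketch; renderEye, m_viewRight, windowWidth, windowHeight and the half-IPD value are illustrative names, not the engine's actual API:

// Sketch: side-by-side stereo split (illustrative names, not the engine's API)
const float halfIPD = 0.032f;  // half the interpupillary distance, in meters

// Left eye: left half of the window, camera shifted left
glViewport(0, 0, windowWidth / 2, windowHeight);
renderEye(m_viewPos - m_viewRight * halfIPD, m_viewDir, m_viewAngles);

// Right eye: right half of the window, camera shifted right
glViewport(windowWidth / 2, 0, windowWidth / 2, windowHeight);
renderEye(m_viewPos + m_viewRight * halfIPD, m_viewDir, m_viewAngles);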
More to come...