Commit b6cef738 authored by Pep Martí Saumell

Remove comments

parent 6fd9d88b
Pipeline #4383 failed
-#!/usr/bin/env python
-# coding: utf-8
-# In[13]:
from crocoddyl import *
import pinocchio as pin
import numpy as np
from crocoddyl.diagnostic import displayTrajectory
-# In[14]:
# LOAD ROBOT
robot = loadKinton()
robot.initViewer(loadModel=True)
@@ -22,10 +12,6 @@ robot.framesForwardKinematics(robot.q0)
rmodel = robot.model
-# In[15]:
# DEFINE TARGET POSITION
target_pos = np.array([0,0,1])
target_quat = pin.Quaternion(1, 0, 0, 0)
@@ -36,10 +22,6 @@ robot.viewer.gui.addXYZaxis('world/framegoal', [1., 0., 0., 1.], .015, 4)
robot.viewer.gui.applyConfiguration('world/framegoal', target_pos.tolist() + [target_quat[0], target_quat[1], target_quat[2], target_quat[3]])
robot.viewer.gui.refresh()
-# In[16]:
# ACTUATION MODEL
distanceRotorCOG = 0.1525
cf = 6.6e-5
@@ -98,27 +80,22 @@ terminalCostModel.addCost(name="pos", weight=0, cost=goalTrackingCost)
runningModel = IntegratedActionModelEuler(DifferentialActionModelUAM(robot.model, actModel, runningCostModel))
terminalModel = IntegratedActionModelEuler(DifferentialActionModelUAM(robot.model, actModel, terminalCostModel))
-# In[ ]:
# DEFINING THE SHOOTING PROBLEM & SOLVING
+import time
# Defining the time duration for running action models and the terminal one
-dt = 1e-3
+dt = 1e-2
runningModel.timeStep = dt
-# For this optimal control problem, we define 250 knots (or running action
-# models) plus a terminal knot
-print "----------MARK 1----------"
-T = 1000
+T = 100
q0 = rmodel.referenceConfigurations["initial_pose"].copy()
v0 = pin.utils.zero(rmodel.nv)
x0 = m2a(np.concatenate([q0, v0]))
rmodel.defaultState = x0.copy()
-print "----------MARK 2----------"
+t = time.time()
problem = ShootingProblem(x0, [runningModel] * T, terminalModel)
+print "TIME: Shooting problem, " + str(time.time()-t)
# Creating the DDP solver for this OC problem, defining a logger
fddp = SolverFDDP(problem)
@@ -126,7 +103,6 @@ fddp.callback = [CallbackDDPVerbose()]
fddp.callback.append(CallbackDDPLogger())
#fddp.setCallbacks([CallbackVerbose()])
-print "----------MARK 3----------"
# us0 = [
# m.differential.quasiStatic(d.differential, rmodel.defaultState)
# if isinstance(m, IntegratedActionModelEuler) else np.zeros(0)
@@ -134,20 +110,13 @@ print "----------MARK 3----------"
# xs0 = [problem.initialState]*len(fddp.models())
# Solving it with the DDP algorithm
-print "----------SOLVING----------"
#fddp.solve(init_xs=xs0, init_us=us0)
+t = time.time()
fddp.solve()
+print "TIME: Solve time, " + str(time.time()-t)
-# In[6]:
displayTrajectory(robot, fddp.xs, runningModel.timeStep)
-# In[7]:
# Control trajectory
f1 = []
f2 = [];
@@ -177,10 +146,6 @@ for x in fddp.xs:
    Vy.append(x[14])
    Vz.append(x[15])
-# In[9]:
import matplotlib.pyplot as plt
t = np.arange(0., 1, dt)
@@ -210,11 +175,4 @@ plt.title('State - Velocity')
plt.ylabel('Velocity, [m/s]')
plt.xlabel('[s]')
-plt.show()
-# In[ ]:
-# In[ ]:
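
Note on the elided plotting code: the loops that fill f1..f4 and Vx/Vy/Vz are not shown in this diff. A minimal sketch of an equivalent extraction-and-plot step, assuming the solver exposes the state trajectory as fddp.xs and the control trajectory as fddp.us (the latter is not visible above), and reusing the velocity indices 13-15 and plot labels from the visible lines:

import numpy as np
import matplotlib.pyplot as plt

# Stack the solver output into arrays: X holds T+1 states, U holds T controls.
X = np.vstack([np.asarray(x).flatten() for x in fddp.xs])
U = np.vstack([np.asarray(u).flatten() for u in fddp.us])
time_x = np.arange(X.shape[0]) * dt
time_u = np.arange(U.shape[0]) * dt

plt.figure()
plt.plot(time_u, U)                    # rotor commands (f1..f4)
plt.title('Control trajectory')
plt.xlabel('[s]')

plt.figure()
plt.plot(time_x, X[:, 13:16])          # base linear velocity components
plt.legend(['Vx', 'Vy', 'Vz'])
plt.title('State - Velocity')
plt.ylabel('Velocity, [m/s]')
plt.xlabel('[s]')
plt.show()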
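
The body of the actuation model is elided by the diff; only distanceRotorCOG and cf are visible, and the rotor-torque coefficient (called cm below) is an assumption. Purely as an illustration of how such constants are commonly assembled into a thrust-allocation matrix for a quadrotor-style base, and not necessarily what DifferentialActionModelUAM does:

import numpy as np

d = 0.1525   # rotor arm length, distanceRotorCOG in the script
cf = 6.6e-5  # thrust coefficient: f_i = cf * w_i**2 (assumed convention)
cm = 1e-6    # rotor drag/torque coefficient -- hypothetical value, not from the script

# Rows map squared rotor speeds [w1^2, w2^2, w3^2, w4^2] to
# [total thrust, roll torque, pitch torque, yaw torque] for a "+" layout
# with rotors 1..4 on the +x, +y, -x, -y arms.
A = np.array([[cf,      cf,     cf,      cf    ],
              [0.,      d * cf, 0.,     -d * cf],
              [-d * cf, 0.,     d * cf,  0.    ],
              [-cm,     cm,    -cm,      cm    ]])

w_sq = np.full(4, 4000.0 ** 2)      # example squared rotor speeds
wrench = np.dot(A, w_sq)            # thrust and torques applied at the base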
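
The timing around problem construction and fddp.solve() uses raw time.time() calls. An optional, standard-library-only helper that packages the same pattern (the label strings are the ones printed above; the helper itself is not part of the script or of Crocoddyl):

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Measure and report the wall-clock time of the enclosed block.
    start = time.time()
    yield
    print("TIME: %s, %s" % (label, time.time() - start))

# Equivalent usage to the timing in this commit:
#   with timed("Shooting problem"):
#       problem = ShootingProblem(x0, [runningModel] * T, terminalModel)
#   with timed("Solve time"):
#       fddp.solve()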