Aruco marker tracking scene
Tagged: Linux_ubuntu, SOFA_2012
Pasquale94, 15 September 2021 at 14:28
Hello everyone.
I will leave here my solution to track an object with an ArUco marker using a camera and to follow its movements in the simulation. I use an Intel RealSense D435 as the camera.
# Required imports
import os
import math
import numpy as np
import Sofa
import cv2
import cv2.aruco as aruco
import pyrealsense2 as rs

path = os.path.dirname(os.path.abspath(__file__))

# Choose in your script whether to activate the GUI or not
USE_GUI = True


def main():
    import SofaRuntime
    import Sofa.Gui

    root = Sofa.Core.Node("root")
    createScene(root)
    Sofa.Simulation.init(root)

    if not USE_GUI:
        for iteration in range(10):
            Sofa.Simulation.animate(root, root.dt.value)
    else:
        Sofa.Gui.GUIManager.Init("myscene", "qglviewer")
        Sofa.Gui.GUIManager.addGUI(root, __file__)
        Sofa.Gui.GUIManager.SetDimension(1080, 1080)
        Sofa.Gui.GUIManager.MainLoop(root)
        Sofa.Gui.GUIManager.closeGUI()


def createScene(root):
    root.gravity = [0, 0, 0]
    root.dt = 0.02

    root.addObject('DefaultVisualManagerLoop')
    root.addObject('DefaultAnimationLoop')
    root.addObject('VisualStyle', displayFlags="hideCollisionModels showVisualModels showForceFields")
    root.addObject('RequiredPlugin', pluginName="SofaImplicitOdeSolver SofaConstraint SofaLoader SofaOpenglVisual SofaBoundaryCondition SofaGeneralLoader SofaGeneralSimpleFem SoftRobots SofaPython3 SofaSparseSolver SofaPreconditioner SofaMiscCollision SofaExporter SofaMiscFem SofaMiscForceField")

    root.addObject('DefaultPipeline', name="CollisionPipeline")
    root.addObject('BruteForceDetection', name="N2")
    root.addObject('DefaultContactManager', name="CollisionResponse", response="default")
    root.addObject('DiscreteIntersection')
    root.addObject('LocalMinDistance', name='Proximity', alarmDistance=5, contactDistance=1, angleCone=0.0)
    root.addObject('BackgroundSetting', color=[0, 0.168627, 0.211765])

    newSphere = root.addChild('Sphere')
    newSphere.addObject('EulerImplicitSolver')
    newSphere.addObject('CGLinearSolver', threshold='1e-09', tolerance='1e-09', iterations='200')
    newSphere.addObject('MeshVTKLoader', name='sphereloader', filename=path + '/mesh/cube.vtu')
    newSphere.addObject('MechanicalObject', src='@sphereloader', name='tetrasphere', template='Vec3d', listening=True)
    newSphere.addObject('TetrahedronSetTopologyContainer', src='@sphereloader', name='topo')
    newSphere.addObject('TetrahedronSetTopologyModifier', name='Modifier')
    newSphere.addObject('TetrahedronSetGeometryAlgorithms', drawTriangles='1', name='GeomAlgo', template='Vec3d')
    newSphere.addObject('UniformMass', totalMass="1")
    newSphere.addObject(Controller(node=root, translation=root.Sphere.tetrasphere.position))

    collisionsphere = newSphere.addChild('collisionSphere')
    collisionsphere.addObject('MeshObjLoader', name='loadersphere', filename=path + '/mesh/cube.obj')
    collisionsphere.addObject('MeshTopology', src='@loadersphere', name='topo')
    collisionsphere.addObject('MechanicalObject', name='collisMech')
    # collisionsphere.addObject('SphereCollisionModel', selfCollision=False)
    # collisionsphere.addObject('LineCollisionModel', selfCollision=False)
    collisionsphere.addObject('PointCollisionModel', selfCollision=False)
    collisionsphere.addObject('BarycentricMapping')

    modelVisu = newSphere.addChild('visu')
    modelVisu.addObject('MeshObjLoader', name='loader', filename=path + '/mesh/cube.obj')
    modelVisu.addObject('OglModel', src='@loader', color='yellow')
    modelVisu.addObject('BarycentricMapping')

    return root


class Controller(Sofa.Core.Controller):
    """
    This controller reads the pose of the ArUco marker from the RealSense camera at
    every animation step and moves the cube positions by the marker translation.
    """

    def __init__(self, *args, **kwargs):
        Sofa.Core.Controller.__init__(self, *args, **kwargs)
        self.translation = kwargs['translation']
        self.traslazione = np.array((0, 0, 0)).reshape(1, 3)

    def onAnimateEndEvent(self, event):
        # Configure the RealSense colour stream (re-opened at every animation step)
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)

        # Start streaming and grab one frame
        pipeline.start(config)
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)

        # ArUco marker detection
        markerSize = 5
        totalMarkers = 250
        key = getattr(aruco, f'DICT_{markerSize}X{markerSize}_{totalMarkers}')
        arucoDict = aruco.Dictionary_get(key)
        arucoParam = aruco.DetectorParameters_create()
        bboxs, ids, rejected = aruco.detectMarkers(gray, arucoDict, parameters=arucoParam)

        # Intel RealSense camera matrix
        mtx = [[613.8421020507812, 0, 321.9654541015625],
               [0, 614.451416015625, 249.003173828125],
               [0, 0, 1]]
        mtx = np.array(mtx).reshape(3, 3)

        # Distortion coefficients of the camera
        dist = [-1.71057285e-02, -7.15934343e-05, -1.00114895e-03, 1.15475573e-02, 5.46266277e-07]
        dist = np.array(dist).reshape(5, 1)

        rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(bboxs, 0.05, mtx, dist)
        print(ids)

        # If the marker with ID 0 is detected, track its translation
        if ids is not None and ids[0][0] == 0:
            self.tvecs = np.array(tvecs).reshape(1, 3)
            self.traslazione = np.hstack((self.traslazione, self.tvecs))
            size = np.size(self.traslazione)
            print(size)
            if size == 3:
                # Move the cube by the marker translation (metres converted to millimetres)
                for i in range(0, len(self.translation)):
                    self.translation[i][0] = self.translation[i][0] + self.tvecs[0][0] * 1000
                    self.translation[i][1] = self.translation[i][1] + self.tvecs[0][1] * 1000
                    self.translation[i][2] = self.translation[i][2] + self.tvecs[0][2] * 1000
            if size >= 6:
                # Move the cube by the difference from the previous sample, ignoring changes below 2 mm
                self.trasl = [self.traslazione[0][size - 6], self.traslazione[0][size - 5], self.traslazione[0][size - 4]]
                self.trasl = np.array(self.trasl).reshape(1, 3)
                if np.linalg.norm((self.trasl - self.tvecs) * 1000) >= 2:
                    for i in range(0, len(self.translation)):
                        self.translation[i][0] = self.translation[i][0] + (self.trasl[0][0] - self.tvecs[0][0]) * 1000
                        self.translation[i][1] = self.translation[i][1] + (self.trasl[0][1] - self.tvecs[0][1]) * 1000
                        self.translation[i][2] = self.translation[i][2] + (self.trasl[0][2] - self.tvecs[0][2]) * 1000

        k = cv2.waitKey(60) & 0xff
        pipeline.stop()


# Used only if this script is launched directly from a Python environment
if __name__ == '__main__':
    main()
For now, the code works only for translation.
I tried to apply a rotation matrix to the mesh positions using the rvecs variable from the estimated marker pose: the cube in the scene rotates, but it becomes “flat”. I also tried to put rvecs in the rotation field of the cube's MechanicalObject, but nothing happens.
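In case it helps the discussion, here is a rough, untested sketch of what I mean: converting rvecs to a rotation matrix with cv2.Rodrigues and rotating the mesh positions around their centroid (rotating the absolute coordinates around the world origin instead of the cube's own centre is one possible cause of the “flattening”). The helper name apply_marker_rotation and the use of position.writeable() are just assumptions on my side, not working code from my scene:

import numpy as np
import cv2

def apply_marker_rotation(positions, rvec):
    # Convert the Rodrigues vector from estimatePoseSingleMarkers into a 3x3 rotation matrix
    R, _ = cv2.Rodrigues(np.asarray(rvec, dtype=np.float64).reshape(3, 1))
    pts = np.asarray(positions, dtype=np.float64)   # (N, 3) mesh positions
    centroid = pts.mean(axis=0)
    # Rotate around the centroid so the cube keeps its shape
    return (pts - centroid) @ R.T + centroid

# Possible use inside the controller (untested), with the pose of marker 0:
# with self.translation.writeable() as pos:
#     pos[:] = apply_marker_rotation(pos, rvecs[0][0])

One thing I am not sure about is that applying the full rotation at every animation step would make it accumulate, so probably only the incremental rotation between two frames (or a rotation of the rest positions) should be applied.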
So I want to ask the devs if they can help me improve the scene.
I tag @hugo as reference.
Thanks in advance, everyone.