Commit 6d8f8985 authored by Yuxuan Yang's avatar Yuxuan Yang
Browse files

Merge branch 'master' into 'master'

Master

See merge request !2
parents 4679d496 5ae2ec47
......@@ -4,9 +4,7 @@ Yuxuan Yang, Johannes A. Stork, and Todor Stoyanov
## TODO
- [x] test fork from public repo and merge request
- [ ] combine all training models and data-generating Python scripts into two files (one for the position-twist representation, the other for the position-quaternion representation)
- [ ] add results analysis and plotting scripts
- [ ] python script for animation based on pyBullet (pyBullet setup instruction)
- [x] python script for animation based on pyBullet
- [ ] model predictive control script (ROS and Bullet(C++) setup instruction)
## Demo
......@@ -20,19 +18,53 @@ control based on our learned model
![](imgs/control_1.gif) ![](imgs/control_2.gif)
## Installation
This codebase is tested with Ubuntu 18.04 LTS, Python 3.7.4, PyTorch 1.7.1, and CUDA 10.2
Dependencies:
kornia == 0.5.6 (https://kornia.github.io/)
pybullet
### Install Dependencies if using Conda
numpy == 1.19.3
### Install Dependencies if using Docker
scipy == 1.7.3
tqdm
## Evaluation
----------
There is a trained model in the folder ./trained_models/bullet/epoch_best.pth.
The corresponding data can be downloaded here (https://cloud.oru.se/s/m6NJp6Z6jPqpnHB)
Put the data in ./data/bullet/
run
## Training
`python gen_rollout.py`
to generate data, and run
## Citing
`python visualization.py`
to visualize the generated data and the corresponding ground truth.
## Training
-----------
If you want to train it yourself, you can also download the data mentioned in previous section, and run
`bash ./scripts/train_inbilstm_pt_action.sh`
## Citing
-----------
If you find this codebase useful in your research, please consider citing:
@inproceedings{yang2021learning,
title={Learning to Propagate Interaction Effects for Modeling Deformable Linear Objects Dynamics},
author={Yang, Yuxuan and Stork, Johannes A. and Stoyanov, Todor},
booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},
pages={1950--1957},
year={2021},
organization={IEEE}
}
......@@ -16,4 +16,41 @@ class RopeDataset(Dataset):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.len
\ No newline at end of file
return self.len
class RopeDataset_2step(Dataset):
    """Dataset of consecutive state triples (t, t+1, t+2) from rollout data.

    ``data`` is expected shaped (cases, steps, particles, features); every
    sample flattens the trailing feature axis per particle and is cast to
    float32 for the network.
    """

    def __init__(self, data):
        n_cases, n_steps = data.shape[0], data.shape[1]
        n_particles = data.shape[-2]
        # Each case contributes (steps - 2) overlapping triples.
        self.len = n_cases * (n_steps - 2)

        def as_samples(window):
            # Collapse (cases, steps-2) into one sample axis, keep particles.
            return window.reshape([self.len, n_particles, -1]).astype(np.float32)

        self.x0_data = as_samples(data[:, :-2, :, :])
        self.x1_data = as_samples(data[:, 1:-1, :, :])
        self.y_data = as_samples(data[:, 2:, :, :])

    def __getitem__(self, index):
        """Return the (state_t, state_t+1, state_t+2) triple at ``index``."""
        return self.x0_data[index], self.x1_data[index], self.y_data[index]

    def __len__(self):
        """Number of samples (cases * (steps - 2))."""
        return self.len
class RopeDataset_2step_gpu(Dataset):
    """GPU-resident variant of RopeDataset_2step.

    All three tensors are moved to CUDA once at construction time, so
    indexing never copies host-to-device during training.
    """

    def __init__(self, data):
        n_cases, n_steps = data.shape[0], data.shape[1]
        n_particles = data.shape[-2]
        # Each case contributes (steps - 2) overlapping triples.
        self.len = n_cases * (n_steps - 2)
        data = torch.tensor(data.astype(np.float32))

        def to_device(window):
            # Collapse (cases, steps-2) into one sample axis, keep particles.
            return window.reshape([self.len, n_particles, -1]).cuda()

        self.x0_data = to_device(data[:, :-2, :, :])
        self.x1_data = to_device(data[:, 1:-1, :, :])
        self.y_data = to_device(data[:, 2:, :, :])

    def __getitem__(self, index):
        """Return the (state_t, state_t+1, state_t+2) triple at ``index``."""
        triple = self.x0_data[index], self.x1_data[index], self.y_data[index]
        return triple

    def __len__(self):
        """Number of samples (cases * (steps - 2))."""
        return self.len
import os
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
# import kornia
from utils import *
from RopeDataset import RopeDataset
from models import InteractionNetwork_bilstm_rel_twist
from tqdm import tqdm
import argparse
from scipy.spatial.transform import Rotation as R
def convert_pt2pq(data_orig, offset_q, controlled_q, len_segment, twist_axis):
    """Convert a position+twist rope state into a position+quaternion state.

    Args:
        data_orig: array of shape (cases, steps, nodes, state); state[:3]
            are node positions, state[3] is the twist angle per node.
        offset_q: base quaternion of the first segment, used to seed the
            iterative orientation recovery.
        controlled_q: quaternion written directly into the last (controlled)
            segment of the output.
        len_segment: length of one cylinder segment (scalar).
        twist_axis: 'y' or 'z' -- local axis about which twist is applied.

    Returns:
        Array of shape (cases, steps, nodes-1, 13) with segment positions in
        channels [0:3] and quaternions in channels [6:10]; the remaining
        channels are left zero (velocity slots are not recovered here).

    NOTE(review): `R.from_euler(axis, twist_orig[:,:,i+1])` passes a
    (cases, steps) angle array; callers in this file invoke the function with
    single-step slices, which is presumably what makes the shapes line up
    with the flattened (cases*steps) quaternion arrays -- confirm before
    calling with multi-step inputs.
    """
    n_cases, n_steps, n_nodes, _ = data_orig.shape
    # get relative twist
    twist_orig = data_orig[:,:,:,3]
    # Segments are modeled as cylinders with heights along cylinders' y axes
    y_axis =(data_orig[:,:,1:,:3]-data_orig[:,:,:-1,:3])
    y_axis = y_axis / np.linalg.norm(y_axis,axis=-1,keepdims=True)
    # calculate positions of the segments' center of mass
    p0 = data_orig[:,:,1:,:3]
    delta_p = y_axis * len_segment * 0.5
    positions = p0 - delta_p
    # calculate orientations of the segments through modified Euler angle (Ry(a)Rx'(b)Ry(-a)Ry(c))
    # two steps: bending (Ry(a)Rx'(b)Ry(-a)); twisting (Ry(c))
    # 1. calculate relative rotations with respect to bending
    y_axis_0 = y_axis[:,:,:-1]
    y_axis_1 = y_axis[:,:,1:]
    angles_cos = np.sum(y_axis_0.reshape(-1,3)*y_axis_1.reshape(-1,3),axis=-1, keepdims=True)
    angles = np.arccos(angles_cos)
    # NOTE(review): perfectly parallel consecutive segments give a zero
    # cross product and hence a NaN axis after normalization -- presumably
    # never occurs for a bent rope; confirm.
    rot_axis = (np.cross(y_axis_0,y_axis_1)).reshape(-1,3)
    rot_axis = rot_axis / np.linalg.norm(rot_axis,axis=-1,keepdims=True)
    rot_vect = angles*rot_axis
    rel_R = R.from_rotvec(rot_vect)
    rel_rotM = rel_R.as_matrix().reshape(n_cases*n_steps,n_nodes-2,3,3)
    # Seed the chain with the supplied base orientation, one copy per
    # (case, step) pair.
    base_q = offset_q
    base_q = np.tile(base_q,(n_cases*n_steps,1))
    recovered_q = np.zeros((n_cases*n_steps,n_nodes-1,4))
    recovered_q[:,0]=base_q
    # iteratively calculate the orientations of the segments: each segment's
    # orientation = relative bending rotation * previous orientation * twist
    for i in range(n_nodes-2):
        if twist_axis == 'y':
            recovered_q[:,i+1]=(R.from_matrix(rel_rotM[:,i])*R.from_quat(recovered_q[:,i])*R.from_euler('y',twist_orig[:,:,i+1])).as_quat()
        elif twist_axis == 'z':
            recovered_q[:,i+1]=(R.from_matrix(rel_rotM[:,i])*R.from_quat(recovered_q[:,i])*R.from_euler('z',twist_orig[:,:,i+1])).as_quat()
    recovered_q = recovered_q.reshape(n_cases,n_steps,n_nodes-1,4)
    # construct data, (cases, steps, nodes, state), state: position(0:3), orientation(6:10)
    recovered_data = np.zeros((n_cases, n_steps, n_nodes-1,13))
    recovered_data[:,:,:,:3] = positions
    recovered_data[:,:,:,6:10] = recovered_q
    # The last segment is directly controlled; overwrite its orientation.
    recovered_data[:,:,-1,6:10] = controlled_q
    return recovered_data
def gen_data(model, criterion, simulate_time_step, data_gt, data_gt_idx, stat_r, stat_v, stat_a, len_segment, twist_axis):
    """Roll out the learned model for one scenario and score it against ground truth.

    Args:
        model: trained interaction network (on CUDA) taking prepared relation
            /object tensors and normalization stats.
        criterion: loss module (e.g. MSELoss) applied in normalized space.
        simulate_time_step: number of steps to simulate.
        data_gt: ground-truth states, shape (1, steps+1, nodes, 13).
        data_gt_idx: scenario index (unused inside this function).
        stat_r, stat_v, stat_a: normalization statistics (mean/std tensors)
            for relation vectors, node states, and accelerations.
        len_segment: per-axis segment-length array; index 1 ('y') or 2 ('z')
            is forwarded to convert_pt2pq.
        twist_axis: 'y' or 'z'.

    Returns:
        (predicted rollout of shape like data_gt, average loss per step).

    NOTE(review): relies on module-level globals ``n_objects``/``n_relations``
    and on ``prepare_data_pt_rel_twist`` from utils. Also note that the loss
    is computed against ``target_gt`` (ground-truth-conditioned targets), not
    the rollout-conditioned ``target`` -- presumably intentional for
    reporting; confirm.
    """
    data_gt_denorm = data_gt
    # Rollout is seeded with the first two ground-truth frames.
    batch_data_pred_0 = data_gt_denorm[:,0,:,:]
    batch_data_pred_1 = data_gt_denorm[:,1,:,:]
    data_pred_denorm = np.zeros((data_gt_denorm.shape[0],simulate_time_step,data_gt_denorm.shape[2],data_gt_denorm.shape[3]))
    data_pred_denorm[:,0,:,:] = batch_data_pred_0
    data_pred_denorm[:,1,:,:] = batch_data_pred_1
    # Parallel buffer in position+twist representation (nodes+1, 8 channels).
    data_pred_denorm_pt = np.zeros((data_gt_denorm.shape[0],simulate_time_step,data_gt_denorm.shape[2]+1,8))
    losses = 0
    for i in range(1,simulate_time_step-1):
        label_data = data_gt[:,i+1,:,:]
        label_data = torch.Tensor(label_data).cuda()
        batch_data_pred_0 = torch.Tensor(batch_data_pred_0).cuda()
        batch_data_pred_1 = torch.Tensor(batch_data_pred_1).cuda()
        # Ground-truth-conditioned inputs, used only to extract target_gt.
        batch_data_pred_0_gt = torch.Tensor(data_gt_denorm[:,i-1,:,:]).cuda()
        batch_data_pred_1_gt = torch.Tensor(data_gt_denorm[:,i,:,:]).cuda()
        object_for_relation_net, object_for_object_net, sender_relations, receiver_relations, target_gt, _ = prepare_data_pt_rel_twist(1, batch_data_pred_0_gt, batch_data_pred_1_gt, label_data, n_objects, n_relations, len_segment, twist_axis)
        if i == 1:
            # First step: no prediction history yet, feed ground truth.
            batch_data_pred_0 = torch.Tensor(data_gt[:,i-1,:,:]).cuda()
            batch_data_pred_1 = torch.Tensor(data_gt[:,i,:,:]).cuda()
            object_for_relation_net, object_for_object_net, sender_relations, receiver_relations, target, _ = prepare_data_pt_rel_twist(1, batch_data_pred_0, batch_data_pred_1, label_data, n_objects, n_relations, len_segment, twist_axis)
        else:
            object_for_relation_net, object_for_object_net, sender_relations, receiver_relations, target, _ = prepare_data_pt_rel_twist(1, batch_data_pred_0, batch_data_pred_1, label_data, n_objects, n_relations, len_segment, twist_axis)
        # object_for_relation_net[:,:,:8] = torch.tensor(data_pred_denorm_pt[0,i,:,:8]).cuda()
        # Normalize the acceleration channels in place.
        object_for_relation_net[:,:,8:12] = (object_for_relation_net[:,:,8:12]-stat_a[0])/stat_a[1]
        predicted = model(object_for_relation_net,object_for_object_net, sender_relations, receiver_relations,stat_r, stat_v, 0)
        # Score against the ground-truth-conditioned target (see NOTE above).
        target = target_gt
        target_n = (target - stat_v[0,4:])/stat_v[1,4:]
        # denormalize
        predicted_denorm = predicted * stat_v[1,4:] + stat_v[0,4:]
        predicted_denorm = predicted_denorm.cpu().detach().numpy()
        loss = criterion(predicted[:,0:4], target_n[:,0:4])
        losses+= loss.item()
        # update
        # (positions of first two and last two nodes are known, they are controlled, so do the twist of first and last node (they are zeros) )
        predicted_denorm[:2,:3] = target.cpu().numpy()[:2,:3]
        predicted_denorm[:1,3] = target.cpu().numpy()[:1,3]
        predicted_denorm[-2:,:3] = target.cpu().numpy()[-2:,:3]
        predicted_denorm[-1:,3] = target.cpu().numpy()[-1:,3]
        # Integrate: next pt-state = current pt-state + predicted delta.
        data_pred_denorm_pt[:,i+1,:,:4] = object_for_relation_net[0,:,:4].detach().cpu().numpy() + predicted_denorm
        data_pred_denorm_pt[0,i+1,:,4:] = predicted_denorm
        # Convert the position+twist prediction back to position+quaternion.
        if twist_axis == 'y':
            data_pred_denorm[:,i+1:i+2] = convert_pt2pq(data_pred_denorm_pt[:,i+1:i+2], data_gt[:,i+1,0,6:10],data_gt[:,i+1,-1,6:10], len_segment[1], twist_axis)
        elif twist_axis == 'z':
            data_pred_denorm[:,i+1:i+2] = convert_pt2pq(data_pred_denorm_pt[:,i+1:i+2], data_gt[:,i+1,0,6:10],data_gt[:,i+1,-1,6:10], len_segment[2], twist_axis)
        # update constrained nodes (first and last segments are controlled)
        # didn't update velocities which are not important here
        data_pred_denorm[0,i+1,-1:] = data_gt[0,i+1,-1:]
        data_pred_denorm[0,i+1,:1] = data_gt[0,i+1,:1]
        # Shift the two-frame window forward for the next iteration.
        batch_data_pred_0 = batch_data_pred_1.detach().cpu().numpy()
        batch_data_pred_1 = data_pred_denorm[:,i+1]
    print("aver loss:",losses/simulate_time_step)
    return data_pred_denorm,losses/simulate_time_step
if __name__ == '__main__':
    # Rollout-generation script: loads a trained interaction network and
    # simulates validation scenarios, saving the predicted trajectories.
    # --- load data ---
    parser = argparse.ArgumentParser()
    parser.add_argument('--pair_wise_type', default='rel', help='relative state| full state concatenate, pair-wise relation type') # 'rel' or 'full'
    parser.add_argument('--num_rnnlayer', type=int, default=2, help="the number of bilstm layers")
    args = parser.parse_args()
    # First `train_scenario` cases were used for training; the rest validate.
    train_scenario = 2700
    DATA_PATH = os.getcwd()+'/data/bullet/'
    data_file = DATA_PATH + 'data_orig.npy'
    data = np.load(data_file)
    if data.shape[-1] == 7:
        # Pad a 7-dim state (pos 3 + quat 4) out to the expected 13 dims by
        # inserting zero linear/angular velocity channels.
        print('fix state to 13 dims')
        cases, steps, nodes, _ = data.shape
        zero_array = np.zeros((cases,steps,nodes,3))
        data = np.concatenate([data[:,:,:,:3], zero_array, data[:,:,:,3:], zero_array],-1)
    train_data = data[0:train_scenario]
    valid_data = data[train_scenario:]
    print('training_data shape', train_data.shape)
    print('valid_data shape',valid_data.shape)
    # DATA_PATH = "/home/yuxuan/code/test/gitlab/IN4Rope/rope_data/synetic_bullet/"
    stat_r = np.load(DATA_PATH+"relation_vector_stat_pt.npy") # stat of relative position, relative twist, relative delta pos, relative delta twist
    stat_a = np.load(DATA_PATH+"a_stat_pt.npy")
    stat_v = np.load(DATA_PATH+"v_stat_pt.npy") # stat of position, twist, delta pos, delta twist
    ################################
    ### properties about the dlo ###
    # Per-axis segment lengths; only index 1 ('y') is nonzero here.
    len_segment = np.array([0, 0.21,0])
    twist_axis = 'y'
    n_objects = 16+1
    #################
    ################################
    ### paramters about simulation and network ###
    simulate_time_step = 298
    num_layer = args.num_rnnlayer # 1,2,3
    hidden_size = 150
    # Bidirectional chain: each interior node has two relations.
    n_relations = (n_objects-1)*2
    ####################
    if args.pair_wise_type == 'rel':
        model_parameter_path = os.getcwd()+'/trained_models/bullet/epoch_best.pth'
        SAVE_DATA_FOLDER = os.getcwd()+"/generated_data/bullet_new/"
        object_net_dim = 4+3 # [dx,dy,dz, dtwist, attri(3)]
        relation_net_dim = 8+(4+3)*2 # [x,y,z,twist, (dx,dy,dz,dtwist)*2, attri(3)*2]
        network_chosen = InteractionNetwork_bilstm_rel_twist
    else:
        raise AssertionError("Unsupported pair-wise relation type, either relative ('rel') or full ('full')")
    network = network_chosen(relation_net_dim, object_net_dim, hidden_size, num_layer)
    network.load_state_dict(torch.load(model_parameter_path, map_location='cpu'))
    network.eval()
    network = network.cuda()
    # criterion = nn.L1Loss()
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    stat_r_ts = torch.Tensor(stat_r).cuda()
    stat_v_ts = torch.Tensor(stat_v).cuda()
    stat_a_ts = torch.Tensor(stat_a).cuda()
    os.system('mkdir -p ' + SAVE_DATA_FOLDER)
    cases_idx = list(range(0,20)) # index for the cases in validation dataset
    data_generated_all = np.zeros([len(cases_idx),simulate_time_step,n_objects-1,13])
    losses = []
    for i, scenrio in tqdm(enumerate(cases_idx)):
        print('generating secnario',scenrio)
        start_step = 0 #
        # One scenario at a time: slice out simulate_time_step+1 frames.
        data_gt = np.array([valid_data[scenrio,start_step:start_step+simulate_time_step+1,:,:]])
        data_gt_idx = [scenrio]
        data_generated, loss = gen_data(network, criterion, simulate_time_step, data_gt, data_gt_idx, stat_r_ts, stat_v_ts,stat_a_ts, len_segment, twist_axis)
        data_generated_all[i,:,:,:]=data_generated[0].copy()
        losses.append(loss)
    np.save(SAVE_DATA_FOLDER+"data_generated_all_valid_rollout",data_generated_all)
    print('data save to ',SAVE_DATA_FOLDER+"data_generated_all_valid_rollout.npy")
import os
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
# import kornia
from utils import *
from torch.utils.tensorboard import SummaryWriter
import tqdm
from RopeDataset import RopeDataset
from models import InteractionNetwork_bilstm, InteractionNetwork_bilstm_full
from tqdm import tqdm
import argparse
def gen_data(model, criterion, simulate_time_step, data_gt, data_gt_idx, stat_r, stat_v):
    """Roll out the position-twist model for one scenario.

    Args:
        model: trained interaction network (on CUDA).
        criterion: loss module applied in normalized space.
        simulate_time_step: number of steps to simulate.
        data_gt: ground-truth states, shape (1, steps+1, nodes, 8).
        data_gt_idx: scenario index (unused inside this function).
        stat_r, stat_v: normalization statistics (mean/std tensors) for
            relation vectors and node states.

    Returns:
        (predicted rollout of shape (1, steps+1, nodes, 8),
         average loss per step).

    NOTE(review): relies on module-level globals ``n_objects``/``n_relations``
    and on ``prepare_data_pt`` from utils.
    """
    data_gt_denorm = data_gt
    # Rollout is seeded with the first ground-truth frame.
    batch_data_pred = data_gt_denorm[:,0,:,:]
    data_pred_denorm = np.zeros((data_gt_denorm.shape[0],simulate_time_step+1,data_gt_denorm.shape[2],data_gt_denorm.shape[3]))
    data_pred_denorm[:,0,:,:] = data_gt_denorm[:,0,:,:]
    losses = 0
    for i in range(0,simulate_time_step):
        label_data = data_gt[:,i+1,:,:]
        label_data = torch.Tensor(label_data).cuda()
        batch_data_pred = torch.Tensor(batch_data_pred).cuda()
        object_for_relation_net, object_for_object_net, sender_relations, receiver_relations, target = prepare_data_pt(1, batch_data_pred, label_data, n_objects, n_relations)
        predicted = model(object_for_relation_net,object_for_object_net, sender_relations, receiver_relations,stat_r, stat_v, 0)
        # Loss is computed in normalized delta space (channels 4:).
        target = (target - stat_v[0,4:])/stat_v[1,4:]
        # denormalize
        predicted_denorm = predicted * stat_v[1,4:] + stat_v[0,4:]
        predicted_denorm = np.array(predicted_denorm.cpu().detach().numpy())
        loss = criterion(predicted, target)
        losses+= loss.item()
        # update: next state = current state + predicted delta; also store
        # the raw delta in channels 4:.
        data_pred_denorm[0,i+1,:,:4] = batch_data_pred[0,:,:4].detach().cpu().numpy() + predicted_denorm
        data_pred_denorm[0,i+1,:,4:] = predicted_denorm
        # update constrained nodes (first/last two nodes are controlled,
        # so their states come straight from ground truth)
        data_pred_denorm[0,i+1,-2:] = data_gt[:,i+1,-2:]
        data_pred_denorm[0,i+1,:2] = data_gt[:,i+1,:2]
        # Feed the prediction back in for the next step (closed loop).
        batch_data_pred = data_pred_denorm[:,i+1]
    print("aver loss:",losses/simulate_time_step)
    return data_pred_denorm,losses/simulate_time_step
if __name__ == '__main__':
    # Rollout-generation script for the position-twist representation:
    # loads a trained bilstm interaction network ('rel' or 'full' pair-wise
    # type) and simulates 300 validation scenarios.
    # --- load data ---
    parser = argparse.ArgumentParser()
    parser.add_argument('--pair_wise_type', default='rel', help='relative state| full state concatenate, pair-wise relation type') # 'rel' or 'full'
    parser.add_argument('--num_rnnlayer', type=int, default=2, help="the number of bilstm layers")
    args = parser.parse_args()
    timestep_per_scenario = 300
    timestep_per_scenario_train = timestep_per_scenario-1
    # First `train_scenario` cases were used for training; the rest validate.
    train_scenario = 2700
    DATA_PATH = os.getcwd()+'/data/'
    data_file = DATA_PATH + 'data_pt.npy'
    data = np.load(data_file)
    train_data = data[0:train_scenario]
    valid_data = data[train_scenario:]
    print('training_data', train_data.shape)
    print('valid_data',valid_data.shape)
    stat_r = np.load(DATA_PATH+"relation_vector_stat_pt.npy") # stat of relative position, relative twist, relative delta pos, relative delta twist
    stat_v = np.load(DATA_PATH+"v_stat_pt.npy") # stat of position, twist, delta pos, delta twist
    #################
    simulate_time_step = 299
    num_layer = args.num_rnnlayer # 1,2,3
    hidden_size = 150
    n_objects = 16+1
    # Bidirectional chain: each interior node has two relations.
    n_relations = (n_objects-1)*2
    if args.pair_wise_type == 'rel':
        model_parameter_path = os.getcwd()+'/trained_models/inbilstm_pt.pth'
        SAVE_DATA_FOLDER = os.getcwd()+"/generated_data/inbilstm_pt"
        object_net_dim = 4+2 # [dx,dy,dz, dtwist, attri(2)]
        relation_net_dim = 8+2*2 # [x,y,z,twist, dx,dy,dz,dtwist, attri(2)]
        network_chosen = InteractionNetwork_bilstm
    elif args.pair_wise_type == 'full':
        model_parameter_path = os.getcwd()+'/trained_models/inbilstm_pt_full.pth'
        SAVE_DATA_FOLDER = os.getcwd()+"/generated_data/inbilstm_pt_full"
        object_net_dim = 4+2 # [dx,dy,dz, dtwist, attri(2)]
        relation_net_dim = 8*2+2*2 # [x,y,z,twist, dx,dy,dz,dtwist, attri(2)]*2
        network_chosen = InteractionNetwork_bilstm_full
        # The 'full' network consumes raw states, so relation stats are the
        # state stats.
        stat_r = stat_v
    else:
        raise AssertionError("Unsupported pair-wise relation type, either relative ('rel') or full ('full')")
    network = network_chosen(relation_net_dim, object_net_dim, hidden_size, num_layer)
    network.load_state_dict(torch.load(model_parameter_path))
    network.eval()
    network = network.cuda()
    # criterion = nn.L1Loss()
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    stat_r_ts = torch.Tensor(stat_r).cuda()
    stat_v_ts = torch.Tensor(stat_v).cuda()
    os.system('mkdir -p ' + SAVE_DATA_FOLDER)
    data_generated_all = np.zeros([300,simulate_time_step+1,n_objects,8])
    losses = []
    for i, scenrio in tqdm(enumerate(list(range(300)))):
        print('generating secnario',scenrio)
        start_step = 0 #
        # One scenario at a time: slice out simulate_time_step+1 frames.
        data_gt = np.array([valid_data[scenrio,start_step:start_step+simulate_time_step+1,:,:]])
        data_gt_idx = [scenrio]
        data_generated, loss = gen_data(network, criterion, simulate_time_step, data_gt, data_gt_idx, stat_r_ts, stat_v_ts)
        data_generated_all[i,:,:,:]=data_generated[0].copy()
        losses.append(loss)
    np.save(SAVE_DATA_FOLDER+"data_generated_all_valid_rollout",data_generated_all)
......@@ -88,19 +88,20 @@ class ParticlePredictor(nn.Module):
B, N, D = x.size()
x = x.view(B * N, D)
x = self.linear_1(self.relu(self.linear_0(x)))
x = self.linear_2(self.relu(self.linear_1(x)))
# x = self.linear_2(self.relu(self.linear_1(x))) # original code, repeated linear_1
x = self.linear_2(self.relu((x)))
x = self.linear_3(self.relu(x))
if self.residual:
x = x + res.view(B * N, self.output_size)
return x
class InteractionNetwork_bilstm(nn.Module):
class InteractionNetwork_bilstm_rel_twist(nn.Module):
'''
'''
def __init__(self, relation_net_dim, object_net_dim, hidden_size, num_layers):
super(InteractionNetwork_bilstm, self).__init__()
super(InteractionNetwork_bilstm_rel_twist, self).__init__()
self.state_encoder_model = ParticleEncode(object_net_dim, hidden_size, hidden_size)
self.relational_model = RelationalModel(relation_net_dim, hidden_size, hidden_size)
......@@ -126,88 +127,25 @@ class InteractionNetwork_bilstm(nn.Module):
receivers = receiver_relations_t.bmm(object_for_relation_net)
# relation-centric network
delta = senders[:,:,:-2] - receivers[:,:,:-2]
delta = (delta-stat_r[0])/stat_r[1]
delta = senders[:,:,:8] - receivers[:,:,:8]
delta = (delta-stat_r[0])/stat_r[1]
input_for_relation_network = torch.cat([delta,senders[:,:,-2:],receivers[:,:,-2:]],2)
input_for_relation_network = torch.cat([delta,senders[:,:,-7:],receivers[:,:,-7:]],2)
relation_encode = self.relational_model(input_for_relation_network)
# state-encoder network
object_for_object_net_norm = (object_for_relation_net[:,:,4:-2]- stat_v[0,4:])/stat_v[1,4:]
object_for_object_net_norm = torch.cat([object_for_object_net_norm,object_for_relation_net[:,:,-2:]],axis=-1)
encodered_state = self.state_encoder_model(object_for_object_net_norm)
# object-centric network
### propogate through bi-gru
effect_agg = receiver_relations.bmm(relation_encode)
rnn_input = effect_agg.transpose(0,1)
rnn_out, _ = self.bi_gru(rnn_input)
rnn_out = rnn_out.transpose(0,1).view(batch_size,n_objects,-1)
# print("effect_agg.size()",effect_agg.size())
predicted= self.particle_predictor(torch.cat([encodered_state, rnn_out], 2))
return predicted
class InteractionNetwork_bilstm_full(nn.Module):
'''
normalized relative state
prediction network only take velocity
only take pair-wise relation for comparision
'''
def __init__(self, relation_net_dim, object_net_dim, hidden_size, num_layers):
    """Build the relation, state-encoder, predictor, and recurrent modules.

    Args:
        relation_net_dim: input width of the relational model (full
            concatenated sender+receiver states).
        object_net_dim: input width of the per-particle state encoder.
        hidden_size: shared hidden width for all sub-networks.
        num_layers: number of recurrent layers.
    """
    super(InteractionNetwork_bilstm_full, self).__init__()
    self.state_encoder_model = ParticleEncode(object_net_dim, hidden_size, hidden_size)
    self.relational_model = RelationalModel(relation_net_dim, hidden_size, hidden_size)
    # input: (1) particle effect
    # Predictor consumes encoded state (hidden) + bidirectional rnn output
    # (2*hidden) and emits a 4-dim delta per particle.
    self.particle_predictor = ParticlePredictor(
        hidden_size+hidden_size*2, hidden_size, 4)
    # self.particle_predictor = ParticlePredictor(
    # 150, 150, 4, True)
    # NOTE(review): the attribute is named bi_gru but is an nn.LSTM.
    self.bi_gru = nn.LSTM(input_size=hidden_size,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          batch_first=False,
                          bidirectional=True)
def forward(self, object_for_relation_net, object_for_object_net, sender_relations, receiver_relations, stat_r, stat_v, pstep):
ori_size = object_for_relation_net.size()
batch_size, n_objects, n_state_all = ori_size # state_all = state + state_attr
sender_relations_t = sender_relations.permute(0, 2, 1)
receiver_relations_t = receiver_relations.permute(0, 2, 1)
n_object_for_relation_net = (object_for_relation_net[:,:,:-2] - stat_v[0]) / stat_v[1]
n_object_for_relation_net = torch.cat([n_object_for_relation_net,object_for_relation_net[:,:,-2:]],-1)
senders = sender_relations_t.bmm(n_object_for_relation_net)
receivers = receiver_relations_t.bmm(n_object_for_relation_net)
# relation-centric network
input_for_relation_network = torch.cat([senders,receivers],2)
relation_encode = self.relational_model(input_for_relation_network)