TensorFlow's GPU utilization is low, and I don't know why.
When the GPU is working, the PC case makes a fairly high-pitched grinding noise.
I'm not doing anything else that puts load on the GPU, so this is the first time it has happened.
Is this something to worry about?
The GPU itself seems usable, probably (I checked with a snippet of about two lines of code).
Environment
OS: Windows
TensorFlow: 2.2
CUDA: 10.1
cuDNN: 7.6.5
GPU: RTX 2080 Ti
The code that showed the low utilization just runs a fairly large model about 2,000 times with random matrices as input.
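For reference, a minimal sketch of how I imagine the utilization could be polled from Python while the model runs (this assumes nvidia-smi is on the PATH; the helper name is just for illustration and is not part of my code):

import subprocess
import time

# Hypothetical helper: print the GPU utilization reported by nvidia-smi once per
# second (assumes the NVIDIA driver's nvidia-smi is available on the PATH).
def poll_gpu_utilization(seconds=10):
    for _ in range(seconds):
        out = subprocess.run(
            ["nvidia-smi", "--query-gpu=utilization.gpu",
             "--format=csv,noheader,nounits"],
            capture_output=True, text=True, check=True)
        print("GPU utilization [%]:", out.stdout.strip())
        time.sleep(1)

poll_gpu_utilization()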
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
Result
device_type: "CPU"
device_type: "XLA_CPU"
device_type: "GPU"
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
Result
Num GPUs Available: 1
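As a further check I could run (a minimal sketch, assuming the standard TF 2.x API and a single GPU visible as '/GPU:0'), TensorFlow can log which device each op is actually placed on:

import tensorflow as tf

# Log which device each op is placed on (TF 2.x API).
tf.debugging.set_log_device_placement(True)

# Explicitly pin a small computation to the GPU; if placement is impossible,
# TF raises an error instead of silently falling back to the CPU.
with tf.device('/GPU:0'):
    x = tf.random.normal((2048, 2048))
    y = tf.matmul(x, x)
print(y.device)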
What I've tried
Googling → nothing came up
I don't know what else I should try.
Please tell me why the utilization is low.
Addendum: the code (parts are omitted because of the character limit)
class QNetwork:
    def __init__(self, learning_rate, state_size, action_size):
        self.input1 = Input(shape=(state_size.shape))
        self.a = Conv2D(32, kernel_size=(3, 3), padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.input1)
        self.a = Conv2D(32, (3, 3), strides=1, padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)
        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01), use_bias=True, kernel_initializer='he_normal',
                        bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01), use_bias=True, kernel_initializer='he_normal',
                        bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)
        self.a = Flatten()(self.a)
        # The CNN actually has quite a few more layers (omitted here)
        self.a = Dense(200, activation='relu', use_bias=True, kernel_initializer='he_normal',
                       bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(200, activation='relu', use_bias=True, kernel_initializer='he_normal',
                       bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(850, activation='relu', use_bias=True, kernel_initializer='he_normal',
                       bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        # -------------------------------------------------------------------------------
        # < Value function >
        self.ve = Dense(206, activation='relu', use_bias=True, kernel_initializer='he_normal',
                        bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.ve = NoisyDense(1, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.ve)
        self.dv = Dense(206, activation='relu', use_bias=True, kernel_initializer='he_normal',
                        bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.dv = NoisyDense(1, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.dv)  # Dueling
        self.v = Concatenate()([self.ve, self.dv])  # combine state value and advantage
        self.v = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True),
                        output_shape=(1,))(self.v)
        # --------------------------------------------------------------------------------
        # < Action Mean >
        self.mu = Dense(action_size, activation='relu', use_bias=True, kernel_initializer='he_normal',
                        bias_initializer='zeros', kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        # lllll
        self.l = Dense(int(action_size * (action_size + 1) / 2), activation="linear", name='l0')(self.a)
        self.l = Lambda(lambda x: tfp.math.fill_triangular(x))(self.l)
        self.l = Lambda(lambda x: tf.linalg.set_diag(x, tf.exp(tf.linalg.diag_part(x))))(self.l)
        self.l = Lambda(lambda x: tf.matmul(x, x))(self.l)
        # < Action function >
        self.u = Input(shape=(action_size,), name='action_input')
        self.u_mu = keras.layers.Subtract()([self.u, self.mu])
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu, self.l])  # transpose was handled automatically
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu_l, self.u_mu])
        self.A = Lambda(lambda x: -1.0 / 2.0 * x)(self.u_mu_l)
        self.Q = keras.layers.Add()([self.A, self.v])
        # Input and Output -----------------------------------------------------
        self.net_q = Model(inputs=[self.input1, self.u], outputs=[self.Q])
        self.net_a = Model(inputs=[self.input1], outputs=[self.mu])
        self.net_v = Model(inputs=[self.input1], outputs=[self.v])
        self.adm = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)  # Adam as the optimizer
        # self.inputs.compile(loss='mse', optimizer=self.optimizer)
        self.net_q.compile(loss=huberloss, optimizer=self.adm, metrics=['accuracy'])


c = [np.random.rand(1, 720, 480, 3), np.random.rand(1, 2)]
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
a = QNetwork(0.001, np.zeros((720, 480, 3)), 2)
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
for _ in range(4 * 5 * 10 * 5):
    a.net_q.predict_on_batch(c)
print("ok")
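For what it's worth, a minimal sketch (my assumption of one way to look at this, not something I have measured yet) that times each predict_on_batch call in the same loop, reusing `a` from the code above:

import time
import numpy as np

# Reuses `a` (the QNetwork built above) and the same input shapes; times 100 calls.
c = [np.random.rand(1, 720, 480, 3), np.random.rand(1, 2)]
times = []
for _ in range(100):
    t0 = time.perf_counter()
    a.net_q.predict_on_batch(c)
    times.append(time.perf_counter() - t0)
print("mean seconds per predict_on_batch call:", sum(times) / len(times))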