TensorFlow Function Summary

YawnsDuzin 2019. 11. 2. 17:31

 


 

# Softmax: y_i = exp(a_i) / sum_j exp(a_j)
import tensorflow as tf
import numpy as np

# a = np.array([0.3, 2.9, 4.0])
a = np.array([1.2, 0.9, 0.4])

exp_a = np.exp(a)
print(exp_a)     # [3.32011692 2.45960311 1.4918247 ]

sum_exp_a = np.sum(exp_a)
print(sum_exp_a) # 7.271544731534767

y = exp_a / sum_exp_a
print(y)         # [0.45659032 0.33825043 0.20515925]


=========================================================================
[3.32011692 2.45960311 1.4918247 ]
7.271544731534767
[0.45659032 0.33825043 0.20515925]
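
For comparison, TensorFlow provides the same computation as a built-in; a minimal sketch of tf.nn.softmax in the TF 1.x session style used throughout this post:

import tensorflow as tf
import numpy as np

a = np.array([1.2, 0.9, 0.4])

sess = tf.Session()
print(sess.run(tf.nn.softmax(a)))   # [0.45659032 0.33825043 0.20515925]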

 

import tensorflow as tf
import numpy as np

# ndim: number of dimensions (rank); shape: size of each dimension
a = np.array([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]])
print(a)
print(a.ndim)
print(a.shape)
===========================================================================
[[0 1 2 3 4 5 6]
 [0 1 2 3 4 5 6]]
2
(2, 7)

 

b = np.array([[[ 1,  2,  3],
               [ 4,  5,  6],
               [ 7,  8,  9],
               [10, 11, 12]]])
print(b)
print(b.ndim)
print(b.shape)

===========================================================================
[[[ 1  2  3]
  [ 4  5  6]
  [ 7  8  9]
  [10 11 12]]]
3
(1, 4, 3)

 

# TF 1.x: tensors are evaluated through a Session
sess = tf.Session()
sess.run(tf.global_variables_initializer())

d = tf.constant([[1.,2.], [3.,4.]])
print(sess.run(tf.shape(d)))

===========================================================================
[2 2]
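
The counterpart of NumPy's ndim is tf.rank, which returns the number of dimensions as a scalar tensor; a quick sketch reusing sess and d from above:

print(sess.run(tf.rank(d)))   # 2 (d is a 2-D tensor)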

 

e = tf.constant([[
                     [
                         [1,2,3,4],
                         [5,6,7,8],
                         [9,10,11,12]
                     ],
                     [
                         [13,14,15,16],
                         [17,18,19,20],
                         [21,22,23,24]
                     ] 
                ]])

print(e.shape)

===========================================================================

(1, 2, 3, 4)

 

# reduce_mean(tensor, axis = )
# Computes the mean over all elements, or along the given axis.
# Note: e is int32, so its means are truncated (e.g. mean of 1,2,3,4 -> 2).
print('=====d=====')
print(sess.run(d))
print(sess.run(tf.reduce_mean(d)))
print(sess.run(tf.reduce_mean(d, axis = 0)))
print(sess.run(tf.reduce_mean(d, axis = 1)))
print('0=====e=====')
print(sess.run(tf.reduce_mean(e, axis = 0)))
print('1=====e=====')
print(sess.run(tf.reduce_mean(e, axis = 1)))
print('2=====e=====')
print(sess.run(tf.reduce_mean(e, axis = 2)))
print('3=====e=====')
print(sess.run(tf.reduce_mean(e, axis = 3)))

===========================================================================

=====d=====
[[1. 2.]
 [3. 4.]]
2.5
[2. 3.]
[1.5 3.5]
0=====e=====
[[[ 1  2  3  4]
  [ 5  6  7  8]
  [ 9 10 11 12]]

 [[13 14 15 16]
  [17 18 19 20]
  [21 22 23 24]]]
1=====e=====
[[[ 7  8  9 10]
  [11 12 13 14]
  [15 16 17 18]]]
2=====e=====
[[[ 5  6  7  8]
  [17 18 19 20]]]
3=====e=====
[[[ 2  6 10]
  [14 18 22]]]
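
The same axis convention applies to the other reductions, such as tf.reduce_sum; a quick sketch on d from above:

print(sess.run(tf.reduce_sum(d)))           # 10.0
print(sess.run(tf.reduce_sum(d, axis = 0))) # [4. 6.]
print(sess.run(tf.reduce_sum(d, axis = 1))) # [3. 7.]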

 

# reshape(tensor, shape)
# Changes the shape of a tensor; a -1 in the shape lets TensorFlow infer
# that dimension from the total number of elements.
g = tf.constant([[[0, 1, 2], 
                 [3, 4, 5]],
                [[6, 7, 8], 
                 [9, 10, 11]]])

print('-g-----------------')
print(sess.run(tf.shape(g)))

print('\r\n-h1-----------------')
h = tf.reshape(g, shape = [-1, 3])
print(sess.run(h))
print(h.shape)
print('\r\n-h2-----------------')
h2 = tf.reshape(g, shape = [-1, 1, 3])
print(sess.run(h2))
print(h2.shape)

===========================================================================

-g-----------------
[2 2 3]

-h1-----------------
[[ 0  1  2]
 [ 3  4  5]
 [ 6  7  8]
 [ 9 10 11]]
(4, 3)

-h2-----------------
[[[ 0  1  2]]

 [[ 3  4  5]]

 [[ 6  7  8]]

 [[ 9 10 11]]]
(4, 1, 3)
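
A related function is tf.squeeze, which drops size-1 dimensions like the middle axis of h2; a minimal sketch:

print(sess.run(tf.squeeze(h2)))   # size-1 axis removed
print(tf.squeeze(h2).shape)       # (4, 3)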

 

# argmax(tensor, axis = )
# Returns the index of the largest value along the given axis.

j = tf.constant([[0,1,2],
                 [2,1,0],
                 [0,1,2],
                 [1,2,0]])

print(sess.run(tf.argmax(j)))               # default axis is 0
print(sess.run(tf.argmax(j, axis = 0)))     # same result as above
print(sess.run(tf.argmax(j, axis = 1)))     # this form is used most often

===========================================================================

[1 3 0]
[1 3 0]
[2 0 2 1]

 

# one_hot(indices, depth = )
# Encodes each index as a one-hot vector of length depth, adding one
# dimension to the input (the opposite direction of the reshape above).
print('\r\n-k-----------------')
k = tf.constant([[0], [1], [2], [0]])
print(sess.run(k))
print(k.shape)

print('\r\n-m-----------------')
m = tf.one_hot(k, depth = 3)
print(sess.run(m))
print(m.shape)

print('\r\n-n-----------------')
n = tf.one_hot(k, depth = 4)
print(sess.run(n))
print(n.shape)

print('\r\n-o-----------------')
o = tf.reshape(m, shape = [-1, 3])
print(sess.run(o))
print(o.shape)

===========================================================================

-k-----------------
[[0]
 [1]
 [2]
 [0]]
(4, 1)

-m-----------------
[[[1. 0. 0.]]

 [[0. 1. 0.]]

 [[0. 0. 1.]]

 [[1. 0. 0.]]]
(4, 1, 3)

-n-----------------
[[[1. 0. 0. 0.]]

 [[0. 1. 0. 0.]]

 [[0. 0. 1. 0.]]

 [[1. 0. 0. 0.]]]
(4, 1, 4)

-o-----------------
[[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]
 [1. 0. 0.]]
(4, 3)
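
The reshape step is only needed because k has shape (4, 1); a rank-1 index tensor produces the flat one-hot matrix directly. A minimal sketch:

k1 = tf.constant([0, 1, 2, 0])             # rank-1 indices, shape (4,)
print(sess.run(tf.one_hot(k1, depth = 3))) # shape (4, 3), no reshape needed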

 

# cast(tensor, dtype)
# Converts a tensor to the given dtype (e.g. bool -> int32).

p = tf.constant([True, False, 1 == 0, 1 == 1])

print(sess.run(p))
print(sess.run(tf.cast(p, tf.int32)))

===========================================================================

[ True False False  True]
[1 0 0 1]
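
A common place where cast, argmax, and reduce_mean come together is the usual accuracy calculation; a minimal sketch with made-up pred/label values for illustration:

pred  = tf.constant([[0.1, 0.8, 0.1],      # hypothetical model outputs
                     [0.7, 0.2, 0.1]])
label = tf.constant([[0., 1., 0.],         # hypothetical one-hot labels
                     [1., 0., 0.]])
correct = tf.equal(tf.argmax(pred, axis = 1), tf.argmax(label, axis = 1))
print(sess.run(tf.reduce_mean(tf.cast(correct, tf.float32))))  # 1.0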