|
| 1 | +import numpy as np |
| 2 | +import tensorflow as tf |
| 3 | +from tf_octConv import * |
| 4 | +from tf_cnn_basic import * |
| 5 | +from oct_Resnet_unit import * |
| 6 | + |
| 7 | + |
# --- Model hyper-parameters ---
G = 1                                 # NOTE(review): presumably a conv group count -- confirm against tf_octConv helpers
alpha = 0.25                          # NOTE(review): looks like the OctConv low-frequency channel ratio -- confirm
use_fp16 = True                       # run the network body in half precision (cast back to fp32 at the output)
k_sec = {2: 3, 3: 4, 4: 6, 5: 3}      # residual units per stage (ResNet-50 layout: 3-4-6-3)
| 12 | + |
| 13 | + |
def get_before_pool():
    """Build the OctConv ResNet-50 trunk up to (but excluding) global pooling.

    Returns:
        The conv5 feature map, cast back to float32 when the body ran in fp16.
    """
    # BUG FIX: the original `tf.Variable(name="data")` has no initial value and
    # raises at graph-construction time. A TF1-style graph input is a placeholder.
    # NOTE(review): shape left unconstrained; assumes NHWC ImageNet-style input
    # downstream (7x7 global pool in get_linear) -- TODO confirm.
    data = tf.placeholder(dtype=tf.float32, shape=None, name="data")
    data = tf.cast(x=data, dtype=np.float16) if use_fp16 else data

    # conv1: 7x7/2 stem followed by 3x3/2 max pooling.
    conv1 = Conv_BN_AC(data=data, num_filter=64, kernel=(7, 7), name='conv1', pad=(3, 3), stride=(2, 2))
    pool1 = Pooling(data=conv1, pool_type="max", kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="pool1")

    # conv2: the first unit splits the stream into high/low frequency branches.
    # NOTE(review): num_in=32 rather than the 64-channel stem width -- presumably
    # the helper derives the hf/lf split from this; confirm against oct_Resnet_unit.
    num_in = 32
    num_mid = 64
    num_out = 256
    hf_conv2_x, lf_conv2_x = Residual_Unit_first(
        data=pool1,
        alpha=alpha,
        num_in=num_in,
        num_mid=num_mid,
        num_out=num_out,
        name='conv2_B01',
        first_block=True,
        stride=(1, 1))

    # BUG FIX: the original loop guarded on `i == 1` inside `range(2, ...)`,
    # which never holds, so its first iteration read the still-undefined
    # hf_conv2_x/lf_conv2_x. Binding the first unit's outputs to the
    # loop-carried names above makes the chain well-defined.
    for i in range(2, k_sec[2] + 1):
        hf_conv2_x, lf_conv2_x = Residual_Unit(
            hf_data=hf_conv2_x,
            lf_data=lf_conv2_x,
            alpha=alpha,
            num_in=num_out,          # every unit past the first consumes num_out channels
            num_mid=num_mid,
            num_out=num_out,
            name='conv2_B%02d' % i,
            first_block=False,
            stride=(1, 1))

    # conv3: double the widths; downsample on the first unit of the stage.
    num_in = num_out
    num_mid *= 2
    num_out *= 2
    for i in range(1, k_sec[3] + 1):
        hf_conv3_x, lf_conv3_x = Residual_Unit(
            hf_data=(hf_conv2_x if i == 1 else hf_conv3_x),
            lf_data=(lf_conv2_x if i == 1 else lf_conv3_x),
            alpha=alpha,
            num_in=(num_in if i == 1 else num_out),
            num_mid=num_mid,
            num_out=num_out,
            name='conv3_B%02d' % i,
            first_block=(i == 1),
            stride=((2, 2) if i == 1 else (1, 1)))

    # conv4: same doubling pattern as conv3.
    num_in = num_out
    num_mid *= 2
    num_out *= 2
    for i in range(1, k_sec[4] + 1):
        hf_conv4_x, lf_conv4_x = Residual_Unit(
            hf_data=(hf_conv3_x if i == 1 else hf_conv4_x),
            lf_data=(lf_conv3_x if i == 1 else lf_conv4_x),
            alpha=alpha,
            num_in=(num_in if i == 1 else num_out),
            num_mid=num_mid,
            num_out=num_out,
            name='conv4_B%02d' % i,
            first_block=(i == 1),
            stride=((2, 2) if i == 1 else (1, 1)))

    # conv5: the first unit merges the hf/lf branches back into one stream.
    num_in = num_out
    num_mid *= 2
    num_out *= 2
    conv5_x = Residual_Unit_last(
        hf_data=hf_conv4_x,
        lf_data=lf_conv4_x,
        alpha=alpha,
        num_in=num_in,
        num_mid=num_mid,
        num_out=num_out,
        name='conv5_B01',
        first_block=True,
        stride=(2, 2))

    for i in range(2, k_sec[5] + 1):
        conv5_x = Residual_Unit_norm(data=conv5_x,
                                     num_in=num_out,
                                     num_mid=num_mid,
                                     num_out=num_out,
                                     name='conv5_B%02d' % i,
                                     first_block=False,
                                     stride=(1, 1))

    # Cast back to fp32 so pooling/softmax downstream run in full precision.
    output = tf.cast(x=conv5_x, dtype=np.float32) if use_fp16 else conv5_x
    return output
| 111 | + |
| 112 | + |
def get_linear(num_classes=1000):
    """Append global average pooling and the fully-connected classifier head.

    Args:
        num_classes: width of the final dense layer (default 1000, ImageNet).

    Returns:
        The (batch, num_classes) logits tensor.
    """
    before_pool = get_before_pool()
    pool5 = Pooling(data=before_pool, pool_type="avg", kernel=(7, 7), stride=(1, 1), name="global-pool")
    # BUG FIX: tf.layers.Flatten is a Layer class whose constructor takes no
    # `input` argument; the drop-in functional form is tf.layers.flatten.
    flat5 = tf.layers.flatten(inputs=pool5, name='flatten')
    fc6 = tf.layers.dense(inputs=flat5, units=num_classes, name='classifier')
    return fc6
| 119 | + |
| 120 | + |
def get_symbol(num_classes=1000):
    """Build the full network: trunk, classifier head, and class softmax."""
    logits = get_linear(num_classes)
    return tf.nn.softmax(logits=logits, name='softmax')
| 126 | + |
0 commit comments