Hihocoder: conv2d()

Date: 2023-12-25 23:43:07

http://hihocoder.com/contest/tupu2018/problem/2

Problem 2: Standard 2D Convolution

Time limit: 5000ms
Time limit per test case: 1000ms
Memory limit: 256MB

Description

Implement the following standard convolution:

conv2d(input tensor, filters = 8, kernel size=[5,5], stride = 2, zero padding = 'SAME', activation = relu)

The shape of input tensor:  [height = 32, width = 32, channels = 3]

Input

The first line contains an image whose pixel values lie in [0, 255]. The image should be preprocessed (x / 127.5 - 1) before being fed into the convolution function. Weights[kernel height, kernel width, input channels, output channels] and biases[output channels] follow on the next two lines, respectively.
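(For example, a pixel value of 0 is preprocessed to 0 / 127.5 - 1 = -1 and a value of 255 to 255 / 127.5 - 1 = 1, so every input to the convolution lies in [-1, 1].)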

Output

Print the result tensor on one line, in the same order as the input file. The required precision is 1E-4.

Note

All data are arranged on one line in C-like (row-major) order, with the last axis index changing fastest and the first axis index changing slowest.
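For the [32, 32, 3] image this means the element at (row i, column j, channel k) sits at position (i * 32 + j) * 3 + k in the flattened line. A hypothetical helper (my own illustration, not part of the problem) makes this explicit:

 // Hypothetical helper (illustration only): index of (i, j, k) in the flattened line.
 int flatIndex(int i, int j, int k) {
     // C-like (row-major) order for a [32, 32, 3] image:
     // the channel index k changes fastest, the row index i slowest.
     return (i * 32 + j) * 3 + k;
 }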

Sample input
Download the sample input from:
https://media.hihocoder.com/contests/tupu-campus-hiring-2017/conv_sample_input.txt 
Sample output
Download the sample output from:
https://media.hihocoder.com/contests/tupu-campus-hiring-2017/conv_sample_output.txt 

Task: implement the conv2d() convolution function.

Convolutional neural network

#input channels = 3, output channels = 8

#input_tensor = 32 * 32 * 3

#weights = 5 * 5 * 3 * 8

#biases = 1*8

Parameter notes: stride is the step size of the sliding window; zero padding controls whether the borders are padded with zeros, and 'SAME' means that for stride = 1 the output keeps the same spatial size as the input; activation is the activation function, and ReLU is f(x) = max(x, 0).
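Worked out for this problem (stride = 2, 5x5 kernel): each output side is ceil(32 / 2) = 16, the padded input must be (16 - 1) * 2 + 5 = 35 wide, so 35 - 32 = 3 rows/columns of zeros are added (1 on the top/left, 2 on the bottom/right), and the result tensor has shape 16 * 16 * 8.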

#include <iostream>
#include <stdio.h>
#include <cmath>
#include <vector>
#include <string>
using namespace std;

#define Height 32
#define Width 32
#define Channels 3
#define Filters 8
#define kernel 5
#define Eps 1e-5

float weight[kernel][kernel][Channels][Filters];
float biases[Filters];

// Standard 2D convolution with "SAME" zero padding and ReLU activation.
// The result is printed directly, one value per output position and filter.
void conv2d(int inputTensor[Height][Width][Channels], int filters, int kernelSize,
            int stride = 2, string padding = "SAME", string activation = "relu")
{
    // For "SAME" padding the output feature map is ceil(input / stride) per side.
    int feaMapH = ceil(Height * 1.0 / stride);
    // Padded size needed so that every output position sees a full kernel window.
    int HeightAfterPadding = (feaMapH - 1) * stride + kernelSize;
    int paddingL = (HeightAfterPadding - Height) / 2;        // top/left padding
    int paddingR = (HeightAfterPadding - Height) - paddingL; // bottom/right padding (not used below)
    vector<vector<vector<float> > > a;

    // Allocate the zero-padded input volume.
    for(int i = 0; i < HeightAfterPadding; i++){
        vector<vector<float> > b;
        for(int j = 0; j < HeightAfterPadding; j++){
            vector<float> c;
            for(int k = 0; k < Channels; k++){
                c.push_back(0.0f);
            }
            b.push_back(c);
        }
        a.push_back(b);
    }
    //cout << HeightAfterPadding << endl;

    // Copy the preprocessed image (x / 127.5 - 1) into the padded volume.
    for(int i = 0; i < Height; i++){
        for(int j = 0; j < Width; j++){
            for(int k = 0; k < Channels; k++){
                a[paddingL + i][paddingL + j][k] = ((float)(inputTensor[i][j][k])) / 127.5 - 1.0;
            }
        }
    }

    // Slide the kernel over the padded input with the given stride.
    for(int i = 0; i <= HeightAfterPadding - kernelSize; i += stride){
        for(int j = 0; j <= HeightAfterPadding - kernelSize; j += stride){
            for(int nn = 0; nn < filters; nn++){
                float sum = 0.0f;
                for(int k = 0; k < kernelSize; k++){
                    for(int l = 0; l < kernelSize; l++){
                        for(int mm = 0; mm < Channels; mm++){
                            sum += a[i + k][j + l][mm] * weight[k][l][mm][nn];
                        }
                    }
                }
                sum += biases[nn];
                // ReLU activation: negative (and near-zero) sums are printed as 0.
                if(sum < Eps) cout << "0.0000 ";
                else printf("%.4f ", sum);
            }
            //cout << endl;
        }
    }
    return;
}

int main()
{
    int a[Height][Width][Channels];
    //freopen("conv_sample_input.txt","r",stdin);

    // Read the image in C-like order: the channel index changes fastest.
    for(int i = 0; i < Height; i++){
        for(int j = 0; j < Width; j++){
            for(int k = 0; k < Channels; k++){
                cin >> a[i][j][k];
            }
        }
    }

    // Read weights[kernel height][kernel width][input channels][output channels].
    for(int i = 0; i < kernel; i++){
        for(int j = 0; j < kernel; j++){
            for(int k = 0; k < Channels; k++){
                for(int l = 0; l < Filters; l++){
                    cin >> weight[i][j][k][l];
                }
            }
        }
    }

    // Read biases[output channels].
    for(int l = 0; l < Filters; l++){
        cin >> biases[l];
    }

    conv2d(a, Filters, kernel);
    return 0;
}
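If the freopen line is uncommented the program reads conv_sample_input.txt directly; otherwise it expects the flattened image, weights, and biases on standard input and prints the 16 * 16 * 8 = 2048 output values on one line.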

Appendix: fast convolution via FFT
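The post ends with this appendix heading and no code. As a rough sketch of the idea (my own addition, shown for 1D sequences; the names fft/convolve and the radix-2 approach are my choices, not from the original), convolution can be computed by transforming both sequences, multiplying pointwise in the frequency domain, and transforming back, which costs O(n log n) instead of O(n * m). For a CNN layer the kernel would additionally have to be flipped, since conv2d actually computes a cross-correlation, and the 2D case applies the same transform along both axes.

 #include <complex>
 #include <vector>
 #include <cmath>
 #include <cstdio>
 #include <algorithm>
 using namespace std;

 typedef complex<double> cd;
 const double PI = acos(-1.0);

 // In-place iterative radix-2 Cooley-Tukey FFT; invert = true gives the inverse transform.
 void fft(vector<cd>& a, bool invert)
 {
     int n = a.size();
     // Bit-reversal permutation.
     for(int i = 1, j = 0; i < n; i++){
         int bit = n >> 1;
         for(; j & bit; bit >>= 1) j ^= bit;
         j ^= bit;
         if(i < j) swap(a[i], a[j]);
     }
     // Butterfly passes over blocks of doubling length.
     for(int len = 2; len <= n; len <<= 1){
         double ang = 2 * PI / len * (invert ? -1 : 1);
         cd wlen(cos(ang), sin(ang));
         for(int i = 0; i < n; i += len){
             cd w(1);
             for(int j = 0; j < len / 2; j++){
                 cd u = a[i + j], v = a[i + j + len / 2] * w;
                 a[i + j] = u + v;
                 a[i + j + len / 2] = u - v;
                 w *= wlen;
             }
         }
     }
     if(invert)
         for(cd& x : a) x /= n;
 }

 // Full 1D convolution of two real sequences via pointwise products in the frequency domain.
 vector<double> convolve(const vector<double>& x, const vector<double>& h)
 {
     int resultSize = (int)x.size() + (int)h.size() - 1;
     int n = 1;
     while(n < resultSize) n <<= 1;           // pad to a power of two
     vector<cd> fx(x.begin(), x.end()), fh(h.begin(), h.end());
     fx.resize(n);
     fh.resize(n);
     fft(fx, false);
     fft(fh, false);
     for(int i = 0; i < n; i++) fx[i] *= fh[i];
     fft(fx, true);
     vector<double> res(resultSize);
     for(int i = 0; i < resultSize; i++) res[i] = fx[i].real();
     return res;
 }

 int main()
 {
     // [1, 2, 3] convolved with [1, 1] -> [1, 3, 5, 3].
     vector<double> r = convolve({1, 2, 3}, {1, 1});
     for(double v : r) printf("%.4f ", v);
     printf("\n");
     return 0;
 }

For a 5x5 kernel on a 32x32 image the direct sliding-window method is already fast, so the FFT route only pays off for much larger kernels.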