@phdthesis{oai:kitami-it.repo.nii.ac.jp:02000405,
  author   = {Yimamu, Tuersunjiang and {トルスンジャン・イマム}},
  title    = {Feature Extraction for {Single Shot Multibox} Object Detector},
  school   = {北見工業大学},
  year     = {2023},
  month    = mar,
  abstract = {Recently, object detection based on deep convolutional neural networks (CNNs) have achieved remarkable result and successfully applied many real-world applications. However, scale variation problem in multiscale object detection still is challenging problem, especially for small objects. Concerning the above problem, we proposed a new detection network with an efficient feature fusion module based on SSD using VGG-16 as backbone called Multi-path Feature Fusion Single Shot Multibox Detector (MF-SSD). The proposed feature fusion module consists of two newly designed modules with dilated convolution, which fuses features from shallow layers (mainly contain boundary information) to higher level features (mainly contain semantic reach information) without reducing the original resolution of the feature map. We have conducted experiments on three datasets to explicate the efficacy of our proposed detector. The proposed MF-SSD with input size 512×512 achieved 81.5% mAP and 34.1 % mAP on PASCAL VOC test set and MS COCO test-dev, respectively. Experimental results show the proposed feature fusion module can improve both semantic and boundary information for object detection.},
}