Commit

Merge pull request spmallick#255 from vikasguptaiisc/master
added code for Age Gender
spmallick committed Feb 19, 2019
2 parents 9b4cc29 + c4618dc commit 1f57eec
Showing 14 changed files with 2,996 additions and 10 deletions.
141 changes: 141 additions & 0 deletions AgeGender/AgeGender.cpp
@@ -0,0 +1,141 @@
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/dnn.hpp>
#include <tuple>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <iterator>
using namespace cv;
using namespace cv::dnn;
using namespace std;

tuple<Mat, vector<vector<int>>> getFaceBox(Net net, Mat &frame, double conf_threshold)
{
    Mat frameOpenCVDNN = frame.clone();
    int frameHeight = frameOpenCVDNN.rows;
    int frameWidth = frameOpenCVDNN.cols;
    double inScaleFactor = 1.0;
    Size size = Size(300, 300);
    Scalar meanVal = Scalar(104, 117, 123);

    cv::Mat inputBlob;
    cv::dnn::blobFromImage(frameOpenCVDNN, inputBlob, inScaleFactor, size, meanVal, true, false);

    net.setInput(inputBlob, "data");
    cv::Mat detection = net.forward("detection_out");

    cv::Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

    vector<vector<int>> bboxes;

    for(int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);

        if(confidence > conf_threshold)
        {
            int x1 = static_cast<int>(detectionMat.at<float>(i, 3) * frameWidth);
            int y1 = static_cast<int>(detectionMat.at<float>(i, 4) * frameHeight);
            int x2 = static_cast<int>(detectionMat.at<float>(i, 5) * frameWidth);
            int y2 = static_cast<int>(detectionMat.at<float>(i, 6) * frameHeight);
            vector<int> box = {x1, y1, x2, y2};
            bboxes.push_back(box);
            cv::rectangle(frameOpenCVDNN, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(0, 255, 0), 2, 4);
        }
    }

    return make_tuple(frameOpenCVDNN, bboxes);
}

int main(int argc, char** argv)
{
    string faceProto = "opencv_face_detector.pbtxt";
    string faceModel = "opencv_face_detector_uint8.pb";

    string ageProto = "age_deploy.prototxt";
    string ageModel = "age_net.caffemodel";

    string genderProto = "gender_deploy.prototxt";
    string genderModel = "gender_net.caffemodel";

    Scalar MODEL_MEAN_VALUES = Scalar(78.4263377603, 87.7689143744, 114.895847746);

    vector<string> ageList = {"(0-2)", "(4-6)", "(8-12)", "(15-20)", "(25-32)",
                              "(38-43)", "(48-53)", "(60-100)"};

    vector<string> genderList = {"Male", "Female"};

    // Load Network
    Net ageNet = readNet(ageModel, ageProto);
    Net genderNet = readNet(genderModel, genderProto);
    Net faceNet = readNet(faceModel, faceProto);

    VideoCapture cap;
    if (argc > 1)
        cap.open(argv[1]);
    else
        cap.open(0);

    while(waitKey(1) < 0) {
        // read frame
        Mat frame;
        cap.read(frame);
        if (frame.empty())
        {
            waitKey();
            break;
        }

        vector<vector<int>> bboxes;
        Mat frameFace;
        tie(frameFace, bboxes) = getFaceBox(faceNet, frame, 0.7);

        if(bboxes.size() == 0) {
            cout << "No face detected, checking next frame." << endl;
            continue;
        }
        for (auto it = begin(bboxes); it != end(bboxes); ++it) {
            Rect rec(it->at(0), it->at(1), it->at(2) - it->at(0), it->at(3) - it->at(1));
            Mat face = frame(rec); // take the ROI of the box from the frame

            Mat blob;
            blob = blobFromImage(face, 1, Size(227, 227), MODEL_MEAN_VALUES, false);
            genderNet.setInput(blob);
            vector<float> genderPreds = genderNet.forward();
            // Find the index of the max element;
            // std::distance + max_element does the argmax() work in C++.
            int max_index_gender = std::distance(genderPreds.begin(), max_element(genderPreds.begin(), genderPreds.end()));
            string gender = genderList[max_index_gender];
            cout << "Gender: " << gender << endl;

            /* // Uncomment if you want to iterate through the genderPreds vector
            for(auto it = genderPreds.begin(); it != genderPreds.end(); ++it) {
                cout << *it << endl;
            }
            */

            ageNet.setInput(blob);
            vector<float> agePreds = ageNet.forward();
            /* // Uncomment if you want to iterate through the agePreds vector
            cout << "PRINTING AGE_PREDS" << endl;
            for(auto it = agePreds.begin(); it != agePreds.end(); ++it) {
                cout << *it << endl;
            }
            */

            // Find the index of the maximum score in the agePreds vector
            int max_index_age = std::distance(agePreds.begin(), max_element(agePreds.begin(), agePreds.end()));
            string age = ageList[max_index_age];
            cout << "Age: " << age << endl;
            string label = gender + ", " + age;
            cv::putText(frameFace, label, Point(it->at(0), it->at(1) - 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, cv::LINE_AA);
            imshow("Frame", frameFace);
            imwrite("out.jpg", frameFace);
        }
    }
}
88 changes: 88 additions & 0 deletions AgeGender/AgeGender.py
@@ -0,0 +1,88 @@
# Import required modules
import cv2 as cv
import math
import time
import argparse

def getFaceBox(net, frame, conf_threshold=0.7):
    frameOpencvDnn = frame.copy()
    frameHeight = frameOpencvDnn.shape[0]
    frameWidth = frameOpencvDnn.shape[1]
    blob = cv.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)

    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_threshold:
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            bboxes.append([x1, y1, x2, y2])
            cv.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight / 150)), 8)
    return frameOpencvDnn, bboxes


parser = argparse.ArgumentParser(description='Use this script to run age and gender recognition using OpenCV.')
parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')

args = parser.parse_args()

faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"

ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"

genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0 - 2)', '(4 - 6)', '(8 - 12)', '(15 - 20)', '(25 - 32)', '(38 - 43)', '(48 - 53)', '(60 - 100)']
genderList = ['Male', 'Female']

# Load network
ageNet = cv.dnn.readNet(ageModel, ageProto)
genderNet = cv.dnn.readNet(genderModel, genderProto)
faceNet = cv.dnn.readNet(faceModel, faceProto)

# Open a video file or an image file or a camera stream
cap = cv.VideoCapture(args.input if args.input else 0)

while cv.waitKey(1) < 0:
    # Read frame
    t = time.time()
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameFace, bboxes = getFaceBox(faceNet, frame)
    if not bboxes:
        print("No face Detected, Checking next frame")
        continue

    for bbox in bboxes:
        face = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]

        blob = cv.dnn.blobFromImage(face, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds = genderNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        # print("Gender Output : {}".format(genderPreds))
        print("Gender : {}".format(gender))

        ageNet.setInput(blob)
        agePreds = ageNet.forward()
        age = ageList[agePreds[0].argmax()]
        # print("Age Output : {}".format(agePreds))
        print("Age : {}".format(age))

        label = "{}, {}".format(gender, age)
        cv.putText(frameFace, label, (bbox[0], bbox[1] - 20), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, cv.LINE_AA)
        cv.imshow("Age Gender Demo", frameFace)
        cv.imwrite("out.jpg", frameFace)
    print("time : {:.3f}".format(time.time() - t))
19 changes: 19 additions & 0 deletions AgeGender/CMakeLists.txt
@@ -0,0 +1,19 @@
cmake_minimum_required(VERSION 2.8.12)

PROJECT(age_gender)
# PATH to OpenCV ( uncomment and provide path if required )
# set(OpenCV_DIR /Users/visionifai/codes/installations/OpenCV4/lib/cmake/opencv4/)

find_package( OpenCV REQUIRED )

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)

include_directories( ${OpenCV_INCLUDE_DIRS})

MACRO(add_example name)
ADD_EXECUTABLE(${name} ${name}.cpp)
TARGET_LINK_LIBRARIES(${name} ${OpenCV_LIBS})
ENDMACRO()

add_example(AgeGender)
26 changes: 26 additions & 0 deletions AgeGender/README.md
@@ -0,0 +1,26 @@
## Code for Age and Gender recognition using Deep Learning

### Models
Download the models from the links below (a small download helper is sketched after the links):

Gender Net : https://www.dropbox.com/s/iyv483wz7ztr9gh/gender_net.caffemodel?dl=0

Age Net : https://www.dropbox.com/s/xfb20y596869vbb/age_net.caffemodel?dl=0
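
If you prefer to fetch the models from a script, here is a minimal Python sketch (not part of this commit). It assumes that changing `dl=0` to `dl=1` in the Dropbox links above returns the raw `.caffemodel` files, and that the `.prototxt` and face-detector files already sit next to `AgeGender.py` / `AgeGender.cpp`.

```
# download_models.py -- hypothetical helper, not part of this commit.
# Assumes the Dropbox share links above still resolve and that dl=1
# triggers a direct download of the raw .caffemodel files.
import urllib.request

MODELS = {
    "gender_net.caffemodel": "https://www.dropbox.com/s/iyv483wz7ztr9gh/gender_net.caffemodel?dl=1",
    "age_net.caffemodel": "https://www.dropbox.com/s/xfb20y596869vbb/age_net.caffemodel?dl=1",
}

for filename, url in MODELS.items():
    print("Downloading {} ...".format(filename))
    urllib.request.urlretrieve(url, filename)  # saved into the current directory
print("Done.")
```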

### Run Code

#### C++
```
cmake .
make
./AgeGender <input_file>    # leave <input_file> blank to use the webcam
```

#### Python
```
python AgeGender.py --input <input_file>    # omit --input to use the webcam
```

### Sample Result

![](sample-output.jpg)