|
|
|
@ -29,11 +29,42 @@
|
|
|
|
|
#include <array>
#include <chrono>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <vector>

#include "nvCVOpenCV.h"
#include "nvVideoEffects.h"
#include "opencv2/opencv.hpp"

#include "../../../utils/httplib.h"
|
|
|
|
|
|
|
|
|
|
// Identifies which processing stage a frame belongs to. The values are used
// as raw indices into the global frameQueues vector and into the HTTP route
// name table, so the order here is load-bearing.
// NOTE(review): the push sites pass literal indices — index 1 receives the
// composited `result` and index 2 the `outlineResult` — which looks swapped
// relative to these names; verify the intended ordering.
enum FrameType {
  TYPE_ORIGINAL,   // raw input frame, before any effect
  TYPE_OUTLINE,    // served on the "/video_outline" route
  TYPE_PROCESSED,  // served on the "/video_processed" route
  TYPE_MAX         // count sentinel — must stay last
};
|
|
|
|
|
|
|
|
|
|
// A mutex-guarded FIFO of frames shared between the video-processing loop
// (producer, via pushToFrameQueue) and the HTTP streaming handlers
// (consumers, in startHttpServer).
struct FrameQueue {
  std::queue<cv::Mat> frameQueue;  // pending frames, oldest at the front
  std::mutex frameMutex;           // guards every access to frameQueue
};
|
|
|
|
|
|
|
|
|
|
std::vector<FrameQueue> frameQueues(TYPE_MAX);
|
|
|
|
|
|
|
|
|
|
// Convert cv::Mat to JPEG buffer
|
|
|
|
|
// Encode a frame as a JPEG byte stream.
// Returns the encoded bytes, or an empty buffer if encoding fails.
// Fix: the original discarded cv::imencode's success flag, so a failed
// encode could hand a partially-filled buffer to the HTTP stream.
std::vector<uchar> matToBytes(const cv::Mat& img) {
  std::vector<uchar> buf;
  if (!cv::imencode(".jpg", img, buf)) {
    buf.clear();  // make failure unambiguous to callers
  }
  return buf;
}
|
|
|
|
|
|
|
|
|
|
// Append a deep copy of `frame` to the queue selected by `type`.
// Fixes: (1) `type` was used to index frameQueues with no bounds check;
// (2) the queue grew without bound whenever no HTTP client was draining
// it — old frames are now dropped so memory stays bounded.
void pushToFrameQueue(int type, const cv::Mat& frame) {
  if (type < 0 || type >= TYPE_MAX) return;  // ignore invalid queue index

  // Cap backlog at roughly one second of 30 fps video.
  static constexpr size_t kMaxQueuedFrames = 30;

  std::lock_guard<std::mutex> lock(frameQueues[type].frameMutex);
  std::queue<cv::Mat>& q = frameQueues[type].frameQueue;
  while (q.size() >= kMaxQueuedFrames) {
    q.pop();  // drop oldest first; streaming favors fresh frames
  }
  q.push(frame.clone());  // clone: caller may reuse/overwrite `frame`
}
|
|
|
|
|
|
|
|
|
|
#ifdef _MSC_VER
|
|
|
|
|
#define strcasecmp _stricmp
|
|
|
|
|
#include <Windows.h>
|
|
|
|
@ -77,6 +108,7 @@ std::string FLAG_modelDir;
|
|
|
|
|
std::string FLAG_outDir;
|
|
|
|
|
std::string FLAG_outFile;
|
|
|
|
|
std::string FLAG_bgFile;
|
|
|
|
|
int FLAG_camIndex = 0;
|
|
|
|
|
|
|
|
|
|
static bool GetFlagArgVal(const char *flag, const char *arg, const char **val) {
|
|
|
|
|
if (*arg != '-') return false;
|
|
|
|
@ -176,7 +208,7 @@ static int ParseMyArgs(int argc, char **argv) {
|
|
|
|
|
GetFlagArgVal("cam_res", arg, &FLAG_camRes) || GetFlagArgVal("mode", arg, &FLAG_mode) ||
|
|
|
|
|
GetFlagArgVal("progress", arg, &FLAG_progress) || GetFlagArgVal("show", arg, &FLAG_show) ||
|
|
|
|
|
GetFlagArgVal("comp_mode", arg, &FLAG_compMode) || GetFlagArgVal("blur_strength", arg, &FLAG_blurStrength) ||
|
|
|
|
|
GetFlagArgVal("cuda_graph", arg, &FLAG_cudaGraph) )) {
|
|
|
|
|
GetFlagArgVal("cuda_graph", arg, &FLAG_cudaGraph) || GetFlagArgVal("cam_index", arg, &FLAG_camIndex))) {
|
|
|
|
|
continue;
|
|
|
|
|
} else if (GetFlagArgVal("help", arg, &help)) {
|
|
|
|
|
return NVCV_ERR_HELP;
|
|
|
|
@ -709,7 +741,6 @@ bail:
|
|
|
|
|
FXApp::Err FXApp::processMovie(const char *inFile, const char *outFile) {
|
|
|
|
|
float ms = 0.0f;
|
|
|
|
|
FXApp::Err appErr = errNone;
|
|
|
|
|
const int camIndex = 0;
|
|
|
|
|
NvCV_Status vfxErr = NVCV_SUCCESS;
|
|
|
|
|
bool ok;
|
|
|
|
|
cv::Mat result;
|
|
|
|
@ -725,7 +756,7 @@ FXApp::Err FXApp::processMovie(const char *inFile, const char *outFile) {
|
|
|
|
|
if (inFile) {
|
|
|
|
|
reader.open(inFile);
|
|
|
|
|
} else {
|
|
|
|
|
reader.open(camIndex);
|
|
|
|
|
reader.open(FLAG_camIndex);
|
|
|
|
|
if (!FLAG_camRes.empty()) {
|
|
|
|
|
int camWidth, camHeight, n;
|
|
|
|
|
n = sscanf(FLAG_camRes.c_str(), "%d%*[xX]%d", &camWidth, &camHeight);
|
|
|
|
@ -847,6 +878,10 @@ FXApp::Err FXApp::processMovie(const char *inFile, const char *outFile) {
|
|
|
|
|
CV_8UC3); // Make sure the result is allocated. TODO: allocate outsifde of the loop?
|
|
|
|
|
BAIL_IF_NULL(result.data, vfxErr, NVCV_ERR_MEMORY);
|
|
|
|
|
result.setTo(cv::Scalar::all(0)); // TODO: This may no longer be necessary since we no longer coerce to 16:9
|
|
|
|
|
|
|
|
|
|
cv::Mat originalImg;
|
|
|
|
|
_srcImg.copyTo(originalImg);
|
|
|
|
|
|
|
|
|
|
switch (_compMode) {
|
|
|
|
|
case compNone:
|
|
|
|
|
_srcImg.copyTo(result);
|
|
|
|
@ -876,7 +911,7 @@ FXApp::Err FXApp::processMovie(const char *inFile, const char *outFile) {
|
|
|
|
|
} else { // If the webcam was cropped, also crop the compositing
|
|
|
|
|
cv::Rect rect(0, (_srcImg.rows - _srcVFX.height) / 2, _srcVFX.width, _srcVFX.height);
|
|
|
|
|
cv::Mat subResult = result(rect);
|
|
|
|
|
overlay(_srcImg(rect), _dstImg(rect), 0.5, subResult);
|
|
|
|
|
overlay(_srcImg(rect), _dstImg(rect), 3, subResult);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case compGreen: {
|
|
|
|
@ -918,7 +953,92 @@ FXApp::Err FXApp::processMovie(const char *inFile, const char *outFile) {
|
|
|
|
|
}
|
|
|
|
|
if (_show) {
|
|
|
|
|
drawFrameRate(result);
|
|
|
|
|
cv::imshow("Output", result);
|
|
|
|
|
|
|
|
|
|
// Ensure _dstImg is grayscale
|
|
|
|
|
if (_dstImg.channels() == 3) {
|
|
|
|
|
cv::cvtColor(_dstImg, _dstImg, cv::COLOR_BGR2GRAY);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Find edges
|
|
|
|
|
cv::Mat edges;
|
|
|
|
|
cv::Canny(_dstImg, edges, 100, 200);
|
|
|
|
|
|
|
|
|
|
// Dilate the edges
|
|
|
|
|
cv::Mat dilatedEdges;
|
|
|
|
|
const int iterations = 3; // higher = less detail but less errors
|
|
|
|
|
cv::dilate(edges, dilatedEdges, cv::Mat(), cv::Point(-1, -1), iterations);
|
|
|
|
|
|
|
|
|
|
// Ensure _srcImg and outlineResult are the same size and type
|
|
|
|
|
cv::Mat outlineResult = cv::Mat::zeros(_srcImg.size(), _srcImg.type());
|
|
|
|
|
cv::Mat dilatedMask;
|
|
|
|
|
cv::dilate(_dstImg, dilatedMask, cv::Mat(), cv::Point(-1, -1), iterations);
|
|
|
|
|
|
|
|
|
|
// Convert dilatedMask to three channels if needed
|
|
|
|
|
if (dilatedMask.channels() == 1) {
|
|
|
|
|
cv::cvtColor(dilatedMask, dilatedMask, cv::COLOR_GRAY2BGR);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Find contours
|
|
|
|
|
std::vector<std::vector<cv::Point>> contours;
|
|
|
|
|
std::vector<cv::Vec4i> hierarchy;
|
|
|
|
|
cv::findContours(dilatedEdges, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
|
|
|
|
|
|
|
|
|
|
// Find the largest contour
|
|
|
|
|
cv::Mat contourTest = cv::Mat::zeros(_srcImg.size(), _srcImg.type());
|
|
|
|
|
double maxArea = 0;
|
|
|
|
|
int largestContourIndex = -1;
|
|
|
|
|
for (size_t i = 0; i < contours.size(); ++i) {
|
|
|
|
|
double area = cv::contourArea(contours[i]);
|
|
|
|
|
if (area > maxArea) {
|
|
|
|
|
maxArea = area;
|
|
|
|
|
largestContourIndex = i;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (largestContourIndex >= 0) {
|
|
|
|
|
// Fill the largest contour
|
|
|
|
|
cv::drawContours(contourTest, contours, largestContourIndex, cv::Scalar(255, 255, 0), cv::FILLED);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Convert contourTest to three channels if needed
|
|
|
|
|
if (contourTest.channels() == 1) {
|
|
|
|
|
cv::cvtColor(contourTest, contourTest, cv::COLOR_GRAY2BGR);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Combine the dilated mask and filled contour
|
|
|
|
|
cv::Mat filledRegion;
|
|
|
|
|
cv::bitwise_or(dilatedMask, contourTest, filledRegion);
|
|
|
|
|
|
|
|
|
|
// Apply the outline color to the filled region
|
|
|
|
|
cv::Vec3b outlineColor(0, 0, 255);
|
|
|
|
|
for (int y = 0; y < filledRegion.rows; ++y) {
|
|
|
|
|
for (int x = 0; x < filledRegion.cols; ++x) {
|
|
|
|
|
if (filledRegion.at<cv::Vec3b>(y, x) != cv::Vec3b(0, 0, 0)) {
|
|
|
|
|
outlineResult.at<cv::Vec3b>(y, x) = outlineColor;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Regular outeline
|
|
|
|
|
cv::Vec3b outlineColor2(255, 0, 255);
|
|
|
|
|
for (int y = 0; y < dilatedEdges.rows; ++y) {
|
|
|
|
|
for (int x = 0; x < dilatedEdges.cols; ++x) {
|
|
|
|
|
if (dilatedEdges.at<uchar>(y, x) > 0) {
|
|
|
|
|
outlineResult.at<cv::Vec3b>(y, x) = outlineColor2;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pushToFrameQueue(0, originalImg);
|
|
|
|
|
pushToFrameQueue(1, result);
|
|
|
|
|
pushToFrameQueue(2, outlineResult);
|
|
|
|
|
|
|
|
|
|
// Display the results
|
|
|
|
|
cv::imshow("Original", originalImg);
|
|
|
|
|
cv::imshow("Overlay", result);
|
|
|
|
|
cv::imshow("OutlineTest", outlineResult);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int key = cv::waitKey(1);
|
|
|
|
|
if (key > 0) {
|
|
|
|
|
appErr = processKey(key);
|
|
|
|
@ -970,6 +1090,58 @@ bool isCompModeEnumValid(const FXApp::CompMode& mode)
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void startHttpServer() {
|
|
|
|
|
httplib::Server svr;
|
|
|
|
|
|
|
|
|
|
auto streamHandler = [](FrameType frameType) {
|
|
|
|
|
return [frameType](const httplib::Request&, httplib::Response& res) {
|
|
|
|
|
res.set_content_provider(
|
|
|
|
|
"multipart/x-mixed-replace; boundary=frame",
|
|
|
|
|
[frameType](size_t offset, httplib::DataSink& sink) {
|
|
|
|
|
while (true) {
|
|
|
|
|
cv::Mat frame;
|
|
|
|
|
{
|
|
|
|
|
std::lock_guard<std::mutex> lock(frameQueues[frameType].frameMutex);
|
|
|
|
|
if (!frameQueues[frameType].frameQueue.empty()) {
|
|
|
|
|
frame = frameQueues[frameType].frameQueue.front();
|
|
|
|
|
frameQueues[frameType].frameQueue.pop();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!frame.empty()) {
|
|
|
|
|
std::vector<uchar> bytes = matToBytes(frame);
|
|
|
|
|
|
|
|
|
|
// Write multipart frame
|
|
|
|
|
std::string header = "--frame\r\nContent-Type: image/jpeg\r\n\r\n";
|
|
|
|
|
sink.write(header.data(), header.size());
|
|
|
|
|
sink.write(reinterpret_cast<const char*>(bytes.data()), bytes.size());
|
|
|
|
|
sink.write("\r\n", 2);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
std::this_thread::sleep_for(std::chrono::milliseconds(30)); // Wait to avoid busy-waiting
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true; // Continue streaming
|
|
|
|
|
}
|
|
|
|
|
);
|
|
|
|
|
};
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Array of route names corresponding to FrameType enum
|
|
|
|
|
const std::array<std::string, TYPE_MAX> routeNames = {
|
|
|
|
|
"original", "outline", "processed"
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Set up routes for each frame type
|
|
|
|
|
for (int i = 0; i < TYPE_MAX; ++i) {
|
|
|
|
|
std::string route = "/video_" + routeNames[i];
|
|
|
|
|
svr.Get(route, streamHandler(static_cast<FrameType>(i)));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
svr.listen("0.0.0.0", 8080); // Start server on port 8080
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int main(int argc, char **argv) {
|
|
|
|
|
int nErrs = 0;
|
|
|
|
|
nErrs = ParseMyArgs(argc, argv);
|
|
|
|
@ -978,6 +1150,45 @@ int main(int argc, char **argv) {
|
|
|
|
|
return nErrs;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::thread serverThread(startHttpServer);
|
|
|
|
|
|
|
|
|
|
//httplib::Server svr;
|
|
|
|
|
|
|
|
|
|
//svr.Get("/video", [](const httplib::Request&, httplib::Response& res) {
|
|
|
|
|
// cv::VideoCapture cap(0); // Open webcam
|
|
|
|
|
// if (!cap.isOpened()) {
|
|
|
|
|
// std::cerr << "Error: Cannot open webcam\n";
|
|
|
|
|
// return;
|
|
|
|
|
// }
|
|
|
|
|
// // Content provider for streaming
|
|
|
|
|
// res.set_content_provider(
|
|
|
|
|
// "multipart/x-mixed-replace; boundary=frame",
|
|
|
|
|
// [cap](size_t offset, httplib::DataSink& sink) mutable {
|
|
|
|
|
// cv::Mat frame;
|
|
|
|
|
// cap >> frame;
|
|
|
|
|
|
|
|
|
|
// if (!frame.empty()) {
|
|
|
|
|
// std::vector<uchar> bytes = matToBytes(frame);
|
|
|
|
|
|
|
|
|
|
// // Write multipart content
|
|
|
|
|
// std::string header = "--frame\r\nContent-Type: image/jpeg\r\n\r\n";
|
|
|
|
|
// sink.write(header.data(), header.size());
|
|
|
|
|
// sink.write(reinterpret_cast<const char*>(bytes.data()), bytes.size());
|
|
|
|
|
// sink.write("\r\n", 2);
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// return true; // Continue streaming
|
|
|
|
|
// },
|
|
|
|
|
// [](bool success) {
|
|
|
|
|
// // Completion handler when stream ends
|
|
|
|
|
// }
|
|
|
|
|
// );
|
|
|
|
|
// });
|
|
|
|
|
|
|
|
|
|
//svr.listen("0.0.0.0", 8080);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FXApp::Err fxErr = FXApp::errNone;
|
|
|
|
|
FXApp app;
|
|
|
|
|
|
|
|
|
|