Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bryce2 dev #45

Open
wants to merge 8 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Submodule ebsynth updated from 000000 to aad93b
14 changes: 4 additions & 10 deletions src/gmask.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include "opencvutils.h"

using namespace std;
using namespace cv;
Expand Down Expand Up @@ -45,16 +46,7 @@ void GMask::setMask(std::shared_ptr<QImage> mask){
void GMask::createMask(std::shared_ptr<QImage> currFrame, int i){

// Detect edges
Mat mat;
switch (currFrame->format())
{
case QImage::Format_RGB32:
mat = Mat(currFrame->height(), currFrame->width(), CV_8UC4, (void*)currFrame->constBits(), currFrame->bytesPerLine());
cvtColor(mat, mat, COLOR_RGBA2RGB);
break;
default:
break;
}
Mat mat = qimage_to_mat_ref(*currFrame);

// First, blur
Mat blurMat(mat.size(), CV_8UC3);
Expand Down Expand Up @@ -124,4 +116,6 @@ void GMask::createMask(std::shared_ptr<QImage> currFrame, int i){
mask.convertTo(mask,CV_8UC1,1);
QImage imgIn = QImage(mask.data, mask.cols, mask.rows, static_cast<int>(mask.step), QImage::Format_Grayscale8);
m_mask = make_shared<QImage>(imgIn);

imgIn.save("./imgIn.jpg");
}
13 changes: 13 additions & 0 deletions src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,13 @@
#include "stylizer.h"

#include "iohandler.h"

//#include "gedge.h"
//#include "optical-flow/simpleflow.h"
//#include "opencvutils.h"
//#include "advector.h"
//#include "gpos.h"

#include "opencvutils.h"
#include "advector.h"
#include "gpos.h"
Expand Down Expand Up @@ -69,6 +76,12 @@ int main(int argc, char *argv[])

ioHandler.loadInputData(inputFrames, keyframes);

GMask g_mask = GMask(inputFrames[10]);

// std::shared_ptr<QImage> currFrame(new QImage("./data/minitest/video/000.jpg"));

// GEdge guide(currFrame);

Stylizer style(inputFrames, keyframes, ioHandler);
// style.generateGuides();
style.run();
Expand Down
29 changes: 29 additions & 0 deletions styletransfer/style.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import sys

# Usage: python style.py <content_image> <style_image> <output_image>
#
# Applies Magenta's arbitrary-image-stylization TF Hub model to render
# <content_image> in the style of <style_image>, writing the result to
# <output_image>.

# BUG FIX: the original read `sys.arg`, which does not exist and raises
# AttributeError at startup. Command-line arguments live in `sys.argv`
# (argv[0] is the script path, so user arguments start at index 1).
if len(sys.argv) < 4:
    sys.exit("usage: python style.py <content_image> <style_image> <output_image>")

content_image_path = sys.argv[1]
style_image_path = sys.argv[2]
output_image_path = sys.argv[3]

# Load content and style images from disk as numpy arrays.
content_image = plt.imread(content_image_path)
style_image = plt.imread(style_image_path)

# Convert to float32, add a batch dimension, and normalize to [0, 1] —
# the layout the stylization module expects.
content_image = content_image.astype(np.float32)[np.newaxis, ...] / 255.
style_image = style_image.astype(np.float32)[np.newaxis, ...] / 255.

# Resize the style image to 256x256: that is the resolution the style
# transfer network was trained with. The content image can be any size.
style_image = tf.image.resize(style_image, (256, 256))

# Load the image stylization module from TF Hub (downloads/caches on
# first use; requires network access).
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')

# Stylize: the module returns a tuple whose first element is the
# stylized image batch, values in [0, 1].
outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
stylized_image = outputs[0]

# Drop the batch dimension and save the float [0, 1] image.
plt.imsave(output_image_path, np.array(stylized_image[0]))