Fix some flake8 errors in cleanup and nsfw_detect
Just some minor code cleanup
parent 47ff3a1152
commit b8def71a94
cleanup.py

@@ -18,7 +18,10 @@
     and limitations under the License.
 """
 
-import os, sys, time, datetime
+import os
+import sys
+import time
+import datetime
 from fhost import app
 
 os.chdir(os.path.dirname(sys.argv[0]))
@@ -33,7 +36,7 @@ maxd = 365
 for f in files:
     stat = os.stat(f)
     systime = time.time()
-    age = datetime.timedelta(seconds = systime - stat.st_mtime).days
+    age = datetime.timedelta(seconds=(systime - stat.st_mtime)).days
 
     maxage = mind + (-maxd + mind) * (stat.st_size / maxs - 1) ** 3
 
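For context on the maxage expression in the hunk above: cleanup.py derives a per-file maximum age from the file size. A rough standalone sketch of that arithmetic (not part of the commit) is below; mind and maxs are assumed placeholder values, only maxd = 365 is visible in the hunk context.

# Sketch of the retention curve used in cleanup.py.
# mind and maxs are assumed values; only maxd = 365 appears in the hunk above.
mind = 30                   # assumed: minimum retention in days
maxd = 365                  # shown in the hunk header context
maxs = 256 * 1024 * 1024    # assumed: maximum file size in bytes


def max_age_days(size):
    # Cubic interpolation: a near-empty file is kept ~maxd days,
    # a file of size maxs is kept only ~mind days.
    return mind + (-maxd + mind) * (size / maxs - 1) ** 3


print(max_age_days(0))      # -> 365.0
print(max_age_days(maxs))   # -> 30.0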
nsfw_detect.py

@@ -24,21 +24,30 @@ import sys
 from io import BytesIO
 from subprocess import run, PIPE, DEVNULL
 
-os.environ["GLOG_minloglevel"] = "2" # seriously :|
 import caffe
 
+os.environ["GLOG_minloglevel"] = "2" # seriously :|
+
+
 class NSFWDetector:
     def __init__(self):
 
         npath = os.path.join(os.path.dirname(__file__), "nsfw_model")
-        self.nsfw_net = caffe.Net(os.path.join(npath, "deploy.prototxt"),
+        self.nsfw_net = caffe.Net(
+            os.path.join(npath, "deploy.prototxt"),
             os.path.join(npath, "resnet_50_1by2_nsfw.caffemodel"),
             caffe.TEST)
-        self.caffe_transformer = caffe.io.Transformer({'data': self.nsfw_net.blobs['data'].data.shape})
-        self.caffe_transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost
-        self.caffe_transformer.set_mean('data', np.array([104, 117, 123])) # subtract the dataset-mean value in each channel
-        self.caffe_transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
-        self.caffe_transformer.set_channel_swap('data', (2, 1, 0)) # swap channels from RGB to BGR
+        self.caffe_transformer = caffe.io.Transformer({
+            'data': self.nsfw_net.blobs['data'].data.shape
+        })
+        # move image channels to outermost
+        self.caffe_transformer.set_transpose('data', (2, 0, 1))
+        # subtract the dataset-mean value in each channel
+        self.caffe_transformer.set_mean('data', np.array([104, 117, 123]))
+        # rescale from [0, 1] to [0, 255]
+        self.caffe_transformer.set_raw_scale('data', 255)
+        # swap channels from RGB to BGR
+        self.caffe_transformer.set_channel_swap('data', (2, 1, 0))
 
     def _compute(self, img):
         image = caffe.io.load_image(BytesIO(img))
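The four Transformer calls reformatted above set up Caffe's usual input preprocessing. As a rough numpy-only sketch (an illustration, not code from this repository) of what they do to an H x W x 3 RGB image with values in [0, 1], as caffe.io.load_image returns:

import numpy as np


def preprocess(image):
    # image: H x W x 3, RGB, float values in [0, 1]
    data = image * 255.0                       # set_raw_scale('data', 255)
    data = data[:, :, ::-1]                    # set_channel_swap: RGB -> BGR
    data = data - np.array([104, 117, 123])    # set_mean: per-channel mean
    data = data.transpose((2, 0, 1))           # set_transpose: channels first
    return data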
@@ -54,8 +63,8 @@ class NSFWDetector:
 
         input_name = self.nsfw_net.inputs[0]
         output_layers = ["prob"]
-        all_outputs = self.nsfw_net.forward_all(blobs=output_layers,
-            **{input_name: transformed_image})
+        all_outputs = self.nsfw_net.forward_all(
+            blobs=output_layers, **{input_name: transformed_image})
 
         outputs = all_outputs[output_layers[0]][0].astype(float)
 
@@ -63,7 +72,10 @@
 
     def detect(self, fpath):
        try:
-            ff = run(["ffmpegthumbnailer", "-m", "-o-", "-s256", "-t50%", "-a", "-cpng", "-i", fpath], stdout=PIPE, stderr=DEVNULL, check=True)
+            ff = run([
+                "ffmpegthumbnailer", "-m", "-o-", "-s256", "-t50%", "-a",
+                "-cpng", "-i", fpath
+            ], stdout=PIPE, stderr=DEVNULL, check=True)
             image_data = ff.stdout
         except:
             return -1.0
@@ -72,6 +84,7 @@
 
         return scores[1]
 
+
 if __name__ == "__main__":
     n = NSFWDetector()
 
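For reference, a hypothetical use of the class touched by this diff (the file path is made up; per the hunks above, detect() returns -1.0 when thumbnailing fails and otherwise the second entry of the "prob" output):

n = NSFWDetector()
score = n.detect("/tmp/example.png")   # hypothetical path
if score < 0:
    print("thumbnailing failed")       # detect() returned -1.0
else:
    print("NSFW score:", score)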