import tensorflow as tf

# A FIFO queue of float32 values; the capacity argument (1000 here) was missing in the original.
q = tf.FIFOQueue(1000, "float32")
counter = tf.Variable(0.0)
add_op = tf.assign_add(counter, tf.constant(1.0))
enqueueData_op = q.enqueue(counter)

sess = tf.Session()
qr = tf.train.QueueRunner(q, enqueue_ops=[add_op, enqueueData_op] * 1)
sess.run(tf.initialize_all_variables())

# Threads could also be created without a Coordinator (qr.create_threads(sess, start=True)),
# but then there is no clean way to stop them, so the coordinated form is used.
coord = tf.train.Coordinator()
enqueue_threads = qr.create_threads(sess, coord=coord, start=True)

for i in range(10):
    print(sess.run(q.dequeue()))

coord.request_stop()
coord.join(enqueue_threads)
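The Coordinator is not limited to QueueRunner threads; it can also coordinate ordinary Python worker threads. Below is a minimal sketch of that pattern (not part of the original post; the worker function and its stop condition are illustrative assumptions):

import threading
import tensorflow as tf

def worker(coord, worker_id):
    # Keep working until some thread calls coord.request_stop().
    while not coord.should_stop():
        print("worker", worker_id, "running")
        if worker_id == 0:
            coord.request_stop()   # ask every coordinated thread to stop

coord = tf.train.Coordinator()
threads = [threading.Thread(target=worker, args=(coord, i)) for i in range(3)]
for t in threads:
    t.start()
coord.join(threads)   # block until all worker threads have finished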
import os

path = 'F:\\lj\\aa\\VOCdevkit\\VOC2012\\JPEGImages\\'
filenames = os.listdir(path)

# Write "full_path,label" lines; the label is taken as the part of the file
# name before the first underscore (e.g. "2007" for 2007_000250.jpg).
with open("E:\\train_list.csv", "w") as fid:
    for name in filenames:
        fid.write(path + name + "," + name.split('_')[0] + "\n")
import tensorflow as tf

image_add_list = []
image_label_list = []
with open("E:\\train_list.csv") as fid:
    for line in fid.readlines():
        path, label = line.strip().split(",")
        image_add_list.append(path)
        image_label_list.append(label)

# Decode one JPEG as a single-channel image and convert it to float32 in [0, 1].
img = tf.image.convert_image_dtype(
    tf.image.decode_jpeg(tf.read_file('F:\\lj\\aa\\VOCdevkit\\VOC2012\\JPEGImages\\2007_000250.jpg'),
                         channels=1), dtype=tf.float32)
print(img)   # prints the tensor, not the pixel values; sess.run() is needed for those
import cv2
import tensorflow as tf

image_add_list = []
image_label_list = []
with open("E:\\train_list.csv") as fid:
    for line in fid.readlines():
        path, label = line.strip().split(",")
        image_add_list.append(path)
        image_label_list.append(label)

# Helper: decode a JPEG as a single-channel uint8 tensor.
def get_image(image_path):
    return tf.image.convert_image_dtype(
        tf.image.decode_jpeg(tf.read_file(image_path), channels=1), dtype=tf.uint8)

img = tf.image.convert_image_dtype(
    tf.image.decode_jpeg(tf.read_file('F:\\lj\\aa\\VOCdevkit\\VOC2012\\JPEGImages\\2007_000250.jpg'),
                         channels=1), dtype=tf.float32)
with tf.Session() as sess:
    cv2Img = sess.run(img)               # float32 array in [0, 1]
    img2 = cv2.resize(cv2Img, (200, 200))
    cv2.imshow('image', img2)
    cv2.waitKey(0)
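The get_image helper above is defined but never called. A minimal usage sketch (an illustration, assuming image_add_list is non-empty and its first entry points at a readable JPEG):

# Decode the first listed image with the helper and fetch it as a numpy array.
first_tensor = get_image(image_add_list[0])
with tf.Session() as sess:
    first_img = sess.run(first_tensor)   # uint8 array of shape (H, W, 1)
    print(first_img.shape, first_img.dtype)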
import numpy as np
import tensorflow as tf

a_data = 0.834
b_data = [17]
c_data = np.array([[0, 1, 2], [3, 4, 5]])
c = c_data.astype(np.uint8)
c_raw = c.tostring()   # serialize the array to a byte string

example = tf.train.Example(
    features=tf.train.Features(
        feature={
            'a': tf.train.Feature(float_list=tf.train.FloatList(value=[a_data])),
            'b': tf.train.Feature(int64_list=tf.train.Int64List(value=b_data)),
            'c': tf.train.Feature(bytes_list=tf.train.BytesList(value=[c_raw]))
        }
    )
)
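To check that the Example really holds these values, it can be serialized and parsed back through the protobuf API. A small round-trip sketch (not in the original post; it reuses the np and tf imports above):

# Serialize the Example, parse it back, and read each feature list.
serialized = example.SerializeToString()
parsed = tf.train.Example.FromString(serialized)
print(parsed.features.feature['a'].float_list.value[0])   # ~0.834
print(list(parsed.features.feature['b'].int64_list.value))  # [17]
c_back = np.frombuffer(parsed.features.feature['c'].bytes_list.value[0],
                       dtype=np.uint8).reshape(2, 3)
print(c_back)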
import numpy as np
import tensorflow as tf

# Write 100 random 1x3 float64 arrays into a TFRecord file.
writer = tf.python_io.TFRecordWriter("E:\\trainArray.tfrecords")
for _ in range(100):
    randomArray = np.random.random((1, 3))
    array_raw = randomArray.tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[0])),
        'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[array_raw]))
    }))
    writer.write(example.SerializeToString())
writer.close()
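To verify what was written, the file can be read back record by record without building a graph. A minimal sketch using tf.python_io.tf_record_iterator (an addition for illustration; it assumes the file path above and reuses the np and tf imports):

# Read the TFRecord file eagerly and decode the first record.
for record in tf.python_io.tf_record_iterator("E:\\trainArray.tfrecords"):
    parsed = tf.train.Example.FromString(record)
    raw = parsed.features.feature['img_raw'].bytes_list.value[0]
    arr = np.frombuffer(raw, dtype=np.float64).reshape(1, 3)  # matches np.random.random((1, 3))
    print(parsed.features.feature['label'].int64_list.value[0], arr)
    break   # only inspect the first record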
import os
import tensorflow as tf
from PIL import Image

path = "E:\\tupian"
filenames = os.listdir(path)

# One sub-directory per class; the class id is the part of the directory
# name before the first underscore.
writer = tf.python_io.TFRecordWriter("E:\\train.tfrecords")
for name in filenames:
    class_path = path + os.sep + name
    for img_name in os.listdir(class_path):
        img_path = class_path + os.sep + img_name
        img = Image.open(img_path)
        img = img.resize((500, 500))   # the reader must reshape to this size
        img_raw = img.tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(name.split("_")[0])])),
            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))
        writer.write(example.SerializeToString())
writer.close()   # flush and close the file (missing in the original)
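Because the images are stored as raw bytes with no JPEG header, the byte strings can only be interpreted correctly if the shape and dtype used at write time are known. A small round-trip sketch (an illustration, assuming the images were RGB and resized to 500x500 as above):

import numpy as np
# Decode the raw bytes of the first record back into an image array.
for record in tf.python_io.tf_record_iterator("E:\\train.tfrecords"):
    parsed = tf.train.Example.FromString(record)
    raw = parsed.features.feature['image'].bytes_list.value[0]
    img_arr = np.frombuffer(raw, dtype=np.uint8).reshape(500, 500, 3)
    print(parsed.features.feature['label'].int64_list.value[0], img_arr.shape)
    break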
import cv2
import tensorflow as tf

filename = "E:\\train.tfrecords"
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)   # returns the record key and the serialized example
features = tf.parse_single_example(serialized_example,
                                   features={
                                       'label': tf.FixedLenFeature([], tf.int64),
                                       'image': tf.FixedLenFeature([], tf.string),
                                   })

img = tf.decode_raw(features['image'], tf.uint8)
# Reshape to the size used when writing (500x500 RGB); the original used
# [300, 300, 3], which does not match the writer above.
img = tf.reshape(img, [500, 500, 3])
img = tf.cast(img, tf.float32) * (1. / 128) - 0.5   # rough pixel scaling
label = tf.cast(features['label'], tf.int32)
import cv2
import tensorflow as tf

filename = "E:\\train.tfrecords"
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)   # returns the record key and the serialized example
features = tf.parse_single_example(serialized_example,
                                   features={
                                       'label': tf.FixedLenFeature([], tf.int64),
                                       'image': tf.FixedLenFeature([], tf.string),
                                   })

img = tf.decode_raw(features['image'], tf.uint8)
img = tf.reshape(img, [500, 500, 3])   # must match the size used when writing
img = tf.cast(img, tf.float32) * (1. / 128) - 0.5
label = tf.cast(features['label'], tf.int32)

sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
# Start the threads that feed filename_queue; without this, sess.run(img) would hang.
threads = tf.train.start_queue_runners(sess=sess)

print(img)   # prints the tensor, not the pixel values
# imgcv2 = sess.run(img)
# cv2.imshow("cool", imgcv2)
# cv2.waitKey(0)
import cv2
import tensorflow as tf

filename = "E:\\train.tfrecords"

def read_and_decode(filename):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)   # returns the record key and the serialized example
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'image': tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['image'], tf.uint8)
    img = tf.reshape(img, [500, 500, 3])   # must match the size used when writing
    img = tf.cast(img, tf.float32) * (1. / 128) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label

img, label = read_and_decode(filename)
img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                batch_size=1, capacity=10,
                                                min_after_dequeue=1)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
threads = tf.train.start_queue_runners(sess=sess)

for _ in range(10):
    # Fetch the image and its label in one run call so they stay paired;
    # running them separately would dequeue two different batches.
    val, lab = sess.run([img_batch, label_batch])
    frame = val[0]                 # drop the batch dimension: (500, 500, 3)
    cv2.imshow("cool", frame)
    cv2.waitKey()
    print(lab)
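The loop above starts the queue-runner threads but never stops them, so the process only exits when the interpreter is killed. A minimal coordinated variant of the reading loop (a sketch, not part of the original post; it assumes the same sess, img_batch and label_batch as above and replaces the plain start_queue_runners call):

# Coordinated shutdown: the Coordinator lets the queue-runner threads be
# stopped and joined cleanly once the loop is done.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    for _ in range(10):
        val, lab = sess.run([img_batch, label_batch])
        print(lab)
finally:
    coord.request_stop()
    coord.join(threads)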