marion 4 years ago
commit
fa4b0f20af

+ 7 - 0
.gitignore

@@ -0,0 +1,7 @@
+*.pyc
+*.gz
+.idea
+bin
+output
+vendor
+.DS_Store

+ 0 - 0
test/__init__.py


+ 8 - 0
test/json_test.py

@@ -0,0 +1,8 @@
+import json
+a = [
+    dict(username="dev_1009048", password="e60be16cbfeebdb4d4c7dd7c0a6f2f67", nickname="", avatar=""),
+    dict(username="dev_1009049", password="e60be16cbfeebdb4d4c7dd7c0a6f2f67", nickname="", avatar=""),
+    dict(username="dev_1009050", password="e60be16cbfeebdb4d4c7dd7c0a6f2f67", nickname="", avatar="")
+]
+b = json.dumps(a)
+print(b)

+ 18 - 0
test/jwt.py

@@ -0,0 +1,18 @@
+from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
+
+
+def verify_auth_token(token):
+    s = Serializer('hard to guess string')
+    try:
+        data = s.loads(token)
+    except Exception:  # invalid or expired token
+        return None
+    return data
+
+
+def test():
+    print(verify_auth_token(
+        'eyJhbGciOiJIUzI1NiIsImlhdCI6MTUzNzg0Njc4MywiZXhwIjoxNTM5MDU2MzgzfQ.eyJpZCI6MzI0NTEzNH0.RqRoad8H5oAGY6L3qMLcxCUYE5Fl7-MXtpSyfSU4aqU'))
+
+
+test()
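
For reference, a token in the shape consumed above can be produced with the same serializer; a minimal sketch, assuming the legacy itsdangerous 1.x API (TimedJSONWebSignatureSerializer was removed in itsdangerous 2.x):

    from itsdangerous import TimedJSONWebSignatureSerializer as Serializer

    s = Serializer('hard to guess string', expires_in=3600)
    token = s.dumps({'id': 3245134})  # same payload the test token above decodes to
    print(token.decode('ascii'))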

+ 52 - 0
test/md5_test.py

@@ -0,0 +1,52 @@
+import hashlib
+import urllib.parse
+import datetime
+import json
+import requests
+
+def generate_mengwang_pwd(userid, pwd, timestamp):
+    """
+    added by zhouhl 2017-07-25
+    Encrypt the Mengwang user password
+    """
+    constant_string = '00000000'
+    tmp_string = ''.join([userid.upper(), constant_string, pwd, timestamp])
+    print(tmp_string)
+
+    md5 = hashlib.md5()
+    md5.update(tmp_string.encode())
+    return md5.hexdigest()
+
+def md5test():
+    timestamp = datetime.datetime.strftime(
+        datetime.datetime.now(), '%m%d%H%M%S')
+    ciphertext_pwd = generate_mengwang_pwd('JS5112', '858965', timestamp)
+    # Mengwang requires the content to be GBK-encoded first, then urlencoded
+    multimt = []
+    tmp_i = {}
+    tmp_i['mobile'] = '18665318962'
+    content = '今天天气转凉,大家请注意防寒保暖。'.encode('GBK')  # "The weather is turning cold today, please keep warm."
+    tmp_content = urllib.parse.urlencode({'content': content})
+    tmp_i['content'] = tmp_content.split('=')[-1]
+    multimt.append(tmp_i)
+
+
+    data = {
+        'userid': 'JS5112',
+        'pwd': ciphertext_pwd,
+        'multimt': multimt,
+        'timestamp': timestamp,
+    }
+    jsonStr = json.dumps(data)
+    print(jsonStr)
+    try:
+        response = requests.post(
+            'http://61.145.229.28:7902/sms/v2/std/multi_send',
+            headers={'Content-Type': 'application/json'},
+            data=jsonStr,
+        )
+        print(response.content.decode('utf-8'))
+    except Exception as e:
+        print(e)
+
+md5test()
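
A quick way to sanity-check the GBK-then-urlencode step in isolation is to reverse it with only the standard library (a sketch):

    import urllib.parse

    encoded = urllib.parse.urlencode({'content': '今天天气转凉'.encode('GBK')}).split('=')[-1]
    decoded = urllib.parse.unquote_to_bytes(encoded).decode('GBK')
    print(decoded)  # prints the original sentence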

+ 0 - 0
test/pyTorch/__init__.py


+ 31 - 0
test/pyTorch/torch_auto_grad.py

@@ -0,0 +1,31 @@
+import torch
+from torch.autograd import Variable
+
+# Variables (the Variable wrapper is deprecated since PyTorch 0.4; tensors take requires_grad directly)
+
+x = Variable(torch.ones(2, 2), requires_grad=True)
+print(x)
+
+y = x + 2
+print(y)
+
+z = y * y * 3
+out = z.mean()
+print(z)
+print(out)
+
+# Gradients
+
+out.backward()
+print(x.grad)
+
+a = torch.randn(3)
+a = Variable(a, requires_grad=True)
+b = a * 2
+while b.data.norm() < 1000:
+    b = b * 2
+print(b)
+
+gradients = torch.FloatTensor([0.1, 1.0, 0.0001])
+b.backward(gradients)
+print(a.grad)
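
The same first example in post-0.4 PyTorch style, where autograd works on plain tensors; a minimal sketch:

    import torch

    x = torch.ones(2, 2, requires_grad=True)
    out = (3 * (x + 2) ** 2).mean()  # z = 3(x+2)^2, averaged over the 4 elements
    out.backward()
    print(x.grad)  # every element is 1.5 * (x + 2) = 4.5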

+ 48 - 0
test/pyTorch/torch_get_start.py

@@ -0,0 +1,48 @@
+from __future__ import print_function
+import torch
+import numpy as np
+
+# PyTorch tensors
+
+x = torch.Tensor(5, 3)  # allocated but uninitialized; contents are arbitrary
+print('matrix x:\n', x)
+print('matrix x size:\n', x.size())
+
+y = torch.rand(5, 3)
+print('matrix x + y:\n', x + y)
+
+print('matrix x + y again:\n', torch.add(x, y))
+
+result = torch.Tensor(5, 3)
+torch.add(x, y, out=result)
+print('matrix x + y result:\n', result)
+
+y.add_(x)
+print('matrix x add to y:\n', y)
+
+print('col 2 of matrix x:\n', x[:, 1])
+
+# convert to numpy
+
+a = torch.ones(5)
+print('torch array full with number 1:\n', a)
+
+b = a.numpy()
+print('numpy array:\n', b)
+
+a.add_(1)
+print('torch array after change:\n', a)
+print('numpy array after change:\n', b)
+
+na = np.ones(5)
+nb = torch.from_numpy(na)
+np.add(na, 1, out=na)
+print('another numpy array after change:\n', na)
+print('another torch array after change:\n', nb)
+
+# CUDA
+
+if torch.cuda.is_available():
+    x = x.cuda()
+    y = y.cuda()
+    print('result from GPU:\n', x + y)
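
A device-agnostic variant of the CUDA block, in the style later PyTorch docs use (sketch):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    y = y.to(device)
    print('result from', device, ':\n', x + y)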

+ 21 - 0
test/random_test.py

@@ -0,0 +1,21 @@
+import random
+
+
+def random_amount(count, total, min_unit=1):
+    """Split `total` into `count` random parts, each a multiple of `min_unit`;
+    the last part absorbs the rounding remainder so the sum stays exact."""
+    amount = total / min_unit  # total expressed in units of min_unit
+    # draw a random weight for every part, then normalise the weights
+    weights = [random.randint(11, 99) / 100 for _ in range(count)]
+    ratio = 1.0 / sum(weights)
+    result = [0] * count
+    b = total
+    for j in range(count - 1):
+        result[j] = round(weights[j] * ratio * amount) * min_unit
+        b -= result[j]
+    result[count - 1] = b
+    return result
+
+
+def test():
+    print(random_amount(3, 6))
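
A quick property check for the splitter (a sketch; the 5 and 100 are arbitrary example values): the parts always sum back to the total.

    parts = random_amount(5, 100)
    assert sum(parts) == 100
    print(parts)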

+ 49 - 0
test/snowflake.py

@@ -0,0 +1,49 @@
+import datetime
+import time
+
+# twitter's snowflake parameters
+twepoch = 1288834974657
+datacenter_id_bits = 5
+worker_id_bits = 5
+sequence_id_bits = 12
+max_datacenter_id = 1 << datacenter_id_bits
+max_worker_id = 1 << worker_id_bits
+max_sequence_id = 1 << sequence_id_bits
+max_timestamp = 1 << (64 - datacenter_id_bits - worker_id_bits - sequence_id_bits)
+
+
+def make_snowflake(timestamp_ms, datacenter_id, worker_id, sequence_id, twepoch=twepoch):
+    """generate a twitter-snowflake id, based on
+    https://github.com/twitter/snowflake/blob/master/src/main/scala/com/twitter/service/snowflake/IdWorker.scala
+    :param: timestamp_ms time since UNIX epoch in milliseconds"""
+
+    sid = ((int(timestamp_ms) - twepoch) % max_timestamp) << datacenter_id_bits << worker_id_bits << sequence_id_bits
+    sid += (datacenter_id % max_datacenter_id) << worker_id_bits << sequence_id_bits
+    sid += (worker_id % max_worker_id) << sequence_id_bits
+    sid += sequence_id % max_sequence_id
+
+    return sid
+
+
+def melt(snowflake_id, twepoch=twepoch):
+    """inversely transform a snowflake id back to its parts."""
+    sequence_id = snowflake_id & (max_sequence_id - 1)
+    worker_id = (snowflake_id >> sequence_id_bits) & (max_worker_id - 1)
+    datacenter_id = (snowflake_id >> sequence_id_bits >> worker_id_bits) & (max_datacenter_id - 1)
+    timestamp_ms = snowflake_id >> sequence_id_bits >> worker_id_bits >> datacenter_id_bits
+    timestamp_ms += twepoch
+
+    return timestamp_ms, int(datacenter_id), int(worker_id), int(sequence_id)
+
+
+def local_datetime(timestamp_ms):
+    """convert millisecond timestamp to local datetime object."""
+    return datetime.datetime.fromtimestamp(timestamp_ms / 1000.)
+
+def test():
+    # print(local_datetime(t0))
+    for i in range(0, 100):
+        t0 = int(time.time() * 1000)
+        print(make_snowflake(t0, 0, 0, 0))  # note: a fixed sequence_id repeats ids within the same millisecond
+
+test()
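
A round-trip sanity check for the helpers above (a sketch; the 3, 7, 1 are arbitrary example values):

    now_ms = int(time.time() * 1000)
    sid = make_snowflake(now_ms, 3, 7, 1)
    assert melt(sid) == (now_ms, 3, 7, 1)
    print(local_datetime(melt(sid)[0]))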

+ 36 - 0
test/test01.py

@@ -0,0 +1,36 @@
+import tensorflow as tf  # TensorFlow 1.x graph-mode API
+import numpy as np
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+
+def foo01():
+    x = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])
+    w = tf.Variable(initial_value=x)
+    sess = tf.Session()
+    sess.run(tf.global_variables_initializer())
+    print(sess.run(w))
+
+
+def foo02():
+    x = tf.Variable(3)
+    y = tf.Variable(5)
+    z = tf.add(x, y)
+    sess = tf.Session()
+    sess.run(tf.global_variables_initializer())
+    print(sess.run(z))
+
+
+def foo03():
+    a = tf.Variable(tf.ones([3, 2]))
+    b = tf.Variable(tf.ones([2, 3]))
+    product = tf.matmul(5.0 * a, 4.0 * b)
+    sess = tf.Session()
+    sess.run(tf.global_variables_initializer())
+    print(sess.run(product))
+
+
+print(tf.__path__)
+foo01()
+foo02()
+foo03()
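
These snippets use the TensorFlow 1.x graph API (tf.Session, tf.global_variables_initializer). Under TensorFlow 2.x they need the v1 compatibility layer; a sketch:

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()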

+ 18 - 0
test/test02.py

@@ -0,0 +1,18 @@
+import tensorflow.examples.tutorials.mnist.input_data as ipt
+import tensorflow.contrib.learn.python.learn.datasets.base as base
+import tensorflow.models.image.cifar10.cifar10 as cifar10  # only resolvable on old TF releases that shipped the models/tutorials code
+iris_data, iris_label = base.load_iris()
+house_data, house_label = base.load_boston()
+cifar10.maybe_download_and_extract()
+images, labels = cifar10.distorted_inputs()
+
+mnist = ipt.read_data_sets("MNIST_data/", one_hot=True)
+
+print(mnist.train.images.shape)
+print(mnist.train.labels.shape)
+print(mnist.validation.images.shape)
+print(mnist.validation.labels.shape)
+print(mnist.test.images.shape)
+print(mnist.test.labels.shape)
+print(images)
+print(labels)
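
With the standard MNIST download, the shape prints above show (55000, 784), (55000, 10), (5000, 784), (5000, 10), (10000, 784) and (10000, 10): a 55000/5000/10000 train/validation/test split of 28x28 images flattened to 784, with one-hot labels.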

+ 26 - 0
test/test03.py

@@ -0,0 +1,26 @@
+import tensorflow as tf
+import tensorflow.examples.tutorials.mnist.input_data as input_data
+import os
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
+x = tf.placeholder(tf.float32, [None, 784])
+y_actual = tf.placeholder(tf.float32, shape=[None, 10])
+W = tf.Variable(tf.zeros([784, 10]))  # initialise the weights W
+b = tf.Variable(tf.zeros([10]))  # initialise the bias b
+y_predict = tf.nn.softmax(tf.matmul(x, W) + b)  # affine transform + softmax gives the predicted probabilities
+cross_entropy = -tf.reduce_sum(y_actual * tf.log(y_predict))  # total cross-entropy over the batch
+train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)  # gradient descent on the loss
+
+correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))  # test-phase accuracy check
+accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over a batch
+
+init = tf.global_variables_initializer()
+with tf.Session() as sess:
+    sess.run(init)
+    for i in range(1000):  # training phase: 1000 iterations
+        batch_xs, batch_ys = mnist.train.next_batch(100)  # train in batches of 100 examples
+        sess.run(train_step, feed_dict={x: batch_xs, y_actual: batch_ys})  # run one training step
+        if i % 100 == 0:  # evaluate every 100 steps
+            print("accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images, y_actual: mnist.test.labels}))

+ 70 - 0
test/test04.py

@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+import tensorflow as tf
+import tensorflow.examples.tutorials.mnist.input_data as input_data
+
+mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # download and load the MNIST data
+x = tf.placeholder(tf.float32, [None, 784])  # placeholder for the input data
+y_actual = tf.placeholder(tf.float32, shape=[None, 10])  # placeholder for the input labels
+
+
+# helper to initialise the weights W
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+
+# helper to initialise the biases b
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+
+# helper to build a convolution layer
+def conv2d(x, W):
+    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
+
+
+# helper to build a pooling layer
+def max_pool(x):
+    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
+
+
+# build the network
+x_image = tf.reshape(x, [-1, 28, 28, 1])  # reshape the flat input to NHWC for the conv layers
+W_conv1 = weight_variable([5, 5, 1, 32])
+b_conv1 = bias_variable([32])
+h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # first convolution layer
+h_pool1 = max_pool(h_conv1)  # first pooling layer
+
+W_conv2 = weight_variable([5, 5, 32, 64])
+b_conv2 = bias_variable([64])
+h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # second convolution layer
+h_pool2 = max_pool(h_conv2)  # second pooling layer
+
+W_fc1 = weight_variable([7 * 7 * 64, 1024])
+b_fc1 = bias_variable([1024])
+h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])  # flatten to a vector
+h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)  # first fully connected layer
+
+keep_prob = tf.placeholder("float")
+h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)  # dropout layer
+
+W_fc2 = weight_variable([1024, 10])
+b_fc2 = bias_variable([10])
+y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)  # softmax layer
+
+cross_entropy = -tf.reduce_sum(y_actual * tf.log(y_predict))  # cross-entropy loss
+train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)  # gradient descent
+correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))
+accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # accuracy computation
+sess = tf.InteractiveSession()
+sess.run(tf.global_variables_initializer())
+for i in range(20000):
+    batch = mnist.train.next_batch(50)
+    if i % 100 == 0:  # evaluate every 100 training steps
+        train_acc = accuracy.eval(feed_dict={x: batch[0], y_actual: batch[1], keep_prob: 1.0})
+        print('step', i, 'training accuracy', train_acc)
+    train_step.run(feed_dict={x: batch[0], y_actual: batch[1], keep_prob: 0.5})  # train on every iteration, not just every 100th
+
+test_acc = accuracy.eval(feed_dict={x: mnist.test.images, y_actual: mnist.test.labels, keep_prob: 1.0})
+print("test accuracy", test_acc)