# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os.path


def load_nist_vectors(vector_data, op):
    section, count, data = None, None, {}

    for line in vector_data:
        line = line.strip()

        # Blank lines are ignored
        if not line:
            continue

        # Lines starting with # are comments
        if line.startswith("#"):
            continue

        # Look for section headers
        if line.startswith("[") and line.endswith("]"):
            section = line[1:-1]
            data[section] = {}
            continue

        # Build our data using a simple Key = Value format
        name, value = line.split(" = ")

        # COUNT is a special token that indicates a new block of data
        if name.upper() == "COUNT":
            count = value
            data[section][count] = {}
        # For all other tokens we store the name/value pair in the
        # dictionary for the current COUNT block
        else:
            data[section][count][name.lower()] = value.encode("ascii")

    # We only want the blocks for a particular operation; they are sorted by
    # their COUNT string for the benefit of the tests of this function.
    return [v for k, v in sorted(data[op].items(), key=lambda kv: kv[0])]
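

# Illustrative sketch: this shows the Key = Value format load_nist_vectors
# expects and the structure it returns. The function name and the sample
# data below are placeholders for demonstration only.
def _example_nist_vectors():
    sample = [
        "# CAVS-style comment, ignored by the parser",
        "[ENCRYPT]",
        "COUNT = 0",
        "KEY = 00000000000000000000000000000000",
        "PLAINTEXT = 00000000000000000000000000000000",
        "CIPHERTEXT = 66e94bd4ef8a2c3b884cfa59ca342b2e",
    ]
    # Returns one dict per COUNT block with lower-cased keys and ASCII bytes
    # values, e.g. [{"key": b"00...", "plaintext": b"00...",
    # "ciphertext": b"66e9..."}]
    return load_nist_vectors(sample, "ENCRYPT")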


def load_nist_vectors_from_file(filename, op):
    base = os.path.join(
        os.path.dirname(__file__), "primitives", "vectors", "NIST",
    )
    with open(os.path.join(base, filename), "r") as vector_file:
        return load_nist_vectors(vector_file, op)
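

# Usage sketch: load_nist_vectors_from_file resolves its argument relative to
# tests/primitives/vectors/NIST/, so a call like the one below parses the
# ENCRYPT blocks of that file. The file name is hypothetical; the call is
# wrapped in a function so nothing runs at import time.
def _example_nist_vectors_from_file():
    # "AES/KAT/ECBGFSbox128.rsp" is a placeholder; substitute any CAVS-format
    # file that actually exists under the NIST vector directory.
    return load_nist_vectors_from_file("AES/KAT/ECBGFSbox128.rsp", "ENCRYPT")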


def load_cryptrec_vectors_from_file(filename):
    base = os.path.join(
        os.path.dirname(__file__), "primitives", "vectors", "CRYPTREC",
    )
    with open(os.path.join(base, filename), "r") as vector_file:
        return load_cryptrec_vectors(vector_file)


def load_cryptrec_vectors(vector_data):
    cryptrec_list = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        if line.startswith("K"):
            key = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("P"):
            pt = line.split(" : ")[1].replace(" ", "").encode("ascii")
        elif line.startswith("C"):
            ct = line.split(" : ")[1].replace(" ", "").encode("ascii")
            # after a C is found the K+P+C tuple is complete
            # there are many P+C pairs for each K
            cryptrec_list.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": ct
            })
    return cryptrec_list
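

# Illustrative sketch: CRYPTREC vector files use "K : ...", "P : ..." and
# "C : ..." lines, with many P/C pairs per key. The function name and the hex
# values below are placeholders for demonstration only.
def _example_cryptrec_vectors():
    sample = [
        "K : 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF",
        "P : 01 23 45 67 89 AB CD EF FE DC BA 98 76 54 32 10",
        "C : 8B 1A 5E 3F 0C 2D 4B 6A 79 88 97 A6 B5 C4 D3 E2",
    ]
    # Returns one dict per P/C pair with the spaces stripped, e.g.
    # [{"key": b"001122...", "plaintext": b"012345...",
    #   "ciphertext": b"8B1A5E..."}]
    return load_cryptrec_vectors(sample)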


def load_openssl_vectors_from_file(filename):
    base = os.path.join(
        os.path.dirname(__file__), "primitives", "vectors", "OpenSSL",
    )
    with open(os.path.join(base, filename), "r") as vector_file:
        return load_openssl_vectors(vector_file)


def load_openssl_vectors(vector_data):
    vectors = []

    for line in vector_data:
        line = line.strip()

        # Blank lines and comments are ignored
        if not line or line.startswith("#"):
            continue

        vector = line.split(":")
        vectors.append({
            "key": vector[1].encode("ascii"),
            "iv": vector[2].encode("ascii"),
            "plaintext": vector[3].encode("ascii"),
            "ciphertext": vector[4].encode("ascii"),
        })
    return vectors
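

# Illustrative sketch: the OpenSSL-style vector lines are colon-separated as
# cipher:key:iv:plaintext:ciphertext. The function name and the hex values
# below are placeholders for demonstration only.
def _example_openssl_vectors():
    sample = [
        "# cipher:key:iv:plaintext:ciphertext",
        "AES-128-CBC"
        ":000102030405060708090A0B0C0D0E0F"
        ":101112131415161718191A1B1C1D1E1F"
        ":00112233445566778899AABBCCDDEEFF"
        ":FFEEDDCCBBAA99887766554433221100",
    ]
    # Returns [{"key": ..., "iv": ..., "plaintext": ..., "ciphertext": ...}]
    # as ASCII byte strings; the cipher name in field 0 is ignored.
    return load_openssl_vectors(sample)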


def load_hash_vectors(vector_data):
    vectors = []

    for line in vector_data:
        line = line.strip()

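        # Blank lines, comments, and section headers are ignored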
        if not line or line.startswith("#") or line.startswith("["):
            continue

        if line.startswith("Len"):
            length = int(line.split(" = ")[1])
        elif line.startswith("Msg"):
            """
            In the NIST vectors they have chosen to represent an empty
            string as hex 00, which is of course not actually an empty
            string. So we parse the provided length and catch this edge case.
            """
            msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif line.startswith("MD"):
            md = line.split(" = ")[1]
            # after MD is found the Msg+MD tuple is complete
            vectors.append((msg, md))
        else:
            raise ValueError("Unknown line in hash vector")
    return vectors
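

# Illustrative sketch: the hash vector files carry Len/Msg/MD triples, where
# Len is the message length in bits and a Len of 0 means the "00" Msg is
# really the empty string. The function name is a placeholder; the values
# below are the well-known SHA-1 digests of "" and "abc".
def _example_hash_vectors():
    sample = [
        "[L = 20]",
        "Len = 0",
        "Msg = 00",
        "MD = da39a3ee5e6b4b0d3255bfef95601890afd80709",
        "Len = 24",
        "Msg = 616263",
        "MD = a9993e364706816aba3e25717850c26c9cd0d89d",
    ]
    # Returns [(b"", "da39..."), (b"616263", "a999...")]: the message as ASCII
    # hex bytes (empty when Len is 0) and the digest as a string.
    return load_hash_vectors(sample)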


def load_hash_vectors_from_file(filename):
    base = os.path.join(
        os.path.dirname(__file__), "primitives", "vectors"
    )
    with open(os.path.join(base, filename), "r") as vector_file:
        return load_hash_vectors(vector_file)