#ifndef VARIANT4_RANDOM_MATH_H
#define VARIANT4_RANDOM_MATH_H

// ... (includes, the v4_reg typedef, the instruction set enums and struct V4_Instruction
// are elided from this excerpt; see the reference sketch below)

#if defined(__GNUC__)
#define FORCEINLINE __attribute__((always_inline)) inline
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#else
#define FORCEINLINE inline
#endif

#ifndef UNREACHABLE_CODE
#if defined(__GNUC__)
#define UNREACHABLE_CODE __builtin_unreachable()
#elif defined(_MSC_VER)
#define UNREACHABLE_CODE __assume(false)
#else
#define UNREACHABLE_CODE
#endif
#endif

// Register width in bits, used by the rotation cases in V4_EXEC below
enum
{
    REG_BITS = sizeof(v4_reg) * 8,
};
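// For reference, a sketch of the declarations the fragments below rely on. They are defined
// earlier in the full header (and in the crypto headers it includes) but are elided from this
// excerpt; the names and field order follow the upstream variant4 header and should be read
// as assumptions here rather than as part of the excerpt:
//
//     typedef uint32_t v4_reg;                 // register type; a 64-bit build would use uint64_t
//
//     enum V4_InstructionList
//     {
//         MUL,    // a * b
//         ADD,    // a + b + C, where C is an unsigned 32-bit constant
//         SUB,    // a - b
//         ROR,    // rotate "a" right by "b" bits
//         ROL,    // rotate "a" left by "b" bits
//         XOR,    // a ^ b
//         RET,    // stop execution
//         V4_INSTRUCTION_COUNT = RET,
//     };
//
//     struct V4_Instruction
//     {
//         uint8_t opcode;
//         uint8_t dst_index;
//         uint8_t src_index;
//         uint32_t C;
//     };
//
//     void hash_extra_blake(const void* data, size_t length, char* hash);  // used by check_data() below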
// Interpreter core: each V4_EXEC(i) expansion decodes and executes instruction i of the
// generated program "code" against the register file "r"
#define V4_EXEC(i) \
    { \
        const struct V4_Instruction* op = code + i; \
        const v4_reg src = r[op->src_index]; \
        v4_reg* dst = r + op->dst_index; \
        switch (op->opcode) \
        { \
        case ADD: \
            *dst += src + op->C; \
            break; \
        case ROR: \
            { \
                const uint32_t shift = src % REG_BITS; \
                *dst = (*dst >> shift) | (*dst << ((REG_BITS - shift) % REG_BITS)); \
            } \
            break; \
        case ROL: \
            { \
                const uint32_t shift = src % REG_BITS; \
                *dst = (*dst << shift) | (*dst >> ((REG_BITS - shift) % REG_BITS)); \
            } \
            break; \
        /* the MUL, SUB, XOR and RET cases and the UNREACHABLE_CODE default are elided */ \
        } \
    }

// Executes ten consecutive instructions starting at index j; its body is elided from this
// excerpt (see the sketch after check_data below)
#define V4_EXEC_10(j) \
    /* ... */

// If there is not enough random data left in the buffer, generate more
static FORCEINLINE void check_data(size_t* data_index, const size_t bytes_needed, int8_t* data, const size_t data_size)
{
    if (*data_index + bytes_needed > data_size)
    {
        // Refill the buffer by hashing it in place, then start reading again from offset 0
        hash_extra_blake(data, data_size, (char*) data);
        *data_index = 0;
    }
}
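// A sketch of how the unrolled dispatch is assembled from the macros above. The exact
// V4_EXEC_10 body is elided from this excerpt; the version below mirrors the upstream
// variant4 header and should be read as an assumption:
//
//     #define V4_EXEC_10(j) \
//         V4_EXEC(j + 0) V4_EXEC(j + 1) V4_EXEC(j + 2) V4_EXEC(j + 3) V4_EXEC(j + 4) \
//         V4_EXEC(j + 5) V4_EXEC(j + 6) V4_EXEC(j + 7) V4_EXEC(j + 8) V4_EXEC(j + 9)
//
// The interpreter then instantiates V4_EXEC_10 once per block of ten instructions
// (V4_EXEC_10(0); ... V4_EXEC_10(60);), so a program of up to 70 instructions plus the
// final RET runs with no loop branch and therefore no branch mispredictions.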
    // Random data buffer for the code generator (its declaration and the seeding from the
    // block height are elided from this excerpt)
    memset(data, 0, sizeof(data));

    // Start past the end of the buffer so the first check_data() call refills it with a
    // fresh hash before any bytes are consumed
    size_t data_index = sizeof(data);
    // ... (declarations of the latency[], asic_latency[], alu_busy[][], is_rotation[] and
    // rotated[] scratch arrays)

    // Tracks the last operation and source value seen by each register; registers 4..8 are
    // constants and start at 0xFFFFFF so they can never match a freshly generated instruction
    uint32_t inst_data[9] = { 0, 1, 2, 3, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF, 0xFFFFFF };

    int rotate_count = 0;

    memset(latency, 0, sizeof(latency));
    memset(asic_latency, 0, sizeof(asic_latency));
    memset(alu_busy, 0, sizeof(alu_busy));
    memset(is_rotation, 0, sizeof(is_rotation));
    memset(rotated, 0, sizeof(rotated));
    is_rotation[ROR] = true;
    is_rotation[ROL] = true;
    int total_iterations = 0;

    // ... (main generation loop: keep emitting random instructions until all four variable
    // registers reach the target latency or the retry budget runs out; loop header elided)

        // Fail-safe cap so code generation always terminates
        ++total_iterations;
        if (total_iterations > 256)
            break;

        check_data(&data_index, 1, data, sizeof(data));

        // ... (read one byte and split it into opcode, dst_index and src_index bit fields)
        // Map the 3-bit opcode field: raw values 0-2 select MUL, 3 and 4 select ADD and SUB,
        // 5 selects a rotation (direction taken from one more data byte), 6-7 select XOR
        if (opcode == 5)
        {
            check_data(&data_index, 1, data, sizeof(data));
            opcode = (data[data_index++] >= 0) ? ROR : ROL;
        }
        else if (opcode >= 6)
        {
            opcode = XOR;
        }
        else
        {
            opcode = (opcode <= 2) ? MUL : (opcode - 2);
        }
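        // A consequence of the mapping above: of the 8 possible raw opcode values, three map to
        // MUL and two map to XOR, so the generated mix is biased toward multiplications
        // (3/8 MUL, 2/8 XOR, 1/8 ADD, 1/8 SUB, and 1/8 shared between ROR and ROL).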
        // dst_index and src_index come from the remaining bit fields of the same data byte
        const int a = dst_index;
        int b = src_index;
        // Never use the same register for both operands of ADD, SUB or XOR; redirect the
        // source to constant register R8 instead
        if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b))
        {
            b = 8;
            src_index = 8;
        }
        // Two consecutive rotations of the same register collapse into a single rotation, so skip
        if (is_rotation[opcode] && rotated[a])
        {
            continue;
        }
        // Repeating any non-MUL instruction with the same source value can be optimized away
        // (for example, two identical XORs cancel out), so skip it as well
        if ((opcode != MUL) && ((inst_data[a] & 0xFFFF00) == (opcode << 8) + ((inst_data[b] & 255) << 16)))
        {
            continue;
        }
        // The instruction can only start once both of its operands are ready
        int next_latency = (latency[a] > latency[b]) ? latency[a] : latency[b];
        // Scan the ALUs that can execute this opcode for a free issue slot
        for (int i = op_ALUs[opcode] - 1; i >= 0; --i)
        {
            if (!alu_busy[next_latency][i])
            {
                // ADD occupies its ALU for two consecutive cycles on a real CPU
                if ((opcode == ADD) && alu_busy[next_latency + 1][i])
                    continue;

                // A rotation can only start once the previous rotation has finished
                if (is_rotation[opcode] && (next_latency < rotate_count * op_latency[opcode]))
                    continue;

                // ... (claim ALU i at cycle next_latency and stop searching)
            }
        }
        // ... (if no ALU was free, advance next_latency and retry)
        // Reject instructions that would leave the destination register untouched for more
        // than 7 cycles
        if (next_latency > latency[a] + 7)
        {
            continue;
        }

        next_latency += op_latency[opcode];
        // The instruction fits into the latency budget: commit it
        if (is_rotation[opcode])
        {
            ++rotate_count;
        }

        // ALUs are fully pipelined, so an ALU is only marked busy for the issue cycle
        alu_busy[next_latency - op_latency[opcode]][alu_index] = true;
        latency[a] = next_latency;

        // The hypothetical ASIC has unlimited ALUs, so only data dependencies delay it
        asic_latency[a] = ((asic_latency[a] > asic_latency[b]) ? asic_latency[a] : asic_latency[b]) + asic_op_latency[opcode];

        rotated[a] = is_rotation[opcode];

        inst_data[a] = code_size + (opcode << 8) + ((inst_data[b] & 255) << 16);
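        // Worked example of the ASIC latency update above (illustrative numbers, not from the
        // excerpt): with asic_latency[a] = 5, asic_latency[b] = 7 and a MUL whose assumed ASIC
        // latency is 3 cycles, the destination register's new value is max(5, 7) + 3 = 10;
        // only data dependencies delay the hypothetical ASIC, never ALU contention.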
        // Emit the instruction
        code[code_size].opcode = opcode;
        code[code_size].dst_index = dst_index;
        code[code_size].src_index = src_index;
        code[code_size].C = 0;

        if (opcode == ADD)
        {
            // ADD runs as two 1-cycle micro-ops, so its ALU stays busy for one extra cycle
            alu_busy[next_latency - op_latency[opcode] + 1][alu_index] = true;

            // ADD also consumes four more data bytes for its 32-bit constant C
            check_data(&data_index, sizeof(uint32_t), data, sizeof(data));
            // ... (read the constant into code[code_size].C and advance data_index)
        }
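            // A sketch of the elided constant read, assuming a little-endian byte-swap helper
            // named SWAP32LE like the one used by the surrounding codebase (an assumption, not
            // part of this excerpt):
            //
            //     uint32_t t;
            //     memcpy(&t, data + data_index, sizeof(uint32_t));
            //     code[code_size].C = SWAP32LE(t);
            //     data_index += sizeof(uint32_t);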
    // Second pass: pad the program with a few extra instructions so that even an ASIC with
    // unlimited parallelism cannot finish below the target latency; each padding instruction
    // feeds the slowest register into the fastest one
    const int prev_code_size = code_size;

    // ... (padding loop, header elided: runs while code_size < NUM_INSTRUCTIONS_MAX and every
    // asic_latency[] is still below the target)

        // Find the registers with the lowest and highest simulated ASIC latency
        int min_idx = 0;
        int max_idx = 0;
        for (int i = 1; i < 4; ++i)
        {
            if (asic_latency[i] < asic_latency[min_idx]) min_idx = i;
            if (asic_latency[i] > asic_latency[max_idx]) max_idx = i;
        }
        // Cycle through a fixed padding pattern and chain the slowest register into the fastest
        const uint8_t opcode = pattern[(code_size - prev_code_size) % 3];
        latency[min_idx] = latency[max_idx] + op_latency[opcode];
        asic_latency[min_idx] = asic_latency[max_idx] + asic_op_latency[opcode];

        code[code_size].opcode = opcode;
        code[code_size].dst_index = min_idx;
        code[code_size].src_index = max_idx;
        code[code_size].C = 0;
        // ... (advance code_size and continue padding)
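        // The "pattern" table referenced above is elided from this excerpt; in the upstream
        // variant4 header it is a three-entry array chosen so the padding alternates a rotation
        // with two multiplications (treat the exact contents as an assumption here):
        //
        //     const uint8_t pattern[3] = { ROR, MUL, MUL };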
    // Emit the final instruction that stops the interpreter (its opcode, dst_index and
    // src_index assignments are elided from this excerpt)
    code[code_size].C = 0;

#endif // VARIANT4_RANDOM_MATH_H
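// Usage sketch (not part of this header): how the generator and interpreter above are driven.
// The entry-point names v4_random_math_init and v4_random_math, the v4_reg typedef and the
// NUM_INSTRUCTIONS_MAX constant are assumptions taken from the upstream variant4 header rather
// than from this excerpt:
//
//     void run_v4_program(uint64_t height, v4_reg r[9])
//     {
//         struct V4_Instruction code[NUM_INSTRUCTIONS_MAX + 1];
//         v4_random_math_init(code, height);   // build the per-height program, terminated by RET
//         v4_random_math(code, r);             // execute it over the nine registers in r[]
//     }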