@@ -184,6 +184,9 @@ int main(int argc, char ** argv) {
     // extra text to insert in each client's prompt in order to make it larger
     const int32_t n_junk = std::max(1, params.n_junk);
 
+    // signed seed, use negative values to indicate different seeds for the different clients
+    const int32_t & sseed = params.sampling.seed;
+
     // init llama.cpp
     llama_backend_init();
     llama_numa_init(params.numa);
@@ -219,12 +222,21 @@ int main(int argc, char ** argv) {
 
     const int n_ctx = llama_n_ctx(ctx);
 
+    if (sseed >= 0) {
+        LOG_INF("%s: initializing all samplers with the same RNG seed: %d (use a negative seed to have different seeds)\n", __func__, sseed);
+    } else {
+        LOG_INF("%s: initializing samplers with different RNG seeds, starting from %d\n", __func__, sseed);
+    }
+
     std::vector<client> clients(n_clients);
     for (size_t i = 0; i < clients.size(); ++i) {
         auto & client = clients[i];
         client.id = i;
         client.smpl = common_sampler_init(model, params.sampling);
-        //params.sampling.seed++;
+
+        if (sseed < 0) {
+            params.sampling.seed--;
+        }
     }
 
     std::vector<llama_token> tokens_system;
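
For reference, a minimal standalone sketch (not part of the patch; the values `sseed = -5` and `n_clients = 4` are made up for illustration) of the seed each client's sampler ends up with under this scheme: with a non-negative seed all clients share the same value, while a negative seed gives client `i` the value `sseed - i`.

```cpp
// Standalone illustration only: mimics the per-client seed assignment in the loop above.
#include <cstdint>
#include <cstdio>

int main() {
    const int32_t sseed     = -5; // example: negative seed requested by the user
    const int     n_clients =  4; // example: number of parallel clients

    int32_t seed = sseed;
    for (int i = 0; i < n_clients; ++i) {
        // in the real code this is where common_sampler_init() receives the current seed
        printf("client %d -> seed %d\n", i, seed);
        if (sseed < 0) {
            seed--; // negative seed: each client gets a different value
        }
    }
    return 0;
}
```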