So I have a method with simple lambda functions that I use to update my weights. I want to try different functions, but I also want a default parameter for the decay:
void ema_update(int i, const NetImpl& mdl,
    void (&updwp)(torch::Tensor& w, const torch::Tensor& w1, double decay = 0.999) =
        [](torch::Tensor& w, const torch::Tensor& w1, double decay) {
            w.set_data(w.data().detach() * decay + w1.detach() * (1. - decay));
        },
    void (&updw)(torch::Tensor& w, const torch::Tensor& w1, double decay = 0.999) =
        [](torch::Tensor& w, const torch::Tensor& w1, double decay) {
            w = w.detach() * decay;
            w += w1.detach() * (1. - decay);
        }) {
    updw(layers[i].cnvtr1->weight, mdl.layers[i].cnvtr1->weight);
    updw(layers[i].cnvtr2->weight, mdl.layers[i].cnvtr2->weight);
    updw(layers[i].cnvtr3->weight, mdl.layers[i].cnvtr3->weight);
    updw(layers[i].lin1->weight, mdl.layers[i].lin1->weight);
    updw(layers[i].lin2->weight, mdl.layers[i].lin2->weight);
    updw(layers[i].lin3->weight, mdl.layers[i].lin3->weight);
    updw(layers[i].cnv1->weight, mdl.layers[i].cnv1->weight);
    updw(layers[i].cnv2->weight, mdl.layers[i].cnv2->weight);
    updw(layers[i].cnv3->weight, mdl.layers[i].cnv3->weight);
    updw(layers[i].cnv4->weight, mdl.layers[i].cnv4->weight);
    updw(layers[i].rnnresh, mdl.layers[i].rnnresh);
    if (layers[i].mha->in_proj_weight.defined())
        updw(layers[i].mha->in_proj_weight, mdl.layers[i].mha->in_proj_weight);
    if (layers[i].mha->k_proj_weight.defined())
        updw(layers[i].mha->k_proj_weight, mdl.layers[i].mha->k_proj_weight);
    if (layers[i].mha->q_proj_weight.defined())
        updw(layers[i].mha->q_proj_weight, mdl.layers[i].mha->q_proj_weight);
    if (layers[i].mha->v_proj_weight.defined())
        updw(layers[i].mha->v_proj_weight, mdl.layers[i].mha->v_proj_weight);
    for (size_t pi = 0; pi < layers[i].trans->decoder.ptr()->parameters().size(); ++pi)
        updwp(layers[i].trans->decoder.ptr()->parameters()[pi], mdl.layers[i].trans->decoder.ptr()->parameters()[pi].data()); //torch::nn::init::xavier_uniform_(p).cuda();
    for (size_t pi = 0; pi < layers[i].trans->encoder.ptr()->parameters().size(); ++pi)
        updwp(layers[i].trans->encoder.ptr()->parameters()[pi], mdl.layers[i].trans->encoder.ptr()->parameters()[pi].data());
    for (size_t pi = 0; pi < layers[i].rnn1->all_weights().size(); ++pi)
        updwp(layers[i].rnn1->all_weights()[pi], mdl.layers[i].rnn1->all_weights()[pi].data());
}
Here I list all the layers I need to update, and the small lambdas are default arguments. However, I can't set a default argument for the decay on the function-pointer prototypes.
MSVC says:
error C2383: 'updwp': default-arguments are not allowed on this symbol
It doesn't matter whether I use a reference or a pointer to the function.
I'm open to alternative suggestions for making both the lambdas and the decay default parameters.
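One reduced variant that does compile, as a sketch (hypothetical name ema_update_alt, trimmed to a single layer): give decay its own defaulted parameter and forward it explicitly, so no default argument appears inside the pointer type. The downside is that every call in the body has to pass decay along:

void ema_update_alt(int i, const NetImpl& mdl, double decay = 0.999,
    void (*updw)(torch::Tensor&, const torch::Tensor&, double) =
        [](torch::Tensor& w, const torch::Tensor& w1, double d) {
            // Same EMA step, but the decay arrives as an explicit argument.
            w = w.detach() * d;
            w += w1.detach() * (1. - d);
        }) {
    // Forward decay at each call site instead of defaulting it in the pointer type.
    updw(layers[i].lin1->weight, mdl.layers[i].lin1->weight, decay);
}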
Originally it was:
void ema_update(int i, const NetImpl& mdl, double decay = 0.999,
    void (&updwp)(torch::Tensor& w, const torch::Tensor& w1) =
        [](torch::Tensor& w, const torch::Tensor& w1) {
            w.set_data(w.data().detach() * decay + w1.detach() * (1. - decay));
        },
    void (&updw)(torch::Tensor& w, const torch::Tensor& w1) =
        [](torch::Tensor& w, const torch::Tensor& w1) {
            w = w.detach() * decay;
            w += w1.detach() * (1. - decay);
        })
But that doesn't work either, presumably because a default argument isn't allowed to use another parameter, and the default lambdas couldn't capture decay anyway (a capturing lambda won't convert to a function pointer or reference).
Self-contained example:
void f(void (*pf)(int &a, double b = 0.77) = [](int &a, double b) {
        a *= b;
    }) {
    int a = 9;
    pf(a);
}
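If I understand the rule correctly, a default argument may not appear in the parameter list of a function-pointer (or reference) type at all; defaults are only allowed on the outer function's own parameters. A tiny sketch of the distinction (hypothetical g):

void g(void (*pf)(int &a, double b) = nullptr, // no default allowed inside the pointer type
       double b = 0.77) {                      // defaults belong out here
    int a = 9;
    if (pf) pf(a, b);
}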
void f1(double b = 0.77, void (*pf)(int &a) = [](int &a) {
        a *= b; // error: b is neither captured nor in scope inside the lambda
    }) {
    int a = 9;
    pf(a);
}
https://wandbox./permlink/XmZtzsxmcwgUIbJy
I also tried std::function:
#include <functional>
void f3(double b = 0.77, std::function<void (double b, int &a)> fa = [](double b, int &a) {
        a *= b;
    }) {
    int a = 9;
    std::function f = std::bind(fa, b);
    f(a);
}
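As far as I can tell that fails on two counts: std::function's deduction guides can't deduce a signature from std::bind's result (its call operator is a template), and the bind expression never forwards a because there is no placeholder for it. A version that compiles (hypothetical f3_fixed) spells out the type and adds the placeholder:

#include <functional>

void f3_fixed(double b = 0.77, std::function<void (double, int &)> fa = [](double b, int &a) {
        a *= b;
    }) {
    int a = 9;
    // Bind b as the first argument and leave a placeholder slot for the int&.
    std::function<void (int &)> f = std::bind(fa, b, std::placeholders::_1);
    f(a);
}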
https://wandbox./permlink/Jvv3Dw2gSemkyaxt
1 Answer
I've found a solution, which is the simplest (maybe not the most elegant): I need to pass by reference because in the original code a is not a simple integer but an object with a getter and a setter that I use inside the lambda.
#include <functional>
void f4(double b = 0.77, std::function<void (double b, int &a)> fa = [](double b, int &a) {
        a *= b;
    }) {
    int a = 9;
    // Instead of std::bind, capture fa and b in a wrapper lambda that fixes b.
    auto f = [fa, b](int &a) { fa(b, a); };
    f(a);
}
https://wandbox./permlink/uKwqe1reWwwEJAGC
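Folding the same idea back into the original method would look roughly like this; a sketch only, assuming the NetImpl layout from the question and showing just a couple of the update calls, with the rest of the body unchanged apart from calling the wrappers:

#include <functional>

void ema_update(int i, const NetImpl& mdl, double decay = 0.999,
    std::function<void (double, torch::Tensor&, const torch::Tensor&)> updwp =
        [](double d, torch::Tensor& w, const torch::Tensor& w1) {
            w.set_data(w.data().detach() * d + w1.detach() * (1. - d));
        },
    std::function<void (double, torch::Tensor&, const torch::Tensor&)> updw =
        [](double d, torch::Tensor& w, const torch::Tensor& w1) {
            w = w.detach() * d;
            w += w1.detach() * (1. - d);
        }) {
    // Bake decay in once; the body then calls the wrappers exactly as before.
    auto wp = [&](torch::Tensor& w, const torch::Tensor& w1) { updwp(decay, w, w1); };
    auto wd = [&](torch::Tensor& w, const torch::Tensor& w1) { updw(decay, w, w1); };
    wd(layers[i].lin1->weight, mdl.layers[i].lin1->weight);
    for (size_t pi = 0; pi < layers[i].rnn1->all_weights().size(); ++pi)
        wp(layers[i].rnn1->all_weights()[pi], mdl.layers[i].rnn1->all_weights()[pi].data());
    // ... remaining updw/updwp calls as in the question, via wd/wp ...
}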
A comment from Passer By (Mar 8 at 11:44) suggested the same wrapper for the function-pointer version:
auto wrapped = [pf, b](int& a) { return pf(a, b); };
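Folded into the earlier self-contained example, that suggestion would look something like this (hypothetical name f2; the function pointer takes b explicitly, with no default inside the pointer type):

void f2(double b = 0.77, void (*pf)(int &a, double b) = [](int &a, double b) {
        a *= b;
    }) {
    int a = 9;
    // Capture pf and b so callers of wrapped only supply the int.
    auto wrapped = [pf, b](int &a) { return pf(a, b); };
    wrapped(a);
}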