18 #include <netlink-local.h>
19 #include <netlink-tc.h>
20 #include <netlink/netlink.h>
21 #include <netlink/cache.h>
22 #include <netlink/utils.h>
23 #include <netlink/route/tc.h>
24 #include <netlink/route/qdisc.h>
25 #include <netlink/route/qdisc-modules.h>
26 #include <netlink/route/class.h>
27 #include <netlink/route/class-modules.h>
28 #include <netlink/route/link.h>
29 #include <netlink/route/sch/tbf.h>
/*
 * Attribute presence flags for struct rtnl_tbf's qt_mask: each bit
 * records that the corresponding TBF parameter has been set/parsed.
 */
#define TBF_ATTR_LIMIT		0x01
#define TBF_ATTR_RATE		0x02
#define TBF_ATTR_PEAKRATE	0x10
#define TBF_ATTR_MPU		0x80
38 static inline struct rtnl_tbf *tbf_qdisc(
struct rtnl_qdisc *qdisc)
40 return (
struct rtnl_tbf *) qdisc->q_subdata;
43 static inline struct rtnl_tbf *tbf_alloc(
struct rtnl_qdisc *qdisc)
45 if (!qdisc->q_subdata)
46 qdisc->q_subdata = calloc(1,
sizeof(
struct rtnl_tbf));
48 return tbf_qdisc(qdisc);
51 static struct nla_policy tbf_policy[TCA_TBF_MAX+1] = {
52 [TCA_TBF_PARMS] = { .
minlen =
sizeof(
struct tc_tbf_qopt) },
/*
 * tbf_msg_parser — parse TCA_TBF_* netlink attributes into the qdisc's
 * private struct rtnl_tbf.
 *
 * NOTE(review): this chunk is an incomplete extraction; declarations of
 * `err`, `tbf` and `bufsize`, the intervening error checks, and the end
 * of the qt_mask assignment are not visible here.
 */
55 static int tbf_msg_parser(
struct rtnl_qdisc *q)
58 struct nlattr *tb[TCA_TBF_MAX + 1];
/* Validate and index the attributes against tbf_policy. */
61 err = tca_parse(tb, TCA_TBF_MAX, (
struct rtnl_tca *) q, tbf_policy);
/* Presumably reached when tbf_alloc() fails — TODO confirm against the
 * missing lines. */
67 return nl_errno(ENOMEM);
69 if (tb[TCA_TBF_PARMS]) {
70 struct tc_tbf_qopt opts;
/* Copy the kernel's tc_tbf_qopt payload into a local struct. */
73 nla_memcpy(&opts, tb[TCA_TBF_PARMS],
sizeof(opts));
74 tbf->qt_limit = opts.limit;
75 tbf->qt_mpu = opts.rate.mpu;
/* Rate: ratespec, transmit time (buffer) and bucket size. */
77 rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate);
78 tbf->qt_rate_txtime = opts.buffer;
81 tbf->qt_rate_bucket = bufsize;
/* Peak rate: ratespec, transmit time (mtu) and bucket size. */
83 rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate);
84 tbf->qt_peakrate_txtime = opts.mtu;
87 tbf->qt_peakrate_bucket = bufsize;
/* Mark every attribute filled in above as present. */
89 tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_MPU | TBF_ATTR_RATE |
/*
 * tbf_dump_brief — one-line dump of the qdisc's rate and limit.
 *
 * NOTE(review): incomplete extraction; the third parameter, the
 * declarations of `r`, `rbit`, `lim` and the unit-conversion calls that
 * fill them (presumably nl_cancel_down_* helpers) are not visible here.
 */
96 static int tbf_dump_brief(
struct rtnl_qdisc *qdisc,
struct nl_dump_params *p,
100 char *ru, *rubit, *limu;
101 struct rtnl_tbf *tbf = tbf_qdisc(qdisc);
110 dp_dump(p,
" rate %.2f%s/s (%.0f%s) limit %.2f%s",
111 r, ru, rbit, rubit, lim, limu);
/*
 * tbf_dump_full — detailed multi-line dump: mpu, rate bucket/cell sizes
 * and, when set, the peak-rate parameters.
 *
 * NOTE(review): incomplete extraction; the declarations of `bs`, `bu`,
 * `cl`, `cu`, `line` and the value computations are not visible here.
 *
 * FIXME(review): the format string "rate-bucket-size %1.f%s" looks like
 * a typo for "%.1f%s" (precision 1), matching the adjacent
 * "rate-cell-size %.1f%s" — confirm against the full source before
 * changing.
 */
117 static int tbf_dump_full(
struct rtnl_qdisc *qdisc,
struct nl_dump_params *p,
120 struct rtnl_tbf *tbf = tbf_qdisc(qdisc);
131 dp_dump(p,
"mpu %u rate-bucket-size %1.f%s "
132 "rate-cell-size %.1f%s\n",
133 tbf->qt_mpu, bs, bu, cl, cu);
/* Peak-rate block is only printed when the attribute is present. */
137 if (tbf->qt_mask & TBF_ATTR_PEAKRATE) {
138 char *pru, *prbu, *bsu, *clu;
139 double pr, prb, bs, cl;
147 dp_dump_line(p, line++,
" peak-rate %.2f%s/s (%.0f%s) "
148 "bucket-size %.1f%s cell-size %.1f%s",
150 pr, pru, prb, prbu, bs, bsu, cl, clu);
/*
 * tbf_get_opts — build the netlink options message (TCA_TBF_PARMS plus
 * rate/peak-rate tables) from the qdisc's TBF attributes.
 *
 * NOTE(review): incomplete extraction; the declarations of `msg`,
 * `rtab` and `ptab`, the rate-table build calls (only their trailing
 * arguments are visible below), the nla_put_failure label and the
 * return statements are not visible here.
 */
157 static struct nl_msg *tbf_get_opts(
struct rtnl_qdisc *qdisc)
159 struct tc_tbf_qopt opts;
160 struct rtnl_tbf *tbf;
/* Rate and limit are mandatory before a message can be built. */
164 int required = TBF_ATTR_RATE | TBF_ATTR_LIMIT;
166 memset(&opts, 0,
sizeof(opts));
168 tbf = tbf_qdisc(qdisc);
/*
 * FIXME(review): this condition is wrong.  `!(tbf->qt_mask & required)`
 * collapses to 0 or 1, which can never equal `required` (0x03), so the
 * comparison is ALWAYS true and the function always bails out, even when
 * both mandatory attributes are present.  The intended check is:
 *
 *     if ((tbf->qt_mask & required) != required)
 *
 * i.e. drop the leading `!`.
 */
172 if (!(tbf->qt_mask & required) != required)
175 opts.limit = tbf->qt_limit;
176 opts.buffer = tbf->qt_rate_txtime;
177 tbf->qt_rate.rs_mpu = tbf->qt_mpu;
178 rtnl_rcopy_ratespec(&opts.rate, &tbf->qt_rate);
/* Trailing arguments of the (not fully visible) rate-table build call:
 * cell size derived from the cell log, and the configured rate. */
181 1 << tbf->qt_rate.rs_cell_log,
182 tbf->qt_rate.rs_rate);
/* Peak rate is optional; fill it in only when present. */
184 if (tbf->qt_mask & TBF_ATTR_PEAKRATE) {
185 opts.mtu = tbf->qt_peakrate_txtime;
186 tbf->qt_peakrate.rs_mpu = tbf->qt_mpu;
187 rtnl_rcopy_ratespec(&opts.peakrate, &tbf->qt_peakrate);
191 1 << tbf->qt_peakrate.rs_cell_log,
192 tbf->qt_peakrate.rs_rate);
/* Presumably reached when message allocation fails — TODO confirm. */
197 goto nla_put_failure;
199 NLA_PUT(msg, TCA_TBF_PARMS,
sizeof(opts), &opts);
200 NLA_PUT(msg, TCA_TBF_RTAB,
sizeof(rtab), rtab);
202 if (tbf->qt_mask & TBF_ATTR_PEAKRATE)
203 NLA_PUT(msg, TCA_TBF_PTAB,
sizeof(ptab), ptab);
/*
 * NOTE(review): fragment of a setter — presumably
 * rtnl_qdisc_tbf_set_limit(); its header is not visible in this chunk.
 * Stores the byte limit and flags it present in qt_mask.
 */
225 struct rtnl_tbf *tbf;
227 tbf = tbf_alloc(qdisc);
/* Presumably reached when tbf_alloc() fails — TODO confirm. */
229 return nl_errno(ENOMEM);
231 tbf->qt_limit = limit;
232 tbf->qt_mask |= TBF_ATTR_LIMIT;
/*
 * calc_limit — derive a byte limit from a rate and a latency budget:
 * bytes transmitted in `latency` microseconds at `spec->rs_rate`.
 *
 * NOTE(review): incomplete extraction; the final parameter (presumably
 * the bucket size, added to the result) and the return statement are
 * not visible here.
 */
237 static inline double calc_limit(
struct rtnl_ratespec *spec,
int latency,
/* rate [bytes/s] * latency [us] / 10^6 -> bytes in flight. */
242 limit = (double) spec->rs_rate * ((
double) latency / 1000000.);
/*
 * NOTE(review): fragment of a setter — presumably
 * rtnl_qdisc_tbf_set_limit_by_latency(); its header and tail (choosing
 * the smaller of the two limits and storing it) are not visible here.
 * Computes the limit from the configured rate (and peak rate, if set).
 */
268 struct rtnl_tbf *tbf;
269 double limit, limit2;
271 tbf = tbf_alloc(qdisc);
/* Presumably reached when tbf_alloc() fails — TODO confirm. */
273 return nl_errno(ENOMEM);
/* A rate is a precondition: the limit is derived from it. */
275 if (!(tbf->qt_mask & TBF_ATTR_RATE))
276 return nl_error(EINVAL,
"The rate must be specified before "
277 "limit can be calculated based on latency.");
279 limit = calc_limit(&tbf->qt_rate, latency, tbf->qt_rate_bucket);
/* When a peak rate exists, compute its limit as well (the effective
 * limit is presumably the minimum of the two — tail not visible). */
281 if (tbf->qt_mask & TBF_ATTR_PEAKRATE) {
282 limit2 = calc_limit(&tbf->qt_peakrate, latency,
283 tbf->qt_peakrate_bucket);
/*
 * NOTE(review): fragments of three accessors whose headers are not
 * visible in this chunk — presumably rtnl_qdisc_tbf_get_limit(),
 * rtnl_qdisc_tbf_set_mpu() and rtnl_qdisc_tbf_get_mpu().
 */
/* Getter: return the limit only if it was set; fall through to an
 * error return (not visible) otherwise. */
299 struct rtnl_tbf *tbf;
301 tbf = tbf_qdisc(qdisc);
302 if (tbf && (tbf->qt_mask & TBF_ATTR_LIMIT))
303 return tbf->qt_limit;
/* Setter: allocate private data, store the MPU, flag it present. */
316 struct rtnl_tbf *tbf;
318 tbf = tbf_alloc(qdisc);
320 return nl_errno(ENOMEM);
323 tbf->qt_mask |= TBF_ATTR_MPU;
/* Getter: guarded read of the MPU (return statement not visible). */
335 struct rtnl_tbf *tbf;
337 tbf = tbf_qdisc(qdisc);
338 if (tbf && (tbf->qt_mask & TBF_ATTR_MPU))
/*
 * calc_cell_log — compute the cell log (log2 of the cell size) so that
 * the bucket fits into the kernel's 256-entry rate table.
 *
 * NOTE(review): incomplete extraction; the loop body (presumably
 * `cell++`) and the return statement are not visible here.
 */
344 static inline int calc_cell_log(
int cell,
int bucket)
/* Grow the cell size until bucket/2^cell fits in 256 table slots. */
354 while ((bucket >> cell) > 255)
/*
 * NOTE(review): fragment of a setter — presumably
 * rtnl_qdisc_tbf_set_rate(); its header and the cell_log error check
 * are not visible in this chunk.  Stores rate, bucket and cell log, and
 * flags the rate as present.
 */
372 struct rtnl_tbf *tbf;
375 tbf = tbf_alloc(qdisc);
/* Presumably reached when tbf_alloc() fails — TODO confirm. */
377 return nl_errno(ENOMEM);
379 cell_log = calc_cell_log(cell, bucket);
383 tbf->qt_rate.rs_rate = rate;
384 tbf->qt_rate_bucket = bucket;
385 tbf->qt_rate.rs_cell_log = cell_log;
387 tbf->qt_mask |= TBF_ATTR_RATE;
/*
 * NOTE(review): fragments of three getters whose headers are not
 * visible in this chunk — presumably rtnl_qdisc_tbf_get_rate(),
 * rtnl_qdisc_tbf_get_rate_bucket() and rtnl_qdisc_tbf_get_rate_cell().
 * Each performs a guarded read of a rate attribute; the error-return
 * paths are not visible.
 */
/* Rate in bytes per second. */
399 struct rtnl_tbf *tbf;
401 tbf = tbf_qdisc(qdisc);
402 if (tbf && (tbf->qt_mask & TBF_ATTR_RATE))
403 return tbf->qt_rate.rs_rate;
/* Rate bucket size. */
415 struct rtnl_tbf *tbf;
417 tbf = tbf_qdisc(qdisc);
418 if (tbf && (tbf->qt_mask & TBF_ATTR_RATE))
419 return tbf->qt_rate_bucket;
/* Rate cell size, reconstructed from the stored cell log. */
431 struct rtnl_tbf *tbf;
433 tbf = tbf_qdisc(qdisc);
434 if (tbf && (tbf->qt_mask & TBF_ATTR_RATE))
435 return (1 << tbf->qt_rate.rs_cell_log);
/*
 * NOTE(review): fragment of a setter — presumably
 * rtnl_qdisc_tbf_set_peakrate(); its header and the cell_log error
 * check are not visible.  Mirrors the rate setter for the peak rate.
 */
451 struct rtnl_tbf *tbf;
454 tbf = tbf_alloc(qdisc);
/* Presumably reached when tbf_alloc() fails — TODO confirm. */
456 return nl_errno(ENOMEM);
458 cell_log = calc_cell_log(cell, bucket);
462 tbf->qt_peakrate.rs_rate = rate;
463 tbf->qt_peakrate_bucket = bucket;
464 tbf->qt_peakrate.rs_cell_log = cell_log;
467 tbf->qt_mask |= TBF_ATTR_PEAKRATE;
/*
 * NOTE(review): fragments of three getters whose headers are not
 * visible — presumably rtnl_qdisc_tbf_get_peakrate(),
 * rtnl_qdisc_tbf_get_peakrate_bucket() and
 * rtnl_qdisc_tbf_get_peakrate_cell().  Guarded reads mirroring the
 * rate getters; error-return paths are not visible.
 */
/* Peak rate in bytes per second. */
479 struct rtnl_tbf *tbf;
481 tbf = tbf_qdisc(qdisc);
482 if (tbf && (tbf->qt_mask & TBF_ATTR_PEAKRATE))
483 return tbf->qt_peakrate.rs_rate;
/* Peak-rate bucket size. */
495 struct rtnl_tbf *tbf;
497 tbf = tbf_qdisc(qdisc);
498 if (tbf && (tbf->qt_mask & TBF_ATTR_PEAKRATE))
499 return tbf->qt_peakrate_bucket;
/* Peak-rate cell size, reconstructed from the stored cell log. */
511 struct rtnl_tbf *tbf;
513 tbf = tbf_qdisc(qdisc);
514 if (tbf && (tbf->qt_mask & TBF_ATTR_PEAKRATE))
515 return (1 << tbf->qt_peakrate.rs_cell_log);
/*
 * NOTE(review): fragment of the qdisc-module operations table (its
 * name, e.g. a struct rtnl_qdisc_ops, and remaining members are not
 * visible) plus the constructor/destructor headers whose bodies
 * (presumably rtnl_qdisc_register/unregister calls) are not visible.
 */
524 .qo_msg_parser = tbf_msg_parser,
527 .qo_get_opts = tbf_get_opts,
/* Library constructor: registers the TBF qdisc module on load. */
530 static void __init tbf_init(
void)
/* Library destructor: unregisters the module on unload. */
535 static void __exit tbf_exit(
void)