diff --git a/sbin/ipfw/dummynet.c b/sbin/ipfw/dummynet.c index 24d835fbb98e..88367694a404 100644 --- a/sbin/ipfw/dummynet.c +++ b/sbin/ipfw/dummynet.c @@ -1,1995 +1,1996 @@ /*- * Codel/FQ_Codel and PIE/FQ_PIE Code: * Copyright (C) 2016 Centre for Advanced Internet Architectures, * Swinburne University of Technology, Melbourne, Australia. * Portions of this code were made possible in part by a gift from * The Comcast Innovation Fund. * Implemented by Rasool Al-Saadi * * Copyright (c) 2002-2003,2010 Luigi Rizzo * * Redistribution and use in source forms, with and without modification, * are permitted provided that this entire comment appears intact. * * Redistribution in binary form may occur without any restrictions. * Obviously, it would be nice if you gave credit where credit is due * but requiring it would be too onerous. * * This software is provided ``AS IS'' without any warranties of any kind. * * $FreeBSD$ * * dummynet support */ #define NEW_AQM +#include #include #include /* XXX there are several sysctl leftover here */ #include #include "ipfw2.h" #ifdef NEW_AQM #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* inet_ntoa */ static struct _s_x dummynet_params[] = { { "plr", TOK_PLR }, { "noerror", TOK_NOERROR }, { "buckets", TOK_BUCKETS }, { "dst-ip", TOK_DSTIP }, { "src-ip", TOK_SRCIP }, { "dst-port", TOK_DSTPORT }, { "src-port", TOK_SRCPORT }, { "proto", TOK_PROTO }, { "weight", TOK_WEIGHT }, { "lmax", TOK_LMAX }, { "maxlen", TOK_LMAX }, { "all", TOK_ALL }, { "mask", TOK_MASK }, /* alias for both */ { "sched_mask", TOK_SCHED_MASK }, { "flow_mask", TOK_FLOW_MASK }, { "droptail", TOK_DROPTAIL }, { "ecn", TOK_ECN }, { "red", TOK_RED }, { "gred", TOK_GRED }, #ifdef NEW_AQM { "codel", TOK_CODEL}, /* Codel AQM */ { "fq_codel", TOK_FQ_CODEL}, /* FQ-Codel */ { "pie", TOK_PIE}, /* PIE AQM */ { "fq_pie", TOK_FQ_PIE}, /* FQ-PIE */ #endif { "bw", TOK_BW }, { "bandwidth", 
TOK_BW }, { "delay", TOK_DELAY }, { "link", TOK_LINK }, { "pipe", TOK_PIPE }, { "queue", TOK_QUEUE }, { "flowset", TOK_FLOWSET }, { "sched", TOK_SCHED }, { "pri", TOK_PRI }, { "priority", TOK_PRI }, { "type", TOK_TYPE }, { "flow-id", TOK_FLOWID}, { "dst-ipv6", TOK_DSTIP6}, { "dst-ip6", TOK_DSTIP6}, { "src-ipv6", TOK_SRCIP6}, { "src-ip6", TOK_SRCIP6}, { "profile", TOK_PROFILE}, { "burst", TOK_BURST}, { "dummynet-params", TOK_NULL }, { NULL, 0 } /* terminator */ }; #ifdef NEW_AQM /* AQM/extra sched parameters tokens*/ static struct _s_x aqm_params[] = { { "target", TOK_TARGET}, { "interval", TOK_INTERVAL}, { "limit", TOK_LIMIT}, { "flows", TOK_FLOWS}, { "quantum", TOK_QUANTUM}, { "ecn", TOK_ECN}, { "noecn", TOK_NO_ECN}, { "tupdate", TOK_TUPDATE}, { "max_burst", TOK_MAX_BURST}, { "max_ecnth", TOK_MAX_ECNTH}, { "alpha", TOK_ALPHA}, { "beta", TOK_BETA}, { "capdrop", TOK_CAPDROP}, { "nocapdrop", TOK_NO_CAPDROP}, { "onoff", TOK_ONOFF}, { "dre", TOK_DRE}, { "ts", TOK_TS}, { "derand", TOK_DERAND}, { "noderand", TOK_NO_DERAND}, { NULL, 0 } /* terminator */ }; #endif #define O_NEXT(p, len) ((void *)((char *)p + len)) static void oid_fill(struct dn_id *oid, int len, int type, uintptr_t id) { oid->len = len; oid->type = type; oid->subtype = 0; oid->id = id; } /* make room in the buffer and move the pointer forward */ static void * o_next(struct dn_id **o, int len, int type) { struct dn_id *ret = *o; oid_fill(ret, len, type, 0); *o = O_NEXT(*o, len); return ret; } #ifdef NEW_AQM /* Codel flags */ enum { CODEL_ECN_ENABLED = 1 }; /* PIE flags, from PIE kernel module */ enum { PIE_ECN_ENABLED = 1, PIE_CAPDROP_ENABLED = 2, PIE_ON_OFF_MODE_ENABLED = 4, PIE_DEPRATEEST_ENABLED = 8, PIE_DERAND_ENABLED = 16 }; #define PIE_FIX_POINT_BITS 13 #define PIE_SCALE (1L<15) return -1; for (i = 0; ioid, l, DN_CMD_GET, DN_API_VERSION); ep->oid.len = l; ep->oid.subtype = subtype; ep->nr = nr; ret = do_cmd(-IP_DUMMYNET3, ep, (uintptr_t)&l); if (ret) { free(ep); errx(EX_DATAERR, "Error getting extra 
parameters\n"); } switch (subtype) { case DN_AQM_PARAMS: if( !strcasecmp(ep->name, "codel")) { us_to_time(ep->par[0], strt1); us_to_time(ep->par[1], strt2); l = sprintf(out, " AQM CoDel target %s interval %s", strt1, strt2); if (ep->par[2] & CODEL_ECN_ENABLED) l = sprintf(out + l, " ECN"); else l += sprintf(out + l, " NoECN"); } else if( !strcasecmp(ep->name, "pie")) { us_to_time(ep->par[0], strt1); us_to_time(ep->par[1], strt2); us_to_time(ep->par[2], strt3); l = sprintf(out, " AQM type PIE target %s tupdate %s alpha " "%g beta %g max_burst %s max_ecnth %.3g", strt1, strt2, ep->par[4] / (float) PIE_SCALE, ep->par[5] / (float) PIE_SCALE, strt3, ep->par[3] / (float) PIE_SCALE ); if (ep->par[6] & PIE_ECN_ENABLED) l += sprintf(out + l, " ECN"); else l += sprintf(out + l, " NoECN"); if (ep->par[6] & PIE_CAPDROP_ENABLED) l += sprintf(out + l, " CapDrop"); else l += sprintf(out + l, " NoCapDrop"); if (ep->par[6] & PIE_ON_OFF_MODE_ENABLED) l += sprintf(out + l, " OnOff"); if (ep->par[6] & PIE_DEPRATEEST_ENABLED) l += sprintf(out + l, " DRE"); else l += sprintf(out + l, " TS"); if (ep->par[6] & PIE_DERAND_ENABLED) l += sprintf(out + l, " Derand"); else l += sprintf(out + l, " NoDerand"); } break; case DN_SCH_PARAMS: if (!strcasecmp(ep->name,"FQ_CODEL")) { us_to_time(ep->par[0], strt1); us_to_time(ep->par[1], strt2); l = sprintf(out," FQ_CODEL target %s interval %s" " quantum %jd limit %jd flows %jd", strt1, strt2, (intmax_t) ep->par[3], (intmax_t) ep->par[4], (intmax_t) ep->par[5] ); if (ep->par[2] & CODEL_ECN_ENABLED) l += sprintf(out + l, " ECN"); else l += sprintf(out + l, " NoECN"); l += sprintf(out + l, "\n"); } else if (!strcasecmp(ep->name,"FQ_PIE")) { us_to_time(ep->par[0], strt1); us_to_time(ep->par[1], strt2); us_to_time(ep->par[2], strt3); l = sprintf(out, " FQ_PIE target %s tupdate %s alpha " "%g beta %g max_burst %s max_ecnth %.3g" " quantum %jd limit %jd flows %jd", strt1, strt2, ep->par[4] / (float) PIE_SCALE, ep->par[5] / (float) PIE_SCALE, strt3, 
ep->par[3] / (float) PIE_SCALE, (intmax_t) ep->par[7], (intmax_t) ep->par[8], (intmax_t) ep->par[9] ); if (ep->par[6] & PIE_ECN_ENABLED) l += sprintf(out + l, " ECN"); else l += sprintf(out + l, " NoECN"); if (ep->par[6] & PIE_CAPDROP_ENABLED) l += sprintf(out + l, " CapDrop"); else l += sprintf(out + l, " NoCapDrop"); if (ep->par[6] & PIE_ON_OFF_MODE_ENABLED) l += sprintf(out + l, " OnOff"); if (ep->par[6] & PIE_DEPRATEEST_ENABLED) l += sprintf(out + l, " DRE"); else l += sprintf(out + l, " TS"); if (ep->par[6] & PIE_DERAND_ENABLED) l += sprintf(out + l, " Derand"); else l += sprintf(out + l, " NoDerand"); l += sprintf(out + l, "\n"); } break; } free(ep); } #endif #if 0 static int sort_q(void *arg, const void *pa, const void *pb) { int rev = (co.do_sort < 0); int field = rev ? -co.do_sort : co.do_sort; long long res = 0; const struct dn_flow_queue *a = pa; const struct dn_flow_queue *b = pb; switch (field) { case 1: /* pkts */ res = a->len - b->len; break; case 2: /* bytes */ res = a->len_bytes - b->len_bytes; break; case 3: /* tot pkts */ res = a->tot_pkts - b->tot_pkts; break; case 4: /* tot bytes */ res = a->tot_bytes - b->tot_bytes; break; } if (res < 0) res = -1; if (res > 0) res = 1; return (int)(rev ? res : -res); } #endif /* print a mask and header for the subsequent list of flows */ static void print_mask(struct ipfw_flow_id *id) { if (!IS_IP6_FLOW_ID(id)) { printf(" " "mask: %s 0x%02x 0x%08x/0x%04x -> 0x%08x/0x%04x\n", id->extra ? "queue," : "", id->proto, id->src_ip, id->src_port, id->dst_ip, id->dst_port); } else { char buf[255]; printf("\n mask: %sproto: 0x%02x, flow_id: 0x%08x, ", id->extra ? 
"queue," : "", id->proto, id->flow_id6); inet_ntop(AF_INET6, &(id->src_ip6), buf, sizeof(buf)); printf("%s/0x%04x -> ", buf, id->src_port); inet_ntop(AF_INET6, &(id->dst_ip6), buf, sizeof(buf)); printf("%s/0x%04x\n", buf, id->dst_port); } } static void print_header(struct ipfw_flow_id *id) { if (!IS_IP6_FLOW_ID(id)) printf("BKT Prot ___Source IP/port____ " "____Dest. IP/port____ " "Tot_pkt/bytes Pkt/Byte Drp\n"); else printf("BKT ___Prot___ _flow-id_ " "______________Source IPv6/port_______________ " "_______________Dest. IPv6/port_______________ " "Tot_pkt/bytes Pkt/Byte Drp\n"); } static void list_flow(struct buf_pr *bp, struct dn_flow *ni) { char buff[255]; struct protoent *pe = NULL; struct in_addr ina; struct ipfw_flow_id *id = &ni->fid; pe = getprotobynumber(id->proto); /* XXX: Should check for IPv4 flows */ bprintf(bp, "%3u%c", (ni->oid.id) & 0xff, id->extra ? '*' : ' '); if (!IS_IP6_FLOW_ID(id)) { if (pe) bprintf(bp, "%-4s ", pe->p_name); else bprintf(bp, "%4u ", id->proto); ina.s_addr = htonl(id->src_ip); bprintf(bp, "%15s/%-5d ", inet_ntoa(ina), id->src_port); ina.s_addr = htonl(id->dst_ip); bprintf(bp, "%15s/%-5d ", inet_ntoa(ina), id->dst_port); } else { /* Print IPv6 flows */ if (pe != NULL) bprintf(bp, "%9s ", pe->p_name); else bprintf(bp, "%9u ", id->proto); bprintf(bp, "%7d %39s/%-5d ", id->flow_id6, inet_ntop(AF_INET6, &(id->src_ip6), buff, sizeof(buff)), id->src_port); bprintf(bp, " %39s/%-5d ", inet_ntop(AF_INET6, &(id->dst_ip6), buff, sizeof(buff)), id->dst_port); } pr_u64(bp, &ni->tot_pkts, 4); pr_u64(bp, &ni->tot_bytes, 8); bprintf(bp, "%2u %4u %3u", ni->length, ni->len_bytes, ni->drops); } static void print_flowset_parms(struct dn_fs *fs, char *prefix) { int l; char qs[30]; char plr[30]; char red[200]; /* Display RED parameters */ l = fs->qsize; if (fs->flags & DN_QSIZE_BYTES) { if (l >= 8192) sprintf(qs, "%d KB", l / 1024); else sprintf(qs, "%d B", l); } else sprintf(qs, "%3d sl.", l); if (fs->plr) sprintf(plr, "plr %f", 1.0 * fs->plr / 
(double)(0x7fffffff)); else plr[0] = '\0'; if (fs->flags & DN_IS_RED) { /* RED parameters */ sprintf(red, "\n\t %cRED w_q %f min_th %d max_th %d max_p %f", (fs->flags & DN_IS_GENTLE_RED) ? 'G' : ' ', 1.0 * fs->w_q / (double)(1 << SCALE_RED), fs->min_th, fs->max_th, 1.0 * fs->max_p / (double)(1 << SCALE_RED)); if (fs->flags & DN_IS_ECN) strlcat(red, " (ecn)", sizeof(red)); #ifdef NEW_AQM /* get AQM parameters */ } else if (fs->flags & DN_IS_AQM) { get_extra_parms(fs->fs_nr, red, DN_AQM_PARAMS); #endif } else sprintf(red, "droptail"); if (prefix[0]) { printf("%s %s%s %d queues (%d buckets) %s\n", prefix, qs, plr, fs->oid.id, fs->buckets, red); prefix[0] = '\0'; } else { printf("q%05d %s%s %d flows (%d buckets) sched %d " "weight %d lmax %d pri %d %s\n", fs->fs_nr, qs, plr, fs->oid.id, fs->buckets, fs->sched_nr, fs->par[0], fs->par[1], fs->par[2], red); if (fs->flags & DN_HAVE_MASK) print_mask(&fs->flow_mask); } } static void print_extra_delay_parms(struct dn_profile *p) { double loss; if (p->samples_no <= 0) return; loss = p->loss_level; loss /= p->samples_no; printf("\t profile: name \"%s\" loss %f samples %d\n", p->name, loss, p->samples_no); } static void flush_buf(char *buf) { if (buf[0]) printf("%s\n", buf); buf[0] = '\0'; } /* * generic list routine. We expect objects in a specific order, i.e. * PIPES AND SCHEDULERS: * link; scheduler; internal flowset if any; instances * we can tell a pipe from the number. 
* * FLOWSETS: * flowset; queues; * link i (int queue); scheduler i; si(i) { flowsets() : queues } */ static void list_pipes(struct dn_id *oid, struct dn_id *end) { char buf[160]; /* pending buffer */ int toPrint = 1; /* print header */ struct buf_pr bp; buf[0] = '\0'; bp_alloc(&bp, 4096); for (; oid != end; oid = O_NEXT(oid, oid->len)) { if (oid->len < sizeof(*oid)) errx(1, "invalid oid len %d\n", oid->len); switch (oid->type) { default: flush_buf(buf); printf("unrecognized object %d size %d\n", oid->type, oid->len); break; case DN_TEXT: /* list of attached flowsets */ { int i, l; struct { struct dn_id id; uint32_t p[0]; } *d = (void *)oid; l = (oid->len - sizeof(*oid))/sizeof(d->p[0]); if (l == 0) break; printf(" Children flowsets: "); for (i = 0; i < l; i++) printf("%u ", d->p[i]); printf("\n"); break; } case DN_CMD_GET: if (g_co.verbose) printf("answer for cmd %d, len %d\n", oid->type, oid->id); break; case DN_SCH: { struct dn_sch *s = (struct dn_sch *)oid; flush_buf(buf); printf(" sched %d type %s flags 0x%x %d buckets %d active\n", s->sched_nr, s->name, s->flags, s->buckets, s->oid.id); #ifdef NEW_AQM char parms[200]; get_extra_parms(s->sched_nr, parms, DN_SCH_PARAMS); printf("%s",parms); #endif if (s->flags & DN_HAVE_MASK) print_mask(&s->sched_mask); } break; case DN_FLOW: if (toPrint != 0) { print_header(&((struct dn_flow *)oid)->fid); toPrint = 0; } list_flow(&bp, (struct dn_flow *)oid); printf("%s\n", bp.buf); bp_flush(&bp); break; case DN_LINK: { struct dn_link *p = (struct dn_link *)oid; double b = p->bandwidth; char bwbuf[30]; char burst[5 + 7]; /* This starts a new object so flush buffer */ flush_buf(buf); /* data rate */ if (b == 0) sprintf(bwbuf, "unlimited "); else if (b >= 1000000000) sprintf(bwbuf, "%7.3f Gbit/s", b/1000000000); else if (b >= 1000000) sprintf(bwbuf, "%7.3f Mbit/s", b/1000000); else if (b >= 1000) sprintf(bwbuf, "%7.3f Kbit/s", b/1000); else sprintf(bwbuf, "%7.3f bit/s ", b); if (humanize_number(burst, sizeof(burst), p->burst, "", 
HN_AUTOSCALE, 0) < 0 || g_co.verbose) sprintf(burst, "%d", (int)p->burst); sprintf(buf, "%05d: %s %4d ms burst %s", p->link_nr % DN_MAX_ID, bwbuf, p->delay, burst); } break; case DN_FS: print_flowset_parms((struct dn_fs *)oid, buf); break; case DN_PROFILE: flush_buf(buf); print_extra_delay_parms((struct dn_profile *)oid); } flush_buf(buf); // XXX does it really go here ? } bp_free(&bp); } /* * Delete pipe, queue or scheduler i */ int ipfw_delete_pipe(int do_pipe, int i) { struct { struct dn_id oid; uintptr_t a[1]; /* add more if we want a list */ } cmd; oid_fill((void *)&cmd, sizeof(cmd), DN_CMD_DELETE, DN_API_VERSION); cmd.oid.subtype = (do_pipe == 1) ? DN_LINK : ( (do_pipe == 2) ? DN_FS : DN_SCH); cmd.a[0] = i; i = do_cmd(IP_DUMMYNET3, &cmd, cmd.oid.len); if (i) { i = 1; warn("rule %u: setsockopt(IP_DUMMYNET_DEL)", i); } return i; } /* * Code to parse delay profiles. * * Some link types introduce extra delays in the transmission * of a packet, e.g. because of MAC level framing, contention on * the use of the channel, MAC level retransmissions and so on. * From our point of view, the channel is effectively unavailable * for this extra time, which is constant or variable depending * on the link type. Additionally, packets may be dropped after this * time (e.g. on a wireless link after too many retransmissions). * We can model the additional delay with an empirical curve * that represents its distribution. * * cumulative probability * 1.0 ^ * | * L +-- loss-level x * | ****** * | * * | ***** * | * * | ** * | * * +-------*-------------------> * delay * * The empirical curve may have both vertical and horizontal lines. * Vertical lines represent constant delay for a range of * probabilities; horizontal lines correspond to a discontinuty * in the delay distribution: the link will use the largest delay * for a given probability. * * To pass the curve to dummynet, we must store the parameters * in a file as described below, and issue the command * * ipfw pipe config ... 
bw XXX profile ... * * The file format is the following, with whitespace acting as * a separator and '#' indicating the beginning a comment: * * samples N * the number of samples used in the internal * representation (2..1024; default 100); * * loss-level L * The probability above which packets are lost. * (0.0 <= L <= 1.0, default 1.0 i.e. no loss); * * name identifier * Optional a name (listed by "ipfw pipe show") * to identify the distribution; * * "delay prob" | "prob delay" * One of these two lines is mandatory and defines * the format of the following lines with data points. * * XXX YYY * 2 or more lines representing points in the curve, * with either delay or probability first, according * to the chosen format. * The unit for delay is milliseconds. * * Data points does not need to be ordered or equal to the number * specified in the "samples" line. ipfw will sort and interpolate * the curve as needed. * * Example of a profile file: name bla_bla_bla samples 100 loss-level 0.86 prob delay 0 200 # minimum overhead is 200ms 0.5 200 0.5 300 0.8 1000 0.9 1300 1 1300 * Internally, we will convert the curve to a fixed number of * samples, and when it is time to transmit a packet we will * model the extra delay as extra bits in the packet. * */ #define ED_MAX_LINE_LEN 256+ED_MAX_NAME_LEN #define ED_TOK_SAMPLES "samples" #define ED_TOK_LOSS "loss-level" #define ED_TOK_NAME "name" #define ED_TOK_DELAY "delay" #define ED_TOK_PROB "prob" #define ED_TOK_BW "bw" #define ED_SEPARATORS " \t\n" #define ED_MIN_SAMPLES_NO 2 /* * returns 1 if s is a non-negative number, with at least one '.' */ static int is_valid_number(const char *s) { int i, dots_found = 0; int len = strlen(s); for (i = 0; i 1)) return 0; return 1; } /* * Take as input a string describing a bandwidth value * and return the numeric bandwidth value. 
* set clocking interface or bandwidth value */ static void -read_bandwidth(char *arg, int *bandwidth, char *if_name, int namelen) +read_bandwidth(char *arg, uint32_t *bandwidth, char *if_name, int namelen) { - if (*bandwidth != -1) + if (*bandwidth != (uint32_t)-1) warnx("duplicate token, override bandwidth value!"); if (arg[0] >= 'a' && arg[0] <= 'z') { if (!if_name) { errx(1, "no if support"); } if (namelen >= IFNAMSIZ) warn("interface name truncated"); namelen--; /* interface name */ strlcpy(if_name, arg, namelen); *bandwidth = 0; } else { /* read bandwidth value */ - int bw; + uint64_t bw; char *end = NULL; bw = strtoul(arg, &end, 0); if (*end == 'K' || *end == 'k') { end++; bw *= 1000; } else if (*end == 'M' || *end == 'm') { end++; bw *= 1000000; } else if (*end == 'G' || *end == 'g') { end++; bw *= 1000000000; } if ((*end == 'B' && _substrcmp2(end, "Bi", "Bit/s") != 0) || _substrcmp2(end, "by", "bytes") == 0) bw *= 8; - if (bw < 0) + if (bw > UINT_MAX) errx(EX_DATAERR, "bandwidth too large"); - *bandwidth = bw; + *bandwidth = (uint32_t)bw; if (if_name) if_name[0] = '\0'; } } struct point { double prob; double delay; }; static int compare_points(const void *vp1, const void *vp2) { const struct point *p1 = vp1; const struct point *p2 = vp2; double res = 0; res = p1->prob - p2->prob; if (res == 0) res = p1->delay - p2->delay; if (res < 0) return -1; else if (res > 0) return 1; else return 0; } #define ED_EFMT(s) EX_DATAERR,"error in %s at line %d: "#s,filename,lineno static void load_extra_delays(const char *filename, struct dn_profile *p, struct dn_link *link) { char line[ED_MAX_LINE_LEN]; FILE *f; int lineno = 0; int i; int samples = -1; double loss = -1.0; char profile_name[ED_MAX_NAME_LEN]; int delay_first = -1; int do_points = 0; struct point points[ED_MAX_SAMPLES_NO]; int points_no = 0; /* XXX link never NULL? 
*/ p->link_nr = link->link_nr; profile_name[0] = '\0'; f = fopen(filename, "r"); if (f == NULL) err(EX_UNAVAILABLE, "fopen: %s", filename); while (fgets(line, ED_MAX_LINE_LEN, f)) { /* read commands */ char *s, *cur = line, *name = NULL, *arg = NULL; ++lineno; /* parse the line */ while (cur) { s = strsep(&cur, ED_SEPARATORS); if (s == NULL || *s == '#') break; if (*s == '\0') continue; if (arg) errx(ED_EFMT("too many arguments")); if (name == NULL) name = s; else arg = s; } if (name == NULL) /* empty line */ continue; if (arg == NULL) errx(ED_EFMT("missing arg for %s"), name); if (!strcasecmp(name, ED_TOK_SAMPLES)) { if (samples > 0) errx(ED_EFMT("duplicate ``samples'' line")); if (atoi(arg) <=0) errx(ED_EFMT("invalid number of samples")); samples = atoi(arg); if (samples>ED_MAX_SAMPLES_NO) errx(ED_EFMT("too many samples, maximum is %d"), ED_MAX_SAMPLES_NO); do_points = 0; } else if (!strcasecmp(name, ED_TOK_BW)) { char buf[IFNAMSIZ]; read_bandwidth(arg, &link->bandwidth, buf, sizeof(buf)); } else if (!strcasecmp(name, ED_TOK_LOSS)) { if (loss != -1.0) errx(ED_EFMT("duplicated token: %s"), name); if (!is_valid_number(arg)) errx(ED_EFMT("invalid %s"), arg); loss = atof(arg); if (loss > 1) errx(ED_EFMT("%s greater than 1.0"), name); do_points = 0; } else if (!strcasecmp(name, ED_TOK_NAME)) { if (profile_name[0] != '\0') errx(ED_EFMT("duplicated token: %s"), name); strlcpy(profile_name, arg, sizeof(profile_name)); do_points = 0; } else if (!strcasecmp(name, ED_TOK_DELAY)) { if (do_points) errx(ED_EFMT("duplicated token: %s"), name); delay_first = 1; do_points = 1; } else if (!strcasecmp(name, ED_TOK_PROB)) { if (do_points) errx(ED_EFMT("duplicated token: %s"), name); delay_first = 0; do_points = 1; } else if (do_points) { if (!is_valid_number(name) || !is_valid_number(arg)) errx(ED_EFMT("invalid point found")); if (delay_first) { points[points_no].delay = atof(name); points[points_no].prob = atof(arg); } else { points[points_no].delay = atof(arg); 
points[points_no].prob = atof(name); } if (points[points_no].prob > 1.0) errx(ED_EFMT("probability greater than 1.0")); ++points_no; } else { errx(ED_EFMT("unrecognised command '%s'"), name); } } fclose (f); if (samples == -1) { warnx("'%s' not found, assuming 100", ED_TOK_SAMPLES); samples = 100; } if (loss == -1.0) { warnx("'%s' not found, assuming no loss", ED_TOK_LOSS); loss = 1; } /* make sure that there are enough points. */ if (points_no < ED_MIN_SAMPLES_NO) errx(ED_EFMT("too few samples, need at least %d"), ED_MIN_SAMPLES_NO); qsort(points, points_no, sizeof(struct point), compare_points); /* interpolation */ for (i = 0; isamples[ix] = x1; } else { double m = (y2-y1)/(x2-x1); double c = y1 - m*x1; for (; ixsamples[ix] = (ix - c)/m; } } p->samples_no = samples; p->loss_level = loss * samples; strlcpy(p->name, profile_name, sizeof(p->name)); } #ifdef NEW_AQM /* Parse AQM/extra scheduler parameters */ static int process_extra_parms(int *ac, char **av, struct dn_extra_parms *ep, uint16_t type) { int i; /* use kernel defaults */ for (i=0; ipar[i] = -1; switch(type) { case TOK_CODEL: case TOK_FQ_CODEL: /* Codel * 0- target, 1- interval, 2- flags, * FQ_CODEL * 3- quantum, 4- limit, 5- flows */ if (type==TOK_CODEL) ep->par[2] = 0; else ep->par[2] = CODEL_ECN_ENABLED; while (*ac > 0) { int tok = match_token(aqm_params, *av); (*ac)--; av++; switch(tok) { case TOK_TARGET: if (*ac <= 0 || time_to_us(av[0]) < 0) errx(EX_DATAERR, "target needs time\n"); ep->par[0] = time_to_us(av[0]); (*ac)--; av++; break; case TOK_INTERVAL: if (*ac <= 0 || time_to_us(av[0]) < 0) errx(EX_DATAERR, "interval needs time\n"); ep->par[1] = time_to_us(av[0]); (*ac)--; av++; break; case TOK_ECN: ep->par[2] = CODEL_ECN_ENABLED; break; case TOK_NO_ECN: ep->par[2] &= ~CODEL_ECN_ENABLED; break; /* Config fq_codel parameters */ case TOK_QUANTUM: if (type != TOK_FQ_CODEL) errx(EX_DATAERR, "quantum is not for codel\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "quantum needs 
number\n"); ep->par[3]= atoi(av[0]); (*ac)--; av++; break; case TOK_LIMIT: if (type != TOK_FQ_CODEL) errx(EX_DATAERR, "limit is not for codel, use queue instead\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "limit needs number\n"); ep->par[4] = atoi(av[0]); (*ac)--; av++; break; case TOK_FLOWS: if (type != TOK_FQ_CODEL) errx(EX_DATAERR, "flows is not for codel\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "flows needs number\n"); ep->par[5] = atoi(av[0]); (*ac)--; av++; break; default: printf("%s is Invalid parameter\n", av[-1]); } } break; case TOK_PIE: case TOK_FQ_PIE: /* PIE * 0- target , 1- tupdate, 2- max_burst, * 3- max_ecnth, 4- alpha, * 5- beta, 6- flags * FQ_CODEL * 7- quantum, 8- limit, 9- flows */ if ( type == TOK_PIE) ep->par[6] = PIE_CAPDROP_ENABLED | PIE_DEPRATEEST_ENABLED | PIE_DERAND_ENABLED; else /* for FQ-PIE, use TS mode */ ep->par[6] = PIE_CAPDROP_ENABLED | PIE_DERAND_ENABLED | PIE_ECN_ENABLED; while (*ac > 0) { int tok = match_token(aqm_params, *av); (*ac)--; av++; switch(tok) { case TOK_TARGET: if (*ac <= 0 || time_to_us(av[0]) < 0) errx(EX_DATAERR, "target needs time\n"); ep->par[0] = time_to_us(av[0]); (*ac)--; av++; break; case TOK_TUPDATE: if (*ac <= 0 || time_to_us(av[0]) < 0) errx(EX_DATAERR, "tupdate needs time\n"); ep->par[1] = time_to_us(av[0]); (*ac)--; av++; break; case TOK_MAX_BURST: if (*ac <= 0 || time_to_us(av[0]) < 0) errx(EX_DATAERR, "max_burst needs time\n"); ep->par[2] = time_to_us(av[0]); (*ac)--; av++; break; case TOK_MAX_ECNTH: if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "max_ecnth needs number\n"); ep->par[3] = atof(av[0]) * PIE_SCALE; (*ac)--; av++; break; case TOK_ALPHA: if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "alpha needs number\n"); ep->par[4] = atof(av[0]) * PIE_SCALE; (*ac)--; av++; break; case TOK_BETA: if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "beta needs number\n"); ep->par[5] = atof(av[0]) * PIE_SCALE; (*ac)--; av++; break; case 
TOK_ECN: ep->par[6] |= PIE_ECN_ENABLED; break; case TOK_NO_ECN: ep->par[6] &= ~PIE_ECN_ENABLED; break; case TOK_CAPDROP: ep->par[6] |= PIE_CAPDROP_ENABLED; break; case TOK_NO_CAPDROP: ep->par[6] &= ~PIE_CAPDROP_ENABLED; break; case TOK_ONOFF: ep->par[6] |= PIE_ON_OFF_MODE_ENABLED; break; case TOK_DRE: ep->par[6] |= PIE_DEPRATEEST_ENABLED; break; case TOK_TS: ep->par[6] &= ~PIE_DEPRATEEST_ENABLED; break; case TOK_DERAND: ep->par[6] |= PIE_DERAND_ENABLED; break; case TOK_NO_DERAND: ep->par[6] &= ~PIE_DERAND_ENABLED; break; /* Config fq_pie parameters */ case TOK_QUANTUM: if (type != TOK_FQ_PIE) errx(EX_DATAERR, "quantum is not for pie\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "quantum needs number\n"); ep->par[7]= atoi(av[0]); (*ac)--; av++; break; case TOK_LIMIT: if (type != TOK_FQ_PIE) errx(EX_DATAERR, "limit is not for pie, use queue instead\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "limit needs number\n"); ep->par[8] = atoi(av[0]); (*ac)--; av++; break; case TOK_FLOWS: if (type != TOK_FQ_PIE) errx(EX_DATAERR, "flows is not for pie\n"); if (*ac <= 0 || !is_valid_number(av[0])) errx(EX_DATAERR, "flows needs number\n"); ep->par[9] = atoi(av[0]); (*ac)--; av++; break; default: printf("%s is invalid parameter\n", av[-1]); } } break; } return 0; } #endif /* * configuration of pipes, schedulers, flowsets. * When we configure a new scheduler, an empty pipe is created, so: * * do_pipe = 1 -> "pipe N config ..." only for backward compatibility * sched N+Delta type fifo sched_mask ... * pipe N+Delta * flowset N+Delta pipe N+Delta (no parameters) * sched N type wf2q+ sched_mask ... * pipe N * * do_pipe = 2 -> flowset N config * flowset N parameters * * do_pipe = 3 -> sched N config * sched N parameters (default no pipe) * optional Pipe N config ... 
* pipe ==> */ void ipfw_config_pipe(int ac, char **av) { int i; u_int j; char *end; struct dn_id *buf, *base; struct dn_sch *sch = NULL; struct dn_link *p = NULL; struct dn_fs *fs = NULL; struct dn_profile *pf = NULL; struct ipfw_flow_id *mask = NULL; #ifdef NEW_AQM struct dn_extra_parms *aqm_extra = NULL; struct dn_extra_parms *sch_extra = NULL; int lmax_extra; #endif int lmax; uint32_t _foo = 0, *flags = &_foo , *buckets = &_foo; /* * allocate space for 1 header, * 1 scheduler, 1 link, 1 flowset, 1 profile */ lmax = sizeof(struct dn_id); /* command header */ lmax += sizeof(struct dn_sch) + sizeof(struct dn_link) + sizeof(struct dn_fs) + sizeof(struct dn_profile); #ifdef NEW_AQM /* Extra Params */ lmax_extra = sizeof(struct dn_extra_parms); /* two lmax_extra because one for AQM params and another * sch params */ lmax += lmax_extra*2; #endif av++; ac--; /* Pipe number */ if (ac && isdigit(**av)) { i = atoi(*av); av++; ac--; } else i = -1; if (i <= 0) errx(EX_USAGE, "need a pipe/flowset/sched number"); base = buf = safe_calloc(1, lmax); /* all commands start with a 'CONFIGURE' and a version */ o_next(&buf, sizeof(struct dn_id), DN_CMD_CONFIG); base->id = DN_API_VERSION; switch (g_co.do_pipe) { case 1: /* "pipe N config ..." */ /* Allocate space for the WF2Q+ scheduler, its link * and the FIFO flowset. Set the number, but leave * the scheduler subtype and other parameters to 0 * so the kernel will use appropriate defaults. * XXX todo: add a flag to record if a parameter * is actually configured. * If we do a 'pipe config' mask -> sched_mask. * The FIFO scheduler and link are derived from the * WF2Q+ one in the kernel. 
*/ #ifdef NEW_AQM sch_extra = o_next(&buf, lmax_extra, DN_TEXT); sch_extra ->oid.subtype = 0; /* don't configure scheduler */ #endif sch = o_next(&buf, sizeof(*sch), DN_SCH); p = o_next(&buf, sizeof(*p), DN_LINK); #ifdef NEW_AQM aqm_extra = o_next(&buf, lmax_extra, DN_TEXT); aqm_extra ->oid.subtype = 0; /* don't configure AQM */ #endif fs = o_next(&buf, sizeof(*fs), DN_FS); sch->sched_nr = i; sch->oid.subtype = 0; /* defaults to WF2Q+ */ mask = &sch->sched_mask; flags = &sch->flags; buckets = &sch->buckets; *flags |= DN_PIPE_CMD; p->link_nr = i; /* This flowset is only for the FIFO scheduler */ fs->fs_nr = i + 2*DN_MAX_ID; fs->sched_nr = i + DN_MAX_ID; break; case 2: /* "queue N config ... " */ #ifdef NEW_AQM aqm_extra = o_next(&buf, lmax_extra, DN_TEXT); aqm_extra ->oid.subtype = 0; #endif fs = o_next(&buf, sizeof(*fs), DN_FS); fs->fs_nr = i; mask = &fs->flow_mask; flags = &fs->flags; buckets = &fs->buckets; break; case 3: /* "sched N config ..." */ #ifdef NEW_AQM sch_extra = o_next(&buf, lmax_extra, DN_TEXT); sch_extra ->oid.subtype = 0; #endif sch = o_next(&buf, sizeof(*sch), DN_SCH); #ifdef NEW_AQM aqm_extra = o_next(&buf, lmax_extra, DN_TEXT); aqm_extra ->oid.subtype = 0; #endif fs = o_next(&buf, sizeof(*fs), DN_FS); sch->sched_nr = i; mask = &sch->sched_mask; flags = &sch->flags; buckets = &sch->buckets; /* fs is used only with !MULTIQUEUE schedulers */ fs->fs_nr = i + DN_MAX_ID; fs->sched_nr = i; break; } /* set to -1 those fields for which we want to reuse existing * values from the kernel. * Also, *_nr and subtype = 0 mean reuse the value from the kernel. * XXX todo: support reuse of the mask. 
*/ if (p) p->bandwidth = -1; for (j = 0; j < sizeof(fs->par)/sizeof(fs->par[0]); j++) fs->par[j] = -1; while (ac > 0) { double d; int tok = match_token(dummynet_params, *av); ac--; av++; switch(tok) { case TOK_NOERROR: NEED(fs, "noerror is only for pipes"); fs->flags |= DN_NOERROR; break; case TOK_PLR: NEED(fs, "plr is only for pipes"); NEED1("plr needs argument 0..1\n"); d = strtod(av[0], NULL); if (d > 1) d = 1; else if (d < 0) d = 0; fs->plr = (int)(d*0x7fffffff); ac--; av++; break; case TOK_QUEUE: NEED(fs, "queue is only for pipes or flowsets"); NEED1("queue needs queue size\n"); end = NULL; fs->qsize = strtoul(av[0], &end, 0); if (*end == 'K' || *end == 'k') { fs->flags |= DN_QSIZE_BYTES; fs->qsize *= 1024; } else if (*end == 'B' || _substrcmp2(end, "by", "bytes") == 0) { fs->flags |= DN_QSIZE_BYTES; } ac--; av++; break; case TOK_BUCKETS: NEED(fs, "buckets is only for pipes or flowsets"); NEED1("buckets needs argument\n"); *buckets = strtoul(av[0], NULL, 0); ac--; av++; break; case TOK_FLOW_MASK: case TOK_SCHED_MASK: case TOK_MASK: NEED(mask, "tok_mask"); NEED1("mask needs mask specifier\n"); /* * per-flow queue, mask is dst_ip, dst_port, * src_ip, src_port, proto measured in bits */ bzero(mask, sizeof(*mask)); end = NULL; while (ac >= 1) { uint32_t *p32 = NULL; uint16_t *p16 = NULL; uint32_t *p20 = NULL; struct in6_addr *pa6 = NULL; uint32_t a; tok = match_token(dummynet_params, *av); ac--; av++; switch(tok) { case TOK_ALL: /* * special case, all bits significant * except 'extra' (the queue number) */ mask->dst_ip = ~0; mask->src_ip = ~0; mask->dst_port = ~0; mask->src_port = ~0; mask->proto = ~0; n2mask(&mask->dst_ip6, 128); n2mask(&mask->src_ip6, 128); mask->flow_id6 = ~0; *flags |= DN_HAVE_MASK; goto end_mask; case TOK_QUEUE: mask->extra = ~0; *flags |= DN_HAVE_MASK; goto end_mask; case TOK_DSTIP: mask->addr_type = 4; p32 = &mask->dst_ip; break; case TOK_SRCIP: mask->addr_type = 4; p32 = &mask->src_ip; break; case TOK_DSTIP6: mask->addr_type = 6; pa6 = 
&mask->dst_ip6; break; case TOK_SRCIP6: mask->addr_type = 6; pa6 = &mask->src_ip6; break; case TOK_FLOWID: mask->addr_type = 6; p20 = &mask->flow_id6; break; case TOK_DSTPORT: p16 = &mask->dst_port; break; case TOK_SRCPORT: p16 = &mask->src_port; break; case TOK_PROTO: break; default: ac++; av--; /* backtrack */ goto end_mask; } if (ac < 1) errx(EX_USAGE, "mask: value missing"); if (*av[0] == '/') { a = strtoul(av[0]+1, &end, 0); if (pa6 == NULL) a = (a == 32) ? ~0 : (1 << a) - 1; } else a = strtoul(av[0], &end, 0); if (p32 != NULL) *p32 = a; else if (p16 != NULL) { if (a > 0xFFFF) errx(EX_DATAERR, "port mask must be 16 bit"); *p16 = (uint16_t)a; } else if (p20 != NULL) { if (a > 0xfffff) errx(EX_DATAERR, "flow_id mask must be 20 bit"); *p20 = (uint32_t)a; } else if (pa6 != NULL) { if (a > 128) errx(EX_DATAERR, "in6addr invalid mask len"); else n2mask(pa6, a); } else { if (a > 0xFF) errx(EX_DATAERR, "proto mask must be 8 bit"); mask->proto = (uint8_t)a; } if (a != 0) *flags |= DN_HAVE_MASK; ac--; av++; } /* end while, config masks */ end_mask: break; #ifdef NEW_AQM case TOK_CODEL: case TOK_PIE: NEED(fs, "codel/pie is only for flowsets"); fs->flags &= ~(DN_IS_RED|DN_IS_GENTLE_RED); fs->flags |= DN_IS_AQM; strlcpy(aqm_extra->name, av[-1], sizeof(aqm_extra->name)); aqm_extra->oid.subtype = DN_AQM_PARAMS; process_extra_parms(&ac, av, aqm_extra, tok); break; case TOK_FQ_CODEL: case TOK_FQ_PIE: if (!strcmp(av[-1],"type")) errx(EX_DATAERR, "use type before fq_codel/fq_pie"); NEED(sch, "fq_codel/fq_pie is only for schd"); strlcpy(sch_extra->name, av[-1], sizeof(sch_extra->name)); sch_extra->oid.subtype = DN_SCH_PARAMS; process_extra_parms(&ac, av, sch_extra, tok); break; #endif case TOK_RED: case TOK_GRED: NEED1("red/gred needs w_q/min_th/max_th/max_p\n"); fs->flags |= DN_IS_RED; if (tok == TOK_GRED) fs->flags |= DN_IS_GENTLE_RED; /* * the format for parameters is w_q/min_th/max_th/max_p */ if ((end = strsep(&av[0], "/"))) { double w_q = strtod(end, NULL); if (w_q > 1 || 
w_q <= 0) errx(EX_DATAERR, "0 < w_q <= 1"); fs->w_q = (int) (w_q * (1 << SCALE_RED)); } if ((end = strsep(&av[0], "/"))) { fs->min_th = strtoul(end, &end, 0); if (*end == 'K' || *end == 'k') fs->min_th *= 1024; } if ((end = strsep(&av[0], "/"))) { fs->max_th = strtoul(end, &end, 0); if (*end == 'K' || *end == 'k') fs->max_th *= 1024; } if ((end = strsep(&av[0], "/"))) { double max_p = strtod(end, NULL); if (max_p > 1 || max_p < 0) errx(EX_DATAERR, "0 <= max_p <= 1"); fs->max_p = (int)(max_p * (1 << SCALE_RED)); } ac--; av++; break; case TOK_ECN: fs->flags |= DN_IS_ECN; break; case TOK_DROPTAIL: NEED(fs, "droptail is only for flowsets"); fs->flags &= ~(DN_IS_RED|DN_IS_GENTLE_RED); break; case TOK_BW: NEED(p, "bw is only for links"); NEED1("bw needs bandwidth or interface\n"); read_bandwidth(av[0], &p->bandwidth, NULL, 0); ac--; av++; break; case TOK_DELAY: NEED(p, "delay is only for links"); NEED1("delay needs argument 0..10000ms\n"); p->delay = strtoul(av[0], NULL, 0); ac--; av++; break; case TOK_TYPE: { int l; NEED(sch, "type is only for schedulers"); NEED1("type needs a string"); l = strlen(av[0]); if (l == 0 || l > 15) errx(1, "type %s too long\n", av[0]); strlcpy(sch->name, av[0], sizeof(sch->name)); sch->oid.subtype = 0; /* use string */ #ifdef NEW_AQM /* if fq_codel is selected, consider all tokens after it * as parameters */ if (!strcasecmp(av[0],"fq_codel") || !strcasecmp(av[0],"fq_pie")){ strlcpy(sch_extra->name, av[0], sizeof(sch_extra->name)); sch_extra->oid.subtype = DN_SCH_PARAMS; process_extra_parms(&ac, av, sch_extra, tok); } else { ac--;av++; } #else ac--;av++; #endif break; } case TOK_WEIGHT: NEED(fs, "weight is only for flowsets"); NEED1("weight needs argument\n"); fs->par[0] = strtol(av[0], &end, 0); ac--; av++; break; case TOK_LMAX: NEED(fs, "lmax is only for flowsets"); NEED1("lmax needs argument\n"); fs->par[1] = strtol(av[0], &end, 0); ac--; av++; break; case TOK_PRI: NEED(fs, "priority is only for flowsets"); NEED1("priority needs 
argument\n"); fs->par[2] = strtol(av[0], &end, 0); ac--; av++; break; case TOK_SCHED: case TOK_PIPE: NEED(fs, "pipe/sched"); NEED1("pipe/link/sched needs number\n"); fs->sched_nr = strtoul(av[0], &end, 0); ac--; av++; break; case TOK_PROFILE: NEED((!pf), "profile already set"); NEED(p, "profile"); { NEED1("extra delay needs the file name\n"); pf = o_next(&buf, sizeof(*pf), DN_PROFILE); load_extra_delays(av[0], pf, p); //XXX can't fail? --ac; ++av; } break; case TOK_BURST: NEED(p, "burst"); NEED1("burst needs argument\n"); errno = 0; if (expand_number(av[0], &p->burst) < 0) if (errno != ERANGE) errx(EX_DATAERR, "burst: invalid argument"); if (errno || p->burst > (1ULL << 48) - 1) errx(EX_DATAERR, "burst: out of range (0..2^48-1)"); ac--; av++; break; default: errx(EX_DATAERR, "unrecognised option ``%s''", av[-1]); } } /* check validity of parameters */ if (p) { if (p->delay > 10000) errx(EX_DATAERR, "delay must be < 10000"); - if (p->bandwidth == -1) + if (p->bandwidth == (uint32_t)-1) p->bandwidth = 0; } if (fs) { /* XXX accept a 0 scheduler to keep the default */ if (fs->flags & DN_QSIZE_BYTES) { size_t len; long limit; len = sizeof(limit); if (sysctlbyname("net.inet.ip.dummynet.pipe_byte_limit", &limit, &len, NULL, 0) == -1) limit = 1024*1024; if (fs->qsize > limit) errx(EX_DATAERR, "queue size must be < %ldB", limit); } else { size_t len; long limit; len = sizeof(limit); if (sysctlbyname("net.inet.ip.dummynet.pipe_slot_limit", &limit, &len, NULL, 0) == -1) limit = 100; if (fs->qsize > limit) errx(EX_DATAERR, "2 <= queue size <= %ld", limit); } #ifdef NEW_AQM if ((fs->flags & DN_IS_ECN) && !((fs->flags & DN_IS_RED)|| (fs->flags & DN_IS_AQM))) errx(EX_USAGE, "ECN can be used with red/gred/" "codel/fq_codel only!"); #else if ((fs->flags & DN_IS_ECN) && !(fs->flags & DN_IS_RED)) errx(EX_USAGE, "enable red/gred for ECN"); #endif if (fs->flags & DN_IS_RED) { size_t len; int lookup_depth, avg_pkt_size; if (!(fs->flags & DN_IS_ECN) && (fs->min_th >= fs->max_th)) 
errx(EX_DATAERR, "min_th %d must be < than max_th %d", fs->min_th, fs->max_th); else if ((fs->flags & DN_IS_ECN) && (fs->min_th > fs->max_th)) errx(EX_DATAERR, "min_th %d must be =< than max_th %d", fs->min_th, fs->max_th); if (fs->max_th == 0) errx(EX_DATAERR, "max_th must be > 0"); len = sizeof(int); if (sysctlbyname("net.inet.ip.dummynet.red_lookup_depth", &lookup_depth, &len, NULL, 0) == -1) lookup_depth = 256; if (lookup_depth == 0) errx(EX_DATAERR, "net.inet.ip.dummynet.red_lookup_depth" " must be greater than zero"); len = sizeof(int); if (sysctlbyname("net.inet.ip.dummynet.red_avg_pkt_size", &avg_pkt_size, &len, NULL, 0) == -1) avg_pkt_size = 512; if (avg_pkt_size == 0) errx(EX_DATAERR, "net.inet.ip.dummynet.red_avg_pkt_size must" " be greater than zero"); #if 0 /* the following computation is now done in the kernel */ /* * Ticks needed for sending a medium-sized packet. * Unfortunately, when we are configuring a WF2Q+ queue, we * do not have bandwidth information, because that is stored * in the parent pipe, and also we have multiple queues * competing for it. So we set s=0, which is not very * correct. But on the other hand, why do we want RED with * WF2Q+ ? */ if (p.bandwidth==0) /* this is a WF2Q+ queue */ s = 0; else s = (double)ck.hz * avg_pkt_size * 8 / p.bandwidth; /* * max idle time (in ticks) before avg queue size becomes 0. * NOTA: (3/w_q) is approx the value x so that * (1-w_q)^x < 10^-3. */ w_q = ((double)fs->w_q) / (1 << SCALE_RED); idle = s * 3. 
/ w_q; fs->lookup_step = (int)idle / lookup_depth; if (!fs->lookup_step) fs->lookup_step = 1; weight = 1 - w_q; for (t = fs->lookup_step; t > 1; --t) weight *= 1 - w_q; fs->lookup_weight = (int)(weight * (1 << SCALE_RED)); #endif /* code moved in the kernel */ } } i = do_cmd(IP_DUMMYNET3, base, (char *)buf - (char *)base); if (i) err(1, "setsockopt(%s)", "IP_DUMMYNET_CONFIGURE"); } void dummynet_flush(void) { struct dn_id oid; oid_fill(&oid, sizeof(oid), DN_CMD_FLUSH, DN_API_VERSION); do_cmd(IP_DUMMYNET3, &oid, oid.len); } /* Parse input for 'ipfw [pipe|sched|queue] show [range list]' * Returns the number of ranges, and possibly stores them * in the array v of size len. */ static int parse_range(int ac, char *av[], uint32_t *v, int len) { int n = 0; char *endptr, *s; uint32_t base[2]; if (v == NULL || len < 2) { v = base; len = 2; } for (s = *av; s != NULL; av++, ac--) { v[0] = strtoul(s, &endptr, 10); v[1] = (*endptr != '-') ? v[0] : strtoul(endptr+1, &endptr, 10); if (*endptr == '\0') { /* prepare for next round */ s = (ac > 0) ? *(av+1) : NULL; } else { if (*endptr != ',') { warn("invalid number: %s", s); s = ++endptr; continue; } /* continue processing from here */ s = ++endptr; ac++; av--; } if (v[1] < v[0] || v[0] >= DN_MAX_ID-1 || v[1] >= DN_MAX_ID-1) { continue; /* invalid entry */ } n++; /* translate if 'pipe list' */ if (g_co.do_pipe == 1) { v[0] += DN_MAX_ID; v[1] += DN_MAX_ID; } v = (n*2 < len) ? v + 2 : base; } return n; } /* main entry point for dummynet list functions. co.do_pipe indicates * which function we want to support. * av may contain filtering arguments, either individual entries * or ranges, or lists (space or commas are valid separators). * Format for a range can be n1-n2 or n3 n4 n5 ... * In a range n1 must be <= n2, otherwise the range is ignored. 
* A number 'n4' is translate in a range 'n4-n4' * All number must be > 0 and < DN_MAX_ID-1 */ void dummynet_list(int ac, char *av[], int show_counters) { struct dn_id *oid, *x = NULL; int ret, i; int n; /* # of ranges */ u_int buflen, l; u_int max_size; /* largest obj passed up */ (void)show_counters; // XXX unused, but we should use it. ac--; av++; /* skip 'list' | 'show' word */ n = parse_range(ac, av, NULL, 0); /* Count # of ranges. */ /* Allocate space to store ranges */ l = sizeof(*oid) + sizeof(uint32_t) * n * 2; oid = safe_calloc(1, l); oid_fill(oid, l, DN_CMD_GET, DN_API_VERSION); if (n > 0) /* store ranges in idx */ parse_range(ac, av, (uint32_t *)(oid + 1), n*2); /* * Compute the size of the largest object returned. If the * response leaves at least this much spare space in the * buffer, then surely the response is complete; otherwise * there might be a risk of truncation and we will need to * retry with a larger buffer. * XXX don't bother with smaller structs. */ max_size = sizeof(struct dn_fs); if (max_size < sizeof(struct dn_sch)) max_size = sizeof(struct dn_sch); if (max_size < sizeof(struct dn_flow)) max_size = sizeof(struct dn_flow); switch (g_co.do_pipe) { case 1: oid->subtype = DN_LINK; /* list pipe */ break; case 2: oid->subtype = DN_FS; /* list queue */ break; case 3: oid->subtype = DN_SCH; /* list sched */ break; } /* * Ask the kernel an estimate of the required space (result * in oid.id), unless we are requesting a subset of objects, * in which case the kernel does not give an exact answer. * In any case, space might grow in the meantime due to the * creation of new queues, so we must be prepared to retry. 
*/ if (n > 0) { buflen = 4*1024; } else { ret = do_cmd(-IP_DUMMYNET3, oid, (uintptr_t)&l); if (ret != 0 || oid->id <= sizeof(*oid)) goto done; buflen = oid->id + max_size; oid->len = sizeof(*oid); /* restore */ } /* Try a few times, until the buffer fits */ for (i = 0; i < 20; i++) { l = buflen; x = safe_realloc(x, l); bcopy(oid, x, oid->len); ret = do_cmd(-IP_DUMMYNET3, x, (uintptr_t)&l); if (ret != 0 || x->id <= sizeof(*oid)) goto done; /* no response */ if (l + max_size <= buflen) break; /* ok */ buflen *= 2; /* double for next attempt */ } list_pipes(x, O_NEXT(x, l)); done: if (x) free(x); free(oid); } diff --git a/sys/netinet/ip_dummynet.h b/sys/netinet/ip_dummynet.h index de38ed9fbedf..533dc5ccd85f 100644 --- a/sys/netinet/ip_dummynet.h +++ b/sys/netinet/ip_dummynet.h @@ -1,285 +1,285 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998-2010 Luigi Rizzo, Universita` di Pisa * Portions Copyright (c) 2000 Akamba Corp. * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _IP_DUMMYNET_H #define _IP_DUMMYNET_H #define NEW_AQM /* * Definition of the kernel-userland API for dummynet. * * Setsockopt() and getsockopt() pass a batch of objects, each * of them starting with a "struct dn_id" which should fully identify * the object and its relation with others in the sequence. * The first object in each request should have * type= DN_CMD_*, id = DN_API_VERSION. * For other objects, type and subtype specify the object, len indicates * the total length including the header, and 'id' identifies the specific * object. * * Most objects are numbered with an identifier in the range 1..65535. * DN_MAX_ID indicates the first value outside the range. */ #define DN_API_VERSION 12500000 #define DN_MAX_ID 0x10000 struct dn_id { uint16_t len; /* total obj len including this header */ uint8_t type; uint8_t subtype; uint32_t id; /* generic id */ }; /* * These values are in the type field of struct dn_id. 
* To preserve the ABI, never rearrange the list or delete * entries with the exception of DN_LAST */ enum { DN_NONE = 0, DN_LINK = 1, DN_FS, DN_SCH, DN_SCH_I, DN_QUEUE, DN_DELAY_LINE, DN_PROFILE, DN_FLOW, /* struct dn_flow */ DN_TEXT, /* opaque text is the object */ DN_CMD_CONFIG = 0x80, /* objects follow */ DN_CMD_DELETE, /* subtype + list of entries */ DN_CMD_GET, /* subtype + list of entries */ DN_CMD_FLUSH, /* for compatibility with FreeBSD 7.2/8 */ DN_COMPAT_PIPE, DN_COMPAT_QUEUE, DN_GET_COMPAT, /* special commands for emulation of sysctl variables */ DN_SYSCTL_GET, DN_SYSCTL_SET, #ifdef NEW_AQM /* subtypes used for setting/getting extra parameters. * these subtypes used with IP_DUMMYNET3 command (get) * and DN_TEXT (set). */ DN_AQM_PARAMS, /* AQM extra params */ DN_SCH_PARAMS, /* scheduler extra params */ #endif DN_LAST, }; enum { /* subtype for schedulers, flowset and the like */ DN_SCHED_UNKNOWN = 0, DN_SCHED_FIFO = 1, DN_SCHED_WF2QP = 2, /* others are in individual modules */ }; enum { /* user flags */ DN_HAVE_MASK = 0x0001, /* fs or sched has a mask */ DN_NOERROR = 0x0002, /* do not report errors */ DN_QHT_HASH = 0x0004, /* qht is a hash table */ DN_QSIZE_BYTES = 0x0008, /* queue size is in bytes */ DN_HAS_PROFILE = 0x0010, /* a link has a profile */ DN_IS_RED = 0x0020, DN_IS_GENTLE_RED= 0x0040, DN_IS_ECN = 0x0080, #ifdef NEW_AQM DN_IS_AQM = 0x0100, /* AQMs: e.g Codel & PIE */ #endif DN_PIPE_CMD = 0x1000, /* pipe config... */ }; /* * link template. */ struct dn_link { struct dn_id oid; /* * Userland sets bw and delay in bits/s and milliseconds. * The kernel converts this back and forth to bits/tick and ticks. * XXX what about burst ? */ int32_t link_nr; - int bandwidth; /* bit/s or bits/tick. */ + uint32_t bandwidth; /* bit/s or bits/tick. */ int delay; /* ms and ticks */ uint64_t burst; /* scaled. bits*Hz XXX */ }; /* * A flowset, which is a template for flows. 
Contains parameters * from the command line: id, target scheduler, queue sizes, plr, * flow masks, buckets for the flow hash, and possibly scheduler- * specific parameters (weight, quantum and so on). */ struct dn_fs { struct dn_id oid; uint32_t fs_nr; /* the flowset number */ uint32_t flags; /* userland flags */ int qsize; /* queue size in slots or bytes */ int32_t plr; /* PLR, pkt loss rate (2^31-1 means 100%) */ uint32_t buckets; /* buckets used for the queue hash table */ struct ipfw_flow_id flow_mask; uint32_t sched_nr; /* the scheduler we attach to */ /* generic scheduler parameters. Leave them at -1 if unset. * Now we use 0: weight, 1: lmax, 2: priority */ int par[4]; /* RED/GRED parameters. * weight and probabilities are in the range 0..1 represented * in fixed point arithmetic with SCALE_RED decimal bits. */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define SCALE_VAL(x) ( (x) >> SCALE_RED ) #define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) int w_q ; /* queue weight (scaled) */ int max_th ; /* maximum threshold for queue (scaled) */ int min_th ; /* minimum threshold for queue (scaled) */ int max_p ; /* maximum value for p_b (scaled) */ }; /* * dn_flow collects flow_id and stats for queues and scheduler * instances, and is used to pass these info to userland. * oid.type/oid.subtype describe the object, oid.id is number * of the parent object. */ struct dn_flow { struct dn_id oid; struct ipfw_flow_id fid; uint64_t tot_pkts; /* statistics counters */ uint64_t tot_bytes; uint32_t length; /* Queue length, in packets */ uint32_t len_bytes; /* Queue length, in bytes */ uint32_t drops; }; /* * Scheduler template, mostly indicating the name, number, * sched_mask and buckets. */ struct dn_sch { struct dn_id oid; uint32_t sched_nr; /* N, scheduler number */ uint32_t buckets; /* number of buckets for the instances */ uint32_t flags; /* have_mask, ... 
*/ char name[16]; /* null terminated */ /* mask to select the appropriate scheduler instance */ struct ipfw_flow_id sched_mask; /* M */ }; /* A delay profile is attached to a link. * Note that a profile, as any other object, cannot be longer than 2^16 */ #define ED_MAX_SAMPLES_NO 1024 struct dn_profile { struct dn_id oid; /* fields to simulate a delay profile */ #define ED_MAX_NAME_LEN 32 char name[ED_MAX_NAME_LEN]; int link_nr; int loss_level; - int bandwidth; // XXX use link bandwidth? + uint32_t bandwidth; // XXX use link bandwidth? int samples_no; /* actual len of samples[] */ int samples[ED_MAX_SAMPLES_NO]; /* may be shorter */ }; #ifdef NEW_AQM /* Extra parameters for AQM and scheduler. * This struct is used to pass and retrieve parameters (configurations) * to/from AQM and Scheduler. */ struct dn_extra_parms { struct dn_id oid; char name[16]; uint32_t nr; #define DN_MAX_EXTRA_PARM 10 int64_t par[DN_MAX_EXTRA_PARM]; }; #endif /* * Overall structure of dummynet In dummynet, packets are selected with the firewall rules, and passed to two different objects: PIPE or QUEUE (bad name). A QUEUE defines a classifier, which groups packets into flows according to a 'mask', puts them into independent queues (one per flow) with configurable size and queue management policy, and passes flows to a scheduler: (flow_mask|sched_mask) sched_mask +---------+ weight Wx +-------------+ | |->-[flow]-->--| |-+ -->--| QUEUE x | ... | | | | |->-[flow]-->--| SCHEDuler N | | +---------+ | | | ... | +--[LINK N]-->-- +---------+ weight Wy | | +--[LINK N]-->-- | |->-[flow]-->--| | | -->--| QUEUE y | ... | | | | |->-[flow]-->--| | | +---------+ +-------------+ | +-------------+ Many QUEUE objects can connect to the same scheduler, each QUEUE object can have its own set of parameters. In turn, the SCHEDuler 'forks' multiple instances according to a 'sched_mask', each instance manages its own set of queues and transmits on a private instance of a configurable LINK. 
A PIPE is a simplified version of the above, where there is no flow_mask, and each scheduler instance handles a single queue. The following data structures (visible from userland) describe the objects used by dummynet: + dn_link, contains the main configuration parameters related to delay and bandwidth; + dn_profile describes a delay profile; + dn_flow describes the flow status (flow id, statistics) + dn_sch describes a scheduler + dn_fs describes a flowset (msk, weight, queue parameters) * */ #endif /* _IP_DUMMYNET_H */ diff --git a/sys/netpfil/ipfw/ip_dn_glue.c b/sys/netpfil/ipfw/ip_dn_glue.c index e035fedaaf91..a690aa0290d7 100644 --- a/sys/netpfil/ipfw/ip_dn_glue.c +++ b/sys/netpfil/ipfw/ip_dn_glue.c @@ -1,850 +1,850 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Binary compatibility support for /sbin/ipfw RELENG_7 and RELENG_8 */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include #include /* ip_output(), IP_FORWARDING */ #include #include #include #include #include #ifdef NEW_AQM #include #endif #include /* FREEBSD7.2 ip_dummynet.h r191715*/ struct dn_heap_entry7 { int64_t key; /* sorting key. Topmost element is smallest one */ void *object; /* object pointer */ }; struct dn_heap7 { int size; int elements; int offset; /* XXX if > 0 this is the offset of direct ptr to obj */ struct dn_heap_entry7 *p; /* really an array of "size" entries */ }; /* Common to 7.2 and 8 */ struct dn_flow_set { SLIST_ENTRY(dn_flow_set) next; /* linked list in a hash slot */ u_short fs_nr ; /* flow_set number */ u_short flags_fs; #define DNOLD_HAVE_FLOW_MASK 0x0001 #define DNOLD_IS_RED 0x0002 #define DNOLD_IS_GENTLE_RED 0x0004 #define DNOLD_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ #define DNOLD_NOERROR 0x0010 /* do not report ENOBUFS on drops */ #define DNOLD_HAS_PROFILE 0x0020 /* the pipe has a delay profile. 
*/ #define DNOLD_IS_PIPE 0x4000 #define DNOLD_IS_QUEUE 0x8000 struct dn_pipe7 *pipe ; /* pointer to parent pipe */ u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */ int weight ; /* WFQ queue weight */ int qsize ; /* queue size in slots or bytes */ int plr ; /* pkt loss rate (2^31-1 means 100%) */ struct ipfw_flow_id flow_mask ; /* hash table of queues onto this flow_set */ int rq_size ; /* number of slots */ int rq_elements ; /* active elements */ struct dn_flow_queue7 **rq; /* array of rq_size entries */ u_int32_t last_expired ; /* do not expire too frequently */ int backlogged ; /* #active queues for this flowset */ /* RED parameters */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define SCALE_VAL(x) ( (x) >> SCALE_RED ) #define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) int w_q ; /* queue weight (scaled) */ int max_th ; /* maximum threshold for queue (scaled) */ int min_th ; /* minimum threshold for queue (scaled) */ int max_p ; /* maximum value for p_b (scaled) */ u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */ u_int lookup_depth ; /* depth of lookup table */ int lookup_step ; /* granularity inside the lookup table */ int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ int avg_pkt_size ; /* medium packet size */ int max_pkt_size ; /* max packet size */ }; SLIST_HEAD(dn_flow_set_head, dn_flow_set); #define DN_IS_PIPE 0x4000 #define DN_IS_QUEUE 0x8000 struct dn_flow_queue7 { struct dn_flow_queue7 *next ; struct ipfw_flow_id id ; struct mbuf *head, *tail ; /* queue of packets */ u_int len ; u_int len_bytes ; u_long numbytes; u_int64_t tot_pkts ; /* statistics counters */ u_int64_t tot_bytes ; u_int32_t drops ; int hash_slot ; /* debugging/diagnostic */ /* RED parameters */ int avg ; /* average queue length 
est. (scaled) */ int count ; /* arrivals since last RED drop */ int random ; /* random value (scaled) */ u_int32_t q_time; /* start of queue idle time */ /* WF2Q+ support */ struct dn_flow_set *fs ; /* parent flow set */ int heap_pos ; /* position (index) of struct in heap */ int64_t sched_time ; /* current time when queue enters ready_heap */ int64_t S,F ; /* start time, finish time */ }; struct dn_pipe7 { /* a pipe */ SLIST_ENTRY(dn_pipe7) next; /* linked list in a hash slot */ int pipe_nr ; /* number */ - int bandwidth; /* really, bytes/tick. */ + uint32_t bandwidth; /* really, bytes/tick. */ int delay ; /* really, ticks */ struct mbuf *head, *tail ; /* packets in delay line */ /* WF2Q+ */ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */ int64_t V ; /* virtual time */ int sum; /* sum of weights of all active sessions */ int numbytes; int64_t sched_time ; /* time pipe was scheduled in ready_heap */ /* * When the tx clock come from an interface (if_name[0] != '\0'), its name * is stored below, whereas the ifp is filled when the rule is configured. */ char if_name[IFNAMSIZ]; struct ifnet *ifp ; int ready ; /* set if ifp != NULL and we got a signal from it */ struct dn_flow_set fs ; /* used with fixed-rate flows */ }; SLIST_HEAD(dn_pipe_head7, dn_pipe7); /* FREEBSD8 ip_dummynet.h r196045 */ struct dn_flow_queue8 { struct dn_flow_queue8 *next ; struct ipfw_flow_id id ; struct mbuf *head, *tail ; /* queue of packets */ u_int len ; u_int len_bytes ; uint64_t numbytes ; /* credit for transmission (dynamic queues) */ int64_t extra_bits; /* extra bits simulating unavailable channel */ u_int64_t tot_pkts ; /* statistics counters */ u_int64_t tot_bytes ; u_int32_t drops ; int hash_slot ; /* debugging/diagnostic */ /* RED parameters */ int avg ; /* average queue length est. 
(scaled) */ int count ; /* arrivals since last RED drop */ int random ; /* random value (scaled) */ int64_t idle_time; /* start of queue idle time */ /* WF2Q+ support */ struct dn_flow_set *fs ; /* parent flow set */ int heap_pos ; /* position (index) of struct in heap */ int64_t sched_time ; /* current time when queue enters ready_heap */ int64_t S,F ; /* start time, finish time */ }; struct dn_pipe8 { /* a pipe */ SLIST_ENTRY(dn_pipe8) next; /* linked list in a hash slot */ int pipe_nr ; /* number */ - int bandwidth; /* really, bytes/tick. */ + uint32_t bandwidth; /* really, bytes/tick. */ int delay ; /* really, ticks */ struct mbuf *head, *tail ; /* packets in delay line */ /* WF2Q+ */ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */ int64_t V ; /* virtual time */ int sum; /* sum of weights of all active sessions */ /* Same as in dn_flow_queue, numbytes can become large */ int64_t numbytes; /* bits I can transmit (more or less). 
*/ uint64_t burst; /* burst size, scaled: bits * hz */ int64_t sched_time ; /* time pipe was scheduled in ready_heap */ int64_t idle_time; /* start of pipe idle time */ char if_name[IFNAMSIZ]; struct ifnet *ifp ; int ready ; /* set if ifp != NULL and we got a signal from it */ struct dn_flow_set fs ; /* used with fixed-rate flows */ /* fields to simulate a delay profile */ #define ED_MAX_NAME_LEN 32 char name[ED_MAX_NAME_LEN]; int loss_level; int samples_no; int *samples; }; #define ED_MAX_SAMPLES_NO 1024 struct dn_pipe_max8 { struct dn_pipe8 pipe; int samples[ED_MAX_SAMPLES_NO]; }; SLIST_HEAD(dn_pipe_head8, dn_pipe8); /* * Changes from 7.2 to 8: * dn_pipe: * numbytes from int to int64_t * add burst (int64_t) * add idle_time (int64_t) * add profile * add struct dn_pipe_max * add flag DN_HAS_PROFILE * * dn_flow_queue * numbytes from u_long to int64_t * add extra_bits (int64_t) * q_time from u_int32_t to int64_t and name idle_time * * dn_flow_set unchanged * */ /* NOTE:XXX copied from dummynet.c */ #define O_NEXT(p, len) ((void *)((char *)p + len)) static void oid_fill(struct dn_id *oid, int len, int type, uintptr_t id) { oid->len = len; oid->type = type; oid->subtype = 0; oid->id = id; } /* make room in the buffer and move the pointer forward */ static void * o_next(struct dn_id **o, int len, int type) { struct dn_id *ret = *o; oid_fill(ret, len, type, 0); *o = O_NEXT(*o, len); return ret; } static size_t pipesize7 = sizeof(struct dn_pipe7); static size_t pipesize8 = sizeof(struct dn_pipe8); static size_t pipesizemax8 = sizeof(struct dn_pipe_max8); /* Indicate 'ipfw' version * 1: from FreeBSD 7.2 * 0: from FreeBSD 8 * -1: unknown (for now is unused) * * It is update when a IP_DUMMYNET_DEL or IP_DUMMYNET_CONFIGURE request arrives * NOTE: if a IP_DUMMYNET_GET arrives and the 'ipfw' version is unknown, * it is suppose to be the FreeBSD 8 version. 
*/ static int is7 = 0; static int convertflags2new(int src) { int dst = 0; if (src & DNOLD_HAVE_FLOW_MASK) dst |= DN_HAVE_MASK; if (src & DNOLD_QSIZE_IS_BYTES) dst |= DN_QSIZE_BYTES; if (src & DNOLD_NOERROR) dst |= DN_NOERROR; if (src & DNOLD_IS_RED) dst |= DN_IS_RED; if (src & DNOLD_IS_GENTLE_RED) dst |= DN_IS_GENTLE_RED; if (src & DNOLD_HAS_PROFILE) dst |= DN_HAS_PROFILE; return dst; } static int convertflags2old(int src) { int dst = 0; if (src & DN_HAVE_MASK) dst |= DNOLD_HAVE_FLOW_MASK; if (src & DN_IS_RED) dst |= DNOLD_IS_RED; if (src & DN_IS_GENTLE_RED) dst |= DNOLD_IS_GENTLE_RED; if (src & DN_NOERROR) dst |= DNOLD_NOERROR; if (src & DN_HAS_PROFILE) dst |= DNOLD_HAS_PROFILE; if (src & DN_QSIZE_BYTES) dst |= DNOLD_QSIZE_IS_BYTES; return dst; } static int dn_compat_del(void *v) { struct dn_pipe7 *p = (struct dn_pipe7 *) v; struct dn_pipe8 *p8 = (struct dn_pipe8 *) v; struct { struct dn_id oid; uintptr_t a[1]; /* add more if we want a list */ } cmd; /* XXX DN_API_VERSION ??? */ oid_fill((void *)&cmd, sizeof(cmd), DN_CMD_DELETE, DN_API_VERSION); if (is7) { if (p->pipe_nr == 0 && p->fs.fs_nr == 0) return EINVAL; if (p->pipe_nr != 0 && p->fs.fs_nr != 0) return EINVAL; } else { if (p8->pipe_nr == 0 && p8->fs.fs_nr == 0) return EINVAL; if (p8->pipe_nr != 0 && p8->fs.fs_nr != 0) return EINVAL; } if (p->pipe_nr != 0) { /* pipe x delete */ cmd.a[0] = p->pipe_nr; cmd.oid.subtype = DN_LINK; } else { /* queue x delete */ cmd.oid.subtype = DN_FS; cmd.a[0] = (is7) ? 
p->fs.fs_nr : p8->fs.fs_nr; } return do_config(&cmd, cmd.oid.len); } static int dn_compat_config_queue(struct dn_fs *fs, void* v) { struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; struct dn_flow_set *f; if (is7) f = &p7->fs; else f = &p8->fs; fs->fs_nr = f->fs_nr; fs->sched_nr = f->parent_nr; fs->flow_mask = f->flow_mask; fs->buckets = f->rq_size; fs->qsize = f->qsize; fs->plr = f->plr; fs->par[0] = f->weight; fs->flags = convertflags2new(f->flags_fs); if (fs->flags & DN_IS_GENTLE_RED || fs->flags & DN_IS_RED) { fs->w_q = f->w_q; fs->max_th = f->max_th; fs->min_th = f->min_th; fs->max_p = f->max_p; } return 0; } static int dn_compat_config_pipe(struct dn_sch *sch, struct dn_link *p, struct dn_fs *fs, void* v) { struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; int i = p7->pipe_nr; sch->sched_nr = i; sch->oid.subtype = 0; p->link_nr = i; fs->fs_nr = i + 2*DN_MAX_ID; fs->sched_nr = i + DN_MAX_ID; /* Common to 7 and 8 */ p->bandwidth = p7->bandwidth; p->delay = p7->delay; if (!is7) { /* FreeBSD 8 has burst */ p->burst = p8->burst; } /* fill the fifo flowset */ dn_compat_config_queue(fs, v); fs->fs_nr = i + 2*DN_MAX_ID; fs->sched_nr = i + DN_MAX_ID; /* Move scheduler related parameter from fs to sch */ sch->buckets = fs->buckets; /*XXX*/ fs->buckets = 0; if (fs->flags & DN_HAVE_MASK) { sch->flags |= DN_HAVE_MASK; fs->flags &= ~DN_HAVE_MASK; sch->sched_mask = fs->flow_mask; bzero(&fs->flow_mask, sizeof(struct ipfw_flow_id)); } return 0; } static int dn_compat_config_profile(struct dn_profile *pf, struct dn_link *p, void *v) { struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; p8->samples = &(((struct dn_pipe_max8 *)p8)->samples[0]); pf->link_nr = p->link_nr; pf->loss_level = p8->loss_level; // pf->bandwidth = p->bandwidth; //XXX bandwidth redundant? 
pf->samples_no = p8->samples_no; strncpy(pf->name, p8->name,sizeof(pf->name)); bcopy(p8->samples, pf->samples, sizeof(pf->samples)); return 0; } /* * If p->pipe_nr != 0 the command is 'pipe x config', so need to create * the three main struct, else only a flowset is created */ static int dn_compat_configure(void *v) { struct dn_id *buf = NULL, *base; struct dn_sch *sch = NULL; struct dn_link *p = NULL; struct dn_fs *fs = NULL; struct dn_profile *pf = NULL; int lmax; int error; struct dn_pipe7 *p7 = (struct dn_pipe7 *)v; struct dn_pipe8 *p8 = (struct dn_pipe8 *)v; int i; /* number of object to configure */ lmax = sizeof(struct dn_id); /* command header */ lmax += sizeof(struct dn_sch) + sizeof(struct dn_link) + sizeof(struct dn_fs) + sizeof(struct dn_profile); base = buf = malloc(lmax, M_DUMMYNET, M_WAITOK|M_ZERO); o_next(&buf, sizeof(struct dn_id), DN_CMD_CONFIG); base->id = DN_API_VERSION; /* pipe_nr is the same in p7 and p8 */ i = p7->pipe_nr; if (i != 0) { /* pipe config */ sch = o_next(&buf, sizeof(*sch), DN_SCH); p = o_next(&buf, sizeof(*p), DN_LINK); fs = o_next(&buf, sizeof(*fs), DN_FS); error = dn_compat_config_pipe(sch, p, fs, v); if (error) { free(buf, M_DUMMYNET); return error; } if (!is7 && p8->samples_no > 0) { /* Add profiles*/ pf = o_next(&buf, sizeof(*pf), DN_PROFILE); error = dn_compat_config_profile(pf, p, v); if (error) { free(buf, M_DUMMYNET); return error; } } } else { /* queue config */ fs = o_next(&buf, sizeof(*fs), DN_FS); error = dn_compat_config_queue(fs, v); if (error) { free(buf, M_DUMMYNET); return error; } } error = do_config(base, (char *)buf - (char *)base); if (buf) free(buf, M_DUMMYNET); return error; } int dn_compat_calc_size(void) { int need = 0; /* XXX use FreeBSD 8 struct size */ /* NOTE: * - half scheduler: schk_count/2 * - all flowset: fsk_count * - all flowset queues: queue_count * - all pipe queue: si_count */ need += V_dn_cfg.schk_count * sizeof(struct dn_pipe8) / 2; need += V_dn_cfg.fsk_count * sizeof(struct dn_flow_set); 
	need += V_dn_cfg.si_count * sizeof(struct dn_flow_queue8);
	need += V_dn_cfg.queue_count * sizeof(struct dn_flow_queue8);
	return need;
}

/*
 * Copy one flow (dn_flow) into the old dn_flow_queue7/8 layout at
 * *a->start and advance the cursor. Used as a hash-table scan callback
 * (hence the (void *, void *) signature). Always returns 0 so the scan
 * continues.
 */
int
dn_c_copy_q (void *_ni, void *arg)
{
	struct copy_args *a = arg;
	struct dn_flow_queue7 *fq7 = (struct dn_flow_queue7 *)*a->start;
	struct dn_flow_queue8 *fq8 = (struct dn_flow_queue8 *)*a->start;
	struct dn_flow *ni = (struct dn_flow *)_ni;
	int size = 0;

	/* XXX hash slot not set */
	/* No difference between 7.2/8 */
	/* len/len_bytes/id sit at the same offsets in both layouts,
	 * so writing through fq7 is valid for either. */
	fq7->len = ni->length;
	fq7->len_bytes = ni->len_bytes;
	fq7->id = ni->fid;

	if (is7) {
		size = sizeof(struct dn_flow_queue7);
		fq7->tot_pkts = ni->tot_pkts;
		fq7->tot_bytes = ni->tot_bytes;
		fq7->drops = ni->drops;
	} else {
		size = sizeof(struct dn_flow_queue8);
		fq8->tot_pkts = ni->tot_pkts;
		fq8->tot_bytes = ni->tot_bytes;
		fq8->drops = ni->drops;
	}

	*a->start += size;
	return 0;
}

/*
 * Serialize one scheduler (link + flowset) as an old-style dn_pipe7 or
 * dn_pipe8 at *a->start. 'nq' is the number of queue records that will
 * follow. (continues in the next chunk)
 */
int
dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq)
{
	struct dn_link *l = &s->link;
	struct dn_fsk *f = s->fs;

	struct dn_pipe7 *pipe7 = (struct dn_pipe7 *)*a->start;
	struct dn_pipe8 *pipe8 = (struct dn_pipe8 *)*a->start;
	struct dn_flow_set *fs;
	int size = 0;

	if (is7) {
		fs = &pipe7->fs;
		size = sizeof(struct dn_pipe7);
	} else {
		fs = &pipe8->fs;
		size = sizeof(struct dn_pipe8);
	}

	/* These 4 field are the same in pipe7 and pipe8 */
	pipe7->next.sle_next = (struct dn_pipe7 *)DN_IS_PIPE;
	pipe7->bandwidth = l->bandwidth;
	pipe7->delay = l->delay * 1000 / hz;	/* ticks -> ms */
	pipe7->pipe_nr = l->link_nr - DN_MAX_ID;

	if (!is7) {
		/* FreeBSD 8 additions: profile and burst. */
		if (s->profile) {
			struct dn_profile *pf = s->profile;
			strncpy(pipe8->name, pf->name, sizeof(pf->name));
			pipe8->loss_level = pf->loss_level;
			pipe8->samples_no = pf->samples_no;
		}
		pipe8->burst = div64(l->burst , 8 * hz);
	}

	fs->flow_mask = s->sch.sched_mask;
	fs->rq_size = s->sch.buckets ?
	    s->sch.buckets : 1;

	fs->parent_nr = l->link_nr - DN_MAX_ID;
	fs->qsize = f->fs.qsize;
	fs->plr = f->fs.plr;
	fs->w_q = f->fs.w_q;
	fs->max_th = f->max_th;
	fs->min_th = f->min_th;
	fs->max_p = f->fs.max_p;
	fs->rq_elements = nq;

	fs->flags_fs = convertflags2old(f->fs.flags);

	*a->start += size;
	return 0;
}

/*
 * Emit one old-style pipe record plus its queue records into the copy
 * buffer. Returns 1 (stop the scan) if the destination buffer cannot
 * hold the pipe and all of its queues, 0 otherwise.
 */
int
dn_compat_copy_pipe(struct copy_args *a, void *_o)
{
	int have = a->end - *a->start;
	int need = 0;
	int pipe_size = sizeof(struct dn_pipe8);
	int queue_size = sizeof(struct dn_flow_queue8);
	int n_queue = 0; /* number of queues */

	struct dn_schk *s = (struct dn_schk *)_o;
	/* calculate needed space:
	 * - struct dn_pipe
	 * - if there are instances, dn_queue * n_instances
	 */
	/* siht is either a hash table (DN_HAVE_MASK) or a single
	 * instance pointer; count accordingly. */
	n_queue = (s->sch.flags & DN_HAVE_MASK ? dn_ht_entries(s->siht) :
	    (s->siht ? 1 : 0));
	need = pipe_size + queue_size * n_queue;
	if (have < need) {
		D("have %d < need %d", have, need);
		return 1;
	}
	/* copy pipe */
	dn_c_copy_pipe(s, a, n_queue);

	/* copy queues */
	if (s->sch.flags & DN_HAVE_MASK)
		dn_ht_scan(s->siht, dn_c_copy_q, a);
	else if (s->siht)
		dn_c_copy_q(s->siht, a);
	return 0;
}

/*
 * Serialize one flowset as an old dn_flow_set record at *a->start and
 * advance the cursor. 'nq' is the number of queue records that follow.
 */
int
dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq)
{
	struct dn_flow_set *fs = (struct dn_flow_set *)*a->start;

	/* Old ABI used the list pointer as a record-type tag. */
	fs->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE;
	fs->fs_nr = f->fs.fs_nr;
	fs->qsize = f->fs.qsize;
	fs->plr = f->fs.plr;
	fs->w_q = f->fs.w_q;
	fs->max_th = f->max_th;
	fs->min_th = f->min_th;
	fs->max_p = f->fs.max_p;
	fs->flow_mask = f->fs.flow_mask;
	fs->rq_elements = nq;
	fs->rq_size = (f->fs.buckets ? f->fs.buckets : 1);
	fs->parent_nr = f->fs.sched_nr;
	fs->weight = f->fs.par[0];

	fs->flags_fs = convertflags2old(f->fs.flags);

	*a->start += sizeof(struct dn_flow_set);
	return 0;
}

/*
 * Emit one old-style flowset record plus its queue records.
 * (continues in the next chunk)
 */
int
dn_compat_copy_queue(struct copy_args *a, void *_o)
{
	int have = a->end - *a->start;
	int need = 0;
	int fs_size = sizeof(struct dn_flow_set);
	int queue_size = sizeof(struct dn_flow_queue8);

	struct dn_fsk *fs = (struct dn_fsk *)_o;
	int n_queue = 0; /* number of queues */

	n_queue = (fs->fs.flags & DN_HAVE_MASK ?
	    dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0));

	need = fs_size + queue_size * n_queue;
	if (have < need) {
		D("have < need");
		return 1;	/* stop the scan: buffer too small */
	}

	/* copy flowset */
	dn_c_copy_fs(fs, a, n_queue);

	/* copy queues */
	if (fs->fs.flags & DN_HAVE_MASK)
		dn_ht_scan(fs->qht, dn_c_copy_q, a);
	else if (fs->qht)
		dn_c_copy_q(fs->qht, a);

	return 0;
}

/*
 * Hash-table scan callback that serializes one object (pipe or queue,
 * selected by a->type) into the compat GET reply buffer. Objects that
 * do not belong to the old numbering space are skipped. Returns
 * DNHT_SCAN_END to abort the scan when the buffer is full.
 */
int
copy_data_helper_compat(void *_o, void *_arg)
{
	struct copy_args *a = _arg;

	if (a->type == DN_COMPAT_PIPE) {
		struct dn_schk *s = _o;
		if (s->sch.oid.subtype != 1 || s->sch.sched_nr <= DN_MAX_ID) {
			return 0;	/* not old type */
		}
		/* copy pipe parameters, and if instance exists, copy
		 * other parameters and eventually queues.
		 */
		if(dn_compat_copy_pipe(a, _o))
			return DNHT_SCAN_END;
	} else if (a->type == DN_COMPAT_QUEUE) {
		struct dn_fsk *fs = _o;
		if (fs->fs.fs_nr >= DN_MAX_ID)
			return 0;	/* new-style flowset, skip */
		if (dn_compat_copy_queue(a, _o))
			return DNHT_SCAN_END;
	}
	return 0;
}

/* Main function to manage old requests */
int
ip_dummynet_compat(struct sockopt *sopt)
{
	int error = 0;
	void *v = NULL;
	struct dn_id oid;

	/* Length of data, used to found ipfw version...
*/ int len = sopt->sopt_valsize; /* len can be 0 if command was dummynet_flush */ if (len == pipesize7) { D("setting compatibility with FreeBSD 7.2"); is7 = 1; } else if (len == pipesize8 || len == pipesizemax8) { D("setting compatibility with FreeBSD 8"); is7 = 0; } switch (sopt->sopt_name) { default: printf("dummynet: -- unknown option %d", sopt->sopt_name); error = EINVAL; break; case IP_DUMMYNET_FLUSH: oid_fill(&oid, sizeof(oid), DN_CMD_FLUSH, DN_API_VERSION); do_config(&oid, oid.len); break; case IP_DUMMYNET_DEL: v = malloc(len, M_TEMP, M_WAITOK); error = sooptcopyin(sopt, v, len, len); if (error) break; error = dn_compat_del(v); free(v, M_TEMP); break; case IP_DUMMYNET_CONFIGURE: v = malloc(len, M_TEMP, M_NOWAIT); if (v == NULL) { error = ENOMEM; break; } error = sooptcopyin(sopt, v, len, len); if (error) break; error = dn_compat_configure(v); free(v, M_TEMP); break; case IP_DUMMYNET_GET: { void *buf; int ret; int original_size = sopt->sopt_valsize; int size; ret = dummynet_get(sopt, &buf); if (ret) return 0;//XXX ? size = sopt->sopt_valsize; sopt->sopt_valsize = original_size; D("size=%d, buf=%p", size, buf); ret = sooptcopyout(sopt, buf, size); if (ret) printf(" %s ERROR sooptcopyout\n", __FUNCTION__); if (buf) free(buf, M_DUMMYNET); } } return error; } diff --git a/sys/netpfil/ipfw/ip_dn_io.c b/sys/netpfil/ipfw/ip_dn_io.c index 11357b44e05e..dad5cb087b39 100644 --- a/sys/netpfil/ipfw/ip_dn_io.c +++ b/sys/netpfil/ipfw/ip_dn_io.c @@ -1,967 +1,968 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa * All rights reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Dummynet portions related to packet handling. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */ #include /* NET_EPOCH_... */ #include #include #include #include /* ip_len, ip_off */ #include /* ip_output(), IP_FORWARDING */ #include #include #include /* various ether_* routines */ #include /* for ip6_input, ip6_output prototypes */ #include #include #include #include #ifdef NEW_AQM #include #endif #include /* * We keep a private variable for the simulation time, but we could * probably use an existing one ("softticks" in sys/kern/kern_timeout.c) * instead of V_dn_cfg.curr_time */ VNET_DEFINE(struct dn_parms, dn_cfg); #define V_dn_cfg VNET(dn_cfg) /* * We use a heap to store entities for which we have pending timer events. * The heap is checked at every tick and all entities with expired events * are extracted. 
MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
/* Validate and apply net.inet.ip.dummynet.hash_size (16..65536). */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = V_dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	V_dn_cfg.hash_size = value;
	return (0);
}

/*
 * Shared handler for pipe_slot_limit (arg2 != 0, min 1) and
 * pipe_byte_limit (arg2 == 0, min 1500).
 */
static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = V_dn_cfg.slot_limit;
	else
		value = V_dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		V_dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		V_dn_cfg.byte_limit = value;
	}
	return (0);
}

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
/* Non-static so the AQM modules can attach their own children. */
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Dummynet");
#endif

/* wrapper to pass V_dn_cfg fields to SYSCTL_* */
#define DC(x)	(&(VNET_NAME(dn_cfg).x))

/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_hash_size,
    "I", "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 1, sysctl_limits,
    "L", "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_limits,
    "L", "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW | CTLFLAG_VNET, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_VNET, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_lookup_depth), 0,
    "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_avg_pkt_size), 0,
    "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_max_pkt_size), 0,
    "RED Max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta), 0,
    "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta_sum), 0,
    "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_adjustment), 0,
    "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_diff), 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_lost), 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_VNET, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD | CTLFLAG_VNET, DC(expire_cycle), 0,
    "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(si_count), 0,
    "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt), 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_fast), 0,
    "Number of packets bypassed dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_drop), 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: to skip ts m_tag. For Debugging only*/
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m,mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return (struct dn_pkt_tag *)(mtag+1);
}

#ifndef NEW_AQM
/*
 * Append 'm' to packet queue 'q', updating head/tail/count.
 * (continues in the next chunk)
 */
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose a list of packet. Use a functions so if we need to do
 * more work, this is a central point to do it.
 */
void dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

/*
 * Decide whether RED should drop (or ECN-mark) a packet of 'len' bytes
 * arriving at queue 'q'. Returns 1 to drop/mark, 0 to accept.
 * All arithmetic is fixed-point via SCALE/SCALE_MUL.
 */
static int
red_drops (struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or * packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with probability P function of avg.
	 */

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - * w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((V_dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should i drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >=  max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);	/* caller will ECN-mark instead */
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	 p_b = c_3 * avg - c_4
			 * where
			 *	c_3 = (1 - max_p) / max_th
			 *	c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	 p_b = c_1 * avg - c_2
		 * where
		 *	c_1 = max_p / (max_th - min_th)
		 *	c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len) , fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
		 * value of q->count means a greater packet drop probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
/* Mark CE on an ECN-capable IPv4/IPv6 packet; returns 1 if the packet
 * is (or was already) marked, 0 if it is not ECN-capable. */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf* m)
{
	struct ip *ip;
	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ecn-capable but not marked,
		 * mark CE and update checksum
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ecn-capable but not marked, mark CE
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyways.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
			__FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;	/* random loss emulation */
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return q->fs->aqmfp->enqueue(q ,m);
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		/* RED said drop; try ECN marking first if enabled. */
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;	/* taildrop, byte limit */
	} else if (q->ni.length >= f->qsize) {
		goto drop;	/* taildrop, slot limit */
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	V_dn_cfg.io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0; /* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;	/* first not-yet-due packet stops the scan */
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		mq_append(q, m);
	}
	if (m != NULL) {
		/* Packets remain: re-arm the delay line at the time of
		 * the earliest leftover packet. */
		dline->oid.subtype = 1; /* in heap */
		heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return 0;
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		/* Profile says this sample is a loss: flag the packet so
		 * dummynet_send() drops it instead of forwarding. */
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return bits;
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
-	int done, bw;
+	int done;
+	uint32_t bw;

	/* Caller may pass q == NULL; use a local scratch queue then. */
	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	/* Accumulate credit for the time elapsed since last service.
	 * bw == 0 means unlimited bandwidth: no credit accounting. */
	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		/* Charge packet bits (plus per-packet profile overhead)
		 * scaled by hz so credit and time share units. */
		len_scaled = (bw == 0) ? 0 : hz *
			(m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line */
		dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay ;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT (bw > 0, ("bw=0 and credit<0 ?"));
		/* Ticks needed to earn back the deficit (ceiling div). */
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&V_dn_cfg.evheap, now + t, si);
	}
	/* Only flush the delay line if it was idle on entry and we
	 * produced something; otherwise a heap event will handle it. */
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return q->head;
}

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * but the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */
	struct epoch_tracker et;

	VNET_ITERATOR_DECL(vnet_iter);
	VNET_LIST_RLOCK();
	NET_EPOCH_ENTER(et);

	VNET_FOREACH(vnet_iter) {
		memset(&q, 0, sizeof(struct mq));
		CURVNET_SET(vnet_iter);

		DN_BH_WLOCK();

		/* Update number of lost(coalesced) ticks. */
		V_dn_cfg.tick_lost += pending - 1;

		getmicrouptime(&t);

		/* Last tick duration (usec). */
		V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 +
		(t.tv_usec - V_dn_cfg.prev_t.tv_usec);
		/* Last tick vs standard tick difference (usec). */
		V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz;
		/* Accumulated tick difference (usec). */
		V_dn_cfg.tick_delta_sum += V_dn_cfg.tick_delta;

		V_dn_cfg.prev_t = t;

		/*
		 * Adjust curr_time if the accumulated tick difference is
		 * greater than the 'standard' tick. Since curr_time should
		 * be monotonically increasing, we do positive adjustments
		 * as required, and throttle curr_time in case of negative
		 * adjustment.
		 */
		V_dn_cfg.curr_time++;
		if (V_dn_cfg.tick_delta_sum - tick >= 0) {
			int diff = V_dn_cfg.tick_delta_sum / tick;

			V_dn_cfg.curr_time += diff;
			V_dn_cfg.tick_diff += diff;
			V_dn_cfg.tick_delta_sum %= tick;
			V_dn_cfg.tick_adjustment++;
		} else if (V_dn_cfg.tick_delta_sum + tick <= 0) {
			V_dn_cfg.curr_time--;
			V_dn_cfg.tick_diff--;
			V_dn_cfg.tick_delta_sum += tick;
			V_dn_cfg.tick_adjustment++;
		}

		/* serve pending events, accumulate in q */
		for (;;) {
			struct dn_id *p;    /* generic parameter to handler */

			if (V_dn_cfg.evheap.elements == 0 ||
			    DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key))
				break;
			p = HEAP_TOP(&V_dn_cfg.evheap)->object;
			heap_extract(&V_dn_cfg.evheap, NULL);
			/* Heap holds two object kinds, told apart by type. */
			if (p->type == DN_SCH_I) {
				serve_sched(&q, (struct dn_sch_inst *)p, V_dn_cfg.curr_time);
			} else { /* extracted a delay line */
				transmit_event(&q, (struct delay_line *)p, V_dn_cfg.curr_time);
			}
		}
		if (V_dn_cfg.expire && ++V_dn_cfg.expire_cycle >= V_dn_cfg.expire) {
			V_dn_cfg.expire_cycle = 0;
			dn_drain_scheduler();
			dn_drain_queue();
		}
		DN_BH_WUNLOCK();
		/* Transmit outside the dummynet lock to avoid lock-order
		 * issues with the network stack. */
		if (q.head != NULL)
			dummynet_send(q.head);

		CURVNET_RESTORE();
	}
	NET_EPOCH_EXIT(et);
	VNET_LIST_RUNLOCK();

	/* Schedule our next run. */
	dn_reschedule();
}

/*
 * forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	NET_EPOCH_ASSERT();

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/* extract the dummynet info, rename the tag
			 * to carry reinject info. */
			if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) &&
				pkt->ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				ifp = pkt->ifp;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		/* Reinject the packet into the stack at the layer and
		 * direction recorded when it entered dummynet. */
		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break ;

		case DIR_IN :
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");

			break;

		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Insure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

/*
 * Attach a PACKET_TAG_DUMMYNET tag carrying rule/direction/timing
 * state to 'm'. Returns 0 on success, 1 if the tag cannot be
 * allocated (caller drops the packet).
 */
static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
		    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return 1;		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->flags & IPFW_ARGS_OUT ? fwa->ifp : NULL;
	/* dt->output tame is updated as we move through */
	dt->output_time = V_dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return 0;
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. The we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 */
int
dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */
	int fs_id, dir;

	/* Pipes live in a separate id space above 2*DN_MAX_ID. */
	fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
		((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	/* XXXGL: convert args to dir */
	if (fwa->flags & IPFW_ARGS_IN)
		dir = DIR_IN;
	else
		dir = DIR_OUT;
	if (fwa->flags & IPFW_ARGS_ETHER)
		dir |= PROTO_LAYER2;
	else if (fwa->flags & IPFW_ARGS_IP6)
		dir |= PROTO_IPV6;

	DN_BH_WLOCK();
	V_dn_cfg.io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(V_dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increases io_pkt_drop */
		V_dn_cfg.io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < V_dn_cfg.curr_time) {
	    /* Do this only on the first packet on an idle pipe */
	    struct dn_link *p = &fs->sched->link;

	    si->sched_time = V_dn_cfg.curr_time;
	    si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0;
	    if (p->burst) {
		uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth;
		if (burst > p->burst)
			burst = p->burst;
		si->credit += burst;
	    }
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, V_dn_cfg.curr_time);

	/* optimization -- pass it back to ipfw for immediate send */
	/* XXX Don't call dummynet_send() if scheduler return the packet
	 * just enqueued. This avoid a lock order reversal.
	 *
	 */
	if (/*V_dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0 ) {
		/* fast io, rename the tag * to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		V_dn_cfg.io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;	/* packet now owned by dummynet */
	}
done:
	DN_BH_WUNLOCK();
	/* Send outside the lock (see dummynet_task for the same pattern). */
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	V_dn_cfg.io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}