Page MenuHomeFreeBSD

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
Index: projects/clang360-import/bin/expr/expr.y
===================================================================
--- projects/clang360-import/bin/expr/expr.y (revision 277808)
+++ projects/clang360-import/bin/expr/expr.y (revision 277809)
@@ -1,555 +1,567 @@
%{
/*-
* Written by Pace Willisson (pace@blitz.com)
* and placed in the public domain.
*
* Largely rewritten by J.T. Conklin (jtc@wimsey.com)
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <regex.h>
#include <unistd.h>
/*
* POSIX specifies a specific error code for syntax errors. We exit
* with this code for all errors.
*/
#define ERR_EXIT 2
enum valtype {
integer, numeric_string, string
} ;
struct val {
enum valtype type;
union {
char *s;
intmax_t i;
} u;
} ;
char **av;
int nonposix;
struct val *result;
void assert_to_integer(struct val *);
void assert_div(intmax_t, intmax_t);
void assert_minus(intmax_t, intmax_t, intmax_t);
void assert_plus(intmax_t, intmax_t, intmax_t);
void assert_times(intmax_t, intmax_t, intmax_t);
int compare_vals(struct val *, struct val *);
void free_value(struct val *);
int is_integer(const char *);
int is_string(struct val *);
int is_zero_or_null(struct val *);
struct val *make_integer(intmax_t);
struct val *make_str(const char *);
struct val *op_and(struct val *, struct val *);
struct val *op_colon(struct val *, struct val *);
struct val *op_div(struct val *, struct val *);
struct val *op_eq(struct val *, struct val *);
struct val *op_ge(struct val *, struct val *);
struct val *op_gt(struct val *, struct val *);
struct val *op_le(struct val *, struct val *);
struct val *op_lt(struct val *, struct val *);
struct val *op_minus(struct val *, struct val *);
struct val *op_ne(struct val *, struct val *);
struct val *op_or(struct val *, struct val *);
struct val *op_plus(struct val *, struct val *);
struct val *op_rem(struct val *, struct val *);
struct val *op_times(struct val *, struct val *);
int to_integer(struct val *);
void to_string(struct val *);
int yyerror(const char *);
int yylex(void);
%}
%union
{
struct val *val;
}
%left <val> '|'
%left <val> '&'
%left <val> '=' '>' '<' GE LE NE
%left <val> '+' '-'
%left <val> '*' '/' '%'
%left <val> ':'
%token <val> TOKEN
%type <val> start expr
%%
start: expr { result = $$; }
expr: TOKEN
| '(' expr ')' { $$ = $2; }
| expr '|' expr { $$ = op_or($1, $3); }
| expr '&' expr { $$ = op_and($1, $3); }
| expr '=' expr { $$ = op_eq($1, $3); }
| expr '>' expr { $$ = op_gt($1, $3); }
| expr '<' expr { $$ = op_lt($1, $3); }
| expr GE expr { $$ = op_ge($1, $3); }
| expr LE expr { $$ = op_le($1, $3); }
| expr NE expr { $$ = op_ne($1, $3); }
| expr '+' expr { $$ = op_plus($1, $3); }
| expr '-' expr { $$ = op_minus($1, $3); }
| expr '*' expr { $$ = op_times($1, $3); }
| expr '/' expr { $$ = op_div($1, $3); }
| expr '%' expr { $$ = op_rem($1, $3); }
| expr ':' expr { $$ = op_colon($1, $3); }
;
%%
/*
 * Allocate a new value node holding the integer i.
 * Exits with ERR_EXIT if allocation fails.
 */
struct val *
make_integer(intmax_t i)
{
	struct val *node;

	node = malloc(sizeof(*node));
	if (node == NULL)
		errx(ERR_EXIT, "malloc() failed");
	node->type = integer;
	node->u.i = i;
	return (node);
}
/*
 * Allocate a new value node holding a copy of the string s.
 * The node is tagged numeric_string when s parses as an integer,
 * so later arithmetic can convert it lazily.
 * Exits with ERR_EXIT if allocation fails.
 */
struct val *
make_str(const char *s)
{
	struct val *node;

	node = malloc(sizeof(*node));
	if (node == NULL)
		errx(ERR_EXIT, "malloc() failed");
	node->u.s = strdup(s);
	if (node->u.s == NULL)
		errx(ERR_EXIT, "malloc() failed");
	node->type = is_integer(s) ? numeric_string : string;
	return (node);
}
/*
 * Release the storage owned by a value node's payload.
 * Only string payloads own heap memory; integers carry none.
 * (The node itself is not freed.)
 */
void
free_value(struct val *vp)
{
	switch (vp->type) {
	case string:
	case numeric_string:
		free(vp->u.s);
		break;
	default:
		/* integer: nothing to release */
		break;
	}
}
/*
 * Try to convert a numeric_string value to an integer in place.
 * Returns non-zero iff vp holds an integer afterwards.  Values that
 * are out of intmax_t range (ERANGE) deliberately stay as numeric
 * strings so they can still participate in string comparison.
 */
int
to_integer(struct val *vp)
{
intmax_t i;
/* we can only convert numeric_string to integer, here */
if (vp->type == numeric_string) {
errno = 0;
i = strtoimax(vp->u.s, (char **)NULL, 10);
/* just keep as numeric_string, if the conversion fails */
if (errno != ERANGE) {
/* The union is repurposed: free the string before storing i. */
free(vp->u.s);
vp->u.i = i;
vp->type = integer;
}
}
return (vp->type == integer);
}
/*
 * Convert vp to an integer or die trying: plain strings are not
 * numbers at all, and numeric strings that fail to convert must be
 * out of range (see to_integer()).
 */
void
assert_to_integer(struct val *vp)
{
	if (vp->type == string)
		errx(ERR_EXIT, "not a decimal number: '%s'", vp->u.s);
	if (to_integer(vp) == 0)
		errx(ERR_EXIT, "operand too large: '%s'", vp->u.s);
}
/*
 * Coerce an integer value to its decimal string representation,
 * in place.  String and numeric_string values are left untouched
 * (numeric strings already have their original text).
 */
void
to_string(struct val *vp)
{
	char *tmp;

	if (vp->type == string || vp->type == numeric_string)
		return;
	/*
	 * log_10(x) ~= 0.3 * log_2(x). Rounding up gives the number
	 * of digits; add one each for the sign and terminating null
	 * character, respectively.
	 */
#define NDIGITS(x) (3 * (sizeof(x) * CHAR_BIT) / 10 + 1 + 1 + 1)
	tmp = malloc(NDIGITS(vp->u.i));
	if (tmp == NULL)
		errx(ERR_EXIT, "malloc() failed");
	/*
	 * The NDIGITS bound already guarantees room; use snprintf so a
	 * future change to the bound cannot turn into a buffer overflow.
	 */
	snprintf(tmp, NDIGITS(vp->u.i), "%jd", vp->u.i);
	vp->type = string;
	vp->u.s = tmp;
}
/*
 * Decide whether s is the decimal representation of an integer:
 * an optional sign followed by one or more digits.  In non-POSIX
 * mode (-e / EXPR_COMPAT) leading whitespace, a leading '+', and
 * the empty string are also accepted, mirroring strtoimax().
 */
int
is_integer(const char *s)
{
	const char *p = s;

	if (nonposix) {
		/* historic extension: "" counts as an integer */
		if (*p == '\0')
			return (1);
		/* skip leading whitespace, as strtoimax() would */
		while (isspace((unsigned char)*p))
			p++;
	}
	if (*p == '-' || (nonposix && *p == '+'))
		p++;
	if (*p == '\0')
		return (0);
	while (isdigit((unsigned char)*p))
		p++;
	/* valid iff digits ran all the way to the terminator */
	return (*p == '\0');
}
/*
 * True only when the value is a plain string, i.e. text that is
 * not a valid integer (numeric strings do not count).
 */
int
is_string(struct val *vp)
{
	return (vp->type == string ? 1 : 0);
}
/*
 * Lexer: consumes one word from the global argument vector av.
 * Returns 0 at end of arguments, the character itself for the
 * single-character operators, GE/LE/NE for the two-character
 * comparisons, and TOKEN (with yylval set) for any other operand.
 */
int
yylex(void)
{
char *p;
if (*av == NULL)
return (0);
p = *av++;
if (strlen(p) == 1) {
/* single-character operators are their own token value */
if (strchr("|&=<>+-*/%:()", *p))
return (*p);
} else if (strlen(p) == 2 && p[1] == '=') {
/* two-character comparison operators: >=, <=, != */
switch (*p) {
case '>': return (GE);
case '<': return (LE);
case '!': return (NE);
}
}
/* anything else is an operand; classify as string/numeric_string */
yylval.val = make_str(p);
return (TOKEN);
}
/*
 * POSIX truth test for expr: a value is "zero or null" when it is
 * the integer 0, the empty string, or a numeric string equal to 0.
 */
int
is_zero_or_null(struct val *vp)
{
	if (vp->type == integer)
		return (vp->u.i == 0);
	if (vp->u.s[0] == '\0')
		return (1);
	/* numeric strings compare as integers when convertible */
	return (to_integer(vp) && vp->u.i == 0);
}
/*
 * expr(1) entry point: evaluate the expression formed by the
 * command-line arguments and print the result.  The exit status is
 * 0 when the result is neither null nor zero, 1 when it is, and
 * ERR_EXIT for errors (as is_zero_or_null() returns 0/1 directly).
 */
int
main(int argc, char *argv[])
{
	int c;

	setlocale(LC_ALL, "");
	/*
	 * EXPR_COMPAT (or the utility-compat knob) selects historic
	 * non-POSIX behaviour and skips option parsing entirely, so
	 * a leading "-e" argument is treated as an operand.
	 */
	if (getenv("EXPR_COMPAT") != NULL
	    || check_utility_compat("expr")) {
		av = argv + 1;
		nonposix = 1;
	} else {
		while ((c = getopt(argc, argv, "e")) != -1) {
			switch (c) {
			case 'e':
				nonposix = 1;
				break;
			default:
				/*
				 * errx() appends its own newline; the old
				 * message had a stray trailing "\n" that
				 * produced a blank line.
				 */
				errx(ERR_EXIT,
				    "usage: expr [-e] expression");
			}
		}
		av = argv + optind;
	}
	yyparse();
	if (result->type == integer)
		printf("%jd\n", result->u.i);
	else
		printf("%s\n", result->u.s);
	/* POSIX: exit 1 exactly when the result is zero or null. */
	return (is_zero_or_null(result));
}
/*
 * Parser error callback: every syntax error is fatal, exiting with
 * the POSIX-mandated status for expr syntax errors (ERR_EXIT == 2).
 * The yacc-supplied message is ignored in favour of a fixed one.
 */
int
yyerror(const char *s __unused)
{
errx(ERR_EXIT, "syntax error");
}
/*
 * '|' operator: return the first operand if it is neither null nor
 * zero, else the second if IT is neither null nor zero, else the
 * integer 0.  Operands that are not returned are freed.
 */
struct val *
op_or(struct val *a, struct val *b)
{
	struct val *chosen;

	if (!is_zero_or_null(a)) {
		free_value(b);
		chosen = a;
	} else {
		free_value(a);
		if (!is_zero_or_null(b)) {
			chosen = b;
		} else {
			free_value(b);
			chosen = make_integer((intmax_t)0);
		}
	}
	return (chosen);
}
/*
 * '&' operator: return the first operand if both operands are
 * non-null and non-zero, otherwise the integer 0.  Operands that
 * are not returned are freed.
 */
struct val *
op_and(struct val *a, struct val *b)
{
	if (!(is_zero_or_null(a) || is_zero_or_null(b))) {
		free_value(b);
		return (a);
	}
	free_value(a);
	free_value(b);
	return (make_integer((intmax_t)0));
}
/*
 * Compare two values and free both.  Returns <0, 0 or >0 like
 * strcmp().  Per POSIX expr semantics: if either operand is a
 * non-numeric string, both are compared as strings in the locale
 * collation order; otherwise both must convert to integers (fatal
 * if out of range).
 */
int
compare_vals(struct val *a, struct val *b)
{
int r;
if (is_string(a) || is_string(b)) {
to_string(a);
to_string(b);
r = strcoll(a->u.s, b->u.s);
} else {
assert_to_integer(a);
assert_to_integer(b);
if (a->u.i > b->u.i)
r = 1;
else if (a->u.i < b->u.i)
r = -1;
else
r = 0;
}
free_value(a);
free_value(b);
return (r);
}
/* '=' operator: 1 when operands compare equal, else 0. */
struct val *
op_eq(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp == 0)));
}
/* '>' operator: 1 when the first operand is greater, else 0. */
struct val *
op_gt(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp > 0)));
}
/* '<' operator: 1 when the first operand is smaller, else 0. */
struct val *
op_lt(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp < 0)));
}
/* '>=' operator: 1 when the first operand is not smaller, else 0. */
struct val *
op_ge(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp >= 0)));
}
/* '<=' operator: 1 when the first operand is not greater, else 0. */
struct val *
op_le(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp <= 0)));
}
/* '!=' operator: 1 when the operands differ, else 0. */
struct val *
op_ne(struct val *a, struct val *b)
{
	int cmp = compare_vals(a, b);

	return (make_integer((intmax_t)(cmp != 0)));
}
/*
 * Overflow check for addition: r is the already-computed sum a + b.
 * NOTE(review): on overflow the value of r comes from signed-overflow
 * wrap-around, which is formally undefined behaviour; the file's own
 * comment on assert_times() documents that it depends on this and
 * passes r as volatile there to keep the test from being optimized
 * away.  Checking before the addition would avoid the UB entirely.
 */
void
assert_plus(intmax_t a, intmax_t b, intmax_t r)
{
/*
 * sum of two positive numbers must be positive,
 * sum of two negative numbers must be negative
 */
if ((a > 0 && b > 0 && r <= 0) ||
(a < 0 && b < 0 && r >= 0))
errx(ERR_EXIT, "overflow");
}
/*
 * '+' operator: integer addition with overflow detection.
 * Both operands must convert to integers; they are freed here.
 */
struct val *
op_plus(struct val *a, struct val *b)
{
	struct val *sum;

	assert_to_integer(a);
	assert_to_integer(b);
	sum = make_integer(a->u.i + b->u.i);
	/* check after the fact, based on the computed result */
	assert_plus(a->u.i, b->u.i, sum->u.i);
	free_value(a);
	free_value(b);
	return (sum);
}
/*
 * Overflow check for subtraction: r is the already-computed a - b.
 *
 * If b is INTMAX_MIN, -b is not representable, so decide directly
 * from the sign of a: a - INTMAX_MIN == a + 2^(w-1) overflows
 * exactly when a >= 0.  The previous check (a < 0) was inverted:
 * it rejected valid results such as -1 - INTMAX_MIN == INTMAX_MAX,
 * missed the real overflow for a >= 0, and then evaluated
 * -INTMAX_MIN (undefined behaviour) in assert_plus().
 * For any other b, subtraction is addition of the negated
 * subtrahend, so delegate to assert_plus().
 */
void
assert_minus(intmax_t a, intmax_t b, intmax_t r)
{
	/* special case subtraction of INTMAX_MIN */
	if (b == INTMAX_MIN) {
		if (a >= 0)
			errx(ERR_EXIT, "overflow");
	} else
		/* check addition of negative subtrahend */
		assert_plus(a, -b, r);
}
/*
 * '-' operator: integer subtraction with overflow detection.
 * Both operands must convert to integers; they are freed here.
 */
struct val *
op_minus(struct val *a, struct val *b)
{
	struct val *difference;

	assert_to_integer(a);
	assert_to_integer(b);
	difference = make_integer(a->u.i - b->u.i);
	/* check after the fact, based on the computed result */
	assert_minus(a->u.i, b->u.i, difference->u.i);
	free_value(a);
	free_value(b);
	return (difference);
}
+/*
+ * We depend on undefined behaviour giving a result (in r).
+ * To test this result, pass it as volatile. This prevents
+ * optimizing away of the test based on the undefined behaviour.
+ */
void
-assert_times(intmax_t a, intmax_t b, intmax_t r)
+assert_times(intmax_t a, intmax_t b, volatile intmax_t r)
{
/*
- * if first operand is 0, no overflow is possible,
- * else result of division test must match second operand
+ * If the first operand is 0, no overflow is possible,
+ * else the result of the division test must match the
+ * second operand.
+ *
+ * Be careful to avoid overflow in the overflow test, as
+ * in assert_div(). Overflow in division would kill us
+ * with a SIGFPE before getting the test wrong. In old
+ * buggy versions, optimization used to give a null test
+ * instead of a SIGFPE.
*/
- if (a != 0 && r / a != b)
+ if ((a == -1 && b == INTMAX_MIN) || (a != 0 && r / a != b))
errx(ERR_EXIT, "overflow");
}
/*
 * '*' operator: integer multiplication with overflow detection.
 * Both operands must convert to integers; they are freed here.
 */
struct val *
op_times(struct val *a, struct val *b)
{
	struct val *product;

	assert_to_integer(a);
	assert_to_integer(b);
	product = make_integer(a->u.i * b->u.i);
	/* check after the fact, based on the computed result */
	assert_times(a->u.i, b->u.i, product->u.i);
	free_value(a);
	free_value(b);
	return (product);
}
/*
 * Pre-division sanity check, based on the operands alone (unlike
 * the addition/multiplication checks, which inspect the result).
 * Division by zero is fatal, and INTMAX_MIN / -1 is the single
 * operand pair whose quotient is not representable.
 */
void
assert_div(intmax_t a, intmax_t b)
{
	if (b == 0)
		errx(ERR_EXIT, "division by zero");
	if (b == -1 && a == INTMAX_MIN)
		errx(ERR_EXIT, "overflow");
}
/*
 * '/' operator: integer division.  The operands are validated
 * before dividing, since an overflowing or zero division would
 * trap rather than produce a checkable result.
 */
struct val *
op_div(struct val *a, struct val *b)
{
	struct val *quotient;

	assert_to_integer(a);
	assert_to_integer(b);
	/* assert based on operands only, not on result */
	assert_div(a->u.i, b->u.i);
	quotient = make_integer(a->u.i / b->u.i);
	free_value(a);
	free_value(b);
	return (quotient);
}
/*
 * '%' operator: integer remainder.  Division by zero is checked up
 * front (passing a=1 so assert_div() skips its overflow test, since
 * a remainder never overflows mathematically).
 *
 * INTMAX_MIN % -1 is mathematically 0, but evaluating it is
 * undefined behaviour in C (INTMAX_MIN / -1 is unrepresentable)
 * and traps with SIGFPE on common hardware; special-case b == -1
 * to return 0 without performing the division.
 */
struct val *
op_rem(struct val *a, struct val *b)
{
	struct val *r;

	assert_to_integer(a);
	assert_to_integer(b);
	/* pass a=1 to only check for div by zero */
	assert_div(1, b->u.i);
	if (b->u.i == -1)
		r = make_integer((intmax_t)0);
	else
		r = make_integer(a->u.i % b->u.i);
	free_value(a);
	free_value(b);
	return (r);
}
/*
 * ':' (match) operator: match string a against basic regular
 * expression b, implicitly anchored at the start of a.  If the
 * pattern contains a \(...\) subexpression, the result is the text
 * it captured ("" on no match); otherwise the result is the length
 * of the match (0 on no match).  Both operands are freed.
 */
struct val *
op_colon(struct val *a, struct val *b)
{
regex_t rp;
regmatch_t rm[2];
char errbuf[256];
int eval;
struct val *v;
/* coerce both arguments to strings */
to_string(a);
to_string(b);
/* compile regular expression */
if ((eval = regcomp(&rp, b->u.s, 0)) != 0) {
regerror(eval, &rp, errbuf, sizeof(errbuf));
errx(ERR_EXIT, "%s", errbuf);
}
/* compare string against pattern */
/* remember that patterns are anchored to the beginning of the line */
if (regexec(&rp, a->u.s, (size_t)2, rm, 0) == 0 && rm[0].rm_so == 0)
if (rm[1].rm_so >= 0) {
/* subexpression matched: terminate it in place and copy it */
*(a->u.s + rm[1].rm_eo) = '\0';
v = make_str(a->u.s + rm[1].rm_so);
} else
/* no subexpression: result is the match length */
v = make_integer((intmax_t)(rm[0].rm_eo));
else
/* no match: 0 unless the pattern had a subexpression, then "" */
if (rp.re_nsub == 0)
v = make_integer((intmax_t)0);
else
v = make_str("");
/* free arguments and pattern buffer */
free_value(a);
free_value(b);
regfree(&rp);
return (v);
}
Index: projects/clang360-import/contrib/sendmail/cf/m4/cfhead.m4
===================================================================
--- projects/clang360-import/contrib/sendmail/cf/m4/cfhead.m4 (revision 277808)
+++ projects/clang360-import/contrib/sendmail/cf/m4/cfhead.m4 (revision 277809)
@@ -1,312 +1,312 @@
#
# Copyright (c) 1998-2004, 2009, 2010 Proofpoint, Inc. and its suppliers.
# All rights reserved.
# Copyright (c) 1983, 1995 Eric P. Allman. All rights reserved.
# Copyright (c) 1988, 1993
# The Regents of the University of California. All rights reserved.
#
# By using this file, you agree to the terms and conditions set
# forth in the LICENSE file which can be found at the top level of
# the sendmail distribution.
#
# $FreeBSD$
#
######################################################################
######################################################################
#####
##### SENDMAIL CONFIGURATION FILE
#####
ifdef(`__win32__', `dnl', `dnl
ifdef(`TEMPFILE', `dnl', `define(`TEMPFILE', maketemp(/tmp/cfXXXXXX))dnl
syscmd(sh _CF_DIR_`'sh/makeinfo.sh _CF_DIR_ > TEMPFILE)dnl
-include(TEMPFILE)dnl
+ifdef(`_NO_MAKEINFO_',, `include(TEMPFILE)')dnl
syscmd(rm -f TEMPFILE)dnl')')
#####
######################################################################
#####
##### DO NOT EDIT THIS FILE! Only edit the source .mc file.
#####
######################################################################
######################################################################
divert(-1)
changecom()
undefine(`format')
undefine(`hpux')
ifdef(`pushdef', `',
`errprint(`You need a newer version of M4, at least as new as
System V or GNU')
include(NoSuchFile)')
define(`PUSHDIVERT', `pushdef(`__D__', divnum)divert($1)')
define(`POPDIVERT', `divert(__D__)popdef(`__D__')')
define(`OSTYPE',
`PUSHDIVERT(-1)
ifdef(`__OSTYPE__', `errprint(`duplicate OSTYPE'($1)
)')
define(`__OSTYPE__', $1)
define(`_ARG_', $2)
include(_CF_DIR_`'ostype/$1.m4)POPDIVERT`'')
## helpful functions
define(`lower', `translit(`$1', `ABCDEFGHIJKLMNOPQRSTUVWXYZ', `abcdefghijklmnopqrstuvwxyz')')
define(`strcasecmp', `ifelse(lower($1), lower($2), `1', `0')')
## access to further arguments in FEATURE/HACK
define(`_ACC_ARG_1_',`$1')
define(`_ACC_ARG_2_',`$2')
define(`_ACC_ARG_3_',`$3')
define(`_ACC_ARG_4_',`$4')
define(`_ACC_ARG_5_',`$5')
define(`_ACC_ARG_6_',`$6')
define(`_ACC_ARG_7_',`$7')
define(`_ACC_ARG_8_',`$8')
define(`_ACC_ARG_9_',`$9')
define(`_ARG1_',`_ACC_ARG_1_(_ARGS_)')
define(`_ARG2_',`_ACC_ARG_2_(_ARGS_)')
define(`_ARG3_',`_ACC_ARG_3_(_ARGS_)')
define(`_ARG4_',`_ACC_ARG_4_(_ARGS_)')
define(`_ARG5_',`_ACC_ARG_5_(_ARGS_)')
define(`_ARG6_',`_ACC_ARG_6_(_ARGS_)')
define(`_ARG7_',`_ACC_ARG_7_(_ARGS_)')
define(`_ARG8_',`_ACC_ARG_8_(_ARGS_)')
define(`_ARG9_',`_ACC_ARG_9_(_ARGS_)')
dnl define if not yet defined: if `$1' is not defined it will be `$2'
define(`_DEFIFNOT',`ifdef(`$1',`',`define(`$1',`$2')')')
dnl ----------------------------------------
dnl add a char $2 to a string $1 if it is not there
define(`_ADDCHAR_',`define(`_I_',`eval(index(`$1',`$2') >= 0)')`'ifelse(_I_,`1',`$1',`$1$2')')
dnl ----
dnl delete a char $2 from a string $1 if it is there
define(`_DELCHAR_',`define(`_IDX_',`index(`$1',`$2')')`'define(`_I_',`eval(_IDX_ >= 0)')`'ifelse(_I_,`1',`substr(`$1',0,_IDX_)`'substr(`$1',eval(_IDX_+1))',`$1')')
dnl ----
dnl apply a macro to a whole string by recursion (one char at a time)
dnl $1: macro
dnl $2: first argument to macro
dnl $3: list that is split up into characters
define(`_AP_',`ifelse(`$3',`',`$2',`_AP_(`$1',$1(`$2',substr(`$3',0,1)),substr(`$3',1))')')
dnl ----
dnl MODIFY_MAILER_FLAGS: append tail of $2 to $1_MF_A/D_
dnl A if head($2) = +
dnl D if head($2) = -
dnl $1_MF_ is set otherwise; set _A/D_ to `'
define(`MODIFY_MAILER_FLAGS',`define(`_hd_',`substr(`$2',0,1)')define(`_tl_',`substr(`$2',1)')`'ifelse(_hd_,`+',`ifdef($1`'_MF_A_, `define($1`'_MF_A_,$1_MF_A_`'_tl_)', `define($1`'_MF_A_, _tl_)')',_hd_,`-',`ifdef($1`'_MF_D_, `define($1`'_MF_D_,$1_MF_D_`'_tl_)', `define($1`'_MF_D_,_tl_)')',`define($1`'_MF_,`$2')define($1`'_MF_A_,`')define($1`'_MF_D_,`')')')
dnl ----
dnl actually modify flags:
dnl $1: flags (strings) to modify
dnl $2: name of flags (just first part) to modify
dnl WARNING: the order might be important: if someone adds and delete the
dnl same characters, he does not deserve any better, does he?
dnl this could be coded more efficiently... (do not apply the macro if _MF_A/D_ is undefined)
define(`_MODMF_',`ifdef($2`'_MF_,`$2_MF_',`_AP_(`_ADDCHAR_',_AP_(`_DELCHAR_',$1,ifdef($2`'_MF_D_,`$2_MF_D_',`')),ifdef($2`'_MF_A_,`$2_MF_A_',`'))')')
dnl usage:
dnl MODIFY_MAILER_FLAGS(`LOCAL',`+FlaGs')dnl
dnl in MAILER.m4: _MODMF_(LMF,`LOCAL')
dnl ----------------------------------------
define(`MAILER',
`define(`_M_N_', `ifelse(`$2', `', `$1', `$2')')dnl
ifdef(`_MAILER_DEFINED_', `', `define(`_MAILER_DEFINED_', `1')')dnl
ifdef(_MAILER_`'_M_N_`'_,
`errprint(`*** ERROR: MAILER('_M_N_`) already included
')',
`define(_MAILER_`'_M_N_`'_, `')define(`_ARG_', `$2')define(`_ARGS_', `shift($@)')PUSHDIVERT(7)include(_CF_DIR_`'mailer/$1.m4)POPDIVERT`'')')
define(`DOMAIN', `PUSHDIVERT(-1)define(`_ARG_', `$2')include(_CF_DIR_`'domain/$1.m4)POPDIVERT`'')
define(`FEATURE', `PUSHDIVERT(-1)ifdef(`_MAILER_DEFINED_',`errprint(`*** ERROR: FEATURE() should be before MAILER()
')')define(`_ARG_', `$2')define(`_ARGS_', `shift($@)')include(_CF_DIR_`'feature/$1.m4)POPDIVERT`'')
define(`HACK', `PUSHDIVERT(-1)define(`_ARG_', `$2')define(`_ARGS_', `shift($@)')include(_CF_DIR_`'hack/$1.m4)POPDIVERT`'')
define(`_DPO_',`')
define(`DAEMON_OPTIONS', `define(`_DPO_', defn(`_DPO_')
O DaemonPortOptions=`$1')')
define(`_CPO_',`')
define(`CLIENT_OPTIONS', `define(`_CPO_', defn(`_CPO_')
O ClientPortOptions=`$1')')
define(`_MAIL_FILTERS_', `')
define(`_MAIL_FILTERS_DEF', `')
define(`MAIL_FILTER', `define(`_MAIL_FILTERS_', defn(`_MAIL_FILTERS_')
X`'$1`, '`$2')
define(`_MAIL_FILTERS_DEF', defn(`_MAIL_FILTERS_DEF')`X')')
define(`INPUT_MAIL_FILTER', `MAIL_FILTER(`$1', `$2')
ifelse(defn(`confINPUT_MAIL_FILTERS')X, `X',
`define(`confINPUT_MAIL_FILTERS', $1)',
`define(`confINPUT_MAIL_FILTERS', defn(`confINPUT_MAIL_FILTERS')`, '`$1')')')
define(`_QUEUE_GROUP_', `')
define(`QUEUE_GROUP', `define(`_QUEUE_GROUP_', defn(`_QUEUE_GROUP_')
Q`'$1`, '`$2')')
define(`CF_LEVEL', `10')dnl
define(`VERSIONID', ``##### $1 #####'')
define(`LOCAL_RULE_0', `divert(3)')
dnl for UUCP...
define(`LOCAL_UUCP', `divert(4)')
define(`LOCAL_RULE_1',
`divert(9)dnl
#######################################
### Ruleset 1 -- Sender Rewriting ###
#######################################
Ssender=1
')
define(`LOCAL_RULE_2',
`divert(9)dnl
##########################################
### Ruleset 2 -- Recipient Rewriting ###
##########################################
Srecipient=2
')
define(`LOCAL_RULESETS',
`divert(9)
')
define(`LOCAL_SRV_FEATURES',
`define(`_LOCAL_SRV_FEATURES_')
ifdef(`_MAILER_DEFINED_',,`errprint(`*** WARNING: MAILER() should be before LOCAL_SRV_FEATURES
')')
divert(9)
SLocal_srv_features')
define(`LOCAL_TRY_TLS',
`define(`_LOCAL_TRY_TLS_')
ifdef(`_MAILER_DEFINED_',,`errprint(`*** WARNING: MAILER() should be before LOCAL_TRY_TLS
')')
divert(9)
SLocal_try_tls')
define(`LOCAL_TLS_RCPT',
`define(`_LOCAL_TLS_RCPT_')
ifdef(`_MAILER_DEFINED_',,`errprint(`*** WARNING: MAILER() should be before LOCAL_TLS_RCPT
')')
divert(9)
SLocal_tls_rcpt')
define(`LOCAL_TLS_CLIENT',
`define(`_LOCAL_TLS_CLIENT_')
ifdef(`_MAILER_DEFINED_',,`errprint(`*** WARNING: MAILER() should be before LOCAL_TLS_CLIENT
')')
divert(9)
SLocal_tls_client')
define(`LOCAL_TLS_SERVER',
`define(`_LOCAL_TLS_SERVER_')
ifdef(`_MAILER_DEFINED_',,`errprint(`*** WARNING: MAILER() should be before LOCAL_TLS_SERVER
')')
divert(9)
SLocal_tls_server')
define(`LOCAL_RULE_3', `divert(2)')
define(`LOCAL_CONFIG', `divert(6)')
define(`MAILER_DEFINITIONS', `divert(7)')
define(`LOCAL_DNSBL', `divert(8)')
define(`LOCAL_NET_CONFIG', `define(`_LOCAL_RULES_', 1)divert(1)')
define(`UUCPSMTP', `R DOL(*) < @ $1 .UUCP > DOL(*) DOL(1) < @ $2 > DOL(2)')
define(`CONCAT', `$1$2$3$4$5$6$7')
define(`DOL', ``$'$1')
define(`SITECONFIG',
`CONCAT(D, $3, $2)
define(`_CLASS_$3_', `')dnl
ifelse($3, U, C{w}$2 $2.UUCP, `dnl')
define(`SITE', `ifelse(CONCAT($'2`, $3), SU,
CONCAT(CY, $'1`),
CONCAT(C, $3, $'1`))')
sinclude(_CF_DIR_`'siteconfig/$1.m4)')
define(`EXPOSED_USER', `PUSHDIVERT(5)C{E}$1
POPDIVERT`'dnl`'')
define(`EXPOSED_USER_FILE', `PUSHDIVERT(5)F{E}$1
POPDIVERT`'dnl`'')
define(`LOCAL_USER', `PUSHDIVERT(5)C{L}$1
POPDIVERT`'dnl`'')
define(`LOCAL_USER_FILE', `PUSHDIVERT(5)F{L}$1
POPDIVERT`'dnl`'')
define(`MASQUERADE_AS', `define(`MASQUERADE_NAME', $1)')
define(`MASQUERADE_DOMAIN', `PUSHDIVERT(5)C{M}$1
POPDIVERT`'dnl`'')
define(`MASQUERADE_EXCEPTION', `PUSHDIVERT(5)C{N}$1
POPDIVERT`'dnl`'')
define(`MASQUERADE_DOMAIN_FILE', `PUSHDIVERT(5)F{M}$1
POPDIVERT`'dnl`'')
define(`MASQUERADE_EXCEPTION_FILE', `PUSHDIVERT(5)F{N}$1
POPDIVERT`'dnl`'')
define(`LOCAL_DOMAIN', `PUSHDIVERT(5)C{w}$1
POPDIVERT`'dnl`'')
define(`CANONIFY_DOMAIN', `PUSHDIVERT(5)C{Canonify}$1
POPDIVERT`'dnl`'')
define(`CANONIFY_DOMAIN_FILE', `PUSHDIVERT(5)F{Canonify}$1
POPDIVERT`'dnl`'')
define(`GENERICS_DOMAIN', `PUSHDIVERT(5)C{G}$1
POPDIVERT`'dnl`'')
define(`GENERICS_DOMAIN_FILE', `PUSHDIVERT(5)F{G}$1
POPDIVERT`'dnl`'')
define(`LDAPROUTE_DOMAIN', `PUSHDIVERT(5)C{LDAPRoute}$1
POPDIVERT`'dnl`'')
define(`LDAPROUTE_DOMAIN_FILE', `PUSHDIVERT(5)F{LDAPRoute}$1
POPDIVERT`'dnl`'')
define(`LDAPROUTE_EQUIVALENT', `PUSHDIVERT(5)C{LDAPRouteEquiv}$1
POPDIVERT`'dnl`'')
define(`LDAPROUTE_EQUIVALENT_FILE', `PUSHDIVERT(5)F{LDAPRouteEquiv}$1
POPDIVERT`'dnl`'')
define(`VIRTUSER_DOMAIN', `PUSHDIVERT(5)C{VirtHost}$1
define(`_VIRTHOSTS_')
POPDIVERT`'dnl`'')
define(`VIRTUSER_DOMAIN_FILE', `PUSHDIVERT(5)F{VirtHost}$1
define(`_VIRTHOSTS_')
POPDIVERT`'dnl`'')
define(`RELAY_DOMAIN', `PUSHDIVERT(5)C{R}$1
POPDIVERT`'dnl`'')
define(`RELAY_DOMAIN_FILE', `PUSHDIVERT(5)F{R}$1
POPDIVERT`'dnl`'')
define(`TRUST_AUTH_MECH', `_DEFIFNOT(`_USE_AUTH_',`1')PUSHDIVERT(5)C{TrustAuthMech}$1
POPDIVERT`'dnl`'')
define(`_OPTINS', `ifdef(`$1', `$2$1$3')')
m4wrap(`include(_CF_DIR_`m4/proto.m4')')
# default location for files
ifdef(`MAIL_SETTINGS_DIR', , `define(`MAIL_SETTINGS_DIR', `/etc/mail/')')
# set our default hashed database type
define(`DATABASE_MAP_TYPE', `hash')
# set up default values for options
define(`ALIAS_FILE', `MAIL_SETTINGS_DIR`'aliases')
define(`confMAILER_NAME', ``MAILER-DAEMON'')
define(`confFROM_LINE', `From $g $d')
define(`confOPERATORS', `.:%@!^/[]+')
define(`confSMTP_LOGIN_MSG', `$j Sendmail $v/$Z; $b')
define(`_REC_AUTH_', `$.$?{auth_type}(authenticated')
define(`_REC_FULL_AUTH_', `$.$?{auth_type}(user=${auth_authen} $?{auth_author}author=${auth_author} $.mech=${auth_type}')
define(`_REC_HDR_', `$?sfrom $s $.$?_($?s$|from $.$_)')
define(`_REC_END_', `for $u; $|;
$.$b$?g
(envelope-from $g)$.')
define(`_REC_TLS_', `(version=${tls_version} cipher=${cipher} bits=${cipher_bits} verify=${verify})$.$?u')
define(`_REC_BY_', `$.by $j ($v/$Z)$?r with $r$. id $i$?{tls_version}')
define(`confRECEIVED_HEADER', `_REC_HDR_
_REC_AUTH_$?{auth_ssf} bits=${auth_ssf}$.)
_REC_BY_
_REC_TLS_
_REC_END_')
define(`confSEVEN_BIT_INPUT', `False')
define(`confALIAS_WAIT', `10')
define(`confMIN_FREE_BLOCKS', `100')
define(`confBLANK_SUB', `.')
define(`confCON_EXPENSIVE', `False')
define(`confDELIVERY_MODE', `background')
define(`confTEMP_FILE_MODE', `0600')
define(`confMCI_CACHE_SIZE', `2')
define(`confMCI_CACHE_TIMEOUT', `5m')
define(`confUSE_ERRORS_TO', `False')
define(`confLOG_LEVEL', `9')
define(`confCHECK_ALIASES', `False')
define(`confOLD_STYLE_HEADERS', `True')
define(`confPRIVACY_FLAGS', `authwarnings')
define(`confSAFE_QUEUE', `True')
define(`confTO_QUEUERETURN', `5d')
define(`confTO_QUEUEWARN', `4h')
define(`confTIME_ZONE', `USE_SYSTEM')
define(`confCW_FILE', `MAIL_SETTINGS_DIR`'local-host-names')
define(`confMIME_FORMAT_ERRORS', `True')
define(`confFORWARD_PATH', `$z/.forward.$w:$z/.forward')
define(`confCR_FILE', `-o MAIL_SETTINGS_DIR`'relay-domains')
define(`confMILTER_MACROS_CONNECT', ``j, _, {daemon_name}, {if_name}, {if_addr}'')
define(`confMILTER_MACROS_HELO', ``{tls_version}, {cipher}, {cipher_bits}, {cert_subject}, {cert_issuer}'')
define(`confMILTER_MACROS_ENVFROM', ``i, {auth_type}, {auth_authen}, {auth_ssf}, {auth_author}, {mail_mailer}, {mail_host}, {mail_addr}'')
define(`confMILTER_MACROS_ENVRCPT', ``{rcpt_mailer}, {rcpt_host}, {rcpt_addr}'')
define(`confMILTER_MACROS_EOM', `{msg_id}')
divert(0)dnl
VERSIONID(`$Id: cfhead.m4,v 8.122 2013-11-22 20:51:13 ca Exp $')
Index: projects/clang360-import/contrib/sendmail
===================================================================
--- projects/clang360-import/contrib/sendmail (revision 277808)
+++ projects/clang360-import/contrib/sendmail (revision 277809)
Property changes on: projects/clang360-import/contrib/sendmail
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/contrib/sendmail:r277327-277803
Index: projects/clang360-import/contrib/tcpdump/print-ip.c
===================================================================
--- projects/clang360-import/contrib/tcpdump/print-ip.c (revision 277808)
+++ projects/clang360-import/contrib/tcpdump/print-ip.c (revision 277809)
@@ -1,714 +1,717 @@
/*
* Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by the University of California,
* Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
* the University nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* $FreeBSD$
*/
#define NETDISSECT_REWORKED
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <tcpdump-stdinc.h>
#include <string.h>
#include "interface.h"
#include "addrtoname.h"
#include "extract.h" /* must come after interface.h */
#include "ip.h"
#include "ipproto.h"
static const char tstr[] = "[|ip]";
static const struct tok ip_option_values[] = {
{ IPOPT_EOL, "EOL" },
{ IPOPT_NOP, "NOP" },
{ IPOPT_TS, "timestamp" },
{ IPOPT_SECURITY, "security" },
{ IPOPT_RR, "RR" },
{ IPOPT_SSRR, "SSRR" },
{ IPOPT_LSRR, "LSRR" },
{ IPOPT_RA, "RA" },
{ IPOPT_RFC1393, "traceroute" },
{ 0, NULL }
};
/*
* print the recorded route in an IP RR, LSRR or SSRR option.
*/
static void
ip_printroute(netdissect_options *ndo,
register const u_char *cp, u_int length)
{
register u_int ptr;
register u_int len;
if (length < 3) {
ND_PRINT((ndo, " [bad length %u]", length));
return;
}
/* option length must be 3 (type, len, ptr) plus a multiple of 4 */
if ((length + 1) & 3)
ND_PRINT((ndo, " [bad length %u]", length));
/* cp[2] is the 1-based "pointer" field; convert to a 0-based offset */
ptr = cp[2] - 1;
if (ptr < 3 || ((ptr + 1) & 3) || ptr > length + 1)
ND_PRINT((ndo, " [bad ptr %u]", cp[2]));
/* addresses start at offset 3, 4 bytes each */
for (len = 3; len < length; len += 4) {
ND_PRINT((ndo, " %s", ipaddr_string(ndo, &cp[len])));
/* comma-separate the slots that have already been recorded */
if (ptr > len)
ND_PRINT((ndo, ","));
}
}
/*
* If source-routing is present and valid, return the final destination.
* Otherwise, return IP destination.
*
* This is used for UDP and TCP pseudo-header in the checksum
* calculation.
*/
static uint32_t
ip_finddst(netdissect_options *ndo,
const struct ip *ip)
{
int length;
int len;
const u_char *cp;
uint32_t retval;
/* options (if any) follow the fixed 20-byte header */
cp = (const u_char *)(ip + 1);
length = (IP_HL(ip) << 2) - sizeof(struct ip);
for (; length > 0; cp += len, length -= len) {
int tt;
/* ND_TCHECK jumps to trunc: when the capture runs out */
ND_TCHECK(*cp);
tt = *cp;
if (tt == IPOPT_EOL)
break;
else if (tt == IPOPT_NOP)
len = 1;
else {
ND_TCHECK(cp[1]);
len = cp[1];
/* a declared option length < 2 would loop forever; bail */
if (len < 2)
break;
}
ND_TCHECK2(*cp, len);
switch (tt) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (len < 7)
break;
/* last 4 bytes of the route are the final destination */
UNALIGNED_MEMCPY(&retval, cp + len - 4, 4);
return retval;
}
}
trunc:
/* no (usable) source route: fall back to the IP header destination */
UNALIGNED_MEMCPY(&retval, &ip->ip_dst.s_addr, sizeof(uint32_t));
return retval;
}
/*
* Compute a V4-style checksum by building a pseudoheader.
*/
int
nextproto4_cksum(netdissect_options *ndo,
const struct ip *ip, const uint8_t *data,
u_int len, u_int covlen, u_int next_proto)
{
/* classic TCP/UDP pseudo-header layout (RFC 793 / RFC 768) */
struct phdr {
uint32_t src;
uint32_t dst;
u_char mbz;
u_char proto;
uint16_t len;
} ph;
struct cksum_vec vec[2];
/* pseudo-header.. */
ph.len = htons((uint16_t)len);
ph.mbz = 0;
ph.proto = next_proto;
UNALIGNED_MEMCPY(&ph.src, &ip->ip_src.s_addr, sizeof(uint32_t));
/*
 * With options present (IHL > 5) a source route may change the
 * real destination; ip_finddst() resolves it.
 */
if (IP_HL(ip) == 5)
UNALIGNED_MEMCPY(&ph.dst, &ip->ip_dst.s_addr, sizeof(uint32_t));
else
ph.dst = ip_finddst(ndo, ip);
/* checksum covers the pseudo-header followed by covlen data bytes */
vec[0].ptr = (const uint8_t *)(void *)&ph;
vec[0].len = sizeof(ph);
vec[1].ptr = data;
vec[1].len = covlen;
return (in_cksum(vec, 2));
}
/*
 * Print an IP timestamp option (IPOPT_TS).  Depending on the flags
 * nibble, each hop entry is either a bare 4-byte timestamp or an
 * 8-byte address+timestamp pair.
 */
static void
ip_printts(netdissect_options *ndo,
register const u_char *cp, u_int length)
{
register u_int ptr;
register u_int len;
int hoplen;
const char *type;
if (length < 4) {
ND_PRINT((ndo, "[bad length %u]", length));
return;
}
ND_PRINT((ndo, " TS{"));
/* entry size: 4 for timestamps only, 8 for address + timestamp */
hoplen = ((cp[3]&0xF) != IPOPT_TS_TSONLY) ? 8 : 4;
if ((length - 4) & (hoplen-1))
ND_PRINT((ndo, "[bad length %u]", length));
/* cp[2] is the 1-based "pointer" field; convert to 0-based */
ptr = cp[2] - 1;
len = 0;
if (ptr < 4 || ((ptr - 4) & (hoplen-1)) || ptr > length + 1)
ND_PRINT((ndo, "[bad ptr %u]", cp[2]));
switch (cp[3]&0xF) {
case IPOPT_TS_TSONLY:
ND_PRINT((ndo, "TSONLY"));
break;
case IPOPT_TS_TSANDADDR:
ND_PRINT((ndo, "TS+ADDR"));
break;
/*
 * prespecified should really be 3, but some ones might send 2
 * instead, and the IPOPT_TS_PRESPEC constant can apparently
 * have both values, so we have to hard-code it here.
 */
case 2:
ND_PRINT((ndo, "PRESPEC2.0"));
break;
case 3: /* IPOPT_TS_PRESPEC */
ND_PRINT((ndo, "PRESPEC"));
break;
default:
ND_PRINT((ndo, "[bad ts type %d]", cp[3]&0xF));
goto done;
}
/* " ^ " marks the position of the pointer within the entries */
type = " ";
for (len = 4; len < length; len += hoplen) {
if (ptr == len)
type = " ^ ";
ND_PRINT((ndo, "%s%d@%s", type, EXTRACT_32BITS(&cp[len+hoplen-4]),
hoplen!=8 ? "" : ipaddr_string(ndo, &cp[len])));
type = " ";
}
done:
ND_PRINT((ndo, "%s", ptr == len ? " ^ " : ""));
/* the high nibble of cp[3] counts hops that overflowed the option */
if (cp[3]>>4)
ND_PRINT((ndo, " [%d hops not recorded]} ", cp[3]>>4));
else
ND_PRINT((ndo, "}"));
}
/*
* print IP options.
*/
static void
ip_optprint(netdissect_options *ndo,
register const u_char *cp, u_int length)
{
register u_int option_len;
const char *sep = "";
/* walk the options area one option at a time */
for (; length > 0; cp += option_len, length -= option_len) {
u_int option_code;
ND_PRINT((ndo, "%s", sep));
sep = ",";
/* ND_TCHECK jumps to trunc: when the capture runs out */
ND_TCHECK(*cp);
option_code = *cp;
ND_PRINT((ndo, "%s",
tok2str(ip_option_values,"unknown %u",option_code)));
/* NOP and EOL are the only single-byte options */
if (option_code == IPOPT_NOP ||
option_code == IPOPT_EOL)
option_len = 1;
else {
ND_TCHECK(cp[1]);
option_len = cp[1];
/* a declared length < 2 would make no forward progress */
if (option_len < 2) {
ND_PRINT((ndo, " [bad length %u]", option_len));
return;
}
}
/* option must fit inside the remaining options area */
if (option_len > length) {
ND_PRINT((ndo, " [bad length %u]", option_len));
return;
}
ND_TCHECK2(*cp, option_len);
switch (option_code) {
case IPOPT_EOL:
return;
case IPOPT_TS:
ip_printts(ndo, cp, option_len);
break;
case IPOPT_RR: /* fall through */
case IPOPT_SSRR:
case IPOPT_LSRR:
ip_printroute(ndo, cp, option_len);
break;
case IPOPT_RA:
/* router alert (RFC 2113): print the value if non-zero */
if (option_len < 4) {
ND_PRINT((ndo, " [bad length %u]", option_len));
break;
}
ND_TCHECK(cp[3]);
if (EXTRACT_16BITS(&cp[2]) != 0)
ND_PRINT((ndo, " value %u", EXTRACT_16BITS(&cp[2])));
break;
case IPOPT_NOP: /* nothing to print - fall through */
case IPOPT_SECURITY:
default:
break;
}
}
return;
trunc:
ND_PRINT((ndo, "%s", tstr));
}
#define IP_RES 0x8000
static const struct tok ip_frag_values[] = {
{ IP_MF, "+" },
{ IP_DF, "DF" },
{ IP_RES, "rsvd" }, /* The RFC3514 evil ;-) bit */
{ 0, NULL }
};
struct ip_print_demux_state {
const struct ip *ip;
const u_char *cp;
u_int len, off;
u_char nh;
int advance;
};
static void
ip_print_demux(netdissect_options *ndo,
struct ip_print_demux_state *ipds)
{
struct protoent *proto;
struct cksum_vec vec[1];
again:
switch (ipds->nh) {
case IPPROTO_AH:
ipds->nh = *ipds->cp;
ipds->advance = ah_print(ndo, ipds->cp);
if (ipds->advance <= 0)
break;
ipds->cp += ipds->advance;
ipds->len -= ipds->advance;
goto again;
case IPPROTO_ESP:
{
int enh, padlen;
ipds->advance = esp_print(ndo, ipds->cp, ipds->len,
(const u_char *)ipds->ip,
&enh, &padlen);
if (ipds->advance <= 0)
break;
ipds->cp += ipds->advance;
ipds->len -= ipds->advance + padlen;
ipds->nh = enh & 0xff;
goto again;
}
case IPPROTO_IPCOMP:
{
int enh;
ipds->advance = ipcomp_print(ndo, ipds->cp, &enh);
if (ipds->advance <= 0)
break;
ipds->cp += ipds->advance;
ipds->len -= ipds->advance;
ipds->nh = enh & 0xff;
goto again;
}
case IPPROTO_SCTP:
sctp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len);
break;
case IPPROTO_DCCP:
dccp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len);
break;
case IPPROTO_TCP:
/* pass on the MF bit plus the offset to detect fragments */
tcp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip,
ipds->off & (IP_MF|IP_OFFMASK));
break;
case IPPROTO_UDP:
/* pass on the MF bit plus the offset to detect fragments */
udp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip,
ipds->off & (IP_MF|IP_OFFMASK));
break;
case IPPROTO_ICMP:
/* pass on the MF bit plus the offset to detect fragments */
icmp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip,
ipds->off & (IP_MF|IP_OFFMASK));
break;
case IPPROTO_PIGP:
/*
* XXX - the current IANA protocol number assignments
* page lists 9 as "any private interior gateway
* (used by Cisco for their IGRP)" and 88 as
* "EIGRP" from Cisco.
*
* Recent BSD <netinet/in.h> headers define
* IP_PROTO_PIGP as 9 and IP_PROTO_IGRP as 88.
* We define IP_PROTO_PIGP as 9 and
* IP_PROTO_EIGRP as 88; those names better
* match was the current protocol number
* assignments say.
*/
igrp_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_EIGRP:
eigrp_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_ND:
ND_PRINT((ndo, " nd %d", ipds->len));
break;
case IPPROTO_EGP:
egp_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_OSPF:
ospf_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip);
break;
case IPPROTO_IGMP:
igmp_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_IPV4:
/* DVMRP multicast tunnel (ip-in-ip encapsulation) */
ip_print(ndo, ipds->cp, ipds->len);
if (! ndo->ndo_vflag) {
ND_PRINT((ndo, " (ipip-proto-4)"));
return;
}
break;
#ifdef INET6
case IPPROTO_IPV6:
/* ip6-in-ip encapsulation */
ip6_print(ndo, ipds->cp, ipds->len);
break;
#endif /*INET6*/
case IPPROTO_RSVP:
rsvp_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_GRE:
/* do it */
gre_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_MOBILE:
mobile_print(ndo, ipds->cp, ipds->len);
break;
case IPPROTO_PIM:
vec[0].ptr = ipds->cp;
vec[0].len = ipds->len;
pim_print(ndo, ipds->cp, ipds->len, in_cksum(vec, 1));
break;
case IPPROTO_VRRP:
if (ndo->ndo_packettype == PT_CARP) {
if (ndo->ndo_vflag)
ND_PRINT((ndo, "carp %s > %s: ",
ipaddr_string(ndo, &ipds->ip->ip_src),
ipaddr_string(ndo, &ipds->ip->ip_dst)));
carp_print(ndo, ipds->cp, ipds->len, ipds->ip->ip_ttl);
} else {
if (ndo->ndo_vflag)
ND_PRINT((ndo, "vrrp %s > %s: ",
ipaddr_string(ndo, &ipds->ip->ip_src),
ipaddr_string(ndo, &ipds->ip->ip_dst)));
vrrp_print(ndo, ipds->cp, ipds->len,
(const u_char *)ipds->ip, ipds->ip->ip_ttl);
}
break;
case IPPROTO_PGM:
pgm_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip);
break;
#if defined(HAVE_NET_PFVAR_H)
case IPPROTO_PFSYNC:
pfsync_ip_print(ipds->cp, ipds->len);
break;
#endif
default:
if (ndo->ndo_nflag==0 && (proto = getprotobynumber(ipds->nh)) != NULL)
ND_PRINT((ndo, " %s", proto->p_name));
else
ND_PRINT((ndo, " ip-proto-%d", ipds->nh));
ND_PRINT((ndo, " %d", ipds->len));
break;
}
}
void
ip_print_inner(netdissect_options *ndo,
const u_char *bp,
u_int length, u_int nh,
const u_char *bp2)
{
struct ip_print_demux_state ipd;
ipd.ip = (const struct ip *)bp2;
ipd.cp = bp;
ipd.len = length;
ipd.off = 0;
ipd.nh = nh;
ipd.advance = 0;
ip_print_demux(ndo, &ipd);
}
/*
* print an IP datagram.
*/
void
ip_print(netdissect_options *ndo,
const u_char *bp,
u_int length)
{
struct ip_print_demux_state ipd;
struct ip_print_demux_state *ipds=&ipd;
const u_char *ipend;
u_int hlen;
struct cksum_vec vec[1];
uint16_t sum, ip_sum;
struct protoent *proto;
ipds->ip = (const struct ip *)bp;
+ ND_TCHECK(ipds->ip->ip_vhl);
if (IP_V(ipds->ip) != 4) { /* print version if != 4 */
ND_PRINT((ndo, "IP%u ", IP_V(ipds->ip)));
if (IP_V(ipds->ip) == 6)
ND_PRINT((ndo, ", wrong link-layer encapsulation"));
}
else if (!ndo->ndo_eflag)
ND_PRINT((ndo, "IP "));
- if ((u_char *)(ipds->ip + 1) > ndo->ndo_snapend) {
- ND_PRINT((ndo, "%s", tstr));
- return;
- }
+ ND_TCHECK(*ipds->ip);
if (length < sizeof (struct ip)) {
ND_PRINT((ndo, "truncated-ip %u", length));
return;
}
hlen = IP_HL(ipds->ip) * 4;
if (hlen < sizeof (struct ip)) {
ND_PRINT((ndo, "bad-hlen %u", hlen));
return;
}
ipds->len = EXTRACT_16BITS(&ipds->ip->ip_len);
if (length < ipds->len)
ND_PRINT((ndo, "truncated-ip - %u bytes missing! ",
ipds->len - length));
if (ipds->len < hlen) {
#ifdef GUESS_TSO
if (ipds->len) {
ND_PRINT((ndo, "bad-len %u", ipds->len));
return;
}
else {
/* we guess that it is a TSO send */
ipds->len = length;
}
#else
ND_PRINT((ndo, "bad-len %u", ipds->len));
return;
#endif /* GUESS_TSO */
}
/*
* Cut off the snapshot length to the end of the IP payload.
*/
ipend = bp + ipds->len;
if (ipend < ndo->ndo_snapend)
ndo->ndo_snapend = ipend;
ipds->len -= hlen;
ipds->off = EXTRACT_16BITS(&ipds->ip->ip_off);
if (ndo->ndo_vflag) {
ND_PRINT((ndo, "(tos 0x%x", (int)ipds->ip->ip_tos));
/* ECN bits */
if (ipds->ip->ip_tos & 0x03) {
switch (ipds->ip->ip_tos & 0x03) {
case 1:
ND_PRINT((ndo, ",ECT(1)"));
break;
case 2:
ND_PRINT((ndo, ",ECT(0)"));
break;
case 3:
ND_PRINT((ndo, ",CE"));
}
}
if (ipds->ip->ip_ttl >= 1)
ND_PRINT((ndo, ", ttl %u", ipds->ip->ip_ttl));
/*
* for the firewall guys, print id, offset.
* On all but the last stick a "+" in the flags portion.
* For unfragmented datagrams, note the don't fragment flag.
*/
ND_PRINT((ndo, ", id %u, offset %u, flags [%s], proto %s (%u)",
EXTRACT_16BITS(&ipds->ip->ip_id),
(ipds->off & 0x1fff) * 8,
bittok2str(ip_frag_values, "none", ipds->off&0xe000),
tok2str(ipproto_values,"unknown",ipds->ip->ip_p),
ipds->ip->ip_p));
ND_PRINT((ndo, ", length %u", EXTRACT_16BITS(&ipds->ip->ip_len)));
if ((hlen - sizeof(struct ip)) > 0) {
ND_PRINT((ndo, ", options ("));
ip_optprint(ndo, (u_char *)(ipds->ip + 1), hlen - sizeof(struct ip));
ND_PRINT((ndo, ")"));
}
if (!ndo->ndo_Kflag && (u_char *)ipds->ip + hlen <= ndo->ndo_snapend) {
vec[0].ptr = (const uint8_t *)(void *)ipds->ip;
vec[0].len = hlen;
sum = in_cksum(vec, 1);
if (sum != 0) {
ip_sum = EXTRACT_16BITS(&ipds->ip->ip_sum);
ND_PRINT((ndo, ", bad cksum %x (->%x)!", ip_sum,
in_cksum_shouldbe(ip_sum, sum)));
}
}
ND_PRINT((ndo, ")\n "));
}
/*
* If this is fragment zero, hand it to the next higher
* level protocol.
*/
if ((ipds->off & 0x1fff) == 0) {
ipds->cp = (const u_char *)ipds->ip + hlen;
ipds->nh = ipds->ip->ip_p;
if (ipds->nh != IPPROTO_TCP && ipds->nh != IPPROTO_UDP &&
ipds->nh != IPPROTO_SCTP && ipds->nh != IPPROTO_DCCP) {
ND_PRINT((ndo, "%s > %s: ",
ipaddr_string(ndo, &ipds->ip->ip_src),
ipaddr_string(ndo, &ipds->ip->ip_dst)));
}
ip_print_demux(ndo, ipds);
} else {
/* Ultra quiet now means that all this stuff should be suppressed */
if (ndo->ndo_qflag > 1) return;
/*
* if this isn't the first frag, we're missing the
* next level protocol header. print the ip addr
* and the protocol.
*/
if (ipds->off & 0x1fff) {
ND_PRINT((ndo, "%s > %s:", ipaddr_string(ndo, &ipds->ip->ip_src),
ipaddr_string(ndo, &ipds->ip->ip_dst)));
if (!ndo->ndo_nflag && (proto = getprotobynumber(ipds->ip->ip_p)) != NULL)
ND_PRINT((ndo, " %s", proto->p_name));
else
ND_PRINT((ndo, " ip-proto-%d", ipds->ip->ip_p));
}
}
+ return;
+
+trunc:
+ ND_PRINT((ndo, "%s", tstr));
+ return;
}
void
ipN_print(netdissect_options *ndo, register const u_char *bp, register u_int length)
{
struct ip hdr;
if (length < 4) {
ND_PRINT((ndo, "truncated-ip %d", length));
return;
}
memcpy (&hdr, bp, 4);
switch (IP_V(&hdr)) {
case 4:
ip_print (ndo, bp, length);
return;
#ifdef INET6
case 6:
ip6_print (ndo, bp, length);
return;
#endif
default:
ND_PRINT((ndo, "unknown ip %d", IP_V(&hdr)));
return;
}
}
/*
* Local Variables:
* c-style: whitesmith
* c-basic-offset: 8
* End:
*/
Index: projects/clang360-import/contrib/tcpdump/print-sl.c
===================================================================
--- projects/clang360-import/contrib/tcpdump/print-sl.c (revision 277808)
+++ projects/clang360-import/contrib/tcpdump/print-sl.c (revision 277809)
@@ -1,252 +1,252 @@
/*
* Copyright (c) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution, and (3) all advertising materials mentioning
* features or use of this software display the following acknowledgement:
* ``This product includes software developed by the University of California,
* Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
* the University nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* $FreeBSD$
*/
#define NETDISSECT_REWORKED
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <tcpdump-stdinc.h>
#include "interface.h"
#include "extract.h" /* must come after interface.h */
#include "ip.h"
#include "tcp.h"
#include "slcompress.h"
/*
* definitions of the pseudo- link-level header attached to slip
* packets grabbed by the packet filter (bpf) traffic monitor.
*/
#define SLIP_HDRLEN 16
#define SLX_DIR 0
#define SLX_CHDR 1
#define CHDR_LEN 15
#define SLIPDIR_IN 0
#define SLIPDIR_OUT 1
static const char tstr[] = "[|slip]";
static u_int lastlen[2][256];
static u_int lastconn = 255;
static void sliplink_print(netdissect_options *, const u_char *, const struct ip *, u_int);
static void compressed_sl_print(netdissect_options *, const u_char *, const struct ip *, u_int, int);
u_int
sl_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
register u_int caplen = h->caplen;
register u_int length = h->len;
register const struct ip *ip;
- if (caplen < SLIP_HDRLEN) {
+ if (caplen < SLIP_HDRLEN || length < SLIP_HDRLEN) {
ND_PRINT((ndo, "%s", tstr));
return (caplen);
}
length -= SLIP_HDRLEN;
ip = (struct ip *)(p + SLIP_HDRLEN);
if (ndo->ndo_eflag)
sliplink_print(ndo, p, ip, length);
switch (IP_V(ip)) {
case 4:
ip_print(ndo, (u_char *)ip, length);
break;
#ifdef INET6
case 6:
ip6_print(ndo, (u_char *)ip, length);
break;
#endif
default:
ND_PRINT((ndo, "ip v%d", IP_V(ip)));
}
return (SLIP_HDRLEN);
}
u_int
sl_bsdos_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
register u_int caplen = h->caplen;
register u_int length = h->len;
register const struct ip *ip;
if (caplen < SLIP_HDRLEN) {
ND_PRINT((ndo, "%s", tstr));
return (caplen);
}
length -= SLIP_HDRLEN;
ip = (struct ip *)(p + SLIP_HDRLEN);
#ifdef notdef
if (ndo->ndo_eflag)
sliplink_print(ndo, p, ip, length);
#endif
ip_print(ndo, (u_char *)ip, length);
return (SLIP_HDRLEN);
}
static void
sliplink_print(netdissect_options *ndo,
register const u_char *p, register const struct ip *ip,
register u_int length)
{
int dir;
u_int hlen;
dir = p[SLX_DIR];
ND_PRINT((ndo, dir == SLIPDIR_IN ? "I " : "O "));
if (ndo->ndo_nflag) {
/* XXX just dump the header */
register int i;
for (i = SLX_CHDR; i < SLX_CHDR + CHDR_LEN - 1; ++i)
ND_PRINT((ndo, "%02x.", p[i]));
ND_PRINT((ndo, "%02x: ", p[SLX_CHDR + CHDR_LEN - 1]));
return;
}
switch (p[SLX_CHDR] & 0xf0) {
case TYPE_IP:
ND_PRINT((ndo, "ip %d: ", length + SLIP_HDRLEN));
break;
case TYPE_UNCOMPRESSED_TCP:
/*
* The connection id is stored in the IP protocol field.
* Get it from the link layer since sl_uncompress_tcp()
* has restored the IP header copy to IPPROTO_TCP.
*/
lastconn = ((struct ip *)&p[SLX_CHDR])->ip_p;
hlen = IP_HL(ip);
hlen += TH_OFF((struct tcphdr *)&((int *)ip)[hlen]);
lastlen[dir][lastconn] = length - (hlen << 2);
ND_PRINT((ndo, "utcp %d: ", lastconn));
break;
default:
if (p[SLX_CHDR] & TYPE_COMPRESSED_TCP) {
compressed_sl_print(ndo, &p[SLX_CHDR], ip,
length, dir);
ND_PRINT((ndo, ": "));
} else
ND_PRINT((ndo, "slip-%d!: ", p[SLX_CHDR]));
}
}
static const u_char *
print_sl_change(netdissect_options *ndo,
const char *str, register const u_char *cp)
{
register u_int i;
if ((i = *cp++) == 0) {
i = EXTRACT_16BITS(cp);
cp += 2;
}
ND_PRINT((ndo, " %s%d", str, i));
return (cp);
}
static const u_char *
print_sl_winchange(netdissect_options *ndo,
register const u_char *cp)
{
register short i;
if ((i = *cp++) == 0) {
i = EXTRACT_16BITS(cp);
cp += 2;
}
if (i >= 0)
ND_PRINT((ndo, " W+%d", i));
else
ND_PRINT((ndo, " W%d", i));
return (cp);
}
static void
compressed_sl_print(netdissect_options *ndo,
const u_char *chdr, const struct ip *ip,
u_int length, int dir)
{
register const u_char *cp = chdr;
register u_int flags, hlen;
flags = *cp++;
if (flags & NEW_C) {
lastconn = *cp++;
ND_PRINT((ndo, "ctcp %d", lastconn));
} else
ND_PRINT((ndo, "ctcp *"));
/* skip tcp checksum */
cp += 2;
switch (flags & SPECIALS_MASK) {
case SPECIAL_I:
ND_PRINT((ndo, " *SA+%d", lastlen[dir][lastconn]));
break;
case SPECIAL_D:
ND_PRINT((ndo, " *S+%d", lastlen[dir][lastconn]));
break;
default:
if (flags & NEW_U)
cp = print_sl_change(ndo, "U=", cp);
if (flags & NEW_W)
cp = print_sl_winchange(ndo, cp);
if (flags & NEW_A)
cp = print_sl_change(ndo, "A+", cp);
if (flags & NEW_S)
cp = print_sl_change(ndo, "S+", cp);
break;
}
if (flags & NEW_I)
cp = print_sl_change(ndo, "I+", cp);
/*
* 'hlen' is the length of the uncompressed TCP/IP header (in words).
* 'cp - chdr' is the length of the compressed header.
* 'length - hlen' is the amount of data in the packet.
*/
hlen = IP_HL(ip);
hlen += TH_OFF((struct tcphdr *)&((int32_t *)ip)[hlen]);
lastlen[dir][lastconn] = length - (hlen << 2);
ND_PRINT((ndo, " %d (%ld)", lastlen[dir][lastconn], (long)(cp - chdr)));
}
Index: projects/clang360-import/contrib/tcpdump
===================================================================
--- projects/clang360-import/contrib/tcpdump (revision 277808)
+++ projects/clang360-import/contrib/tcpdump (revision 277809)
Property changes on: projects/clang360-import/contrib/tcpdump
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,2 ##
Merged /head/contrib/tcpdump:r277719-277803
Merged /vendor/tcpdump/dist:r277782
Index: projects/clang360-import/etc/sendmail/Makefile
===================================================================
--- projects/clang360-import/etc/sendmail/Makefile (revision 277808)
+++ projects/clang360-import/etc/sendmail/Makefile (revision 277809)
@@ -1,94 +1,95 @@
# @(#)Makefile 8.19 (Berkeley) 1/14/97
# $FreeBSD$
M4= m4
CHMOD= chmod
ROMODE= 444
RM= rm -f
SENDMAIL_DIR= ${.CURDIR}/../../contrib/sendmail
SMDIR= ${SENDMAIL_DIR}/src
SENDMAIL_CF_DIR?=${SENDMAIL_DIR}/cf
# this is overkill, but....
M4FILES!= find ${SENDMAIL_CF_DIR} -type f -name '*.m4' -print
.SUFFIXES: .mc .cf
.mc.cf: ${M4FILES}
${RM} ${.TARGET}
- ${M4} -D_CF_DIR_=${SENDMAIL_CF_DIR}/ ${SENDMAIL_M4_FLAGS} \
+ ${M4} -D_CF_DIR_=${SENDMAIL_CF_DIR}/ -D_NO_MAKEINFO_ \
+ ${SENDMAIL_M4_FLAGS} \
${SENDMAIL_CF_DIR}/m4/cf.m4 ${.IMPSRC} > ${.TARGET}
${CHMOD} ${ROMODE} ${.TARGET}
DEST_CF= ${DESTDIR}/etc/mail/sendmail.cf
DEST_SUBMIT_CF= ${DESTDIR}/etc/mail/submit.cf
ALL= freebsd.cf freebsd.submit.cf
CLEANFILES= freebsd.cf freebsd.submit.cf
# Local SENDMAIL_MC or SENDMAIL_CF may be set in /etc/make.conf.
# Warning! If set, this causes 'make install' to always copy it
# over /etc/mail/sendmail.cf!!!
# Caveat emptor! Be sure you want this before you enable it.
.if defined(SENDMAIL_MC) && defined(SENDMAIL_CF)
.error Both SENDMAIL_MC and SENDMAIL_CF cannot be set.
.elif defined(SENDMAIL_MC)
INSTALL_CF= ${SENDMAIL_MC:T:R}.cf
ALL+= ${INSTALL_CF}
CLEANFILES+= ${SENDMAIL_MC:T:R}.cf
${INSTALL_CF}: ${SENDMAIL_MC}
.elif defined(SENDMAIL_CF)
ALL+= ${SENDMAIL_CF}
INSTALL_CF= ${SENDMAIL_CF}
.endif
.if !defined(SENDMAIL_SET_USER_ID) && defined(SENDMAIL_SUBMIT_MC)
INSTALL_SUBMIT_CF= ${SENDMAIL_SUBMIT_MC:T:R}.cf
ALL+= ${INSTALL_SUBMIT_CF}
CLEANFILES+= ${INSTALL_SUBMIT_CF}
${INSTALL_SUBMIT_CF}: ${SENDMAIL_SUBMIT_MC}
.endif
# Additional .cf files to build.
.if defined(SENDMAIL_ADDITIONAL_MC)
SENDMAIL_ADDITIONAL_CF= ${SENDMAIL_ADDITIONAL_MC:T:S/.mc$/.cf/}
ALL+= ${SENDMAIL_ADDITIONAL_CF}
CLEANFILES+= ${SENDMAIL_ADDITIONAL_CF}
.for mc in ${SENDMAIL_ADDITIONAL_MC}
${mc:T:R}.cf: ${mc}
.endfor
.endif
all: ${ALL}
distribution:
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 644 \
${.CURDIR}/freebsd.mc freebsd.cf ${DESTDIR}/etc/mail
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 444 \
${.CURDIR}/freebsd.submit.mc freebsd.submit.cf ${DESTDIR}/etc/mail
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 444 \
${SMDIR}/helpfile ${DESTDIR}/etc/mail
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 640 \
/dev/null ${DESTDIR}/var/log/sendmail.st
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 644 \
freebsd.cf ${DEST_CF}
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 444 \
freebsd.submit.cf ${DEST_SUBMIT_CF}
install:
.if defined(INSTALL_CF) && ${INSTALL_CF} != ${DEST_CF}
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 644 \
${INSTALL_CF} ${DEST_CF}
.endif
.if defined(SENDMAIL_ADDITIONAL_CF)
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 644 \
${SENDMAIL_ADDITIONAL_CF} ${DESTDIR}/etc/mail
.endif
.if !defined(SENDMAIL_SET_USER_ID) && \
defined(INSTALL_SUBMIT_CF) && ${INSTALL_SUBMIT_CF} != ${DEST_SUBMIT_CF}
${INSTALL} -o ${BINOWN} -g ${BINGRP} -m 644 \
${INSTALL_SUBMIT_CF} ${DEST_SUBMIT_CF}
.endif
.include <bsd.prog.mk>
Index: projects/clang360-import/etc
===================================================================
--- projects/clang360-import/etc (revision 277808)
+++ projects/clang360-import/etc (revision 277809)
Property changes on: projects/clang360-import/etc
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/etc:r277777-277803
Index: projects/clang360-import/lib/libthread_db/arch/i386/libpthread_md.c
===================================================================
--- projects/clang360-import/lib/libthread_db/arch/i386/libpthread_md.c (revision 277808)
+++ projects/clang360-import/lib/libthread_db/arch/i386/libpthread_md.c (revision 277809)
@@ -1,118 +1,118 @@
/*
* Copyright (c) 2004 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <machine/npx.h>
#include <string.h>
#include <thread_db.h>
#include "libpthread_db.h"
static int has_xmm_regs;
void
pt_reg_to_ucontext(const struct reg *r, ucontext_t *uc)
{
memcpy(&uc->uc_mcontext.mc_fs, &r->r_fs, 18*4);
uc->uc_mcontext.mc_gs = r->r_gs;
}
void
pt_ucontext_to_reg(const ucontext_t *uc, struct reg *r)
{
memcpy(&r->r_fs, &uc->uc_mcontext.mc_fs, 18*4);
r->r_gs = uc->uc_mcontext.mc_gs;
}
void
pt_fpreg_to_ucontext(const struct fpreg* r, ucontext_t *uc)
{
if (!has_xmm_regs)
memcpy(&uc->uc_mcontext.mc_fpstate, r,
sizeof(struct save87));
else {
int i;
struct savexmm *sx = (struct savexmm *)&uc->uc_mcontext.mc_fpstate;
memcpy(&sx->sv_env, &r->fpr_env, sizeof(r->fpr_env));
for (i = 0; i < 8; ++i)
memcpy(&sx->sv_fp[i].fp_acc, &r->fpr_acc[i], 10);
}
}
void
pt_ucontext_to_fpreg(const ucontext_t *uc, struct fpreg *r)
{
if (!has_xmm_regs)
memcpy(r, &uc->uc_mcontext.mc_fpstate, sizeof(struct save87));
else {
int i;
- struct savexmm *sx = (struct savexmm *)&uc->uc_mcontext.mc_fpstate;
+ const struct savexmm *sx = (const struct savexmm *)&uc->uc_mcontext.mc_fpstate;
memcpy(&r->fpr_env, &sx->sv_env, sizeof(r->fpr_env));
for (i = 0; i < 8; ++i)
memcpy(&r->fpr_acc[i], &sx->sv_fp[i].fp_acc, 10);
}
}
void
pt_fxsave_to_ucontext(const char* r, ucontext_t *uc)
{
if (has_xmm_regs)
memcpy(&uc->uc_mcontext.mc_fpstate, r, sizeof(struct savexmm));
}
void
pt_ucontext_to_fxsave(const ucontext_t *uc, char *r)
{
if (has_xmm_regs)
memcpy(r, &uc->uc_mcontext.mc_fpstate, sizeof(struct savexmm));
}
void
pt_md_init(void)
{
ucontext_t uc;
getcontext(&uc);
if (uc.uc_mcontext.mc_fpformat == _MC_FPFMT_XMM)
has_xmm_regs = 1;
}
int
pt_reg_sstep(struct reg *reg, int step)
{
unsigned int old;
old = reg->r_eflags;
if (step)
reg->r_eflags |= 0x0100;
else
reg->r_eflags &= ~0x0100;
return (old != reg->r_eflags); /* changed ? */
}
Index: projects/clang360-import/sbin/geom/class/mountver/gmountver.8
===================================================================
--- projects/clang360-import/sbin/geom/class/mountver/gmountver.8 (revision 277808)
+++ projects/clang360-import/sbin/geom/class/mountver/gmountver.8 (revision 277809)
@@ -1,130 +1,130 @@
.\"-
.\" Copyright (c) 2010 Edward Tomasz Napierala
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
-.Dd January 14, 2010
+.Dd January 27, 2015
.Dt GMOUNTVER 8
.Os
.Sh NAME
.Nm gmountver
.Nd "control utility for disk mount verification GEOM class"
.Sh SYNOPSIS
.Nm
.Cm create
.Op Fl v
.Ar dev ...
.Nm
.Cm destroy
.Op Fl fv
.Ar prov ...
.Nm
.Cm list
.Nm
.Cm status
.Op Fl s Ar name
.Nm
.Cm load
.Op Fl v
.Nm
.Cm unload
.Op Fl v
.Sh DESCRIPTION
The
.Nm
utility is used to control the mount verification GEOM class.
When configured, it passes all the I/O requests to the underlying provider.
When the underlying provider disappears - for example because the disk device
got disconnected - it queues all the I/O requests and waits for the provider
to reappear.
When that happens, it attaches to it and sends the queued requests.
.Pp
The first argument to
.Nm
indicates an action to be performed:
.Bl -tag -width ".Cm destroy"
.It Cm create
Cache the given devices with specified
.Ar name .
The kernel module
.Pa geom_mountver.ko
will be loaded if it is not loaded already.
.It Cm destroy
Destroy
.Ar name .
.It Cm list
See
.Xr geom 8 .
.It Cm status
See
.Xr geom 8 .
.It Cm load
See
.Xr geom 8 .
.It Cm unload
See
.Xr geom 8 .
.El
.Pp
Additional options:
.Bl -tag -width indent
.It Fl f
Force the removal of the specified mountver device.
.It Fl v
Be more verbose.
.El
.Sh SYSCTL VARIABLES
The following
.Xr sysctl 8
variables can be used to control the behavior of the
.Nm MOUNTVER
GEOM class.
The default value is shown next to each variable.
.Bl -tag -width indent
.It Va kern.geom.mountver.debug : No 0
Debug level of the
.Nm MOUNTVER
GEOM class.
This can be set to a number between 0 and 3 inclusive.
If set to 0 minimal debug information is printed, and if set to 3 the
maximum amount of debug information is printed.
-.It Va kern.geom.mountver.check.check_ident : No 1
+.It Va kern.geom.mountver.check_ident : No 1
This can be set to 0 or 1.
If set to 0,
.Nm
will reattach to the device even if the device reports a different disk ID.
.El
.Sh EXIT STATUS
Exit status is 0 on success, and 1 if the command fails.
.Sh SEE ALSO
.Xr geom 4 ,
.Xr geom 8
.Sh HISTORY
The
.Nm
utility appeared in
.Fx 9.0 .
.Sh AUTHORS
.An Edward Tomasz Napierala Aq Mt trasz@FreeBSD.org
Index: projects/clang360-import/sbin
===================================================================
--- projects/clang360-import/sbin (revision 277808)
+++ projects/clang360-import/sbin (revision 277809)
Property changes on: projects/clang360-import/sbin
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/sbin:r277777-277803
Index: projects/clang360-import/share/man/man9/pmap_enter.9
===================================================================
--- projects/clang360-import/share/man/man9/pmap_enter.9 (revision 277808)
+++ projects/clang360-import/share/man/man9/pmap_enter.9 (revision 277809)
@@ -1,68 +1,164 @@
.\"
.\" Copyright (c) 2003 Bruce M Simpson <bms@spc.org>
+.\" Copyright (c) 2014 The FreeBSD Foundation
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
-.Dd July 21, 2003
+.Dd January 27, 2015
.Dt PMAP_ENTER 9
.Os
.Sh NAME
.Nm pmap_enter
.Nd insert a virtual page into a physical map
.Sh SYNOPSIS
.In sys/param.h
.In vm/vm.h
.In vm/pmap.h
-.Ft void
+.Ft int
.Fo pmap_enter
-.Fa "pmap_t pmap" "vm_offset_t va" "vm_page_t p" "vm_prot_t prot"
-.Fa "boolean_t wired"
+.Fa "pmap_t pmap" "vm_offset_t va" "vm_page_t m" "vm_prot_t prot"
+.Fa "u_int flags" "int8_t psind"
.Fc
.Sh DESCRIPTION
The
.Fn pmap_enter
-function inserts the given physical page
-.Fa p ,
-into the physical map
-.Fa pmap ,
-at the virtual address
-.Fa va ,
+function creates a mapping in the physical map
+.Fa pmap
+from the virtual address
+.Fa va
+to the physical page
+.Fa m
with the protection
.Fa prot .
-If
-.Fa wired
-is
-.Dv TRUE ,
-then increment the wired count for the page as soon as the mapping
-is inserted into
-.Fa pmap .
-.Sh IMPLEMENTATION NOTES
-This routine MAY NOT lazy-evaluate the entry; it is required by
-specification to make the requested entry at the time it is called.
+Any previous mapping at the virtual address
+.Fa va
+is destroyed.
+.Pp
+The
+.Fa flags
+argument may have the following values:
+.Bl -tag -width ".Dv PMAP_ENTER_NOSLEEP"
+.It Dv VM_PROT_READ
+A read access to the given virtual address triggered the call.
+.It Dv VM_PROT_WRITE
+A write access to the given virtual address triggered the call.
+.It Dv VM_PROT_EXECUTE
+An execute access to the given virtual address triggered the call.
+.It Dv PMAP_ENTER_WIRED
+The mapping should be marked as wired.
+.It Dv PMAP_ENTER_NOSLEEP
+This function may not sleep during creation of the mapping.
+If the mapping cannot be created without sleeping, an appropriate
+Mach VM error is returned.
+.El
+If the
+.Dv PMAP_ENTER_NOSLEEP
+flag is not specified, this function must create the requested mapping
+before returning.
+It may not fail.
+In order to create the requested mapping, this function may destroy
+any non-wired mapping in any pmap.
+.Pp
+The
+.Fa psind
+parameter specifies the page size that should be used by the mapping.
+The supported page sizes are described by the global array
+.Dv pagesizes[] .
+The desired page size is specified by passing the index of the array
+element that equals the desired page size.
+.Pp
+When the
+.Fn pmap_enter
+function destroys or updates a managed mapping, including an existing
+mapping at virtual address
+.Fa va ,
+it updates the
+.Ft vm_page
+structure corresponding to the previously mapped physical page.
+If the physical page was accessed through the managed mapping,
+then the
+.Ft vm_page
+structure's
+.Dv PGA_REFERENCED
+aflag is set.
+If the physical page was modified through the managed mapping, then the
+.Fn vm_page_dirty
+function is called on the
+.Ft vm_page
+structure.
+.Pp
+The
+.Dv PGA_WRITEABLE
+aflag must be set for the page
+.Fa m
+if the new mapping is managed and writeable.
+It is advised to clear
+.Dv PGA_WRITEABLE
+for destroyed mappings if the implementation can ensure
+that no other writeable managed mappings for the previously
+mapped pages exist.
+.Pp
+If the page
+.Fa m
+is managed, the page must be busied by the caller
+or the owning object must be locked.
+In the later case, the
+.Dv PMAP_ENTER_NOSLEEP
+must be specified by the caller.
+.Pp
+The
+.Fn pmap_enter
+function must handle the multiprocessor TLB consistency for the
+given address.
+.Sh NOTES
+On amd64, arm and i386 architectures the existing implementation
+of the
+.Nm
+function is incomplete, only value 0 for
+.Fa psind
+is supported.
+Other supported architectures have
+.Dv pagesizes[]
+array of size 1.
+.Sh RETURN VALUES
+If successful, the
+.Fn pmap_enter
+function returns
+.Er KERN_SUCCESS .
+If the
+.Dv PMAP_ENTER_NOSLEEP
+flag was specified and the resources required for the mapping cannot
+be acquired without sleeping,
+.Dv KERN_RESOURCE_SHORTAGE
+is returned.
.Sh SEE ALSO
.Xr pmap 9
.Sh AUTHORS
-This manual page was written by
-.An Bruce M Simpson Aq Mt bms@spc.org .
+This manual page was first written by
+.An Bruce M Simpson Aq Mt bms@spc.org
+and then rewritten by
+.An Alan Cox Aq Mt alc@FreeBSD.org
+and
+.An Konstantin Belousov Aq Mt kib@FreeBSD.org .
Index: projects/clang360-import/share
===================================================================
--- projects/clang360-import/share (revision 277808)
+++ projects/clang360-import/share (revision 277809)
Property changes on: projects/clang360-import/share
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/share:r277777-277803
Index: projects/clang360-import/sys/dev/acpica/acpi.c
===================================================================
--- projects/clang360-import/sys/dev/acpica/acpi.c (revision 277808)
+++ projects/clang360-import/sys/dev/acpica/acpi.c (revision 277809)
@@ -1,3995 +1,3999 @@
/*-
* Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
* Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
* Copyright (c) 2000, 2001 Michael Smith
* Copyright (c) 2000 BSDi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/linker.h>
#include <sys/power.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/timetc.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/pci_cfgreg.h>
#endif
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <isa/isavar.h>
#include <isa/pnpvar.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acnamesp.h>
#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpiio.h>
#include <vm/vm_param.h>
static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_BUS
ACPI_MODULE_NAME("ACPI")
static d_open_t acpiopen;
static d_close_t acpiclose;
static d_ioctl_t acpiioctl;
static struct cdevsw acpi_cdevsw = {
.d_version = D_VERSION,
.d_open = acpiopen,
.d_close = acpiclose,
.d_ioctl = acpiioctl,
.d_name = "acpi",
};
struct acpi_interface {
ACPI_STRING *data;
int num;
};
/* Global mutex for locking access to the ACPI subsystem. */
struct mtx acpi_mutex;
struct callout acpi_sleep_timer;
/* Bitmap of device quirks. */
int acpi_quirks;
/* Supported sleep states. */
static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
static int acpi_modevent(struct module *mod, int event, void *junk);
static int acpi_probe(device_t dev);
static int acpi_attach(device_t dev);
static int acpi_suspend(device_t dev);
static int acpi_resume(device_t dev);
static int acpi_shutdown(device_t dev);
static device_t acpi_add_child(device_t bus, u_int order, const char *name,
int unit);
static int acpi_print_child(device_t bus, device_t child);
static void acpi_probe_nomatch(device_t bus, device_t child);
static void acpi_driver_added(device_t dev, driver_t *driver);
static int acpi_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result);
static int acpi_write_ivar(device_t dev, device_t child, int index,
uintptr_t value);
static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
static void acpi_reserve_resources(device_t dev);
static int acpi_sysres_alloc(device_t dev);
static int acpi_set_resource(device_t dev, device_t child, int type,
int rid, u_long start, u_long count);
static struct resource *acpi_alloc_resource(device_t bus, device_t child,
int type, int *rid, u_long start, u_long end,
u_long count, u_int flags);
static int acpi_adjust_resource(device_t bus, device_t child, int type,
struct resource *r, u_long start, u_long end);
static int acpi_release_resource(device_t bus, device_t child, int type,
int rid, struct resource *r);
static void acpi_delete_resource(device_t bus, device_t child, int type,
int rid);
static uint32_t acpi_isa_get_logicalid(device_t dev);
static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
static char *acpi_device_id_probe(device_t bus, device_t dev, char **ids);
static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
ACPI_BUFFER *ret);
static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
void *context, void **retval);
static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
int max_depth, acpi_scan_cb_t user_fn, void *arg);
static int acpi_set_powerstate(device_t child, int state);
static int acpi_isa_pnp_probe(device_t bus, device_t child,
struct isa_pnp_id *ids);
static void acpi_probe_children(device_t bus);
static void acpi_probe_order(ACPI_HANDLE handle, int *order);
static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
void *context, void **status);
static void acpi_sleep_enable(void *arg);
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
static BOOLEAN acpi_has_hid(ACPI_HANDLE handle);
static void acpi_resync_clock(struct acpi_softc *sc);
static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
static int acpi_wake_prep_walk(int sstate);
static int acpi_wake_sysctl_walk(device_t dev);
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
static void acpi_system_eventhandler_sleep(void *arg, int state);
static void acpi_system_eventhandler_wakeup(void *arg, int state);
static int acpi_sname2sstate(const char *sname);
static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_pm_func(u_long cmd, void *arg, ...);
static int acpi_child_location_str_method(device_t acdev, device_t child,
char *buf, size_t buflen);
static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
char *buf, size_t buflen);
#if defined(__i386__) || defined(__amd64__)
static void acpi_enable_pcie(void);
#endif
static void acpi_hint_device_unit(device_t acdev, device_t child,
const char *name, int *unitp);
static void acpi_reset_interfaces(device_t dev);
static device_method_t acpi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_probe),
DEVMETHOD(device_attach, acpi_attach),
DEVMETHOD(device_shutdown, acpi_shutdown),
DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_suspend, acpi_suspend),
DEVMETHOD(device_resume, acpi_resume),
/* Bus interface */
DEVMETHOD(bus_add_child, acpi_add_child),
DEVMETHOD(bus_print_child, acpi_print_child),
DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch),
DEVMETHOD(bus_driver_added, acpi_driver_added),
DEVMETHOD(bus_read_ivar, acpi_read_ivar),
DEVMETHOD(bus_write_ivar, acpi_write_ivar),
DEVMETHOD(bus_get_resource_list, acpi_get_rlist),
DEVMETHOD(bus_set_resource, acpi_set_resource),
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_alloc_resource, acpi_alloc_resource),
DEVMETHOD(bus_adjust_resource, acpi_adjust_resource),
DEVMETHOD(bus_release_resource, acpi_release_resource),
DEVMETHOD(bus_delete_resource, acpi_delete_resource),
DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method),
DEVMETHOD(bus_child_location_str, acpi_child_location_str_method),
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit),
DEVMETHOD(bus_get_domain, acpi_get_domain),
/* ACPI bus */
DEVMETHOD(acpi_id_probe, acpi_device_id_probe),
DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj),
DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep),
DEVMETHOD(acpi_scan_children, acpi_device_scan_children),
/* ISA emulation */
DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe),
DEVMETHOD_END
};
static driver_t acpi_driver = {
"acpi",
acpi_methods,
sizeof(struct acpi_softc),
};
static devclass_t acpi_devclass;
DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
MODULE_VERSION(acpi, 1);
ACPI_SERIAL_DECL(acpi, "ACPI root bus");
/* Local pools for managing system resources for ACPI child devices. */
static struct rman acpi_rman_io, acpi_rman_mem;
#define ACPI_MINIMUM_AWAKETIME 5
/* Holds the description of the acpi0 device. */
static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
static char acpi_ca_version[12];
SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
acpi_ca_version, 0, "Version of Intel ACPI-CA");
/*
* Allow overriding _OSI methods.
*/
static char acpi_install_interface[256];
TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
sizeof(acpi_install_interface));
static char acpi_remove_interface[256];
TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
sizeof(acpi_remove_interface));
/* Allow users to dump Debug objects without ACPI debugger. */
static int acpi_debug_objects;
TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects);
SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects,
CTLFLAG_RW | CTLTYPE_INT, NULL, 0, acpi_debug_objects_sysctl, "I",
"Enable Debug objects");
/* Allow the interpreter to ignore common mistakes in BIOS. */
static int acpi_interpreter_slack = 1;
TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack);
SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN,
&acpi_interpreter_slack, 1, "Turn on interpreter slack mode.");
/* Ignore register widths set by FADT and use default widths instead. */
static int acpi_ignore_reg_width = 1;
TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width);
SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN,
&acpi_ignore_reg_width, 1, "Ignore register widths set by FADT");
#ifdef __amd64__
/* Reset system clock while resuming. XXX Remove once tested. */
static int acpi_reset_clock = 1;
TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
&acpi_reset_clock, 1, "Reset system clock while resuming.");
#endif
/* Allow users to override quirks. */
TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
static int acpi_susp_bounce;
SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
&acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
/*
 * Module event handler for the ACPI driver.
 *
 * ACPI can only be loaded as a module by the loader; activating it after
 * system bootstrap time is not useful, and can be fatal to the system.
 * It also cannot be unloaded, since the entire system bus hierarchy hangs
 * off it.
 */
static int
acpi_modevent(struct module *mod, int event, void *junk)
{

    if (event == MOD_LOAD) {
        /* Refuse to load once the system is past cold boot. */
        if (!cold) {
            printf("The ACPI driver cannot be loaded after boot.\n");
            return (EPERM);
        }
    } else if (event == MOD_UNLOAD) {
        /* Refuse unload while ACPI is the active power-management system. */
        if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
            return (EBUSY);
    }
    return (0);
}
/*
 * Perform early initialization.
 *
 * Brings up the ACPI-CA table manager (AcpiInitializeTables), applies any
 * per-system quirks from the quirk table, and honors the "acpi.disabled"
 * hint override.  Returns an ACPI_STATUS; AE_SUPPORT if the system is
 * blacklisted (ACPI_Q_BROKEN).
 */
ACPI_STATUS
acpi_Startup(void)
{
    static int started = 0;
    ACPI_STATUS status;
    int val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Only run the startup code once. The MADT driver also calls this. */
    if (started)
        return_VALUE (AE_OK);
    started = 1;

    /*
     * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
     * if more tables exist.
     */
    if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
        printf("ACPI: Table initialisation failed: %s\n",
            AcpiFormatException(status));
        return_VALUE (status);
    }

    /* Set up any quirks we have for this system. */
    if (acpi_quirks == ACPI_Q_OK)
        acpi_table_quirks(&acpi_quirks);

    /* If the user manually set the disabled hint to 0, force-enable ACPI. */
    if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
        acpi_quirks &= ~ACPI_Q_BROKEN;
    if (acpi_quirks & ACPI_Q_BROKEN) {
        printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n");
        status = AE_SUPPORT;
    }

    return_VALUE (status);
}
/*
 * Detect ACPI and perform early initialisation.
 *
 * Locates the RSDP, follows it to the XSDT (preferred when Revision > 1)
 * or RSDT, and builds the acpi0 device description string from the table's
 * OEM identifiers.  Returns 0 on success or ENXIO if ACPI is disabled,
 * unavailable, or another PM system is active.
 */
int
acpi_identify(void)
{
    ACPI_TABLE_RSDP *rsdp;
    ACPI_TABLE_HEADER *rsdt;
    ACPI_PHYSICAL_ADDRESS paddr;
    struct sbuf sb;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (!cold)
        return (ENXIO);

    /* Check that we haven't been disabled with a hint. */
    if (resource_disabled("acpi", 0))
        return (ENXIO);

    /* Check for other PM systems. */
    if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
        power_pm_get_type() != POWER_PM_TYPE_ACPI) {
        printf("ACPI identify failed, other PM system enabled.\n");
        return (ENXIO);
    }

    /* Initialize root tables. */
    if (ACPI_FAILURE(acpi_Startup())) {
        printf("ACPI: Try disabling either ACPI or apic support.\n");
        return (ENXIO);
    }

    /* Map the RSDP just long enough to pick the root table address. */
    if ((paddr = AcpiOsGetRootPointer()) == 0 ||
        (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
        return (ENXIO);
    if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
        paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
    else
        paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
    AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));

    if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
        return (ENXIO);

    /* Format "<OemId> <OemTableId>" into the static acpi_desc buffer. */
    sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
    sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
    sbuf_trim(&sb);
    sbuf_putc(&sb, ' ');
    sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sbuf_delete(&sb);
    AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));

    snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);

    return (0);
}
/*
 * Fetch some descriptive data from ACPI to put in our attach message.
 * The description string itself was built earlier by acpi_identify().
 */
static int
acpi_probe(device_t dev)
{

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    device_set_desc(dev, acpi_desc);

    /* NOWILDCARD: only match the explicitly-added acpi0 device. */
    return_VALUE (BUS_PROBE_NOWILDCARD);
}
/*
 * Attach the ACPI root bus device.
 *
 * Initializes the local rman pools and the global ACPI mutex, brings the
 * ACPI-CA subsystem fully online (subsystem init, table load, hardware
 * enable, object init), builds the hw.acpi sysctl tree, probes supported
 * sleep states, registers shutdown/sleep/wakeup event handlers, creates
 * /dev/acpi, and finally probes and attaches child devices.  The order of
 * these steps matters; see the inline comments.
 */
static int
acpi_attach(device_t dev)
{
    struct acpi_softc *sc;
    ACPI_STATUS status;
    int error, state;
    UINT32 flags;
    UINT8 TypeA, TypeB;
    char *env;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->acpi_dev = dev;
    callout_init(&sc->susp_force_to, TRUE);

    error = ENXIO;

    /* Initialize resource manager. */
    acpi_rman_io.rm_type = RMAN_ARRAY;
    acpi_rman_io.rm_start = 0;
    acpi_rman_io.rm_end = 0xffff;
    acpi_rman_io.rm_descr = "ACPI I/O ports";
    if (rman_init(&acpi_rman_io) != 0)
        panic("acpi rman_init IO ports failed");
    acpi_rman_mem.rm_type = RMAN_ARRAY;
    acpi_rman_mem.rm_start = 0;
    acpi_rman_mem.rm_end = ~0ul;
    acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
    if (rman_init(&acpi_rman_mem) != 0)
        panic("acpi rman_init memory failed");

    /* Initialise the ACPI mutex */
    mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);

    /*
     * Set the globals from our tunables. This is needed because ACPI-CA
     * uses UINT8 for some values and we have no tunable_byte.
     */
    AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE;
    AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
    AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE;

#ifndef ACPI_DEBUG
    /*
     * Disable all debugging layers and levels.
     */
    AcpiDbgLayer = 0;
    AcpiDbgLevel = 0;
#endif

    /* Start up the ACPI CA subsystem. */
    status = AcpiInitializeSubsystem();
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "Could not initialize Subsystem: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /* Override OS interfaces if the user requested. */
    acpi_reset_interfaces(dev);

    /* Load ACPI name space. */
    status = AcpiLoadTables();
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "Could not load Namespace: %s\n",
            AcpiFormatException(status));
        goto out;
    }

#if defined(__i386__) || defined(__amd64__)
    /* Handle MCFG table if present. */
    acpi_enable_pcie();
#endif

    /*
     * Note that some systems (specifically, those with namespace evaluation
     * issues that require the avoidance of parts of the namespace) must
     * avoid running _INI and _STA on everything, as well as dodging the final
     * object init pass.
     *
     * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT).
     *
     * XXX We should arrange for the object init pass after we have attached
     * all our child devices, but on many systems it works here.
     */
    flags = 0;
    if (testenv("debug.acpi.avoid"))
        flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;

    /* Bring the hardware and basic handlers online. */
    if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
        device_printf(dev, "Could not enable ACPI: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /*
     * Call the ECDT probe function to provide EC functionality before
     * the namespace has been evaluated.
     *
     * XXX This happens before the sysresource devices have been probed and
     * attached so its resources come from nexus0. In practice, this isn't
     * a problem but should be addressed eventually.
     */
    acpi_ec_ecdt_probe(dev);

    /* Bring device objects and regions online. */
    if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
        device_printf(dev, "Could not initialize ACPI objects: %s\n",
            AcpiFormatException(status));
        goto out;
    }

    /*
     * Setup our sysctl tree.
     *
     * XXX: This doesn't check to make sure that none of these fail.
     */
    sysctl_ctx_init(&sc->acpi_sysctl_ctx);
    sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_name(dev), CTLFLAG_RD, 0, "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
        0, 0, acpi_supported_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
        &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
        &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
        &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
        &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
        &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
        "sleep delay in seconds");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
        &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
        OID_AUTO, "handle_reboot", CTLFLAG_RW,
        &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");

    /*
     * Default to 1 second before sleeping to give some machines time to
     * stabilize.
     */
    sc->acpi_sleep_delay = 1;
    if (bootverbose)
        sc->acpi_verbose = 1;
    if ((env = kern_getenv("hw.acpi.verbose")) != NULL) {
        if (strcmp(env, "0") != 0)
            sc->acpi_verbose = 1;
        freeenv(env);
    }

    /* Only enable reboot by default if the FADT says it is available. */
    if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER)
        sc->acpi_handle_reboot = 1;

    /* Only enable S4BIOS by default if the FACS says it is available. */
    if (AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT)
        sc->acpi_s4bios = 1;

    /* Probe all supported sleep states. */
    acpi_sleep_states[ACPI_STATE_S0] = TRUE;
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
        if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
            __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
            ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
            acpi_sleep_states[state] = TRUE;

    /*
     * Dispatch the default sleep state to devices. The lid switch is set
     * to UNKNOWN by default to avoid surprising users.
     */
    sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
        ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
    sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
    sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
        ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
    sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
        ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;

    /* Pick the first valid sleep state for the sleep button default. */
    sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
    for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
        if (acpi_sleep_states[state]) {
            sc->acpi_sleep_button_sx = state;
            break;
        }

    acpi_enable_fixed_events(sc);

    /*
     * Scan the namespace and attach/initialise children.
     */

    /* Register our shutdown handler. */
    EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
        SHUTDOWN_PRI_LAST);

    /*
     * Register our acpi event handlers.
     * XXX should be configurable eg. via userland policy manager.
     */
    EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
        sc, ACPI_EVENT_PRI_LAST);
    EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
        sc, ACPI_EVENT_PRI_LAST);

    /* Flag our initial states. */
    sc->acpi_enabled = TRUE;
    sc->acpi_sstate = ACPI_STATE_S0;
    sc->acpi_sleep_disabled = TRUE;

    /* Create the control device */
    sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
        "acpi");
    sc->acpi_dev_t->si_drv1 = sc;

    if ((error = acpi_machdep_init(dev)))
        goto out;

    /* Register ACPI again to pass the correct argument of pm_func. */
    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);

    if (!acpi_disabled("bus"))
        acpi_probe_children(dev);

    /* Update all GPEs and enable runtime GPEs. */
    status = AcpiUpdateAllGpes();
    if (ACPI_FAILURE(status))
        device_printf(dev, "Could not update all GPEs: %s\n",
            AcpiFormatException(status));

    /* Allow sleep request after a while. */
    callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0);
    callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME,
        acpi_sleep_enable, sc);

    error = 0;

 out:
    return_VALUE (error);
}
/*
 * Set the D-state of all attached children of 'dev' for the given sleep
 * state, using each child's _SxD mapping when available.
 */
static void
acpi_set_power_children(device_t dev, int state)
{
    device_t *kids;
    device_t kid;
    int idx, nkids, target;

    if (device_get_children(dev, &kids, &nkids) != 0)
        return;

    /*
     * Retrieve and set D-state for the sleep state if _SxD is present.
     * Skip children who aren't attached since they are handled separately.
     */
    for (idx = 0; idx < nkids; idx++) {
        kid = kids[idx];
        target = state;
        if (!device_is_attached(kid))
            continue;
        if (acpi_device_pwr_for_sleep(dev, kid, &target) == 0)
            acpi_set_powerstate(kid, target);
    }
    free(kids, M_TEMP);
}
/*
 * Suspend the bus: suspend all children first, then drop them to D3
 * if every child suspended successfully.
 */
static int
acpi_suspend(device_t dev)
{
    int rv;

    GIANT_REQUIRED;

    rv = bus_generic_suspend(dev);
    if (rv != 0)
        return (rv);
    acpi_set_power_children(dev, ACPI_STATE_D3);
    return (0);
}
/*
 * Resume the bus: restore children to D0 before resuming each of them.
 */
static int
acpi_resume(device_t dev)
{
    int rv;

    GIANT_REQUIRED;

    acpi_set_power_children(dev, ACPI_STATE_D0);
    rv = bus_generic_resume(dev);
    return (rv);
}
/*
 * Shut down the ACPI bus.  Children shut down first, then wake GPEs are
 * prepared for the S5 (soft-off) state.
 */
static int
acpi_shutdown(device_t dev)
{

    GIANT_REQUIRED;

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Enable any GPEs that are able to power-on the system (i.e., RTC).
     * Also, disable any that are not valid for this state (most).
     */
    acpi_wake_prep_walk(ACPI_STATE_S5);

    return (0);
}
/*
 * Handle a new device being added: allocate per-device ivars and an empty
 * resource list, then create the child on the bus.  Returns the new child
 * or NULL on allocation/creation failure.
 */
static device_t
acpi_add_child(device_t bus, u_int order, const char *name, int unit)
{
    struct acpi_device *ivars;
    device_t newdev;

    ivars = malloc(sizeof(*ivars), M_ACPIDEV, M_NOWAIT | M_ZERO);
    if (ivars == NULL)
        return (NULL);
    resource_list_init(&ivars->ad_rl);

    newdev = device_add_child_ordered(bus, order, name, unit);
    if (newdev == NULL) {
        /* Creation failed; release the ivars we allocated for it. */
        free(ivars, M_ACPIDEV);
        return (NULL);
    }
    device_set_ivars(newdev, ivars);
    return (newdev);
}
/*
 * Print a one-line description of a child device: header, port/iomem/
 * irq/drq resources, flags, NUMA domain and footer.  Returns the number
 * of characters printed.
 */
static int
acpi_print_child(device_t bus, device_t child)
{
    struct acpi_device *adev = device_get_ivars(child);
    struct resource_list *reslist = &adev->ad_rl;
    int printed;

    printed = bus_print_child_header(bus, child);
    printed += resource_list_print_type(reslist, "port", SYS_RES_IOPORT,
        "%#lx");
    printed += resource_list_print_type(reslist, "iomem", SYS_RES_MEMORY,
        "%#lx");
    printed += resource_list_print_type(reslist, "irq", SYS_RES_IRQ, "%ld");
    printed += resource_list_print_type(reslist, "drq", SYS_RES_DRQ, "%ld");
    if (device_get_flags(child) != 0)
        printed += printf(" flags %#x", device_get_flags(child));
    printed += bus_print_child_domain(bus, child);
    printed += bus_print_child_footer(bus, child);

    return (printed);
}
/*
 * If this device is an ACPI child but no one claimed it, attempt
 * to power it off. We'll power it back up when a driver is added.
 *
 * XXX Disabled for now since many necessary devices (like fdc and
 * ATA) don't claim the devices we created for them but still expect
 * them to be powered up.
 */
static void
acpi_probe_nomatch(device_t bus, device_t child)
{

    /* Compiled out unless ACPI_ENABLE_POWERDOWN_NODRIVER is defined. */
#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
    acpi_set_powerstate(child, ACPI_STATE_D3);
#endif
}
/*
 * If a new driver has a chance to probe a child, first power it up.
 *
 * XXX Disabled for now (see acpi_probe_nomatch for details).
 */
static void
acpi_driver_added(device_t dev, driver_t *driver)
{
    device_t child, *devlist;
    int i, numdevs;

    DEVICE_IDENTIFY(driver, dev);
    if (device_get_children(dev, &devlist, &numdevs))
        return;

    /* Retry probe/attach for every child that has no driver yet. */
    for (i = 0; i < numdevs; i++) {
        child = devlist[i];
        if (device_get_state(child) == DS_NOTPRESENT) {
#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
            /* Power up for the probe; power back down if nothing attached. */
            acpi_set_powerstate(child, ACPI_STATE_D0);
            if (device_probe_and_attach(child) != 0)
                acpi_set_powerstate(child, ACPI_STATE_D3);
#else
            device_probe_and_attach(child);
#endif
        }
    }
    free(devlist, M_TEMP);
}
/*
 * Location hint for devctl(8): formats "handle=<acpi path>" plus the
 * device's _PXM proximity domain when one is present, or "unknown" if
 * the child has no ACPI handle.  Always returns 0.
 *
 * Fix: use sizeof(buf2) instead of repeating the magic constant 32, so
 * the bound cannot silently drift from the buffer declaration.
 */
static int
acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
    size_t buflen)
{
    struct acpi_device *dinfo = device_get_ivars(child);
    char buf2[32];
    int pxm;

    if (dinfo->ad_handle != NULL) {
        snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
        if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) {
            snprintf(buf2, sizeof(buf2), " _PXM=%d", pxm);
            strlcat(buf, buf2, buflen);
        }
    } else {
        snprintf(buf, buflen, "unknown");
    }
    return (0);
}
/* PnP information for devctl(8) */
static int
acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
    size_t buflen)
{
    struct acpi_device *dinfo = device_get_ivars(child);
    ACPI_DEVICE_INFO *adinfo;

    /* Without object info we can only report "unknown"; still success. */
    if (ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo))) {
        snprintf(buf, buflen, "unknown");
        return (0);
    }

    /* _HID falls back to "none"; _UID is parsed as a decimal number. */
    snprintf(buf, buflen, "_HID=%s _UID=%lu",
        (adinfo->Valid & ACPI_VALID_HID) ?
        adinfo->HardwareId.String : "none",
        (adinfo->Valid & ACPI_VALID_UID) ?
        strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL);

    /* AcpiGetObjectInfo allocates adinfo; caller must free it. */
    AcpiOsFree(adinfo);

    return (0);
}
/*
 * Handle per-device ivars.
 *
 * Returns ACPI ivars (handle, private pointer, flags) and ISA-compatibility
 * ivars so ISA drivers can probe ACPI-enumerated devices.  Returns ENOENT
 * for unknown indices or children without ivars.
 */
static int
acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_device *ad;

    if ((ad = device_get_ivars(child)) == NULL) {
        device_printf(child, "device has no ivars\n");
        return (ENOENT);
    }

    /* ACPI and ISA compatibility ivars */
    switch(index) {
    case ACPI_IVAR_HANDLE:
        *(ACPI_HANDLE *)result = ad->ad_handle;
        break;
    case ACPI_IVAR_PRIVATE:
        *(void **)result = ad->ad_private;
        break;
    case ACPI_IVAR_FLAGS:
        *(int *)result = ad->ad_flags;
        break;
    case ISA_IVAR_VENDORID:
    case ISA_IVAR_SERIAL:
    case ISA_IVAR_COMPATID:
        /* Not available for ACPI-enumerated devices. */
        *(int *)result = -1;
        break;
    case ISA_IVAR_LOGICALID:
        *(int *)result = acpi_isa_get_logicalid(child);
        break;
    default:
        return (ENOENT);
    }

    return (0);
}
/*
 * Write a per-device ivar.  Only the ACPI handle, private pointer and
 * flags are writable; any other index is a programming error and panics.
 */
static int
acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
    struct acpi_device *ad;

    if ((ad = device_get_ivars(child)) == NULL) {
        device_printf(child, "device has no ivars\n");
        return (ENOENT);
    }

    switch(index) {
    case ACPI_IVAR_HANDLE:
        ad->ad_handle = (ACPI_HANDLE)value;
        break;
    case ACPI_IVAR_PRIVATE:
        ad->ad_private = (void *)value;
        break;
    case ACPI_IVAR_FLAGS:
        ad->ad_flags = (int)value;
        break;
    default:
        panic("bad ivar write request (%d)", index);
        /* NOTREACHED: return only to satisfy the compiler after panic. */
        return (ENOENT);
    }

    return (0);
}
/*
 * Handle child resource allocation/removal: hand back the child's
 * per-device resource list stored in its ivars.
 */
static struct resource_list *
acpi_get_rlist(device_t dev, device_t child)
{
    struct acpi_device *adev = device_get_ivars(child);

    return (&adev->ad_rl);
}
/*
 * Return 1 if the device's resource list contains an entry of the given
 * type whose range covers 'value', 0 otherwise.
 */
static int
acpi_match_resource_hint(device_t dev, int type, long value)
{
    struct acpi_device *adev = device_get_ivars(dev);
    struct resource_list_entry *entry;

    STAILQ_FOREACH(entry, &adev->ad_rl, link) {
        if (entry->type == type && entry->start <= value &&
            value <= entry->end)
            return (1);
    }
    return (0);
}
/*
 * Wire device unit numbers based on resource matches in hints.
 *
 * Walks every "<name>N at acpi0/isa0" hint and wires *unitp to the first
 * hinted unit whose port/maddr/irq/drq values all fall inside this child's
 * resource list.  A port or memory match alone is sufficient (see below).
 */
static void
acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
    int *unitp)
{
    const char *s;
    long value;
    int line, matches, unit;

    /*
     * Iterate over all the hints for the devices with the specified
     * name to see if one's resources are a subset of this device.
     */
    line = 0;
    for (;;) {
        if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
            break;

        /* Must have an "at" for acpi or isa. */
        resource_string_value(name, unit, "at", &s);
        if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
            strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
            continue;

        /*
         * Check for matching resources. We must have at least one match.
         * Since I/O and memory resources cannot be shared, if we get a
         * match on either of those, ignore any mismatches in IRQs or DRQs.
         *
         * XXX: We may want to revisit this to be more lenient and wire
         * as long as it gets one match.
         */
        matches = 0;
        if (resource_long_value(name, unit, "port", &value) == 0) {
            /*
             * Floppy drive controllers are notorious for having a
             * wide variety of resources not all of which include the
             * first port that is specified by the hint (typically
             * 0x3f0) (see the comment above fdc_isa_alloc_resources()
             * in fdc_isa.c). However, they do all seem to include
             * port + 2 (e.g. 0x3f2) so for a floppy device, look for
             * 'value + 2' in the port resources instead of the hint
             * value.
             */
            if (strcmp(name, "fdc") == 0)
                value += 2;
            if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
                matches++;
            else
                continue;
        }
        if (resource_long_value(name, unit, "maddr", &value) == 0) {
            if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
                matches++;
            else
                continue;
        }

        /* A port or memory match wins outright; skip IRQ/DRQ checks. */
        if (matches > 0)
            goto matched;

        if (resource_long_value(name, unit, "irq", &value) == 0) {
            if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
                matches++;
            else
                continue;
        }
        if (resource_long_value(name, unit, "drq", &value) == 0) {
            if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
                matches++;
            else
                continue;
        }

    matched:
        if (matches > 0) {
            /* We have a winner! */
            *unitp = unit;
            break;
        }
    }
}
/*
 * Fetch the NUMA domain for the given device.
 *
 * If a device has a _PXM method, map that to a NUMA domain.
 *
 * If none is found, then it'll call the parent method.
 * If there's no domain, return ENOENT.
 */
int
acpi_get_domain(device_t dev, device_t child, int *domain)
{
#if MAXMEMDOM > 1
    ACPI_HANDLE h;
    int d, pxm;

    h = acpi_get_handle(child);
    if ((h != NULL) &&
        ACPI_SUCCESS(acpi_GetInteger(h, "_PXM", &pxm))) {
        /* A _PXM that maps to no VM domain is treated as "no domain". */
        d = acpi_map_pxm_to_vm_domainid(pxm);
        if (d < 0)
            return (ENOENT);
        *domain = d;
        return (0);
    }
#endif
    /* No _PXM node; go up a level */
    return (bus_generic_get_domain(dev, child, domain));
}
/*
* Pre-allocate/manage all memory and IO resources. Since rman can't handle
* duplicates, we merge any in the sysresource attach routine.
*/
static int
acpi_sysres_alloc(device_t dev)
{
struct resource *res;
struct resource_list *rl;
struct resource_list_entry *rle;
struct rman *rm;
/*
 * NOTE(review): this local table duplicates the file-scope sysres_ids
 * declared further down — presumably for historical reasons; confirm
 * before consolidating.
 */
char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
device_t *children;
int child_count, i;
/*
 * Probe/attach any sysresource devices. This would be unnecessary if we
 * had multi-pass probe/attach.
 */
if (device_get_children(dev, &children, &child_count) != 0)
return (ENXIO);
for (i = 0; i < child_count; i++) {
if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
device_probe_and_attach(children[i]);
}
free(children, M_TEMP);
/*
 * Walk the resource list our parent holds for us; each memory or I/O
 * range is pre-allocated from the parent and handed to the matching
 * local rman so children can later sub-allocate from it.
 */
rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
STAILQ_FOREACH(rle, rl, link) {
if (rle->res != NULL) {
device_printf(dev, "duplicate resource for %lx\n", rle->start);
continue;
}
/* Only memory and IO resources are valid here. */
switch (rle->type) {
case SYS_RES_IOPORT:
rm = &acpi_rman_io;
break;
case SYS_RES_MEMORY:
rm = &acpi_rman_mem;
break;
default:
continue;
}
/* Pre-allocate resource and add to our rman pool. */
res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
&rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
if (res != NULL) {
rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
rle->res = res;
} else
device_printf(dev, "reservation of %lx, %lx (%d) failed\n",
rle->start, rle->count, rle->type);
}
return (0);
}
/* PNP IDs identifying PCI interrupt link devices. */
static char *pcilink_ids[] = { "PNP0C0F", NULL };
/* PNP IDs identifying system resource holder devices. */
static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
/*
* Reserve declared resources for devices found during attach once system
* resources have been allocated.
*/
static void
acpi_reserve_resources(device_t dev)
{
struct resource_list_entry *rle;
struct resource_list *rl;
struct acpi_device *ad;
struct acpi_softc *sc;
device_t *children;
int child_count, i;
sc = device_get_softc(dev);
if (device_get_children(dev, &children, &child_count) != 0)
return;
/* Walk each direct child's resource list, reserving eligible entries. */
for (i = 0; i < child_count; i++) {
ad = device_get_ivars(children[i]);
rl = &ad->ad_rl;
/* Don't reserve system resources. */
if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
continue;
STAILQ_FOREACH(rle, rl, link) {
/*
* Don't reserve IRQ resources. There are many sticky things
* to get right otherwise (e.g. IRQs for psm, atkbd, and HPET
* when using legacy routing).
*/
if (rle->type == SYS_RES_IRQ)
continue;
/*
* Don't reserve the resource if it is already allocated.
* The acpi_ec(4) driver can allocate its resources early
* if ECDT is present.
*/
if (rle->res != NULL)
continue;
/*
* Try to reserve the resource from our parent. If this
* fails because the resource is a system resource, just
* let it be. The resource range is already reserved so
* that other devices will not use it. If the driver
* needs to allocate the resource, then
* acpi_alloc_resource() will sub-alloc from the system
* resource.
*/
resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid,
rle->start, rle->end, rle->count, 0);
}
}
free(children, M_TEMP);
/* From here on, acpi_set_resource() reserves new entries as they appear. */
sc->acpi_resources_reserved = 1;
}
/*
 * Record a resource range for a child in its resource list and, once the
 * system resources have been allocated, reserve it from our parent.
 * Several classes of bogus BIOS-provided entries are filtered out first.
 * Returns 0 on success (including the filtered cases) or EBUSY if the
 * child already has this resource allocated.
 */
static int
acpi_set_resource(device_t dev, device_t child, int type, int rid,
u_long start, u_long count)
{
struct acpi_softc *sc = device_get_softc(dev);
struct acpi_device *ad = device_get_ivars(child);
struct resource_list *rl = &ad->ad_rl;
ACPI_DEVICE_INFO *devinfo;
u_long end;
/* Ignore IRQ resources for PCI link devices. */
if (type == SYS_RES_IRQ && ACPI_ID_PROBE(dev, child, pcilink_ids) != NULL)
return (0);
/*
* Ignore most resources for PCI root bridges. Some BIOSes
* incorrectly enumerate the memory ranges they decode as plain
* memory resources instead of as ResourceProducer ranges. Other
* BIOSes incorrectly list system resource entries for I/O ranges
* under the PCI bridge. Do allow the one known-correct case on
* x86 of a PCI bridge claiming the I/O ports used for PCI config
* access.
*/
if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
if (!(type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT))
#endif
{
AcpiOsFree(devinfo);
return (0);
}
}
AcpiOsFree(devinfo);
}
}
/* If the resource is already allocated, fail. */
if (resource_list_busy(rl, type, rid))
return (EBUSY);
/* If the resource is already reserved, release it. */
if (resource_list_reserved(rl, type, rid))
resource_list_unreserve(rl, dev, child, type, rid);
/* Add the resource. */
end = (start + count - 1);
resource_list_add(rl, type, rid, start, end, count);
/* Don't reserve resources until the system resources are allocated. */
if (!sc->acpi_resources_reserved)
return (0);
/* Don't reserve system resources. */
if (ACPI_ID_PROBE(dev, child, sysres_ids) != NULL)
return (0);
/*
* Don't reserve IRQ resources. There are many sticky things to
* get right otherwise (e.g. IRQs for psm, atkbd, and HPET when
* using legacy routing).
*/
if (type == SYS_RES_IRQ)
return (0);
/*
* Reserve the resource.
*
* XXX: Ignores failure for now. Failure here is probably a
* BIOS/firmware bug?
*/
resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0);
return (0);
}
/*
 * Bus method: allocate a resource for a child.  Direct children go
 * through their resource list (honoring reservations); other descendants
 * are passed up to our parent.  If the normal path fails, fall back to
 * sub-allocating a specific range from the ACPI system resource pools.
 */
static struct resource *
acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
u_long start, u_long end, u_long count, u_int flags)
{
ACPI_RESOURCE ares;
struct acpi_device *ad;
struct resource_list_entry *rle;
struct resource_list *rl;
struct resource *res;
int isdefault = (start == 0UL && end == ~0UL);
/*
* First attempt at allocating the resource. For direct children,
* use resource_list_alloc() to handle reserved resources. For
* other devices, pass the request up to our parent.
*/
if (bus == device_get_parent(child)) {
ad = device_get_ivars(child);
rl = &ad->ad_rl;
/*
* Simulate the behavior of the ISA bus for direct children
* devices. That is, if a non-default range is specified for
* a resource that doesn't exist, use bus_set_resource() to
* add the resource before allocating it. Note that these
* resources will not be reserved.
*/
if (!isdefault && resource_list_find(rl, type, *rid) == NULL)
resource_list_add(rl, type, *rid, start, end, count);
res = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
flags);
if (res != NULL && type == SYS_RES_IRQ) {
/*
* Since bus_config_intr() takes immediate effect, we cannot
* configure the interrupt associated with a device when we
* parse the resources but have to defer it until a driver
* actually allocates the interrupt via bus_alloc_resource().
*
* XXX: Should we handle the lookup failing?
*/
if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
acpi_config_intr(child, &ares);
}
/*
* If this is an allocation of the "default" range for a given
* RID, fetch the exact bounds for this resource from the
* resource list entry to try to allocate the range from the
* system resource regions.
*/
if (res == NULL && isdefault) {
rle = resource_list_find(rl, type, *rid);
if (rle != NULL) {
start = rle->start;
end = rle->end;
count = rle->count;
}
}
} else
res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
start, end, count, flags);
/*
* If the first attempt failed and this is an allocation of a
* specific range, try to satisfy the request via a suballocation
* from our system resource regions.
*/
if (res == NULL && start + count - 1 == end)
res = acpi_alloc_sysres(child, type, rid, start, end, count, flags);
return (res);
}
/*
* Attempt to allocate a specific resource range from the system
* resource ranges. Note that we only handle memory and I/O port
* system resources.
*/
struct resource *
acpi_alloc_sysres(device_t child, int type, int *rid, u_long start, u_long end,
u_long count, u_int flags)
{
struct rman *rm;
struct resource *res;
/* Pick the local rman pool for this resource type. */
switch (type) {
case SYS_RES_IOPORT:
rm = &acpi_rman_io;
break;
case SYS_RES_MEMORY:
rm = &acpi_rman_mem;
break;
default:
return (NULL);
}
KASSERT(start + count - 1 == end, ("wildcard resource range"));
/* Reserve without RF_ACTIVE; activation is handled explicitly below. */
res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
child);
if (res == NULL)
return (NULL);
rman_set_rid(res, *rid);
/* If requested, activate the resource using the parent's method. */
if (flags & RF_ACTIVE)
if (bus_activate_resource(child, type, *rid, res) != 0) {
/* Activation failed; give the range back to the pool. */
rman_release_resource(res);
return (NULL);
}
return (res);
}
static int
acpi_is_resource_managed(int type, struct resource *r)
{

    /*
     * Only I/O port and memory resources come from our local rman
     * pools; everything else is unmanaged here.
     */
    if (type == SYS_RES_IOPORT)
        return (rman_is_region_manager(r, &acpi_rman_io));
    if (type == SYS_RES_MEMORY)
        return (rman_is_region_manager(r, &acpi_rman_mem));
    return (0);
}
static int
acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
    u_long start, u_long end)
{

    /* Resources outside our rman pools take the generic bus path. */
    if (!acpi_is_resource_managed(type, r))
        return (bus_generic_adjust_resource(bus, child, type, r,
            start, end));
    return (rman_adjust_resource(r, start, end));
}
static int
acpi_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
    int error;

    /*
     * Resources that do not belong to one of our internal managers are
     * released through the generic resource-list path.
     */
    if (!acpi_is_resource_managed(type, r))
        return (bus_generic_rl_release_resource(bus, child, type, rid, r));

    /* Deactivate first if the resource is still active. */
    if ((rman_get_flags(r) & RF_ACTIVE) != 0) {
        error = bus_deactivate_resource(child, type, rid, r);
        if (error != 0)
            return (error);
    }

    /* Hand the range back to the local pool. */
    return (rman_release_resource(r));
}
static void
acpi_delete_resource(device_t bus, device_t child, int type, int rid)
{
    struct resource_list *rl = acpi_get_rlist(bus, child);

    /* Refuse to delete a resource the child still has allocated. */
    if (resource_list_busy(rl, type, rid)) {
        device_printf(bus,
            "delete_resource: Resource still owned by child (type=%d, rid=%d)\n",
            type, rid);
        return;
    }

    /* Drop any reservation we hold, then remove the list entry. */
    resource_list_unreserve(rl, bus, child, type, rid);
    resource_list_delete(rl, type, rid);
}
/* Allocate an IO port or memory resource, given its GAS. */
/*
 * Allocate and activate an I/O port or memory resource described by a
 * Generic Address Structure.  On success, *type and *res are filled in
 * and 0 is returned; otherwise an errno is returned and no resource is
 * left registered on the device.
 */
int
acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
struct resource **res, u_int flags)
{
int error, res_type;
error = ENOMEM;
if (type == NULL || rid == NULL || gas == NULL || res == NULL)
return (EINVAL);
/* We only support memory and IO spaces. */
switch (gas->SpaceId) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
res_type = SYS_RES_MEMORY;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
res_type = SYS_RES_IOPORT;
break;
default:
return (EOPNOTSUPP);
}
/*
* If the register width is less than 8, assume the BIOS author means
* it is a bit field and just allocate a byte.
*/
if (gas->BitWidth && gas->BitWidth < 8)
gas->BitWidth = 8;
/* Validate the address after we're sure we support the space. */
if (gas->Address == 0 || gas->BitWidth == 0)
return (EINVAL);
/* Register the range on the device, then allocate it. */
bus_set_resource(dev, res_type, *rid, gas->Address,
gas->BitWidth / 8);
*res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
if (*res != NULL) {
*type = res_type;
error = 0;
} else
bus_delete_resource(dev, res_type, *rid);
return (error);
}
/* Probe _HID and _CID for compatible ISA PNP ids. */
/*
 * Return the device's _HID as a compressed EISA PNP id, or 0 if the
 * device has no handle, no valid _HID, or a _HID that is not in
 * EISA-id form.
 */
static uint32_t
acpi_isa_get_logicalid(device_t dev)
{
ACPI_DEVICE_INFO *devinfo;
ACPI_HANDLE h;
uint32_t pnpid;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
/* Fetch and validate the HID. */
if ((h = acpi_get_handle(dev)) == NULL ||
ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
return_VALUE (0);
pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 &&
devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ?
PNP_EISAID(devinfo->HardwareId.String) : 0;
AcpiOsFree(devinfo);
return_VALUE (pnpid);
}
/*
 * Fill cids[] with up to 'count' compressed EISA PNP ids taken from the
 * device's _CID list; only "PNP"-prefixed, EISA-id-sized entries are
 * kept.  Returns the number of ids stored.
 */
static int
acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
{
ACPI_DEVICE_INFO *devinfo;
ACPI_PNP_DEVICE_ID *ids;
ACPI_HANDLE h;
uint32_t *pnpid;
int i, valid;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
pnpid = cids;
/* Fetch and validate the CID */
if ((h = acpi_get_handle(dev)) == NULL ||
ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
return_VALUE (0);
if ((devinfo->Valid & ACPI_VALID_CID) == 0) {
AcpiOsFree(devinfo);
return_VALUE (0);
}
/* Never copy more entries than the caller's array can hold. */
if (devinfo->CompatibleIdList.Count < count)
count = devinfo->CompatibleIdList.Count;
ids = devinfo->CompatibleIdList.Ids;
for (i = 0, valid = 0; i < count; i++)
if (ids[i].Length >= ACPI_EISAID_STRING_SIZE &&
strncmp(ids[i].String, "PNP", 3) == 0) {
*pnpid++ = PNP_EISAID(ids[i].String);
valid++;
}
AcpiOsFree(devinfo);
return_VALUE (valid);
}
static char *
acpi_device_id_probe(device_t bus, device_t dev, char **ids)
{
    ACPI_OBJECT_TYPE type;
    ACPI_HANDLE handle;
    char **idp;

    handle = acpi_get_handle(dev);
    if (ids == NULL || handle == NULL)
        return (NULL);

    /* Only device and processor nodes are candidates for ID matching. */
    type = acpi_get_type(dev);
    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR)
        return (NULL);

    /* Return the first requested ID that matches the HID or a CID. */
    for (idp = ids; *idp != NULL; idp++)
        if (acpi_MatchHid(handle, *idp))
            return (*idp);
    return (NULL);
}
static ACPI_STATUS
acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
    ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
{
    ACPI_HANDLE h;

    /* A NULL device means "evaluate relative to the namespace root". */
    if (dev != NULL) {
        h = acpi_get_handle(dev);
        if (h == NULL)
            return (AE_BAD_PARAMETER);
    } else
        h = ACPI_ROOT_OBJECT;
    return (AcpiEvaluateObject(h, pathname, parameters, ret));
}
/*
 * Determine the power state a device should be placed in for the
 * system sleep state currently being entered, by evaluating the
 * device's _SxD method.  Returns 0 on success (with *dstate possibly
 * updated) or ENXIO for invalid arguments, blacklisted devices, or a
 * failed _SxD evaluation.
 */
int
acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
{
struct acpi_softc *sc;
ACPI_HANDLE handle;
ACPI_STATUS status;
char sxd[8];
handle = acpi_get_handle(dev);
/*
* XXX If we find these devices, don't try to power them down.
* The serial and IRDA ports on my T23 hang the system when
* set to D3 and it appears that such legacy devices may
* need special handling in their drivers.
*/
if (dstate == NULL || handle == NULL ||
acpi_MatchHid(handle, "PNP0500") ||
acpi_MatchHid(handle, "PNP0501") ||
acpi_MatchHid(handle, "PNP0502") ||
acpi_MatchHid(handle, "PNP0510") ||
acpi_MatchHid(handle, "PNP0511"))
return (ENXIO);
/*
* Override next state with the value from _SxD, if present.
* Note illegal _S0D is evaluated because some systems expect this.
*/
sc = device_get_softc(bus);
snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
status = acpi_GetInteger(handle, sxd, dstate);
/* A missing _SxD is fine; any other failure is reported. */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
device_printf(dev, "failed to get %s on %s: %s\n", sxd,
acpi_name(handle), AcpiFormatException(status));
return (ENXIO);
}
return (0);
}
/* Callback arg for our implementation of walking the namespace. */
struct acpi_device_scan_ctx {
acpi_scan_cb_t user_fn; /* user callback run for each namespace node */
void *arg; /* opaque argument passed through to user_fn */
ACPI_HANDLE parent; /* handle where the scan began; skipped by the walk */
};
/*
 * AcpiWalkNamespace callback: invoke the user's scan function for each
 * eligible node and, if the callback swaps the associated device_t,
 * rewire the handle<->device association accordingly.
 */
static ACPI_STATUS
acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
{
struct acpi_device_scan_ctx *ctx;
device_t dev, old_dev;
ACPI_STATUS status;
ACPI_OBJECT_TYPE type;
/*
* Skip this device if we think we'll have trouble with it or it is
* the parent where the scan began.
*/
ctx = (struct acpi_device_scan_ctx *)arg;
if (acpi_avoid(h) || h == ctx->parent)
return (AE_OK);
/* If this is not a valid device type (e.g., a method), skip it. */
if (ACPI_FAILURE(AcpiGetType(h, &type)))
return (AE_OK);
if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
return (AE_OK);
/*
* Call the user function with the current device. If it is unchanged
* afterwards, return. Otherwise, we update the handle to the new dev.
*/
old_dev = acpi_get_device(h);
dev = old_dev;
status = ctx->user_fn(h, &dev, level, ctx->arg);
if (ACPI_FAILURE(status) || old_dev == dev)
return (status);
/* Remove the old child and its connection to the handle. */
if (old_dev != NULL) {
device_delete_child(device_get_parent(old_dev), old_dev);
AcpiDetachData(h, acpi_fake_objhandler);
}
/* Recreate the handle association if the user created a device. */
if (dev != NULL)
AcpiAttachData(h, acpi_fake_objhandler, dev);
return (AE_OK);
}
static ACPI_STATUS
acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
    acpi_scan_cb_t user_fn, void *arg)
{
    struct acpi_device_scan_ctx ctx;
    ACPI_HANDLE root;

    if (acpi_disabled("children"))
        return (AE_OK);

    /* A NULL device means "scan from the namespace root". */
    if (dev == NULL)
        root = ACPI_ROOT_OBJECT;
    else {
        root = acpi_get_handle(dev);
        if (root == NULL)
            return (AE_BAD_PARAMETER);
    }

    /* Package the callback state and walk the subtree. */
    ctx.user_fn = user_fn;
    ctx.arg = arg;
    ctx.parent = root;
    return (AcpiWalkNamespace(ACPI_TYPE_ANY, root, max_depth,
        acpi_device_scan_cb, NULL, &ctx, NULL));
}
/*
* Even though ACPI devices are not PCI, we use the PCI approach for setting
* device power states since it's close enough to ACPI.
*/
static int
acpi_set_powerstate(device_t child, int state)
{
    ACPI_STATUS status;
    ACPI_HANDLE h;

    if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
        return (EINVAL);

    /* A child without an ACPI handle has nothing to switch. */
    h = acpi_get_handle(child);
    if (h == NULL)
        return (0);

    status = acpi_pwr_switch_consumer(h, state);
    if (ACPI_FAILURE(status)) {
        /* Absent power methods are ignored; other errors are reported. */
        if (status != AE_NOT_FOUND)
            device_printf(child,
                "failed to set ACPI power state D%d on %s: %s\n", state,
                acpi_name(h), AcpiFormatException(status));
    } else if (bootverbose)
        device_printf(child, "set ACPI power state D%d on %s\n",
            state, acpi_name(h));
    return (0);
}
/*
 * Probe helper for ISA-style drivers: match the child's _HID/_CID PNP
 * ids against the supplied table.  Returns 0 on a match (setting the
 * device description from the matching entry) or ENXIO otherwise.
 */
static int
acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
{
int result, cid_count, i;
uint32_t lid, cids[8];
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
/*
* ISA-style drivers attached to ACPI may persist and
* probe manually if we return ENOENT. We never want
* that to happen, so don't ever return it.
*/
result = ENXIO;
/* Scan the supplied IDs for a match */
lid = acpi_isa_get_logicalid(child);
cid_count = acpi_isa_get_compatid(child, cids, 8);
while (ids && ids->ip_id) {
if (lid == ids->ip_id) {
result = 0;
goto out;
}
for (i = 0; i < cid_count; i++) {
if (cids[i] == ids->ip_id) {
result = 0;
goto out;
}
}
ids++;
}
out:
/* On a match, 'ids' still points at the entry that matched. */
if (result == 0 && ids->ip_desc)
device_set_desc(child, ids->ip_desc);
return_VALUE (result);
}
#if defined(__i386__) || defined(__amd64__)
/*
* Look for a MCFG table. If it is present, use the settings for
* domain (segment) 0 to setup PCI config space access via the memory
* map.
*/
static void
acpi_enable_pcie(void)
{
ACPI_TABLE_HEADER *hdr;
ACPI_MCFG_ALLOCATION *alloc, *end;
ACPI_STATUS status;
/* No MCFG table means no memory-mapped config access; nothing to do. */
status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
if (ACPI_FAILURE(status))
return;
/* The allocation entries follow the fixed MCFG table header. */
end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
while (alloc < end) {
if (alloc->PciSegment == 0) {
pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
alloc->EndBusNumber);
return;
}
alloc++;
}
}
#endif
/*
* Scan all of the ACPI namespace and attach child devices.
*
* We should only expect to find devices in the \_PR, \_TZ, \_SI, and
* \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
* However, in violation of the spec, some systems place their PCI link
* devices in \, so we have to walk the whole namespace. We check the
* type of namespace nodes, so this should be ok.
*/
static void
acpi_probe_children(device_t bus)
{
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
/*
* Scan the namespace and insert placeholders for all the devices that
* we find. We also probe/attach any early devices.
*
* Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
* we want to create nodes for all devices, not just those that are
* currently present. (This assumes that we don't want to create/remove
* devices as they appear, which might be smarter.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
NULL, bus, NULL);
/* Pre-allocate resources for our rman from any sysresource devices. */
acpi_sysres_alloc(bus);
/* Reserve resources already allocated to children. */
acpi_reserve_resources(bus);
/* Create any static children by calling device identify methods. */
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
bus_generic_probe(bus);
/* Probe/attach all children, created statically and from the namespace. */
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
bus_generic_attach(bus);
/* Attach wake sysctls. */
acpi_wake_sysctl_walk(bus);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
return_VOID;
}
/*
* Determine the probe order for a given device.
*/
static void
acpi_probe_order(ACPI_HANDLE handle, int *order)
{
    /*
     * Probe ordering for special devices; anything unmatched keeps
     * the caller's default order:
     * 0. CPUs
     * 1. I/O port and memory system resource holders
     * 2. Clocks and timers (to handle early accesses)
     * 3. Embedded controllers (to handle early accesses)
     * 4. PCI Link Devices
     */
    static const struct {
        const char *hid;
        int order;
    } hid_orders[] = {
        { "PNP0C01", 1 }, { "PNP0C02", 1 },
        { "PNP0100", 2 }, { "PNP0103", 2 }, { "PNP0B00", 2 },
        { "PNP0C09", 3 },
        { "PNP0C0F", 4 },
    };
    ACPI_OBJECT_TYPE type;
    unsigned int i;

    AcpiGetType(handle, &type);
    if (type == ACPI_TYPE_PROCESSOR) {
        *order = 0;
        return;
    }
    for (i = 0; i < sizeof(hid_orders) / sizeof(hid_orders[0]); i++) {
        if (acpi_MatchHid(handle, hid_orders[i].hid)) {
            *order = hid_orders[i].order;
            return;
        }
    }
}
/*
* Evaluate a child device and determine whether we might attach a device to
* it.
*/
static ACPI_STATUS
acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
struct acpi_prw_data prw;
ACPI_OBJECT_TYPE type;
ACPI_HANDLE h;
device_t bus, child;
char *handle_str;
int order;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (acpi_disabled("children"))
return_ACPI_STATUS (AE_OK);
/* Skip this device if we think we'll have trouble with it. */
if (acpi_avoid(handle))
return_ACPI_STATUS (AE_OK);
bus = (device_t)context;
if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
handle_str = acpi_name(handle);
switch (type) {
case ACPI_TYPE_DEVICE:
/*
* Since we scan from \, be sure to skip system scope objects.
* \_SB_ and \_TZ_ are defined in ACPICA as devices to work around
* BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run
* during the initialization and \_TZ_ is to support Notify() on it.
*/
if (strcmp(handle_str, "\\_SB_") == 0 ||
strcmp(handle_str, "\\_TZ_") == 0)
break;
/* Register the device's wake GPE, if it declares one via _PRW. */
if (acpi_parse_prw(handle, &prw) == 0)
AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);
/*
* Ignore devices that do not have a _HID or _CID. They should
* be discovered by other buses (e.g. the PCI bus driver).
*/
if (!acpi_has_hid(handle))
break;
/* FALLTHROUGH */
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
case ACPI_TYPE_POWER:
/*
* Create a placeholder device for this node. Sort the
* placeholder so that the probe/attach passes will run
* breadth-first. Orders less than ACPI_DEV_BASE_ORDER
* are reserved for special objects (i.e., system
* resources).
*/
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
order = level * 10 + ACPI_DEV_BASE_ORDER;
acpi_probe_order(handle, &order);
child = BUS_ADD_CHILD(bus, order, NULL, -1);
if (child == NULL)
break;
/* Associate the handle with the device_t and vice versa. */
acpi_set_handle(child, handle);
AcpiAttachData(handle, acpi_fake_objhandler, child);
/*
* Check that the device is present. If it's not present,
* leave it disabled (so that we have a device_t attached to
* the handle, but we don't probe it).
*
* XXX PCI link devices sometimes report "present" but not
* "functional" (i.e. if disabled). Go ahead and probe them
* anyway since we may enable them later.
*/
if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
/* Never disable PCI link devices. */
if (acpi_MatchHid(handle, "PNP0C0F"))
break;
/*
* Docking stations should remain enabled since the system
* may be undocked at boot.
*/
if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
break;
device_disable(child);
break;
}
/*
* Get the device's resource settings and attach them.
* Note that if the device has _PRS but no _CRS, we need
* to decide when it's appropriate to try to configure the
* device. Ignore the return value here; it's OK for the
* device not to have any resources.
*/
acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
break;
}
}
return_ACPI_STATUS (AE_OK);
}
/*
* AcpiAttachData() requires an object handler but never uses it. This is a
* placeholder object handler so we can store a device_t in an ACPI_HANDLE.
*/
void
acpi_fake_objhandler(ACPI_HANDLE h, void *data)
{
/* Intentionally empty: exists only so AcpiAttachData() has a handler. */
}
/*
 * Final shutdown hook: power off via S5, reboot via the ACPI reset
 * register, or (optionally) disable ACPI, depending on 'howto' and the
 * softc configuration.
 */
static void
acpi_shutdown_final(void *arg, int howto)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
register_t intr;
ACPI_STATUS status;
/*
* XXX Shutdown code should only run on the BSP (cpuid 0).
* Some chipsets do not power off the system correctly if called from
* an AP.
*/
if ((howto & RB_POWEROFF) != 0) {
status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
AcpiFormatException(status));
return;
}
device_printf(sc->acpi_dev, "Powering system off\n");
/* Interrupts stay off through the S5 entry attempt. */
intr = intr_disable();
status = AcpiEnterSleepState(ACPI_STATE_S5);
if (ACPI_FAILURE(status)) {
intr_restore(intr);
device_printf(sc->acpi_dev, "power-off failed - %s\n",
AcpiFormatException(status));
} else {
/* S5 entry "succeeded" but we are still running: give it a second. */
DELAY(1000000);
intr_restore(intr);
device_printf(sc->acpi_dev, "power-off failed - timeout\n");
}
} else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) {
/* Reboot using the reset register. */
status = AcpiReset();
if (ACPI_SUCCESS(status)) {
DELAY(1000000);
device_printf(sc->acpi_dev, "reset failed - timeout\n");
} else if (status != AE_NOT_EXIST)
device_printf(sc->acpi_dev, "reset failed - %s\n",
AcpiFormatException(status));
} else if (sc->acpi_do_disable && panicstr == NULL) {
/*
* Only disable ACPI if the user requested. On some systems, writing
* the disable value to SMI_CMD hangs the system.
*/
device_printf(sc->acpi_dev, "Shutting down\n");
AcpiTerminate();
}
}
/*
 * Install handlers for the fixed power and sleep button events unless
 * the FADT says the platform uses control-method buttons instead.
 * The static first_time flag limits the boot-time announcement to the
 * first invocation.
 */
static void
acpi_enable_fixed_events(struct acpi_softc *sc)
{
static int first_time = 1;
/* Enable and clear fixed events and install handlers. */
if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
acpi_event_power_button_sleep, sc);
if (first_time)
device_printf(sc->acpi_dev, "Power Button (fixed)\n");
}
if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
acpi_event_sleep_button_sleep, sc);
if (first_time)
device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
}
first_time = 0;
}
/*
* Returns true if the device is actually present and should
* be attached to. This requires the present, enabled, UI-visible
* and diagnostics-passed bits to be set.
*/
BOOLEAN
acpi_DeviceIsPresent(device_t dev)
{
    ACPI_DEVICE_INFO *devinfo;
    ACPI_HANDLE h;
    BOOLEAN result;

    h = acpi_get_handle(dev);
    if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
        return (FALSE);

    /* Devices lacking a _STA method are assumed to be present. */
    if ((devinfo->Valid & ACPI_VALID_STA) == 0)
        result = TRUE;
    else
        result = ACPI_DEVICE_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
    AcpiOsFree(devinfo);
    return (result);
}
/*
* Returns true if the battery is actually present and inserted.
*/
BOOLEAN
acpi_BatteryIsPresent(device_t dev)
{
    ACPI_DEVICE_INFO *devinfo;
    ACPI_HANDLE h;
    BOOLEAN result;

    h = acpi_get_handle(dev);
    if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
        return (FALSE);

    /* Batteries lacking a _STA method are assumed to be present. */
    if ((devinfo->Valid & ACPI_VALID_STA) == 0)
        result = TRUE;
    else
        result = ACPI_BATTERY_PRESENT(devinfo->CurrentStatus) ? TRUE : FALSE;
    AcpiOsFree(devinfo);
    return (result);
}
/*
* Returns true if a device has at least one valid device ID.
*/
static BOOLEAN
acpi_has_hid(ACPI_HANDLE h)
{
    ACPI_DEVICE_INFO *devinfo;
    BOOLEAN result;

    if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
        return (FALSE);

    /* Either a _HID, or a _CID list with at least one entry, will do. */
    result = (devinfo->Valid & ACPI_VALID_HID) != 0 ||
        ((devinfo->Valid & ACPI_VALID_CID) != 0 &&
        devinfo->CompatibleIdList.Count > 0);
    AcpiOsFree(devinfo);
    return (result);
}
/*
* Match a HID string against a handle
*/
BOOLEAN
acpi_MatchHid(ACPI_HANDLE h, const char *hid)
{
ACPI_DEVICE_INFO *devinfo;
BOOLEAN ret;
int i;
if (hid == NULL || h == NULL ||
ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
return (FALSE);
ret = FALSE;
/* First compare against the _HID, then fall back to the _CID list. */
if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
strcmp(hid, devinfo->HardwareId.String) == 0)
ret = TRUE;
else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
for (i = 0; i < devinfo->CompatibleIdList.Count; i++) {
if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) {
ret = TRUE;
break;
}
}
AcpiOsFree(devinfo);
return (ret);
}
/*
* Return the handle of a named object within our scope, ie. that of (parent)
* or one of its parents.
*/
ACPI_STATUS
acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
{
ACPI_HANDLE r;
ACPI_STATUS status;
/* Walk back up the tree to the root */
for (;;) {
status = AcpiGetHandle(parent, path, &r);
if (ACPI_SUCCESS(status)) {
*result = r;
return (AE_OK);
}
/* XXX Return error here? */
/* Any failure other than "not found" ends the walk without *result. */
if (status != AE_NOT_FOUND)
return (AE_OK);
/* Reached the root with no match. */
if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
return (AE_NOT_FOUND);
parent = r;
}
}
/*
* Allocate a buffer with a preset data size.
*/
ACPI_BUFFER *
acpi_AllocBuffer(int size)
{
    ACPI_BUFFER *buf;

    /* Allocate the header and the data area in a single allocation. */
    buf = malloc(sizeof(*buf) + size, M_ACPIDEV, M_NOWAIT);
    if (buf == NULL)
        return (NULL);

    /* The data area starts immediately after the ACPI_BUFFER header. */
    buf->Length = size;
    buf->Pointer = (void *)(buf + 1);
    return (buf);
}
ACPI_STATUS
acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
{
    ACPI_OBJECT_LIST arglist;
    ACPI_OBJECT arg;

    /* Build a one-element argument list holding the integer. */
    arg.Type = ACPI_TYPE_INTEGER;
    arg.Integer.Value = number;
    arglist.Count = 1;
    arglist.Pointer = &arg;
    return (AcpiEvaluateObject(handle, path, &arglist, NULL));
}
/*
* Evaluate a path that should return an integer.
*/
ACPI_STATUS
acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
{
ACPI_STATUS status;
ACPI_BUFFER buf;
ACPI_OBJECT param;
if (handle == NULL)
handle = ACPI_ROOT_OBJECT;
/*
* Assume that what we've been pointed at is an Integer object, or
* a method that will return an Integer.
*/
buf.Pointer = &param;
buf.Length = sizeof(param);
status = AcpiEvaluateObject(handle, path, NULL, &buf);
if (ACPI_SUCCESS(status)) {
if (param.Type == ACPI_TYPE_INTEGER)
*number = param.Integer.Value;
else
status = AE_TYPE;
}
/*
* In some applications, a method that's expected to return an Integer
* may instead return a Buffer (probably to simplify some internal
* arithmetic). We'll try to fetch whatever it is, and if it's a Buffer,
* convert it into an Integer as best we can.
*
* This is a hack.
*/
if (status == AE_BUFFER_OVERFLOW) {
/* buf.Length was set by ACPICA to the size actually required. */
if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
status = AE_NO_MEMORY;
} else {
status = AcpiEvaluateObject(handle, path, NULL, &buf);
if (ACPI_SUCCESS(status))
status = acpi_ConvertBufferToInteger(&buf, number);
AcpiOsFree(buf.Pointer);
}
}
return (status);
}
/*
 * Convert the object in an ACPI_BUFFER (either an Integer object or a
 * little-endian Buffer object of up to four bytes) into a UINT32.
 *
 * Returns AE_TYPE for any other object type and AE_BAD_DATA when the
 * buffer is too large to fit in *number.
 */
ACPI_STATUS
acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
{
    ACPI_OBJECT *p;
    UINT8 *val;
    int i;

    p = (ACPI_OBJECT *)bufp->Pointer;
    if (p->Type == ACPI_TYPE_INTEGER) {
        *number = p->Integer.Value;
        return (AE_OK);
    }
    if (p->Type != ACPI_TYPE_BUFFER)
        return (AE_TYPE);
    /* Bound the length by the output type rather than a bare 'int'. */
    if (p->Buffer.Length > sizeof(*number))
        return (AE_BAD_DATA);
    /*
     * Assemble the bytes little-endian.  Widen each byte to UINT32
     * before shifting: left-shifting a promoted (signed) int by 24
     * bits with the high bit set is undefined behavior in C.
     */
    *number = 0;
    val = p->Buffer.Pointer;
    for (i = 0; i < p->Buffer.Length; i++)
        *number += (UINT32)val[i] << (i * 8);
    return (AE_OK);
}
/*
* Iterate over the elements of a package object, calling the supplied
* function for each element.
*
* XXX possible enhancement might be to abort traversal on error.
*/
ACPI_STATUS
acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
    void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
{
    UINT32 idx;

    if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
        return (AE_BAD_PARAMETER);

    /* Hand each package element to the caller's function in order. */
    for (idx = 0; idx < pkg->Package.Count; idx++)
        func(&pkg->Package.Elements[idx], arg);
    return (AE_OK);
}
/*
* Find the (index)th resource object in a set.
*/
ACPI_STATUS
acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
{
ACPI_RESOURCE *rp;
int i;
rp = (ACPI_RESOURCE *)buf->Pointer;
i = index;
/* Step forward through the resource list 'index' times. */
while (i-- > 0) {
/* Range check */
if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
return (AE_BAD_PARAMETER);
/* Check for terminator */
if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
return (AE_NOT_FOUND);
rp = ACPI_NEXT_RESOURCE(rp);
}
/* Caller may pass a NULL resp just to validate that the index exists. */
if (resp != NULL)
*resp = rp;
return (AE_OK);
}
/*
* Append an ACPI_RESOURCE to an ACPI_BUFFER.
*
* Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
* provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible
* backing block. If the ACPI_RESOURCE is NULL, return an empty set of
* resources.
*/
#define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512
ACPI_STATUS
acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
{
    ACPI_RESOURCE *rp;
    void *newp;

    /* Initialise the buffer if necessary. */
    if (buf->Pointer == NULL) {
	buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
	if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
	    return (AE_NO_MEMORY);
	/* A fresh buffer holds just an end tag. */
	rp = (ACPI_RESOURCE *)buf->Pointer;
	rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
	rp->Length = ACPI_RS_SIZE_MIN;
    }
    /* NULL resource means "return an empty (terminator-only) set". */
    if (res == NULL)
	return (AE_OK);

    /*
     * Scan the current buffer looking for the terminator.
     * This will either find the terminator or hit the end
     * of the buffer and return an error.
     */
    rp = (ACPI_RESOURCE *)buf->Pointer;
    for (;;) {
	/* Range check, don't go outside the buffer */
	if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
	    return (AE_BAD_PARAMETER);
	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
	    break;
	rp = ACPI_NEXT_RESOURCE(rp);
    }

    /*
     * Check the size of the buffer and expand if required.
     *
     * Required size is:
     *	size of existing resources before terminator +
     *	size of new resource and header +
     *	size of terminator.
     *
     * Note that this loop should really only run once, unless
     * for some reason we are stuffing a *really* huge resource.
     */
    while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) +
	    res->Length + ACPI_RS_SIZE_NO_DATA +
	    ACPI_RS_SIZE_MIN) >= buf->Length) {
	if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
	    return (AE_NO_MEMORY);
	bcopy(buf->Pointer, newp, buf->Length);
	/* Re-derive rp's offset inside the newly allocated block. */
	rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
	    ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
	AcpiOsFree(buf->Pointer);
	buf->Pointer = newp;
	/* Doubles the recorded length to match the new allocation. */
	buf->Length += buf->Length;
    }

    /* Insert the new resource. */
    bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);

    /* And add the terminator. */
    rp = ACPI_NEXT_RESOURCE(rp);
    rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
    rp->Length = ACPI_RS_SIZE_MIN;

    return (AE_OK);
}
/*
* Set interrupt model.
*/
/* Tell the platform firmware which interrupt model the OS uses via _PIC. */
ACPI_STATUS
acpi_SetIntrModel(int model)
{
    ACPI_STATUS status;

    status = acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model);
    return (status);
}
/*
* Walk subtables of a table and call a callback routine for each
* subtable. The caller should provide the first subtable and a
* pointer to the end of the table. This can be used to walk tables
* such as MADT and SRAT that use subtable entries.
*/
/*
 * Walk the subtable entries between 'first' and 'end', invoking 'handler'
 * for each one.  Used for tables such as MADT and SRAT.
 */
void
acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler,
    void *arg)
{
    ACPI_SUBTABLE_HEADER *hdr;

    hdr = first;
    while ((void *)hdr < end) {
	/* A length shorter than the header itself means a bogus entry;
	 * stop rather than loop forever. */
	if (hdr->Length < sizeof(ACPI_SUBTABLE_HEADER))
	    break;
	handler(hdr, arg);
	hdr = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, hdr, hdr->Length);
    }
}
/*
* DEPRECATED. This interface has serious deficiencies and will be
* removed.
*
* Immediately enter the sleep state. In the old model, acpiconf(8) ran
* rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
*/
ACPI_STATUS
acpi_SetSleepState(struct acpi_softc *sc, int state)
{
    static int warned;

    /* Complain once per boot that this deprecated entry point is in use. */
    if (warned == 0) {
	warned = 1;
	device_printf(sc->acpi_dev,
	    "warning: acpi_SetSleepState() deprecated, need to update your software\n");
    }
    return (acpi_EnterSleepState(sc, state));
}
#if defined(__amd64__) || defined(__i386__)
/*
 * Task-context worker for a forced suspend.  Captures the requested state
 * up front: acpi_EnterSleepState() resets sc->acpi_next_sstate to 0 on its
 * way out, so reading it afterwards (as the old code did) made the failure
 * message always report "S0" instead of the state that actually failed.
 */
static void
acpi_sleep_force_task(void *context)
{
    struct acpi_softc *sc = (struct acpi_softc *)context;
    int state;

    state = sc->acpi_next_sstate;
    if (ACPI_FAILURE(acpi_EnterSleepState(sc, state)))
	device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
	    state);
}
static void
acpi_sleep_force(void *arg)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
device_printf(sc->acpi_dev,
"suspend request timed out, forcing sleep now\n");
/*
* XXX Suspending from callout causes freezes in DEVICE_SUSPEND().
* Suspend from acpi_task thread instead.
*/
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
acpi_sleep_force_task, sc)))
device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n");
}
#endif
/*
* Request that the system enter the given suspend state. All /dev/apm
* devices and devd(8) will be notified. Userland then has a chance to
* save state and acknowledge the request. The system sleeps once all
* acks are in.
*/
int
acpi_ReqSleepState(struct acpi_softc *sc, int state)
{
#if defined(__amd64__) || defined(__i386__)
    struct apm_clone_data *clone;
    ACPI_STATUS status;

    /* Reject states outside S1..Smax or not supported by this BIOS. */
    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return (EINVAL);
    if (!acpi_sleep_states[state])
	return (EOPNOTSUPP);

    /* If a suspend request is already in progress, just return. */
    /* NOTE(review): checked without the ACPI lock -- presumably a benign
     * race; confirm before relying on it. */
    if (sc->acpi_next_sstate != 0) {
	return (0);
    }

    /* Wait until sleep is enabled. */
    while (sc->acpi_sleep_disabled) {
	AcpiOsSleep(1000);
    }

    ACPI_LOCK(acpi);

    sc->acpi_next_sstate = state;

    /* S5 (soft-off) should be entered directly with no waiting. */
    if (state == ACPI_STATE_S5) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /* Record the pending state and notify all apm devices. */
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	clone->notify_status = APM_EV_NONE;
	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
	    /* Wake any select(2)/kevent(2) waiters on this clone. */
	    selwakeuppri(&clone->sel_read, PZERO);
	    KNOTE_LOCKED(&clone->sel_read.si_note, 0);
	}
    }

    /* If devd(8) is not running, immediately enter the sleep state. */
    if (!devctl_process_running()) {
	ACPI_UNLOCK(acpi);
	status = acpi_EnterSleepState(sc, state);
	return (ACPI_SUCCESS(status) ? 0 : ENXIO);
    }

    /*
     * Set a timeout to fire if userland doesn't ack the suspend request
     * in time.  This way we still eventually go to sleep if we were
     * overheating or running low on battery, even if userland is hung.
     * We cancel this timeout once all userland acks are in or the
     * suspend request is aborted.
     */
    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
    ACPI_UNLOCK(acpi);

    /* Now notify devd(8) also. */
    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);

    return (0);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
/*
* Acknowledge (or reject) a pending sleep state. The caller has
* prepared for suspend and is now ready for it to proceed. If the
* error argument is non-zero, it indicates suspend should be cancelled
* and gives an errno value describing why. Once all votes are in,
* we suspend the system.
*/
int
acpi_AckSleepState(struct apm_clone_data *clone, int error)
{
#if defined(__amd64__) || defined(__i386__)
    struct acpi_softc *sc;
    int ret, sleeping;

    /* If no pending sleep state, return an error. */
    ACPI_LOCK(acpi);
    sc = clone->acpi_sc;
    if (sc->acpi_next_sstate == 0) {
	ACPI_UNLOCK(acpi);
	return (ENXIO);
    }

    /* Caller wants to abort suspend process. */
    if (error) {
	sc->acpi_next_sstate = 0;
	callout_stop(&sc->susp_force_to);
	device_printf(sc->acpi_dev,
	    "listener on %s cancelled the pending suspend\n",
	    devtoname(clone->cdev));
	ACPI_UNLOCK(acpi);
	return (0);
    }

    /*
     * Mark this device as acking the suspend request.  Then, walk through
     * all devices, seeing if they agree yet.  We only count devices that
     * are writable since read-only devices couldn't ack the request.
     */
    sleeping = TRUE;
    clone->notify_status = APM_EV_ACKED;
    /* Note: the loop below reuses 'clone' as its iterator, so the caller's
     * clone pointer is no longer valid after this point. */
    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
	    clone->notify_status != APM_EV_ACKED) {
	    sleeping = FALSE;
	    break;
	}
    }

    /* If all devices have voted "yes", we will suspend now. */
    if (sleeping)
	callout_stop(&sc->susp_force_to);
    ACPI_UNLOCK(acpi);
    ret = 0;
    if (sleeping) {
	/* Enter the sleep state outside the ACPI lock. */
	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
		ret = ENODEV;
    }
    return (ret);
#else
    /* This platform does not support acpi suspend/resume. */
    return (EOPNOTSUPP);
#endif
}
/* Timer callback: re-allow sleep requests once the system is fully up. */
static void
acpi_sleep_enable(void *arg)
{
    struct acpi_softc *sc = (struct acpi_softc *)arg;

    ACPI_LOCK_ASSERT(acpi);

    if (AcpiGbl_SystemAwakeAndRunning) {
	sc->acpi_sleep_disabled = FALSE;
	return;
    }

    /* Not fully up and running yet -- check again later. */
    callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
}
/*
 * Atomically claim the "sleep in progress" flag.  Returns AE_OK if this
 * caller claimed it, AE_ERROR if sleep was already disabled or the system
 * is not fully up and running.
 */
static ACPI_STATUS
acpi_sleep_disable(struct acpi_softc *sc)
{
    ACPI_STATUS ret;

    /* Fail if the system is not fully up and running. */
    if (!AcpiGbl_SystemAwakeAndRunning)
	return (AE_ERROR);

    ACPI_LOCK(acpi);
    if (sc->acpi_sleep_disabled)
	ret = AE_ERROR;
    else
	ret = AE_OK;
    sc->acpi_sleep_disabled = TRUE;
    ACPI_UNLOCK(acpi);

    return (ret);
}
/* Progress markers used by acpi_EnterSleepState() to unwind a partial
 * suspend: each value implies all the earlier steps completed. */
enum acpi_sleep_state {
    ACPI_SS_NONE,		/* nothing done yet */
    ACPI_SS_GPE_SET,		/* wake GPEs configured */
    ACPI_SS_DEV_SUSPEND,	/* DEVICE_SUSPEND() succeeded */
    ACPI_SS_SLP_PREP,		/* AcpiEnterSleepStatePrep() done */
    ACPI_SS_SLEPT,		/* sleep state was actually entered */
};
/*
* Enter the desired system sleep state.
*
* Currently we support S1-S5 but S4 is only S4BIOS
*/
static ACPI_STATUS
acpi_EnterSleepState(struct acpi_softc *sc, int state)
{
    register_t intr;
    ACPI_STATUS status;
    ACPI_EVENT_STATUS power_button_status;
    enum acpi_sleep_state slp_state;
    int sleep_result;

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Validate the requested state against range and BIOS support. */
    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (!acpi_sleep_states[state]) {
	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
	    state);
	return (AE_SUPPORT);
    }

    /* Re-entry once we're suspending is not allowed. */
    status = acpi_sleep_disable(sc);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev,
	    "suspend request ignored (not ready yet)\n");
	return (status);
    }

    if (state == ACPI_STATE_S5) {
	/*
	 * Shut down cleanly and power off.  This will call us back through the
	 * shutdown handlers.
	 */
	shutdown_nice(RB_POWEROFF);
	return_ACPI_STATUS (AE_OK);
    }

    /* Freeze userland before suspending devices. */
    EVENTHANDLER_INVOKE(power_suspend_early);
    stop_all_proc();
    EVENTHANDLER_INVOKE(power_suspend);

    /* Pin ourselves to the BSP; the wakeup path runs there. */
    if (smp_started) {
	thread_lock(curthread);
	sched_bind(curthread, 0);
	thread_unlock(curthread);
    }

    /*
     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
     * drivers need this.
     */
    mtx_lock(&Giant);

    slp_state = ACPI_SS_NONE;

    sc->acpi_sstate = state;

    /* Enable any GPEs as appropriate and requested by the user. */
    acpi_wake_prep_walk(state);
    slp_state = ACPI_SS_GPE_SET;

    /*
     * Inform all devices that we are going to sleep.  If at least one
     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
     *
     * XXX Note that a better two-pass approach with a 'veto' pass
     * followed by a "real thing" pass would be better, but the current
     * bus interface does not provide for this.
     */
    if (DEVICE_SUSPEND(root_bus) != 0) {
	device_printf(sc->acpi_dev, "device_suspend failed\n");
	goto backout;
    }
    slp_state = ACPI_SS_DEV_SUSPEND;

    /* If testing device suspend only, back out of everything here. */
    if (acpi_susp_bounce)
	goto backout;

    status = AcpiEnterSleepStatePrep(state);
    if (ACPI_FAILURE(status)) {
	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
		      AcpiFormatException(status));
	goto backout;
    }
    slp_state = ACPI_SS_SLP_PREP;

    if (sc->acpi_sleep_delay > 0)
	DELAY(sc->acpi_sleep_delay * 1000000);

    /* Interrupts stay off across the actual hardware transition. */
    intr = intr_disable();
    if (state != ACPI_STATE_S1) {
	sleep_result = acpi_sleep_machdep(sc, state);
	acpi_wakeup_machdep(sc, state, sleep_result, 0);

	/*
	 * XXX According to ACPI specification SCI_EN bit should be restored
	 * by ACPI platform (BIOS, firmware) to its pre-sleep state.
	 * Unfortunately some BIOSes fail to do that and that leads to
	 * unexpected and serious consequences during wake up like a system
	 * getting stuck in SMI handlers.
	 * This hack is picked up from Linux, which claims that it follows
	 * Windows behavior.
	 */
	if (sleep_result == 1 && state != ACPI_STATE_S4)
	    AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);

	AcpiLeaveSleepStatePrep(state);

	if (sleep_result == 1 && state == ACPI_STATE_S3) {
	    /*
	     * Prevent mis-interpretation of the wakeup by power button
	     * as a request for power off.
	     * Ideally we should post an appropriate wakeup event,
	     * perhaps using acpi_event_power_button_wake or alike.
	     *
	     * Clearing of power button status after wakeup is mandated
	     * by ACPI specification in section "Fixed Power Button".
	     *
	     * XXX As of ACPICA 20121114 AcpiGetEventStatus provides
	     * status as 0/1 corressponding to inactive/active despite
	     * its type being ACPI_EVENT_STATUS.  In other words,
	     * we should not test for ACPI_EVENT_FLAG_SET for time being.
	     */
	    if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON,
		&power_button_status)) && power_button_status != 0) {
		AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
		device_printf(sc->acpi_dev,
		    "cleared fixed power button status\n");
	    }
	}

	intr_restore(intr);

	/* call acpi_wakeup_machdep() again with interrupt enabled */
	acpi_wakeup_machdep(sc, state, sleep_result, 1);

	if (sleep_result == -1)
		goto backout;

	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
	if (state == ACPI_STATE_S4)
	    AcpiEnable();
    } else {
	/* S1 is entered directly through ACPICA, no machdep help needed. */
	status = AcpiEnterSleepState(state);
	AcpiLeaveSleepStatePrep(state);
	intr_restore(intr);
	if (ACPI_FAILURE(status)) {
	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
			  AcpiFormatException(status));
	    goto backout;
	}
    }
    slp_state = ACPI_SS_SLEPT;

    /*
     * Back out state according to how far along we got in the suspend
     * process.  This handles both the error and success cases.
     */
backout:
    if (slp_state >= ACPI_SS_GPE_SET) {
	acpi_wake_prep_walk(state);
	sc->acpi_sstate = ACPI_STATE_S0;
    }
    if (slp_state >= ACPI_SS_DEV_SUSPEND)
	DEVICE_RESUME(root_bus);
    if (slp_state >= ACPI_SS_SLP_PREP)
	AcpiLeaveSleepState(state);
    if (slp_state >= ACPI_SS_SLEPT) {
	acpi_resync_clock(sc);
	acpi_enable_fixed_events(sc);
    }
    sc->acpi_next_sstate = 0;

    mtx_unlock(&Giant);

    if (smp_started) {
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
    }

    /* Thaw userland and announce the resume. */
    resume_all_proc();
    EVENTHANDLER_INVOKE(power_resume);

    /* Allow another sleep request after a while. */
    callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);

    /* Run /etc/rc.resume after we are back. */
    if (devctl_process_running())
	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);

    return_ACPI_STATUS (status);
}
/* Resynchronize the system clock after a resume (amd64 only). */
static void
acpi_resync_clock(struct acpi_softc *sc)
{
#ifdef __amd64__
    /* Honor the tunable that disables clock resets on resume. */
    if (!acpi_reset_clock)
	return;

    /*
     * Warm up timecounter again and reset system clock.
     */
    (void)timecounter->tc_get_timecount(timecounter);
    (void)timecounter->tc_get_timecount(timecounter);
    inittodr(time_second + sc->acpi_sleep_delay);
#endif
}
/* Enable or disable the device's wake GPE. */
/* Enable or disable the device's wake GPE. */
int
acpi_wake_set_enable(device_t dev, int enable)
{
    struct acpi_prw_data prw;
    ACPI_STATUS status;
    int flags;

    /* Make sure the device supports waking the system and get the GPE. */
    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
	return (ENXIO);

    /*
     * Both directions are the same call with a different action, so do it
     * once instead of duplicating the call and error handling per branch.
     */
    flags = acpi_get_flags(dev);
    status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit,
	enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE);
    if (ACPI_FAILURE(status)) {
	if (enable)
	    device_printf(dev, "enable wake failed\n");
	else
	    device_printf(dev, "disable wake failed\n");
	return (ENXIO);
    }

    /* Track the wake setting in the device's flags. */
    if (enable)
	acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
    else
	acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);

    return (0);
}
/* Prepare one wake-capable device's GPE for the sleep state 'sstate'. */
static int
acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /* Check that this is a wake-capable device and get its GPE. */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);

    /*
     * The destination sleep state must be less than (i.e., higher power)
     * or equal to the value specified by _PRW.  If this GPE cannot be
     * enabled for the next sleep state, then disable it.  If it can and
     * the user requested it be enabled, turn on any required power resources
     * and set _PSW.
     */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
	if (bootverbose)
	    device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
		acpi_name(handle), sstate);
    } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
	acpi_pwr_wake_enable(handle, 1);
	acpi_SetInteger(handle, "_PSW", 1);
	if (bootverbose)
	    device_printf(dev, "wake_prep enabled for %s (S%d)\n",
		acpi_name(handle), sstate);
    }

    return (0);
}
/* Undo acpi_wake_sleep_prep() for one device after resuming from 'sstate'. */
static int
acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
{
    struct acpi_prw_data prw;
    device_t dev;

    /*
     * Check that this is a wake-capable device and get its GPE.  Return
     * now if the user didn't enable this device for wake.
     */
    if (acpi_parse_prw(handle, &prw) != 0)
	return (ENXIO);
    dev = acpi_get_device(handle);
    if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
	return (0);

    /*
     * If this GPE couldn't be enabled for the previous sleep state, it was
     * disabled before going to sleep so re-enable it.  If it was enabled,
     * clear _PSW and turn off any power resources it used.
     */
    if (sstate > prw.lowest_wake) {
	AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE);
	if (bootverbose)
	    device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
    } else {
	acpi_SetInteger(handle, "_PSW", 0);
	acpi_pwr_wake_enable(handle, 0);
	if (bootverbose)
	    device_printf(dev, "run_prep cleaned up for %s\n",
		acpi_name(handle));
    }

    return (0);
}
/* Namespace-walk callback: dispatch to the sleep or wake prep routine. */
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
    int sstate = *(int *)context;

    /*
     * The system being awake and running means we are on the way down
     * (suspending); otherwise we are coming back up (resuming).
     */
    if (AcpiGbl_SystemAwakeAndRunning)
	acpi_wake_sleep_prep(handle, sstate);
    else
	acpi_wake_run_prep(handle, sstate);

    return (AE_OK);
}
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
/* Walk the tree rooted at \_SB_ to prep devices for suspend/resume. */
static int
acpi_wake_prep_walk(int sstate)
{
    ACPI_HANDLE sb;

    if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb)))
	return (0);

    AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb, 100, acpi_wake_prep, NULL,
	&sstate, NULL);
    return (0);
}
/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
/*
 * Walk the tree rooted at 'dev' and attach a per-device "wake" sysctl to
 * every attached child that declares a _PRW object.
 */
static int
acpi_wake_sysctl_walk(device_t dev)
{
    int error, i, numdevs;
    device_t *devlist;
    device_t child;
    ACPI_STATUS status;

    error = device_get_children(dev, &devlist, &numdevs);
    /*
     * On failure 'devlist'/'numdevs' are not valid outputs, so bail out
     * before touching them (the old code read 'numdevs' and could free an
     * uninitialized pointer here).
     */
    if (error != 0)
	return (error);
    if (numdevs == 0) {
	free(devlist, M_TEMP);
	return (0);
    }

    for (i = 0; i < numdevs; i++) {
	child = devlist[i];
	/* Recurse first so grandchildren are covered too. */
	acpi_wake_sysctl_walk(child);
	if (!device_is_attached(child))
	    continue;
	/* Only devices with a _PRW object can wake the system. */
	status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
	if (ACPI_SUCCESS(status)) {
	    SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
		SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
		"wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
		acpi_wake_set_sysctl, "I", "Device set to wake the system");
	}
    }
    free(devlist, M_TEMP);
    return (0);
}
/* Enable or disable wake from userland. */
static int
acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
{
int enable, error;
device_t dev;
dev = (device_t)arg1;
enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
error = sysctl_handle_int(oidp, &enable, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (enable != 0 && enable != 1)
return (EINVAL);
return (acpi_wake_set_enable(dev, enable));
}
/* Parse a device's _PRW into a structure. */
/* Parse a device's _PRW into a structure. */
int
acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
{
    ACPI_STATUS status;
    ACPI_BUFFER prw_buffer;
    ACPI_OBJECT *res, *res2;
    int error, i, power_count;

    if (h == NULL || prw == NULL)
	return (EINVAL);

    /*
     * The _PRW object (7.2.9) is only required for devices that have the
     * ability to wake the system from a sleeping state.
     */
    error = EINVAL;
    prw_buffer.Pointer = NULL;
    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
    if (ACPI_FAILURE(status))
	return (ENOENT);
    res = (ACPI_OBJECT *)prw_buffer.Pointer;
    if (res == NULL)
	return (ENOENT);

    /* The _PRW must be a package of at least two elements. */
    if (!ACPI_PKG_VALID(res, 2))
	goto out;

    /*
     * Element 1 of the _PRW object:
     * The lowest power system sleeping state that can be entered while still
     * providing wake functionality.  The sleeping state being entered must
     * be less than (i.e., higher power) or equal to this value.
     */
    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
	goto out;

    /*
     * Element 0 of the _PRW object:
     */
    switch (res->Package.Elements[0].Type) {
    case ACPI_TYPE_INTEGER:
	/*
	 * If the data type of this package element is numeric, then this
	 * _PRW package element is the bit index in the GPEx_EN, in the
	 * GPE blocks described in the FADT, of the enable bit that is
	 * enabled for the wake event.
	 */
	prw->gpe_handle = NULL;
	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
	error = 0;
	break;
    case ACPI_TYPE_PACKAGE:
	/*
	 * If the data type of this package element is a package, then this
	 * _PRW package element is itself a package containing two
	 * elements.  The first is an object reference to the GPE Block
	 * device that contains the GPE that will be triggered by the wake
	 * event.  The second element is numeric and it contains the bit
	 * index in the GPEx_EN, in the GPE Block referenced by the
	 * first element in the package, of the enable bit that is enabled for
	 * the wake event.
	 *
	 * For example, if this field is a package then it is of the form:
	 * Package() {\_SB.PCI0.ISA.GPE, 2}
	 */
	res2 = &res->Package.Elements[0];
	if (!ACPI_PKG_VALID(res2, 2))
	    goto out;
	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
	if (prw->gpe_handle == NULL)
	    goto out;
	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
	    goto out;
	error = 0;
	break;
    default:
	goto out;
    }

    /* Elements 2 to N of the _PRW object are power resources. */
    power_count = res->Package.Count - 2;
    if (power_count > ACPI_PRW_MAX_POWERRES) {
	printf("ACPI device %s has too many power resources\n", acpi_name(h));
	power_count = 0;
    }
    prw->power_res_count = power_count;
    for (i = 0; i < power_count; i++)
	prw->power_res[i] = res->Package.Elements[i];

out:
    if (prw_buffer.Pointer != NULL)
	AcpiOsFree(prw_buffer.Pointer);
    return (error);
}
/*
* ACPI Event Handlers
*/
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
acpi_system_eventhandler_sleep(void *arg, int state)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
int ret;
ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
/* Check if button action is disabled or unknown. */
if (state == ACPI_STATE_UNKNOWN)
return;
/* Request that the system prepare to enter the given suspend state. */
ret = acpi_ReqSleepState(sc, state);
if (ret != 0)
device_printf(sc->acpi_dev,
"request to enter state S%d failed (err %d)\n", state, ret);
return_VOID;
}
/* System eventhandler for wakeup events; intentionally a no-op today. */
static void
acpi_system_eventhandler_wakeup(void *arg, int state)
{

    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);

    /* Nothing to do for wakeup at this time. */
    return_VOID;
}
/*
* ACPICA Event Handlers (FixedEvent, also called from button notify handler)
*/
/* Task-queue trampoline: fire the acpi_sleep_event handlers. */
static void
acpi_invoke_sleep_eventhandler(void *context)
{
    int state = *(int *)context;

    EVENTHANDLER_INVOKE(acpi_sleep_event, state);
}
/* Task-queue trampoline: fire the acpi_wakeup_event handlers. */
static void
acpi_invoke_wake_eventhandler(void *context)
{
    int state = *(int *)context;

    EVENTHANDLER_INVOKE(acpi_wakeup_event, state);
}
UINT32
acpi_event_power_button_sleep(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
UINT32
acpi_event_power_button_wake(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
UINT32
acpi_event_sleep_button_sleep(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
UINT32
acpi_event_sleep_button_wake(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
/*
* XXX This static buffer is suboptimal. There is no locking so only
* use this for single-threaded callers.
*/
/*
 * Return the full pathname for a handle, or "(unknown)" on failure.
 * Uses a single static buffer with no locking -- single-threaded
 * callers only.
 */
char *
acpi_name(ACPI_HANDLE handle)
{
    static char data[256];
    ACPI_BUFFER buf;

    buf.Length = sizeof(data);
    buf.Pointer = data;

    if (handle == NULL ||
	ACPI_FAILURE(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
	return ("(unknown)");
    return (data);
}
/*
* Debugging/bug-avoidance. Avoid trying to fetch info on various
* parts of the namespace.
*/
/*
 * Debugging/bug-avoidance.  Return non-zero if the handle's name (minus
 * any leading backslash) matches a whitespace-separated token in the
 * debug.acpi.avoid environment variable.
 */
int
acpi_avoid(ACPI_HANDLE handle)
{
    char *cp, *env, *np;
    int len;

    np = acpi_name(handle);
    if (*np == '\\')
	np++;
    if ((env = kern_getenv("debug.acpi.avoid")) == NULL)
	return (0);

    /* Scan the avoid list checking for a match */
    cp = env;
    for (;;) {
	/* Cast through unsigned char: passing a plain (possibly negative)
	 * char to isspace() is undefined behavior (CERT STR37-C). */
	while (*cp != 0 && isspace((unsigned char)*cp))
	    cp++;
	if (*cp == 0)
	    break;
	len = 0;
	while (cp[len] != 0 && !isspace((unsigned char)cp[len]))
	    len++;
	if (!strncmp(cp, np, len)) {
	    freeenv(env);
	    return(1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}
/*
* Debugging/bug-avoidance. Disable ACPI subsystem components.
*/
/*
 * Debugging/bug-avoidance.  Return non-zero if the named ACPI subsystem
 * component was disabled via the debug.acpi.disabled environment variable
 * (a whitespace-separated token list; "all" disables everything).
 */
int
acpi_disabled(char *subsys)
{
    char *cp, *env;
    int len;

    if ((env = kern_getenv("debug.acpi.disabled")) == NULL)
	return (0);
    if (strcmp(env, "all") == 0) {
	freeenv(env);
	return (1);
    }

    /* Scan the disable list, checking for a match. */
    cp = env;
    for (;;) {
	/* Cast through unsigned char: passing a plain (possibly negative)
	 * char to isspace() is undefined behavior (CERT STR37-C). */
	while (*cp != '\0' && isspace((unsigned char)*cp))
	    cp++;
	if (*cp == '\0')
	    break;
	len = 0;
	while (cp[len] != '\0' && !isspace((unsigned char)cp[len]))
	    len++;
	if (strncmp(cp, subsys, len) == 0) {
	    freeenv(env);
	    return (1);
	}
	cp += len;
    }
    freeenv(env);

    return (0);
}
/*
* Control interface.
*
* We multiplex ioctls for all participating ACPI devices here. Individual
* drivers wanting to be accessible via /dev/acpi should use the
* register/deregister interface to make their handlers visible.
*/
/* One registered /dev/acpi ioctl handler. */
struct acpi_ioctl_hook
{
    TAILQ_ENTRY(acpi_ioctl_hook) link;	/* list linkage */
    u_long cmd;				/* ioctl command this hook serves */
    acpi_ioctl_fn fn;			/* handler function */
    void *arg;				/* opaque argument passed to fn */
};

/* Registered hooks; list is initialized lazily on first registration. */
static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks;
static int acpi_ioctl_hooks_initted;
/* Register an ioctl handler to be multiplexed through /dev/acpi. */
int
acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
{
    struct acpi_ioctl_hook *hp;

    hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT);
    if (hp == NULL)
	return (ENOMEM);
    hp->cmd = cmd;
    hp->fn = fn;
    hp->arg = arg;

    ACPI_LOCK(acpi);
    /* Set up the hook list lazily on the first registration. */
    if (!acpi_ioctl_hooks_initted) {
	TAILQ_INIT(&acpi_ioctl_hooks);
	acpi_ioctl_hooks_initted = 1;
    }
    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
    ACPI_UNLOCK(acpi);

    return (0);
}
/* Remove a previously registered /dev/acpi ioctl handler, if present. */
void
acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
{
    struct acpi_ioctl_hook *hp, *found;

    found = NULL;
    ACPI_LOCK(acpi);
    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
	if (hp->cmd == cmd && hp->fn == fn) {
	    found = hp;
	    break;
	}
    }
    if (found != NULL) {
	TAILQ_REMOVE(&acpi_ioctl_hooks, found, link);
	free(found, M_ACPIDEV);
    }
    ACPI_UNLOCK(acpi);
}
/* /dev/acpi open: no per-open state is needed, so always succeed. */
static int
acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}
/* /dev/acpi close: nothing to tear down. */
static int
acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
    return (0);
}
/*
 * /dev/acpi ioctl entry point.  Registered hooks are tried first; the
 * core sleep-state ioctls are handled here and require write access.
 */
static int
acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
    struct acpi_softc *sc;
    struct acpi_ioctl_hook *hp;
    int error, state;

    error = 0;
    hp = NULL;
    sc = dev->si_drv1;

    /*
     * Scan the list of registered ioctls, looking for handlers.
     */
    ACPI_LOCK(acpi);
    if (acpi_ioctl_hooks_initted)
	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
	    if (hp->cmd == cmd)
		break;
	}
    ACPI_UNLOCK(acpi);
    /* Found a hook: delegate to it (outside the lock). */
    if (hp)
	return (hp->fn(cmd, addr, hp->arg));

    /*
     * Core ioctls are not permitted for non-writable user.
     * Currently, other ioctls just fetch information.
     * Not changing system behavior.
     */
    if ((flag & FWRITE) == 0)
	return (EPERM);

    /* Core system ioctls. */
    switch (cmd) {
    case ACPIIO_REQSLPSTATE:
	/* Begin the cooperative (devd-acknowledged) suspend handshake. */
	state = *(int *)addr;
	if (state != ACPI_STATE_S5)
	    return (acpi_ReqSleepState(sc, state));
	device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
	error = EOPNOTSUPP;
	break;
    case ACPIIO_ACKSLPSTATE:
	/* Userland's vote on a pending suspend (0 = proceed, errno = abort). */
	error = *(int *)addr;
	error = acpi_AckSleepState(sc->acpi_clone, error);
	break;
    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
	/* Immediate sleep with no userland handshake. */
	state = *(int *)addr;
	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
	    return (EINVAL);
	if (!acpi_sleep_states[state])
	    return (EOPNOTSUPP);
	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
	    error = ENXIO;
	break;
    default:
	error = ENXIO;
	break;
    }

    return (error);
}
/*
 * Parse a sleep-state name ("S0".."S5", case-insensitive, or "NONE")
 * into the corresponding state number.  Returns -1 on a bad name.
 */
static int
acpi_sname2sstate(const char *sname)
{
    int sstate;

    /* Cast through unsigned char: toupper() on a plain (possibly negative)
     * char is undefined behavior (CERT STR37-C). */
    if (toupper((unsigned char)sname[0]) == 'S') {
	sstate = sname[1] - '0';
	if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
	    sname[2] == '\0')
	    return (sstate);
    } else if (strcasecmp(sname, "NONE") == 0)
	return (ACPI_STATE_UNKNOWN);
    return (-1);
}
/*
 * Map a sleep-state number back to its name.  Returns "NONE" for the
 * "unknown" sentinel and NULL for anything else out of range.
 */
static const char *
acpi_sstate2sname(int sstate)
{
    static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };

    if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
	return (snames[sstate]);
    if (sstate == ACPI_STATE_UNKNOWN)
	return ("NONE");
    return (NULL);
}
/* sysctl handler: report the BIOS-supported sleep states as "S1 S3 ...". */
static int
acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct sbuf sb;
    UINT8 state;

    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
    /* List every supported state from S1 up, space-separated. */
    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
	if (acpi_sleep_states[state])
	    sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
    /* Drop the trailing space before handing the string to sysctl. */
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);
    return (error);
}
/*
 * sysctl handler: get/set a sleep-state variable (oid_arg1 points at the
 * int holding the state) using its name ("S3", "NONE", ...).
 */
static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
    char sleep_state[10];
    int error, new_state, old_state;

    old_state = *(int *)oidp->oid_arg1;
    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
    if (error == 0 && req->newptr != NULL) {
	new_state = acpi_sname2sstate(sleep_state);
	/* Reject unparsable names and S0 (both map below S1). */
	if (new_state < ACPI_STATE_S1)
	    return (EINVAL);
	/* Allow ACPI_STATE_UNKNOWN ("NONE") through; it is out of range. */
	if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
	    return (EOPNOTSUPP);
	if (new_state != old_state)
	    *(int *)oidp->oid_arg1 = new_state;
    }
    return (error);
}
/* Inform devctl(4) when we receive a Notify. */
/* Inform devctl(4) when we receive a Notify for handle 'h'. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
{
    char notify_buf[16];
    ACPI_BUFFER handle_buf;
    ACPI_STATUS status;

    if (subsystem == NULL)
	return;

    /* Resolve the handle to its full namespace pathname for the event. */
    handle_buf.Pointer = NULL;
    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiNsHandleToPathname(h, &handle_buf);
    if (ACPI_FAILURE(status))
	return;
    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
    AcpiOsFree(handle_buf.Pointer);
}
#ifdef ACPI_DEBUG
/*
* Support for parsing debug options from the kernel environment.
*
* Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
* by specifying the names of the bits in the debug.acpi.layer and
* debug.acpi.level environment variables. Bits may be unset by
* prefixing the bit name with !.
*/
/* Maps a debug-bit name from the environment to its numeric value. */
struct debugtag
{
    char *name;		/* token as written in the tunable */
    UINT32 value;	/* bit(s) it selects in the debug register */
};
/* Name-to-bit table for debug.acpi.layer (sets AcpiDbgLayer). */
static struct debugtag	dbg_layer[] = {
    {"ACPI_UTILITIES",		ACPI_UTILITIES},
    {"ACPI_HARDWARE",		ACPI_HARDWARE},
    {"ACPI_EVENTS",		ACPI_EVENTS},
    {"ACPI_TABLES",		ACPI_TABLES},
    {"ACPI_NAMESPACE",		ACPI_NAMESPACE},
    {"ACPI_PARSER",		ACPI_PARSER},
    {"ACPI_DISPATCHER",		ACPI_DISPATCHER},
    {"ACPI_EXECUTER",		ACPI_EXECUTER},
    {"ACPI_RESOURCES",		ACPI_RESOURCES},
    {"ACPI_CA_DEBUGGER",	ACPI_CA_DEBUGGER},
    {"ACPI_OS_SERVICES",	ACPI_OS_SERVICES},
    {"ACPI_CA_DISASSEMBLER",	ACPI_CA_DISASSEMBLER},
    {"ACPI_ALL_COMPONENTS",	ACPI_ALL_COMPONENTS},
    {"ACPI_AC_ADAPTER",		ACPI_AC_ADAPTER},
    {"ACPI_BATTERY",		ACPI_BATTERY},
    {"ACPI_BUS",		ACPI_BUS},
    {"ACPI_BUTTON",		ACPI_BUTTON},
    {"ACPI_EC",			ACPI_EC},
    {"ACPI_FAN",		ACPI_FAN},
    {"ACPI_POWERRES",		ACPI_POWERRES},
    {"ACPI_PROCESSOR",		ACPI_PROCESSOR},
    {"ACPI_THERMAL",		ACPI_THERMAL},
    {"ACPI_TIMER",		ACPI_TIMER},
    {"ACPI_ALL_DRIVERS",	ACPI_ALL_DRIVERS},
    {NULL, 0}			/* sentinel: terminates table scans */
};
static struct debugtag dbg_level[] = {
{"ACPI_LV_INIT", ACPI_LV_INIT},
{"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT},
{"ACPI_LV_INFO", ACPI_LV_INFO},
{"ACPI_LV_REPAIR", ACPI_LV_REPAIR},
{"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS},
/* Trace verbosity level 1 [Standard Trace Level] */
{"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES},
{"ACPI_LV_PARSE", ACPI_LV_PARSE},
{"ACPI_LV_LOAD", ACPI_LV_LOAD},
{"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH},
{"ACPI_LV_EXEC", ACPI_LV_EXEC},
{"ACPI_LV_NAMES", ACPI_LV_NAMES},
{"ACPI_LV_OPREGION", ACPI_LV_OPREGION},
{"ACPI_LV_BFIELD", ACPI_LV_BFIELD},
{"ACPI_LV_TABLES", ACPI_LV_TABLES},
{"ACPI_LV_VALUES", ACPI_LV_VALUES},
{"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS},
{"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES},
{"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS},
{"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE},
{"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1},
/* Trace verbosity level 2 [Function tracing and memory allocation] */
{"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS},
{"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS},
{"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS},
{"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2},
{"ACPI_LV_ALL", ACPI_LV_ALL},
/* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
{"ACPI_LV_MUTEX", ACPI_LV_MUTEX},
{"ACPI_LV_THREADS", ACPI_LV_THREADS},
{"ACPI_LV_IO", ACPI_LV_IO},
{"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS},
{"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3},
/* Exceptionally verbose output -- also used in the global "DebugLevel" */
{"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE},
{"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO},
{"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES},
{"ACPI_LV_EVENTS", ACPI_LV_EVENTS},
{"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE},
{NULL, 0}
};
/*
 * Parse a whitespace-separated list of debug bit names in 'cp' and set
 * (or, when a name is prefixed with '!', clear) the matching bits in
 * *flag.  'tag' is a table terminated by a NULL name.
 *
 * Note: names are matched with strncmp() over the token length only,
 * so a token also matches every table entry it is a prefix of.
 *
 * Fix: cast to unsigned char before isspace() -- passing a plain char
 * with a negative value is undefined behavior (CERT STR37-C).
 */
static void
acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
{
	char	*ep;
	int	i, l;
	int	set;

	while (*cp) {
		/* Skip whitespace between tokens. */
		if (isspace((unsigned char)*cp)) {
			cp++;
			continue;
		}

		/* Find the end of the current token. */
		ep = cp;
		while (*ep && !isspace((unsigned char)*ep))
			ep++;

		/* A leading '!' means "clear this bit". */
		if (*cp == '!') {
			set = 0;
			cp++;
			if (cp == ep)	/* a bare "!": ignore it */
				continue;
		} else {
			set = 1;
		}

		l = ep - cp;
		for (i = 0; tag[i].name != NULL; i++) {
			if (!strncmp(cp, tag[i].name, l)) {
				if (set)
					*flag |= tag[i].value;
				else
					*flag &= ~tag[i].value;
			}
		}
		cp = ep;
	}
}
/*
 * SYSINIT hook: seed AcpiDbgLayer/AcpiDbgLevel from the kernel
 * environment variables debug.acpi.layer and debug.acpi.level.
 * Also re-invoked from acpi_debug_sysctl() after a sysctl write.
 */
static void
acpi_set_debugging(void *junk)
{
	char *layer, *level;

	/* During boot, start from a clean slate before applying tunables. */
	if (cold) {
		AcpiDbgLayer = 0;
		AcpiDbgLevel = 0;
	}

	layer = kern_getenv("debug.acpi.layer");
	level = kern_getenv("debug.acpi.level");
	if (layer == NULL && level == NULL)
		return;

	printf("ACPI set debug");
	if (layer != NULL) {
		/* "NONE" clears bits without being echoed as a layer name. */
		if (strcmp("NONE", layer) != 0)
			printf(" layer '%s'", layer);
		acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
		freeenv(layer);
	}
	if (level != NULL) {
		if (strcmp("NONE", level) != 0)
			printf(" level '%s'", level);
		acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
		freeenv(level);
	}
	printf("\n");
}
SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
NULL);
/*
 * Sysctl handler shared by debug.acpi.layer and debug.acpi.level:
 * on read, renders the currently-set bits as symbolic names; on write,
 * stores the new list in the kernel environment and re-parses it via
 * acpi_set_debugging().
 */
static int
acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error, *dbg;
	struct debugtag *tag;
	struct sbuf sb;
	char temp[128];

	if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
		return (ENOMEM);

	/* oid_arg1 names the env variable and selects table/register. */
	if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
		tag = &dbg_layer[0];
		dbg = &AcpiDbgLayer;
	} else {
		tag = &dbg_level[0];
		dbg = &AcpiDbgLevel;
	}

	/* Get old values if this is a get request. */
	ACPI_SERIAL_BEGIN(acpi);
	if (*dbg == 0) {
		sbuf_cpy(&sb, "NONE");
	} else if (req->newptr == NULL) {
		/* List every tag whose bits are all set in the register. */
		for (; tag->name != NULL; tag++) {
			if ((*dbg & tag->value) == tag->value)
				sbuf_printf(&sb, "%s ", tag->name);
		}
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	strlcpy(temp, sbuf_data(&sb), sizeof(temp));
	sbuf_delete(&sb);

	error = sysctl_handle_string(oidp, temp, sizeof(temp), req);

	/* Check for error or no change */
	if (error == 0 && req->newptr != NULL) {
		/* New value written: reset and re-parse from the env. */
		*dbg = 0;
		kern_setenv((char *)oidp->oid_arg1, temp);
		acpi_set_debugging(NULL);
	}
	ACPI_SERIAL_END(acpi);
	return (error);
}
SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
"debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
"debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
#endif /* ACPI_DEBUG */
/*
 * Sysctl handler for the AML Debug-object knob: propagate the new
 * setting to ACPICA, but only when the value actually transitions
 * between zero and non-zero.
 */
static int
acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error, prev;

	prev = acpi_debug_objects;
	error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Only act on a real off<->on transition. */
	if (prev != acpi_debug_objects && !(prev && acpi_debug_objects)) {
		ACPI_SERIAL_BEGIN(acpi);
		AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE;
		ACPI_SERIAL_END(acpi);
	}
	return (0);
}
/*
 * Split the comma/whitespace-separated interface list in 'str' into
 * iface->data[] / iface->num.  Returns the number of entries found;
 * 0 means nothing was parsed and 'iface' was not initialized.
 *
 * The list is duplicated (kernel strdup(9), M_WAITOK) and every ','
 * in the copy is replaced by NUL so each entry is a standalone string.
 * Ownership: the copy and the data[] array must be released together
 * via acpi_free_interfaces().
 */
static int
acpi_parse_interfaces(char *str, struct acpi_interface *iface)
{
	char *p;
	size_t len;
	int i, j;

	/* Skip leading separators so the copy starts at the first entry. */
	p = str;
	while (isspace(*p) || *p == ',')
		p++;
	len = strlen(p);
	if (len == 0)
		return (0);
	p = strdup(p, M_TEMP);

	/* Turn the copy into a sequence of NUL-terminated entries. */
	/* NOTE(review): 'i' is int vs size_t 'len' -- fine for tunables. */
	for (i = 0; i < len; i++)
		if (p[i] == ',')
			p[i] = '\0';

	/* First pass: count non-empty entries. */
	i = j = 0;
	while (i < len)
		if (isspace(p[i]) || p[i] == '\0')
			i++;
		else {
			i += strlen(p + i) + 1;
			j++;
		}
	if (j == 0) {
		free(p, M_TEMP);
		return (0);
	}

	/* Second pass: record a pointer to the start of each entry. */
	iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
	iface->num = j;
	i = j = 0;
	while (i < len)
		if (isspace(p[i]) || p[i] == '\0')
			i++;
		else {
			iface->data[j] = p + i;
			i += strlen(p + i) + 1;
			j++;
		}
	return (j);
}
/*
 * Release what acpi_parse_interfaces() allocated.  data[0] points at
 * the start of the strdup'ed buffer holding every entry, so freeing
 * it releases all the strings at once; data[] itself follows.
 */
static void
acpi_free_interfaces(struct acpi_interface *iface)
{

	free(iface->data[0], M_TEMP);
	free(iface->data, M_TEMP);
}
/*
 * Apply the acpi_install_interface / acpi_remove_interface tunables:
 * add and remove _OSI feature strings in ACPICA's interface list,
 * logging each failure (always) and each success (bootverbose only).
 */
static void
acpi_reset_interfaces(device_t dev)
{
	struct acpi_interface list;
	ACPI_STATUS status;
	int i;

	if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
		for (i = 0; i < list.num; i++) {
			status = AcpiInstallInterface(list.data[i]);
			if (ACPI_FAILURE(status))
				device_printf(dev,
				    "failed to install _OSI(\"%s\"): %s\n",
				    list.data[i], AcpiFormatException(status));
			else if (bootverbose)
				device_printf(dev, "installed _OSI(\"%s\")\n",
				    list.data[i]);
		}
		acpi_free_interfaces(&list);
	}
	if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
		for (i = 0; i < list.num; i++) {
			status = AcpiRemoveInterface(list.data[i]);
			if (ACPI_FAILURE(status))
				device_printf(dev,
				    "failed to remove _OSI(\"%s\"): %s\n",
				    list.data[i], AcpiFormatException(status));
			else if (bootverbose)
				device_printf(dev, "removed _OSI(\"%s\")\n",
				    list.data[i]);
		}
		acpi_free_interfaces(&list);
	}
}
/*
 * power_pm(9) backend: translate a POWER_CMD_SUSPEND request into the
 * corresponding ACPI sleep state and enter it.  Returns EINVAL for
 * unknown commands/states and ENXIO if the sleep transition fails.
 */
static int
acpi_pm_func(u_long cmd, void *arg, ...)
{
	struct acpi_softc *sc;
	va_list ap;
	int acpi_state, state;

	if (cmd != POWER_CMD_SUSPEND)
		return (EINVAL);

	sc = (struct acpi_softc *)arg;
	if (sc == NULL)
		return (EINVAL);

	/* The requested power_pm sleep state is the first variadic arg. */
	va_start(ap, arg);
	state = va_arg(ap, int);
	va_end(ap);

	switch (state) {
	case POWER_SLEEP_STATE_STANDBY:
		acpi_state = sc->acpi_standby_sx;
		break;
	case POWER_SLEEP_STATE_SUSPEND:
		acpi_state = sc->acpi_suspend_sx;
		break;
	case POWER_SLEEP_STATE_HIBERNATE:
		acpi_state = ACPI_STATE_S4;
		break;
	default:
		return (EINVAL);
	}

	if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
		return (ENXIO);
	return (0);
}
/*
 * Register ACPI as the system power-management backend, but only
 * during boot and only if the acpi device is not disabled by hints.
 */
static void
acpi_pm_register(void *arg)
{

	if (!cold || resource_disabled("acpi", 0))
		return;

	power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
}
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);
Index: projects/clang360-import/sys/dev/fb/fbd.c
===================================================================
--- projects/clang360-import/sys/dev/fb/fbd.c (revision 277808)
+++ projects/clang360-import/sys/dev/fb/fbd.c (revision 277809)
@@ -1,375 +1,357 @@
/*-
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Aleksandr Rybalko under sponsorship from the
* FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/* Generic framebuffer */
/* TODO unlink from VT(9) */
/* TODO done normal /dev/fb methods */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/fbio.h>
#include <machine/bus.h>
#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include "fb_if.h"
/* Global list of every registered framebuffer. */
LIST_HEAD(fb_list_head_t, fb_list_entry) fb_list_head =
	LIST_HEAD_INITIALIZER(fb_list_head);

/* One entry per registered framebuffer: its info and /dev/fbN node. */
struct fb_list_entry {
	struct fb_info	*fb_info;	/* driver-supplied description */
	struct cdev	*fb_si;		/* /dev/fbN created in fb_init() */
	LIST_ENTRY(fb_list_entry) fb_list;
};

/* Per-device softc for the newbus fbd child. */
struct fbd_softc {
	device_t	sc_dev;
	struct fb_info	*sc_info;	/* parent's fb_info via FB_GETINFO */
};
static void fbd_evh_init(void *);
/* SI_ORDER_SECOND, just after EVENTHANDLERs initialized. */
SYSINIT(fbd_evh_init, SI_SUB_CONFIGURE, SI_ORDER_SECOND, fbd_evh_init, NULL);

/* Character-device methods backing /dev/fbN. */
static d_open_t fb_open;
static d_close_t fb_close;
static d_read_t fb_read;
static d_write_t fb_write;
static d_ioctl_t fb_ioctl;
static d_mmap_t fb_mmap;

static struct cdevsw fb_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,	/* serialized under Giant */
	.d_open =	fb_open,
	.d_close =	fb_close,
	.d_read =	fb_read,
	.d_write =	fb_write,
	.d_ioctl =	fb_ioctl,
	.d_mmap =	fb_mmap,
	.d_name =	"fb",
};

/* Next /dev/fbN unit number; only ever incremented. */
static int framebuffer_dev_unit = 0;
/* open(2) on /dev/fbN: no per-open state to set up. */
static int
fb_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
/* close(2) on /dev/fbN: nothing to tear down. */
static int
fb_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
/*
 * ioctl handler for /dev/fbN: implements the small subset of fbio(4)
 * queries needed by userland framebuffer consumers.
 */
static int
fb_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct fb_info *info;
	int error;

	error = 0;
	info = dev->si_drv1;

	switch (cmd) {
	case FBIOGTYPE:
		/*
		 * NOTE(review): copies the head of struct fb_info out as
		 * a struct fbtype -- assumes fb_info starts with the
		 * fbtype layout; confirm against sys/fbio.h.
		 */
		bcopy(info, (struct fbtype *)data, sizeof(struct fbtype));
		break;
	case FBIO_GETWINORG:	/* get frame buffer window origin */
		*(u_int *)data = 0;
		break;
	case FBIO_GETDISPSTART:	/* get display start address */
		((video_display_start_t *)data)->x = 0;
		((video_display_start_t *)data)->y = 0;
		break;
	case FBIO_GETLINEWIDTH:	/* get scan line width in bytes */
		*(u_int *)data = info->fb_stride;
		break;
	case FBIO_BLANK:	/* blank display */
		error = 0;	/* TODO */
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return (error);
}
/* read(2) on /dev/fbN: unimplemented, reports success with no data. */
static int
fb_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0); /* XXX nothing to read, yet */
}
/* write(2) on /dev/fbN: unimplemented, reports success, writes nothing. */
static int
fb_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0); /* XXX nothing written */
}
/*
 * Map a byte offset inside the framebuffer aperture to its physical
 * address.  ENODEV when mapping is disabled or there is no physical
 * aperture; EINVAL for out-of-range offsets.
 */
static int
fb_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
    vm_memattr_t *memattr)
{
	struct fb_info *fb;

	fb = dev->si_drv1;
	if ((fb->fb_flags & FB_FLAG_NOMMAP) != 0 || fb->fb_pbase == 0)
		return (ENODEV);
	if (offset >= fb->fb_size)
		return (EINVAL);
	*paddr = fb->fb_pbase + offset;
	return (0);
}
/*
 * Create the /dev/fbN node for a newly registered framebuffer and
 * cross-link it with its fb_info.  Always returns 0.
 * NOTE(review): the make_dev() result is used unchecked.
 */
static int
fb_init(struct fb_list_entry *entry, int unit)
{
	struct fb_info *info;

	info = entry->fb_info;
	entry->fb_si = make_dev(&fb_cdevsw, unit, UID_ROOT, GID_WHEEL,
	    0600, "fb%d", unit);
	entry->fb_si->si_drv1 = info;
	info->fb_cdev = entry->fb_si;

	return (0);
}
/*
 * Debugging helper: print the name and physical base address of every
 * registered framebuffer.  Returns ENOENT when none are registered.
 *
 * Fix: declare the parameter list as (void) -- the old empty parens
 * form is a deprecated non-prototype definition.
 */
int
fbd_list(void)
{
	struct fb_list_entry *entry;

	if (LIST_EMPTY(&fb_list_head))
		return (ENOENT);

	LIST_FOREACH(entry, &fb_list_head, fb_list) {
		printf("FB %s @%p\n", entry->fb_info->fb_name,
		    (void *)entry->fb_info->fb_pbase);
	}
	return (0);
}
/*
 * Look up the list entry for 'info'; NULL if it is not registered.
 *
 * Fix: plain LIST_FOREACH suffices since nothing is removed during
 * iteration -- the _SAFE variant and its unused temp pointer were
 * unnecessary.
 */
static struct fb_list_entry *
fbd_find(struct fb_info* info)
{
	struct fb_list_entry *entry;

	LIST_FOREACH(entry, &fb_list_head, fb_list) {
		if (entry->fb_info == info)
			return (entry);
	}
	return (NULL);
}
int
fbd_register(struct fb_info* info)
{
struct fb_list_entry *entry;
int err, first;
first = 0;
if (LIST_EMPTY(&fb_list_head))
first++;
entry = fbd_find(info);
if (entry != NULL) {
/* XXX Update framebuffer params */
return (0);
}
entry = malloc(sizeof(struct fb_list_entry), M_DEVBUF, M_WAITOK|M_ZERO);
entry->fb_info = info;
LIST_INSERT_HEAD(&fb_list_head, entry, fb_list);
err = fb_init(entry, framebuffer_dev_unit++);
if (err)
return (err);
if (first) {
err = vt_fb_attach(info);
if (err)
return (err);
}
return (0);
}
/*
 * Remove a framebuffer from the global list.  Returns ENOENT if it
 * was never registered.
 *
 * NOTE(review): the /dev/fbN cdev created in fb_init() is not
 * destroy_dev()'ed here and vt_fb is not detached -- confirm whether
 * callers handle that or the device node is leaked.
 */
int
fbd_unregister(struct fb_info* info)
{
	struct fb_list_entry *entry, *tmp;

	LIST_FOREACH_SAFE(entry, &fb_list_head, fb_list, tmp) {
		if (entry->fb_info == info) {
			LIST_REMOVE(entry, fb_list);
			free(entry, M_DEVBUF);
			return (0);
		}
	}
	return (ENOENT);
}
/* Eventhandler thunk: forward register_framebuffer to fbd_register(). */
static void
register_fb_wrap(void *arg, void *ptr)
{

	fbd_register((struct fb_info *)ptr);
}
/* Eventhandler thunk: forward unregister_framebuffer to fbd_unregister(). */
static void
unregister_fb_wrap(void *arg, void *ptr)
{

	fbd_unregister((struct fb_info *)ptr);
}
/*
 * Hook the (un)register_framebuffer eventhandlers so framebuffer
 * drivers can announce themselves without linking against fbd.
 */
static void
fbd_evh_init(void *ctx)
{

	EVENTHANDLER_REGISTER(register_framebuffer, register_fb_wrap, NULL,
	    EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(unregister_framebuffer, unregister_fb_wrap, NULL,
	    EVENTHANDLER_PRI_ANY);
}
/* Newbus methods. */

/* Probe: only attach when explicitly added by a parent bus. */
static int
fbd_probe(device_t dev)
{

	return (BUS_PROBE_NOWILDCARD);
}
/*
 * Attach: fetch the parent's fb_info through the FB newbus interface
 * and register it as a framebuffer.
 */
static int
fbd_attach(device_t dev)
{
	struct fbd_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_info = FB_GETINFO(device_get_parent(dev));
	if (sc->sc_info == NULL)
		return (ENXIO);
	err = fbd_register(sc->sc_info);

	return (err);
}
/* Detach: drop the framebuffer registration made at attach time. */
static int
fbd_detach(device_t dev)
{
	struct fbd_softc *sc;
	int err;

	sc = device_get_softc(dev);
	err = fbd_unregister(sc->sc_info);

	return (err);
}
-static int
-fbd_suspend(device_t dev)
-{
-
- vt_fb_suspend();
- return (bus_generic_suspend(dev));
-}
-
-static int
-fbd_resume(device_t dev)
-{
-
- vt_fb_resume();
- return (bus_generic_resume(dev));
-}
-
static device_method_t fbd_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, fbd_probe),
DEVMETHOD(device_attach, fbd_attach),
DEVMETHOD(device_detach, fbd_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, fbd_suspend),
- DEVMETHOD(device_resume, fbd_resume),
{ 0, 0 }
};
/* Driver glue for the generic framebuffer device. */
driver_t fbd_driver = {
	"fbd",
	fbd_methods,
	sizeof(struct fbd_softc)
};

devclass_t fbd_devclass;

/* fbd attaches as a child of both fb(4) and drm ("drmn") devices. */
DRIVER_MODULE(fbd, fb, fbd_driver, fbd_devclass, 0, 0);
DRIVER_MODULE(fbd, drmn, fbd_driver, fbd_devclass, 0, 0);
MODULE_VERSION(fbd, 1);
Index: projects/clang360-import/sys/dev/syscons/syscons.c
===================================================================
--- projects/clang360-import/sys/dev/syscons/syscons.c (revision 277808)
+++ projects/clang360-import/sys/dev/syscons/syscons.c (revision 277809)
@@ -1,3942 +1,3942 @@
/*-
* Copyright (c) 1992-1998 Søren Schmidt
* All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sascha Wildner <saw@online.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include "opt_syscons.h"
#include "opt_splash.h"
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/consio.h>
#include <sys/kdb.h>
#include <sys/eventhandler.h>
#include <sys/fbio.h>
#include <sys/kbio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/serial.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/tty.h>
#include <sys/power.h>
#include <machine/clock.h>
#if defined(__arm__) || defined(__mips__) || \
defined(__powerpc__) || defined(__sparc64__)
#include <machine/sc_machdep.h>
#else
#include <machine/pc/display.h>
#endif
#if defined( __i386__) || defined(__amd64__)
#include <machine/psl.h>
#include <machine/frame.h>
#endif
#include <machine/stdarg.h>
#include <dev/kbd/kbdreg.h>
#include <dev/fb/fbreg.h>
#include <dev/fb/splashreg.h>
#include <dev/syscons/syscons.h>
#define COLD 0
#define WARM 1
#define DEFAULT_BLANKTIME (5*60) /* 5 minutes */
#define MAX_BLANKTIME (7*24*60*60) /* 7 days!? */
#define KEYCODE_BS 0x0e /* "<-- Backspace" key, XXX */
/* NULL-safe version of "tty_opened()" */
#define tty_opened_ns(tp) ((tp) != NULL && tty_opened(tp))
/* Default text attributes (normal and reverse video) for a vty. */
typedef struct default_attr {
	int		std_color;		/* normal hardware color */
	int		rev_color;		/* reverse hardware color */
} default_attr;

static default_attr user_default = {
	SC_NORM_ATTR,
	SC_NORM_REV_ATTR,
};
static int sc_console_unit = -1;
static int sc_saver_keyb_only = 1;
static scr_stat *sc_console;
static struct consdev *sc_consptr;
static scr_stat main_console;
static struct tty *main_devs[MAXCONS];
static char init_done = COLD;
static int shutdown_in_progress = FALSE;
static int suspend_in_progress = FALSE;
static char sc_malloc = FALSE;
static int saver_mode = CONS_NO_SAVER; /* LKM/user saver */
static int run_scrn_saver = FALSE; /* should run the saver? */
static int enable_bell = TRUE; /* enable beeper */
#ifndef SC_DISABLE_REBOOT
static int enable_reboot = TRUE; /* enable keyboard reboot */
#endif
#ifndef SC_DISABLE_KDBKEY
static int enable_kdbkey = TRUE; /* enable keyboard debug */
#endif
static long scrn_blank_time = 0; /* screen saver timeout value */
#ifdef DEV_SPLASH
static int scrn_blanked; /* # of blanked screen */
static int sticky_splash = FALSE;
static void none_saver(sc_softc_t *sc, int blank) { }
static void (*current_saver)(sc_softc_t *, int) = none_saver;
#endif
#ifdef SC_NO_SUSPEND_VTYSWITCH
static int sc_no_suspend_vtswitch = 1;
#else
static int sc_no_suspend_vtswitch = 0;
#endif
static int sc_susp_scr;
static SYSCTL_NODE(_hw, OID_AUTO, syscons, CTLFLAG_RD, 0, "syscons");
static SYSCTL_NODE(_hw_syscons, OID_AUTO, saver, CTLFLAG_RD, 0, "saver");
SYSCTL_INT(_hw_syscons_saver, OID_AUTO, keybonly, CTLFLAG_RW,
&sc_saver_keyb_only, 0, "screen saver interrupted by input only");
SYSCTL_INT(_hw_syscons, OID_AUTO, bell, CTLFLAG_RW, &enable_bell,
0, "enable bell");
#ifndef SC_DISABLE_REBOOT
SYSCTL_INT(_hw_syscons, OID_AUTO, kbd_reboot, CTLFLAG_RW|CTLFLAG_SECURE, &enable_reboot,
0, "enable keyboard reboot");
#endif
#ifndef SC_DISABLE_KDBKEY
SYSCTL_INT(_hw_syscons, OID_AUTO, kbd_debug, CTLFLAG_RW|CTLFLAG_SECURE, &enable_kdbkey,
0, "enable keyboard debug");
#endif
SYSCTL_INT(_hw_syscons, OID_AUTO, sc_no_suspend_vtswitch, CTLFLAG_RWTUN,
&sc_no_suspend_vtswitch, 0, "Disable VT switch before suspend.");
#if !defined(SC_NO_FONT_LOADING) && defined(SC_DFLT_FONT)
#include "font.h"
#endif
tsw_ioctl_t *sc_user_ioctl;
static bios_values_t bios_value;
static int enable_panic_key;
SYSCTL_INT(_machdep, OID_AUTO, enable_panic_key, CTLFLAG_RW, &enable_panic_key,
0, "Enable panic via keypress specified in kbdmap(5)");
#define SC_CONSOLECTL 255
#define VTY_WCHAN(sc, vty) (&SC_DEV(sc, vty))
static int debugger;
/* prototypes */
static int sc_allocate_keyboard(sc_softc_t *sc, int unit);
static int scvidprobe(int unit, int flags, int cons);
static int sckbdprobe(int unit, int flags, int cons);
static void scmeminit(void *arg);
static int scdevtounit(struct tty *tp);
static kbd_callback_func_t sckbdevent;
static void scinit(int unit, int flags);
static scr_stat *sc_get_stat(struct tty *tp);
static void scterm(int unit, int flags);
static void scshutdown(void *, int);
static void scsuspend(void *);
static void scresume(void *);
static u_int scgetc(sc_softc_t *sc, u_int flags);
static void sc_puts(scr_stat *scp, u_char *buf, int len, int kernel);
#define SCGETC_CN 1
#define SCGETC_NONBLOCK 2
static void sccnupdate(scr_stat *scp);
static scr_stat *alloc_scp(sc_softc_t *sc, int vty);
static void init_scp(sc_softc_t *sc, int vty, scr_stat *scp);
static timeout_t scrn_timer;
static int and_region(int *s1, int *e1, int s2, int e2);
static void scrn_update(scr_stat *scp, int show_cursor);
#ifdef DEV_SPLASH
static int scsplash_callback(int event, void *arg);
static void scsplash_saver(sc_softc_t *sc, int show);
static int add_scrn_saver(void (*this_saver)(sc_softc_t *, int));
static int remove_scrn_saver(void (*this_saver)(sc_softc_t *, int));
static int set_scrn_saver_mode(scr_stat *scp, int mode, u_char *pal, int border);
static int restore_scrn_saver_mode(scr_stat *scp, int changemode);
static void stop_scrn_saver(sc_softc_t *sc, void (*saver)(sc_softc_t *, int));
static int wait_scrn_saver_stop(sc_softc_t *sc);
#define scsplash_stick(stick) (sticky_splash = (stick))
#else /* !DEV_SPLASH */
#define scsplash_stick(stick)
#endif /* DEV_SPLASH */
static int do_switch_scr(sc_softc_t *sc, int s);
static int vt_proc_alive(scr_stat *scp);
static int signal_vt_rel(scr_stat *scp);
static int signal_vt_acq(scr_stat *scp);
static int finish_vt_rel(scr_stat *scp, int release, int *s);
static int finish_vt_acq(scr_stat *scp);
static void exchange_scr(sc_softc_t *sc);
static void update_cursor_image(scr_stat *scp);
static void change_cursor_shape(scr_stat *scp, int flags, int base, int height);
static void update_font(scr_stat *);
static int save_kbd_state(scr_stat *scp);
static int update_kbd_state(scr_stat *scp, int state, int mask);
static int update_kbd_leds(scr_stat *scp, int which);
static timeout_t blink_screen;
static struct tty *sc_alloc_tty(int, int);
static cn_probe_t sc_cnprobe;
static cn_init_t sc_cninit;
static cn_term_t sc_cnterm;
static cn_getc_t sc_cngetc;
static cn_putc_t sc_cnputc;
static cn_grab_t sc_cngrab;
static cn_ungrab_t sc_cnungrab;
CONSOLE_DRIVER(sc);
static tsw_open_t sctty_open;
static tsw_close_t sctty_close;
static tsw_outwakeup_t sctty_outwakeup;
static tsw_ioctl_t sctty_ioctl;
static tsw_mmap_t sctty_mmap;
static struct ttydevsw sc_ttydevsw = {
.tsw_open = sctty_open,
.tsw_close = sctty_close,
.tsw_outwakeup = sctty_outwakeup,
.tsw_ioctl = sctty_ioctl,
.tsw_mmap = sctty_mmap,
};
static d_ioctl_t consolectl_ioctl;
static d_close_t consolectl_close;
static struct cdevsw consolectl_devsw = {
.d_version = D_VERSION,
.d_flags = D_NEEDGIANT | D_TRACKCLOSE,
.d_ioctl = consolectl_ioctl,
.d_close = consolectl_close,
.d_name = "consolectl",
};
/*
 * Probe a syscons unit: requires the sc vty backend to be enabled and
 * a video adapter to be present; a keyboard is probed but optional.
 */
int
sc_probe_unit(int unit, int flags)
{

	if (!vty_enabled(VTY_SC))
		return ENXIO;
	if (!scvidprobe(unit, flags, FALSE)) {
		if (bootverbose)
			printf("%s%d: no video adapter found.\n", SC_DRIVER_NAME, unit);
		return ENXIO;
	}

	/* syscons will be attached even when there is no keyboard */
	sckbdprobe(unit, flags, FALSE);
	return 0;
}
/* probe video adapters, return TRUE if found */
static int
scvidprobe(int unit, int flags, int cons)
{
/*
* Access the video adapter driver through the back door!
* Video adapter drivers need to be configured before syscons.
* However, when syscons is being probed as the low-level console,
* they have not been initialized yet. We force them to initialize
* themselves here. XXX
*/
vid_configure(cons ? VIO_PROBE_ONLY : 0);
return (vid_find_adapter("*", unit) >= 0);
}
/* probe the keyboard, return TRUE if found */
static int
sckbdprobe(int unit, int flags, int cons)
{
/* access the keyboard driver through the backdoor! */
kbd_configure(cons ? KB_CONF_PROBE_ONLY : 0);
return (kbd_find_keyboard("*", unit) >= 0);
}
/*
 * Return a human-readable adapter name; the monochrome variant is
 * used when the adapter lacks color support.  Unrecognized types fall
 * through to the "Unknown" sentinel at the end of the table.
 */
static char *
adapter_name(video_adapter_t *adp)
{
	static struct {
		int type;
		char *name[2];
	} names[] = {
		{ KD_MONO, { "MDA", "MDA" } },
		{ KD_HERCULES, { "Hercules", "Hercules" } },
		{ KD_CGA, { "CGA", "CGA" } },
		{ KD_EGA, { "EGA", "EGA (mono)" } },
		{ KD_VGA, { "VGA", "VGA (mono)" } },
		{ KD_PC98, { "PC-98x1", "PC-98x1" } },
		{ KD_TGA, { "TGA", "TGA" } },
		{ -1, { "Unknown", "Unknown" } },
	};
	int idx;

	/* Walk the table until the type matches or the sentinel is hit. */
	idx = 0;
	while (names[idx].type != -1 && names[idx].type != adp->va_type)
		idx++;
	return (names[idx].name[(adp->va_flags & V_ADP_COLOR) ? 0 : 1]);
}
/*
 * tty output wakeup: drain the tty's output queue onto the screen.
 * Output is suppressed while the vty is scroll-locked or while the
 * currently displayed screen is blinking.
 */
static void
sctty_outwakeup(struct tty *tp)
{
	size_t len;
	u_char buf[PCBURST];
	scr_stat *scp = sc_get_stat(tp);

	if (scp->status & SLKED ||
	    (scp == scp->sc->cur_scp && scp->sc->blink_in_progress))
		return;

	/* Pull characters from the discipline in PCBURST-sized chunks. */
	for (;;) {
		len = ttydisc_getc(tp, buf, sizeof buf);
		if (len == 0)
			break;
		sc_puts(scp, buf, len, 0);
	}
}
/*
 * Allocate a tty plus its sc_ttysoftc and create its /dev/vN node.
 * The scr_stat itself is allocated lazily on first open.
 */
static struct tty *
sc_alloc_tty(int index, int devnum)
{
	struct sc_ttysoftc *stc;
	struct tty *tp;

	/* Allocate TTY object and softc to store unit number. */
	stc = malloc(sizeof(struct sc_ttysoftc), M_DEVBUF, M_WAITOK);
	stc->st_index = index;
	stc->st_stat = NULL;
	tp = tty_alloc_mutex(&sc_ttydevsw, stc, &Giant);

	/* Create device node. */
	tty_makedev(tp, NULL, "v%r", devnum);

	return (tp);
}
#ifdef SC_PIXEL_MODE
/*
 * Switch the kernel console into a VESA graphics ("pixel text") mode
 * at attach time, honoring the hint.sc.N.vesa_mode tunable, or falling
 * back to the deepest available 800x600 mode.  Bails out silently if
 * no usable mode or font is available.
 */
static void
sc_set_vesa_mode(scr_stat *scp, sc_softc_t *sc, int unit)
{
	video_info_t info;
	u_char *font;
	int depth;
	int fontsize;
	int i;
	int vmode;

	/* Validate the hinted mode; treat anything unusable as "unset". */
	vmode = 0;
	(void)resource_int_value("sc", unit, "vesa_mode", &vmode);
	if (vmode < M_VESA_BASE || vmode > M_VESA_MODE_MAX ||
	    vidd_get_info(sc->adp, vmode, &info) != 0 ||
	    !sc_support_pixel_mode(&info))
		vmode = 0;

	/*
	 * If the mode is unset or unsupported, search for an available
	 * 800x600 graphics mode with the highest color depth.
	 */
	if (vmode == 0) {
		for (depth = 0, i = M_VESA_BASE; i <= M_VESA_MODE_MAX; i++)
			if (vidd_get_info(sc->adp, i, &info) == 0 &&
			    info.vi_width == 800 && info.vi_height == 600 &&
			    sc_support_pixel_mode(&info) &&
			    info.vi_depth > depth) {
				vmode = i;
				depth = info.vi_depth;
			}
		if (vmode == 0)
			return;
		vidd_get_info(sc->adp, vmode, &info);
	}

	/* Clamp the font height to a size we can actually load: 8/14/16. */
#if !defined(SC_NO_FONT_LOADING) && defined(SC_DFLT_FONT)
	fontsize = info.vi_cheight;
#else
	fontsize = scp->font_size;
#endif
	if (fontsize < 14)
		fontsize = 8;
	else if (fontsize >= 16)
		fontsize = 16;
	else
		fontsize = 14;

#ifndef SC_NO_FONT_LOADING
	switch (fontsize) {
	case 8:
		if ((sc->fonts_loaded & FONT_8) == 0)
			return;
		font = sc->font_8;
		break;
	case 14:
		if ((sc->fonts_loaded & FONT_14) == 0)
			return;
		font = sc->font_14;
		break;
	case 16:
		if ((sc->fonts_loaded & FONT_16) == 0)
			return;
		font = sc->font_16;
		break;
	}
#else
	font = NULL;
#endif
#ifdef DEV_SPLASH
	if ((sc->flags & SC_SPLASH_SCRN) != 0)
		splash_term(sc->adp);
#endif
#ifndef SC_NO_HISTORY
	/* Preserve the current screen contents in the history buffer. */
	if (scp->history != NULL) {
		sc_vtb_append(&scp->vtb, 0, scp->history,
		    scp->ypos * scp->xsize + scp->xpos);
		scp->history_pos = sc_vtb_tail(scp->history);
	}
#endif
	/* Program the mode and recompute the text geometry from it. */
	vidd_set_mode(sc->adp, vmode);
	scp->status |= (UNKNOWN_MODE | PIXEL_MODE | MOUSE_HIDDEN);
	scp->status &= ~(GRAPHICS_MODE | MOUSE_VISIBLE);
	scp->xpixel = info.vi_width;
	scp->ypixel = info.vi_height;
	scp->xsize = scp->xpixel / 8;
	scp->ysize = scp->ypixel / fontsize;
	scp->xpos = 0;
	scp->ypos = scp->ysize - 1;
	scp->xoff = scp->yoff = 0;
	scp->font = font;
	scp->font_size = fontsize;
	scp->font_width = 8;
	scp->start = scp->xsize * scp->ysize - 1;
	scp->end = 0;
	/*
	 * Park the cursor one past the last cell, i.e. off screen.
	 * Fix: the multiplier was xsize * xsize, which is only
	 * accidentally off-screen; the total cell count is
	 * xsize * ysize (cf. 'start' above).
	 */
	scp->cursor_pos = scp->cursor_oldpos = scp->xsize * scp->ysize;
	scp->mode = sc->initial_mode = vmode;
#ifndef __sparc64__
	sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize,
	    (void *)sc->adp->va_window, FALSE);
#endif
	sc_alloc_scr_buffer(scp, FALSE, FALSE);
	sc_init_emulator(scp, NULL);
#ifndef SC_NO_CUTPASTE
	sc_alloc_cut_buffer(scp, FALSE);
#endif
#ifndef SC_NO_HISTORY
	sc_alloc_history_buffer(scp, 0, 0, FALSE);
#endif
	sc_set_border(scp, scp->border);
	sc_set_cursor_image(scp);
	scp->status &= ~UNKNOWN_MODE;
#ifdef DEV_SPLASH
	if ((sc->flags & SC_SPLASH_SCRN) != 0)
		splash_init(sc->adp, scsplash_callback, sc);
#endif
}
#endif
int
sc_attach_unit(int unit, int flags)
{
sc_softc_t *sc;
scr_stat *scp;
struct cdev *dev;
int vc;
if (!vty_enabled(VTY_SC))
return ENXIO;
flags &= ~SC_KERNEL_CONSOLE;
if (sc_console_unit == unit) {
/*
* If this unit is being used as the system console, we need to
* adjust some variables and buffers before and after scinit().
*/
/* assert(sc_console != NULL) */
flags |= SC_KERNEL_CONSOLE;
scmeminit(NULL);
}
scinit(unit, flags);
sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE);
sc->config = flags;
callout_init(&sc->ctimeout, 0);
callout_init(&sc->cblink, 0);
scp = sc_get_stat(sc->dev[0]);
if (sc_console == NULL) /* sc_console_unit < 0 */
sc_console = scp;
#ifdef SC_PIXEL_MODE
if ((sc->config & SC_VESAMODE) != 0)
sc_set_vesa_mode(scp, sc, unit);
#endif /* SC_PIXEL_MODE */
/* initialize cursor */
if (!ISGRAPHSC(scp))
update_cursor_image(scp);
/* get screen update going */
scrn_timer(sc);
/* set up the keyboard */
(void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
update_kbd_state(scp, scp->status, LOCK_MASK);
printf("%s%d: %s <%d virtual consoles, flags=0x%x>\n",
SC_DRIVER_NAME, unit, adapter_name(sc->adp), sc->vtys, sc->config);
if (bootverbose) {
printf("%s%d:", SC_DRIVER_NAME, unit);
if (sc->adapter >= 0)
printf(" fb%d", sc->adapter);
if (sc->keyboard >= 0)
printf(", kbd%d", sc->keyboard);
if (scp->tsw)
printf(", terminal emulator: %s (%s)",
scp->tsw->te_name, scp->tsw->te_desc);
printf("\n");
}
/* Register suspend/resume/shutdown callbacks for the kernel console. */
if (sc_console_unit == unit) {
- EVENTHANDLER_REGISTER(power_suspend, scsuspend, NULL,
+ EVENTHANDLER_REGISTER(power_suspend_early, scsuspend, NULL,
EVENTHANDLER_PRI_ANY);
EVENTHANDLER_REGISTER(power_resume, scresume, NULL,
EVENTHANDLER_PRI_ANY);
EVENTHANDLER_REGISTER(shutdown_pre_sync, scshutdown, NULL,
SHUTDOWN_PRI_DEFAULT);
}
for (vc = 0; vc < sc->vtys; vc++) {
if (sc->dev[vc] == NULL) {
sc->dev[vc] = sc_alloc_tty(vc, vc + unit * MAXCONS);
if (vc == 0 && sc->dev == main_devs)
SC_STAT(sc->dev[0]) = &main_console;
}
/*
* The first vty already has struct tty and scr_stat initialized
* in scinit(). The other vtys will have these structs when
* first opened.
*/
}
dev = make_dev(&consolectl_devsw, 0, UID_ROOT, GID_WHEEL, 0600,
"consolectl");
dev->si_drv1 = sc->dev[0];
return 0;
}
/*
 * SYSINIT hook, run as soon as malloc(9) works: move the kernel
 * console's temporary (static) buffers into properly allocated ones.
 * Idempotent -- only the first call does any work.
 */
static void
scmeminit(void *arg)
{

	if (!vty_enabled(VTY_SC))
		return;
	if (sc_malloc)
		return;

	sc_malloc = TRUE;

	/*
	 * As soon as malloc() becomes functional, we had better allocate
	 * various buffers for the kernel console.
	 */
	if (sc_console_unit < 0)	/* sc_console == NULL */
		return;

	/* copy the temporary buffer to the final buffer */
	sc_alloc_scr_buffer(sc_console, FALSE, FALSE);

#ifndef SC_NO_CUTPASTE
	sc_alloc_cut_buffer(sc_console, FALSE);
#endif

#ifndef SC_NO_HISTORY
	/* initialize history buffer & pointers */
	sc_alloc_history_buffer(sc_console, 0, 0, FALSE);
#endif
}

/* XXX */
SYSINIT(sc_mem, SI_SUB_KMEM, SI_ORDER_ANY, scmeminit, NULL);
/*
 * Map a tty back to its syscons unit number: the consolectl device
 * belongs to the kernel console's unit (-1 when there is none), and a
 * regular vty N belongs to unit N / MAXCONS.
 */
static int
scdevtounit(struct tty *tp)
{
	int vty;

	vty = SC_VTY(tp);
	if (vty == SC_CONSOLECTL)
		return (sc_console != NULL ? sc_console->sc->unit : -1);
	if (vty < 0 || vty >= MAXCONS * sc_max_unit())
		return (-1);
	return (vty / MAXCONS);
}
/*
 * tty open: bind the vty to its softc, seed VERASE from the keyboard's
 * backspace mapping, allocate the scr_stat on first open, and default
 * the window size to the screen dimensions.
 */
static int
sctty_open(struct tty *tp)
{
	int unit = scdevtounit(tp);
	sc_softc_t *sc;
	scr_stat *scp;
#ifndef __sparc64__
	keyarg_t key;
#endif

	DPRINTF(5, ("scopen: dev:%s, unit:%d, vty:%d\n",
	    devtoname(tp->t_dev), unit, SC_VTY(tp)));

	sc = sc_get_softc(unit, (sc_console_unit == unit) ? SC_KERNEL_CONSOLE : 0);
	if (sc == NULL)
		return ENXIO;

	if (!tty_opened(tp)) {
		/* Use the current setting of the <-- key as default VERASE. */
		/* If the Delete key is preferable, an stty is necessary */
#ifndef __sparc64__
		if (sc->kbd != NULL) {
			key.keynum = KEYCODE_BS;
			(void)kbdd_ioctl(sc->kbd, GIO_KEYMAPENT, (caddr_t)&key);
			tp->t_termios.c_cc[VERASE] = key.key.map[0];
		}
#endif
	}

	scp = sc_get_stat(tp);
	if (scp == NULL) {
		/* First open of this vty: allocate and set up its state. */
		scp = SC_STAT(tp) = alloc_scp(sc, SC_VTY(tp));
		if (ISGRAPHSC(scp))
			sc_set_pixel_mode(scp, NULL, 0, 0, 16, 8);
	}
	if (!tp->t_winsize.ws_col && !tp->t_winsize.ws_row) {
		tp->t_winsize.ws_col = scp->xsize;
		tp->t_winsize.ws_row = scp->ysize;
	}
	return (0);
}
/*
 * TTY close hook.  For regular vtys (not consolectl) this forcibly
 * completes any VT switch that was in progress, resets the vty back
 * to VT_AUTO mode, and restores the keyboard to K_XLATE so a dying
 * X server or similar cannot leave the console in raw mode.
 */
static void
sctty_close(struct tty *tp)
{
	scr_stat *scp;
	int s;

	if (SC_VTY(tp) != SC_CONSOLECTL) {
		scp = sc_get_stat(tp);
		/* were we in the middle of the VT switching process? */
		DPRINTF(5, ("sc%d: scclose(), ", scp->sc->unit));
		s = spltty();
		if ((scp == scp->sc->cur_scp) && (scp->sc->unit == sc_console_unit))
			cnavailable(sc_consptr, TRUE);
		if (finish_vt_rel(scp, TRUE, &s) == 0)	/* force release */
			DPRINTF(5, ("reset WAIT_REL, "));
		if (finish_vt_acq(scp) == 0)		/* force acknowledge */
			DPRINTF(5, ("reset WAIT_ACQ, "));
#ifdef not_yet_done
		if (scp == &main_console) {
			scp->pid = 0;
			scp->proc = NULL;
			scp->smode.mode = VT_AUTO;
		}
		else {
			/* tear down per-vty state entirely (not enabled) */
			sc_vtb_destroy(&scp->vtb);
#ifndef __sparc64__
			sc_vtb_destroy(&scp->scr);
#endif
			sc_free_history_buffer(scp, scp->ysize);
			SC_STAT(tp) = NULL;
			free(scp, M_DEVBUF);
		}
#else
		scp->pid = 0;
		scp->proc = NULL;
		scp->smode.mode = VT_AUTO;
#endif
		scp->kbd_mode = K_XLATE;
		if (scp == scp->sc->cur_scp)
			(void)kbdd_ioctl(scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
		DPRINTF(5, ("done.\n"));
	}
}
#if 0 /* XXX mpsafetty: fix screensaver. What about outwakeup? */
/*
 * Old read hook: any read on a vty counted as user activity and poked
 * the screen saver, unless keyboard-only wakeup was configured.
 * Disabled pending the mpsafetty screen saver rework (see XXX above).
 */
static int
scread(struct cdev *dev, struct uio *uio, int flag)
{
	if (!sc_saver_keyb_only)
		sc_touch_scrn_saver();
	return ttyread(dev, uio, flag);
}
#endif
/*
 * Keyboard event callback, registered with the kbd layer (arg is the
 * softc).  Runs under Giant.  On KBDIO_KEYINPUT it drains all pending
 * keys, offers each to the terminal emulator first, and otherwise
 * injects it into the current vty's tty discipline (handling function
 * keys, meta-prefix, and backtab expansion).  On KBDIO_UNLOADING it
 * detaches from the disappearing keyboard.
 */
static int
sckbdevent(keyboard_t *thiskbd, int event, void *arg)
{
	sc_softc_t *sc;
	struct tty *cur_tty;
	int c, error = 0;
	size_t len;
	const u_char *cp;

	sc = (sc_softc_t *)arg;
	/* assert(thiskbd == sc->kbd) */

	mtx_lock(&Giant);

	switch (event) {
	case KBDIO_KEYINPUT:
		break;
	case KBDIO_UNLOADING:
		/* the keyboard driver is going away; drop our reference */
		sc->kbd = NULL;
		sc->keyboard = -1;
		kbd_release(thiskbd, (void *)&sc->keyboard);
		goto done;
	default:
		error = EINVAL;
		goto done;
	}

	/*
	 * Loop while there is still input to get from the keyboard.
	 * I don't think this is nessesary, and it doesn't fix
	 * the Xaccel-2.1 keyboard hang, but it can't hurt.		XXX
	 */
	while ((c = scgetc(sc, SCGETC_NONBLOCK)) != NOKEY) {

		cur_tty = SC_DEV(sc, sc->cur_scp->index);
		if (!tty_opened_ns(cur_tty))
			continue;

		/* give the terminal emulator first shot at the key */
		if ((*sc->cur_scp->tsw->te_input)(sc->cur_scp, c, cur_tty))
			continue;

		switch (KEYFLAGS(c)) {
		case 0x0000: /* normal key */
			ttydisc_rint(cur_tty, KEYCHAR(c), 0);
			break;
		case FKEY:  /* function key, return string */
			/* emulator-provided string takes precedence */
			cp = (*sc->cur_scp->tsw->te_fkeystr)(sc->cur_scp, c);
			if (cp != NULL) {
			    ttydisc_rint_simple(cur_tty, cp, strlen(cp));
			    break;
			}
			cp = kbdd_get_fkeystr(thiskbd, KEYCHAR(c), &len);
			if (cp != NULL)
			    ttydisc_rint_simple(cur_tty, cp, len);
			break;
		case MKEY:  /* meta is active, prepend ESC */
			ttydisc_rint(cur_tty, 0x1b, 0);
			ttydisc_rint(cur_tty, KEYCHAR(c), 0);
			break;
		case BKEY:  /* backtab fixed sequence (esc [ Z) */
			ttydisc_rint_simple(cur_tty, "\x1B[Z", 3);
			break;
		}

		ttydisc_rint_done(cur_tty);
	}

	/* typing hides the mouse pointer until it is moved again */
	sc->cur_scp->status |= MOUSE_HIDDEN;

done:
	mtx_unlock(&Giant);
	return (error);
}
/*
 * Main tty ioctl handler for syscons.  Each request is offered, in
 * order, to: the pluggable user ioctl hook, the video layer, the
 * history layer, the sysmouse layer, and the vty's terminal emulator.
 * Whatever they decline (ENOIOCTL) falls through to the big switch
 * below, covering console hardware state, the screen saver, VT
 * (screen switcher) operations, keyboard control, and font loading.
 * Returns ENOIOCTL for commands nobody recognizes.
 */
static int
sctty_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
{
	int error;
	int i;
	sc_softc_t *sc;
	scr_stat *scp;
	int s;
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	int ival;
#endif

	/* If there is a user_ioctl function call that first */
	if (sc_user_ioctl) {
		error = (*sc_user_ioctl)(tp, cmd, data, td);
		if (error != ENOIOCTL)
			return error;
	}

	error = sc_vid_ioctl(tp, cmd, data, td);
	if (error != ENOIOCTL)
		return error;

#ifndef SC_NO_HISTORY
	error = sc_hist_ioctl(tp, cmd, data, td);
	if (error != ENOIOCTL)
		return error;
#endif

#ifndef SC_NO_SYSMOUSE
	error = sc_mouse_ioctl(tp, cmd, data, td);
	if (error != ENOIOCTL)
		return error;
#endif

	scp = sc_get_stat(tp);
	/* assert(scp != NULL) */
	/* scp is sc_console, if SC_VTY(dev) == SC_CONSOLECTL. */
	sc = scp->sc;

	if (scp->tsw) {
		error = (*scp->tsw->te_ioctl)(scp, tp, cmd, data, td);
		if (error != ENOIOCTL)
			return error;
	}

	switch (cmd) {  		/* process console hardware related ioctl's */

	case GIO_ATTR:      	/* get current attributes */
		/* this ioctl is not processed here, but in the terminal emulator */
		return ENOTTY;

	case GIO_COLOR:     	/* is this a color console ? */
		*(int *)data = (sc->adp->va_flags & V_ADP_COLOR) ? 1 : 0;
		return 0;

	case CONS_BLANKTIME:    /* set screen saver timeout (0 = no saver) */
		if (*(int *)data < 0 || *(int *)data > MAX_BLANKTIME)
			return EINVAL;
		s = spltty();
		scrn_blank_time = *(int *)data;
		run_scrn_saver = (scrn_blank_time != 0);
		splx(s);
		return 0;

	case CONS_CURSORTYPE:   /* set cursor type (obsolete) */
		s = spltty();
		*(int *)data &= CONS_CURSOR_ATTRS;
		sc_change_cursor_shape(scp, *(int *)data, -1, -1);
		splx(s);
		return 0;

	case CONS_GETCURSORSHAPE:   /* get cursor shape (new interface) */
		if (((int *)data)[0] & CONS_LOCAL_CURSOR) {
			((int *)data)[0] = scp->curr_curs_attr.flags;
			((int *)data)[1] = scp->curr_curs_attr.base;
			((int *)data)[2] = scp->curr_curs_attr.height;
		} else {
			((int *)data)[0] = sc->curs_attr.flags;
			((int *)data)[1] = sc->curs_attr.base;
			((int *)data)[2] = sc->curs_attr.height;
		}
		return 0;

	case CONS_SETCURSORSHAPE:   /* set cursor shape (new interface) */
		s = spltty();
		sc_change_cursor_shape(scp, ((int *)data)[0],
		    ((int *)data)[1], ((int *)data)[2]);
		splx(s);
		return 0;

	case CONS_BELLTYPE: 	/* set bell type sound/visual */
		if ((*(int *)data) & CONS_VISUAL_BELL)
			sc->flags |= SC_VISUAL_BELL;
		else
			sc->flags &= ~SC_VISUAL_BELL;
		if ((*(int *)data) & CONS_QUIET_BELL)
			sc->flags |= SC_QUIET_BELL;
		else
			sc->flags &= ~SC_QUIET_BELL;
		return 0;

	case CONS_GETINFO:  	/* get current (virtual) console info */
	{
		vid_info_t *ptr = (vid_info_t*)data;
		if (ptr->size == sizeof(struct vid_info)) {
			ptr->m_num = sc->cur_scp->index;
			ptr->font_size = scp->font_size;
			ptr->mv_col = scp->xpos;
			ptr->mv_row = scp->ypos;
			ptr->mv_csz = scp->xsize;
			ptr->mv_rsz = scp->ysize;
			ptr->mv_hsz = (scp->history != NULL) ? scp->history->vtb_rows : 0;
			/*
			 * The following fields are filled by the terminal emulator. XXX
			 *
			 * ptr->mv_norm.fore
			 * ptr->mv_norm.back
			 * ptr->mv_rev.fore
			 * ptr->mv_rev.back
			 */
			ptr->mv_grfc.fore = 0;      /* not supported */
			ptr->mv_grfc.back = 0;      /* not supported */
			ptr->mv_ovscan = scp->border;
			if (scp == sc->cur_scp)
				save_kbd_state(scp);
			ptr->mk_keylock = scp->status & LOCK_MASK;
			return 0;
		}
		return EINVAL;
	}

	case CONS_GETVERS:  	/* get version number */
		*(int*)data = 0x200;    /* version 2.0 */
		return 0;

	case CONS_IDLE:		/* see if the screen has been idle */
		/*
		 * When the screen is in the GRAPHICS_MODE or UNKNOWN_MODE,
		 * the user process may have been writing something on the
		 * screen and syscons is not aware of it. Declare the screen
		 * is NOT idle if it is in one of these modes. But there is
		 * an exception to it; if a screen saver is running in the 
		 * graphics mode in the current screen, we should say that the
		 * screen has been idle.
		 */
		*(int *)data = (sc->flags & SC_SCRN_IDLE)
			       && (!ISGRAPHSC(sc->cur_scp)
				   || (sc->cur_scp->status & SAVER_RUNNING));
		return 0;

	case CONS_SAVERMODE:	/* set saver mode */
		switch(*(int *)data) {
		case CONS_NO_SAVER:
		case CONS_USR_SAVER:
			/* if a LKM screen saver is running, stop it first. */
			scsplash_stick(FALSE);
			saver_mode = *(int *)data;
			s = spltty();
#ifdef DEV_SPLASH
			if ((error = wait_scrn_saver_stop(NULL))) {
				splx(s);
				return error;
			}
#endif
			run_scrn_saver = TRUE;
			if (saver_mode == CONS_USR_SAVER)
				scp->status |= SAVER_RUNNING;
			else
				scp->status &= ~SAVER_RUNNING;
			scsplash_stick(TRUE);
			splx(s);
			break;
		case CONS_LKM_SAVER:
			s = spltty();
			if ((saver_mode == CONS_USR_SAVER) && (scp->status & SAVER_RUNNING))
				scp->status &= ~SAVER_RUNNING;
			saver_mode = *(int *)data;
			splx(s);
			break;
		default:
			return EINVAL;
		}
		return 0;

	case CONS_SAVERSTART:	/* immediately start/stop the screen saver */
		/*
		 * Note that this ioctl does not guarantee the screen saver 
		 * actually starts or stops. It merely attempts to do so...
		 */
		s = spltty();
		run_scrn_saver = (*(int *)data != 0);
		if (run_scrn_saver)
			sc->scrn_time_stamp -= scrn_blank_time;
		splx(s);
		return 0;

	case CONS_SCRSHOT:	/* get a screen shot */
	{
		int retval, hist_rsz;
		size_t lsize, csize;
		vm_offset_t frbp, hstp;
		unsigned lnum;
		scrshot_t *ptr = (scrshot_t *)data;
		void *outp = ptr->buf;

		/* reject negative coordinates before any unsigned math */
		if (ptr->x < 0 || ptr->y < 0 || ptr->xsize < 0 || ptr->ysize < 0)
			return EINVAL;
		s = spltty();
		if (ISGRAPHSC(scp)) {
			splx(s);
			return EOPNOTSUPP;
		}
		hist_rsz = (scp->history != NULL) ? scp->history->vtb_rows : 0;
		if (((u_int)ptr->x + ptr->xsize) > scp->xsize ||
		    ((u_int)ptr->y + ptr->ysize) > (scp->ysize + hist_rsz)) {
			splx(s);
			return EINVAL;
		}

		lsize = scp->xsize * sizeof(u_int16_t);
		csize = ptr->xsize * sizeof(u_int16_t);
		/* Pointer to the last line of framebuffer */
		frbp = scp->vtb.vtb_buffer + scp->ysize * lsize + ptr->x *
		       sizeof(u_int16_t);
		/* Pointer to the last line of target buffer */
		outp = (char *)outp + ptr->ysize * csize;
		/* Pointer to the last line of history buffer */
		if (scp->history != NULL)
			hstp = scp->history->vtb_buffer + sc_vtb_tail(scp->history) *
			    sizeof(u_int16_t) + ptr->x * sizeof(u_int16_t);
		else
			hstp = 0;

		retval = 0;
		/* copy bottom-up, falling back into the history ring buffer */
		for (lnum = 0; lnum < (ptr->y + ptr->ysize); lnum++) {
			if (lnum < scp->ysize) {
				frbp -= lsize;
			} else {
				hstp -= lsize;
				if (hstp < scp->history->vtb_buffer)
					hstp += scp->history->vtb_rows * lsize;
				frbp = hstp;
			}
			if (lnum < ptr->y)
				continue;
			outp = (char *)outp - csize;
			retval = copyout((void *)frbp, outp, csize);
			if (retval != 0)
				break;
		}
		splx(s);
		return retval;
	}

	case VT_SETMODE:    	/* set screen switcher mode */
	{
		struct vt_mode *mode;
		struct proc *p1;

		mode = (struct vt_mode *)data;
		DPRINTF(5, ("%s%d: VT_SETMODE ", SC_DRIVER_NAME, sc->unit));
		if (scp->smode.mode == VT_PROCESS) {
			/* only the owning process may change a VT_PROCESS vty */
			p1 = pfind(scp->pid);
			if (scp->proc == p1 && scp->proc != td->td_proc) {
				if (p1)
					PROC_UNLOCK(p1);
				DPRINTF(5, ("error EPERM\n"));
				return EPERM;
			}
			if (p1)
				PROC_UNLOCK(p1);
		}
		s = spltty();
		if (mode->mode == VT_AUTO) {
			scp->smode.mode = VT_AUTO;
			scp->proc = NULL;
			scp->pid = 0;
			DPRINTF(5, ("VT_AUTO, "));
			if ((scp == sc->cur_scp) && (sc->unit == sc_console_unit))
				cnavailable(sc_consptr, TRUE);
			/* were we in the middle of the vty switching process? */
			if (finish_vt_rel(scp, TRUE, &s) == 0)
				DPRINTF(5, ("reset WAIT_REL, "));
			if (finish_vt_acq(scp) == 0)
				DPRINTF(5, ("reset WAIT_ACQ, "));
		} else {
			if (!ISSIGVALID(mode->relsig) || !ISSIGVALID(mode->acqsig)
			    || !ISSIGVALID(mode->frsig)) {
				splx(s);
				DPRINTF(5, ("error EINVAL\n"));
				return EINVAL;
			}
			DPRINTF(5, ("VT_PROCESS %d, ", td->td_proc->p_pid));
			bcopy(data, &scp->smode, sizeof(struct vt_mode));
			scp->proc = td->td_proc;
			scp->pid = scp->proc->p_pid;
			if ((scp == sc->cur_scp) && (sc->unit == sc_console_unit))
				cnavailable(sc_consptr, FALSE);
		}
		splx(s);
		DPRINTF(5, ("\n"));
		return 0;
	}

	case VT_GETMODE:    	/* get screen switcher mode */
		bcopy(&scp->smode, data, sizeof(struct vt_mode));
		return 0;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('v', 4):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case VT_RELDISP:    	/* screen switcher ioctl */
		s = spltty();
		/*
		 * This must be the current vty which is in the VT_PROCESS
		 * switching mode...
		 */
		if ((scp != sc->cur_scp) || (scp->smode.mode != VT_PROCESS)) {
			splx(s);
			return EINVAL;
		}
		/* ...and this process is controlling it. */
		if (scp->proc != td->td_proc) {
			splx(s);
			return EPERM;
		}
		error = EINVAL;
		switch(*(int *)data) {
		case VT_FALSE:  	/* user refuses to release screen, abort */
			if ((error = finish_vt_rel(scp, FALSE, &s)) == 0)
				DPRINTF(5, ("%s%d: VT_FALSE\n", SC_DRIVER_NAME, sc->unit));
			break;
		case VT_TRUE:   	/* user has released screen, go on */
			if ((error = finish_vt_rel(scp, TRUE, &s)) == 0)
				DPRINTF(5, ("%s%d: VT_TRUE\n", SC_DRIVER_NAME, sc->unit));
			break;
		case VT_ACKACQ: 	/* acquire acknowledged, switch completed */
			if ((error = finish_vt_acq(scp)) == 0)
				DPRINTF(5, ("%s%d: VT_ACKACQ\n", SC_DRIVER_NAME, sc->unit));
			break;
		default:
			break;
		}
		splx(s);
		return error;

	case VT_OPENQRY:    	/* return free virtual console */
		for (i = sc->first_vty; i < sc->first_vty + sc->vtys; i++) {
			tp = SC_DEV(sc, i);
			if (!tty_opened_ns(tp)) {
				*(int *)data = i + 1;
				return 0;
			}
		}
		return EINVAL;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('v', 5):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case VT_ACTIVATE:   	/* switch to screen *data */
		/* *data == 0 means "this vty"; numbering is 1-based */
		i = (*(int *)data == 0) ? scp->index : (*(int *)data - 1);
		s = spltty();
		error = sc_clean_up(sc->cur_scp);
		splx(s);
		if (error)
			return error;
		error = sc_switch_scr(sc, i);
		return (error);

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('v', 6):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case VT_WAITACTIVE: 	/* wait for switch to occur */
		i = (*(int *)data == 0) ? scp->index : (*(int *)data - 1);
		if ((i < sc->first_vty) || (i >= sc->first_vty + sc->vtys))
			return EINVAL;
		if (i == sc->cur_scp->index)
			return 0;
		error = tsleep(VTY_WCHAN(sc, i), (PZERO + 1) | PCATCH, "waitvt", 0);
		return error;

	case VT_GETACTIVE:	/* get active vty # */
		*(int *)data = sc->cur_scp->index + 1;
		return 0;

	case VT_GETINDEX:	/* get this vty # */
		*(int *)data = scp->index + 1;
		return 0;

	case VT_LOCKSWITCH:	/* prevent vty switching */
		if ((*(int *)data) & 0x01)
			sc->flags |= SC_SCRN_VTYLOCK;
		else
			sc->flags &= ~SC_SCRN_VTYLOCK;
		return 0;

	case KDENABIO:      	/* allow io operations */
		error = priv_check(td, PRIV_IO);
		if (error != 0)
			return error;
		error = securelevel_gt(td->td_ucred, 0);
		if (error != 0)
			return error;
#ifdef __i386__
		td->td_frame->tf_eflags |= PSL_IOPL;
#elif defined(__amd64__)
		td->td_frame->tf_rflags |= PSL_IOPL;
#endif
		return 0;

	case KDDISABIO:     	/* disallow io operations (default) */
#ifdef __i386__
		td->td_frame->tf_eflags &= ~PSL_IOPL;
#elif defined(__amd64__)
		td->td_frame->tf_rflags &= ~PSL_IOPL;
#endif
		return 0;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 20):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KDSKBSTATE:    	/* set keyboard state (locks) */
		if (*(int *)data & ~LOCK_MASK)
			return EINVAL;
		scp->status &= ~LOCK_MASK;
		scp->status |= *(int *)data;
		if (scp == sc->cur_scp)
			update_kbd_state(scp, scp->status, LOCK_MASK);
		return 0;

	case KDGKBSTATE:    	/* get keyboard state (locks) */
		if (scp == sc->cur_scp)
			save_kbd_state(scp);
		*(int *)data = scp->status & LOCK_MASK;
		return 0;

	case KDGETREPEAT:      	/* get keyboard repeat & delay rates */
	case KDSETREPEAT:      	/* set keyboard repeat & delay rates (new) */
		error = kbdd_ioctl(sc->kbd, cmd, data);
		if (error == ENOIOCTL)
			error = ENODEV;
		return error;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 67):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KDSETRAD:      	/* set keyboard repeat & delay rates (old) */
		if (*(int *)data & ~0x7f)
			return EINVAL;
		error = kbdd_ioctl(sc->kbd, KDSETRAD, data);
		if (error == ENOIOCTL)
			error = ENODEV;
		return error;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 7):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KDSKBMODE:     	/* set keyboard mode */
		switch (*(int *)data) {
		case K_XLATE:   	/* switch to XLT ascii mode */
		case K_RAW: 	/* switch to RAW scancode mode */
		case K_CODE: 	/* switch to CODE mode */
			scp->kbd_mode = *(int *)data;
			if (scp == sc->cur_scp)
				(void)kbdd_ioctl(sc->kbd, KDSKBMODE, data);
			return 0;
		default:
			return EINVAL;
		}
		/* NOT REACHED */

	case KDGKBMODE:     	/* get keyboard mode */
		*(int *)data = scp->kbd_mode;
		return 0;

	case KDGKBINFO:
		error = kbdd_ioctl(sc->kbd, cmd, data);
		if (error == ENOIOCTL)
			error = ENODEV;
		return error;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 8):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KDMKTONE:      	/* sound the bell */
		/* high word: duration in ms; low word: pitch */
		if (*(int*)data)
			sc_bell(scp, (*(int*)data)&0xffff,
				(((*(int*)data)>>16)&0xffff)*hz/1000);
		else
			sc_bell(scp, scp->bell_pitch, scp->bell_duration);
		return 0;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 63):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KIOCSOUND:     	/* make tone (*data) hz */
		if (scp == sc->cur_scp) {
			if (*(int *)data)
				return sc_tone(*(int *)data);
			else
				return sc_tone(0);
		}
		return 0;

	case KDGKBTYPE:     	/* get keyboard type */
		error = kbdd_ioctl(sc->kbd, cmd, data);
		if (error == ENOIOCTL) {
			/* always return something? XXX */
			*(int *)data = 0;
		}
		return 0;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('K', 66):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case KDSETLED:      	/* set keyboard LED status */
		if (*(int *)data & ~LED_MASK)	/* FIXME: LOCK_MASK? */
			return EINVAL;
		scp->status &= ~LED_MASK;
		scp->status |= *(int *)data;
		if (scp == sc->cur_scp)
			update_kbd_leds(scp, scp->status);
		return 0;

	case KDGETLED:      	/* get keyboard LED status */
		if (scp == sc->cur_scp)
			save_kbd_state(scp);
		*(int *)data = scp->status & LED_MASK;
		return 0;

	case KBADDKBD:		/* add/remove keyboard to/from mux */
	case KBRELKBD:
		error = kbdd_ioctl(sc->kbd, cmd, data);
		if (error == ENOIOCTL)
			error = ENODEV;
		return error;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	case _IO('c', 110):
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case CONS_SETKBD: 		/* set the new keyboard */
	{
		keyboard_t *newkbd;

		s = spltty();
		newkbd = kbd_get_keyboard(*(int *)data);
		if (newkbd == NULL) {
			splx(s);
			return EINVAL;
		}
		error = 0;
		if (sc->kbd != newkbd) {
			i = kbd_allocate(newkbd->kb_name, newkbd->kb_unit,
					 (void *)&sc->keyboard, sckbdevent, sc);
			/* i == newkbd->kb_index */
			if (i >= 0) {
				if (sc->kbd != NULL) {
					save_kbd_state(sc->cur_scp);
					kbd_release(sc->kbd, (void *)&sc->keyboard);
				}
				sc->kbd = kbd_get_keyboard(i); /* sc->kbd == newkbd */
				sc->keyboard = i;
				/* carry the current mode and lock state over */
				(void)kbdd_ioctl(sc->kbd, KDSKBMODE,
					  (caddr_t)&sc->cur_scp->kbd_mode);
				update_kbd_state(sc->cur_scp, sc->cur_scp->status,
				    LOCK_MASK);
			} else {
				error = EPERM;	/* XXX */
			}
		}
		splx(s);
		return error;
	}

	case CONS_RELKBD: 		/* release the current keyboard */
		s = spltty();
		error = 0;
		if (sc->kbd != NULL) {
			save_kbd_state(sc->cur_scp);
			error = kbd_release(sc->kbd, (void *)&sc->keyboard);
			if (error == 0) {
				sc->kbd = NULL;
				sc->keyboard = -1;
			}
		}
		splx(s);
		return error;

	case CONS_GETTERM:	/* get the current terminal emulator info */
	{
		sc_term_sw_t *sw;

		if (((term_info_t *)data)->ti_index == 0) {
			sw = scp->tsw;
		} else {
			sw = sc_term_match_by_number(((term_info_t *)data)->ti_index);
		}

		if (sw != NULL) {
			strncpy(((term_info_t *)data)->ti_name, sw->te_name, 
				sizeof(((term_info_t *)data)->ti_name));
			strncpy(((term_info_t *)data)->ti_desc, sw->te_desc, 
				sizeof(((term_info_t *)data)->ti_desc));
			((term_info_t *)data)->ti_flags = 0;
			return 0;
		} else {
			((term_info_t *)data)->ti_name[0] = '\0';
			((term_info_t *)data)->ti_desc[0] = '\0';
			((term_info_t *)data)->ti_flags = 0;
			return EINVAL;
		}
	}

	case CONS_SETTERM:	/* set the current terminal emulator */
		s = spltty();
		error = sc_init_emulator(scp, ((term_info_t *)data)->ti_name);
		/* FIXME: what if scp == sc_console! XXX */
		splx(s);
		return error;

	case GIO_SCRNMAP:   	/* get output translation table */
		bcopy(&sc->scr_map, data, sizeof(sc->scr_map));
		return 0;

	case PIO_SCRNMAP:   	/* set output translation table */
		bcopy(data, &sc->scr_map, sizeof(sc->scr_map));
		for (i=0; i<sizeof(sc->scr_map); i++) {
			/* rebuild the reverse map alongside the forward map */
			sc->scr_rmap[sc->scr_map[i]] = i;
		}
		return 0;

	case GIO_KEYMAP:		/* get keyboard translation table */
	case PIO_KEYMAP:		/* set keyboard translation table */
	case OGIO_KEYMAP:		/* get keyboard translation table (compat) */
	case OPIO_KEYMAP:		/* set keyboard translation table (compat) */
	case GIO_DEADKEYMAP:	/* get accent key translation table */
	case PIO_DEADKEYMAP:	/* set accent key translation table */
	case GETFKEY:		/* get function key string */
	case SETFKEY:		/* set function key string */
		error = kbdd_ioctl(sc->kbd, cmd, data);
		if (error == ENOIOCTL)
			error = ENODEV;
		return error;

#ifndef SC_NO_FONT_LOADING

	case PIO_FONT8x8:   	/* set 8x8 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		bcopy(data, sc->font_8, 8*256);
		sc->fonts_loaded |= FONT_8;
		/*
		 * FONT KLUDGE
		 * Always use the font page #0. XXX
		 * Don't load if the current font size is not 8x8.
		 */
		if (ISTEXTSC(sc->cur_scp) && (sc->cur_scp->font_size < 14))
			sc_load_font(sc->cur_scp, 0, 8, 8, sc->font_8, 0, 256);
		return 0;

	case GIO_FONT8x8:   	/* get 8x8 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		if (sc->fonts_loaded & FONT_8) {
			bcopy(sc->font_8, data, 8*256);
			return 0;
		}
		else
			return ENXIO;

	case PIO_FONT8x14:  	/* set 8x14 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		bcopy(data, sc->font_14, 14*256);
		sc->fonts_loaded |= FONT_14;
		/*
		 * FONT KLUDGE
		 * Always use the font page #0. XXX
		 * Don't load if the current font size is not 8x14.
		 */
		if (ISTEXTSC(sc->cur_scp)
		    && (sc->cur_scp->font_size >= 14)
		    && (sc->cur_scp->font_size < 16))
			sc_load_font(sc->cur_scp, 0, 14, 8, sc->font_14, 0, 256);
		return 0;

	case GIO_FONT8x14:  	/* get 8x14 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		if (sc->fonts_loaded & FONT_14) {
			bcopy(sc->font_14, data, 14*256);
			return 0;
		}
		else
			return ENXIO;

	case PIO_FONT8x16:  	/* set 8x16 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		bcopy(data, sc->font_16, 16*256);
		sc->fonts_loaded |= FONT_16;
		/*
		 * FONT KLUDGE
		 * Always use the font page #0. XXX
		 * Don't load if the current font size is not 8x16.
		 */
		if (ISTEXTSC(sc->cur_scp) && (sc->cur_scp->font_size >= 16))
			sc_load_font(sc->cur_scp, 0, 16, 8, sc->font_16, 0, 256);
		return 0;

	case GIO_FONT8x16:  	/* get 8x16 dot font */
		if (!ISFONTAVAIL(sc->adp->va_flags))
			return ENXIO;
		if (sc->fonts_loaded & FONT_16) {
			bcopy(sc->font_16, data, 16*256);
			return 0;
		}
		else
			return ENXIO;

#endif /* SC_NO_FONT_LOADING */

	default:
		break;
	}

	return (ENOIOCTL);
}
/*
 * cdev ioctl entry point for /dev/consolectl: forward every request
 * to the tty ioctl handler of the tty stashed in si_drv1 at attach
 * time.  The fflag argument is intentionally ignored.
 */
static int
consolectl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct tty *tp;

	tp = dev->si_drv1;
	return (sctty_ioctl(tp, cmd, data, td));
}
/*
 * cdev close hook for /dev/consolectl.  Injects a synthetic
 * "no buttons pressed" mouse action so that exiting console daemons
 * (e.g. moused) cannot leave a mouse button logically stuck down.
 */
static int
consolectl_close(struct cdev *dev, int flags, int mode, struct thread *td)
{
#ifndef SC_NO_SYSMOUSE
	mouse_info_t info;
	memset(&info, 0, sizeof(info));
	info.operation = MOUSE_ACTION;

	/*
	 * Make sure all buttons are released when moused and other
	 * console daemons exit, so that no buttons are left pressed.
	 */

	(void) sctty_ioctl(dev->si_drv1, CONS_MOUSECTL, (caddr_t)&info, td);
#endif

	return (0);
}
/*
 * Console probe hook.  Reports CN_DEAD unless syscons is the enabled
 * vty backend and a video adapter is present; a keyboard is probed
 * but not required for syscons to become the console.
 */
static void
sc_cnprobe(struct consdev *cp)
{
	int unit;
	int flags;

	if (!vty_enabled(VTY_SC)) {
		cp->cn_pri = CN_DEAD;
		return;
	}

	cp->cn_pri = sc_get_cons_priority(&unit, &flags);

	/* a video card is always required */
	if (!scvidprobe(unit, flags, TRUE))
		cp->cn_pri = CN_DEAD;

	/* syscons will become console even when there is no keyboard */
	sckbdprobe(unit, flags, TRUE);

	if (cp->cn_pri == CN_DEAD)
		return;

	/* initialize required fields */
	strcpy(cp->cn_name, "ttyv0");
}
/*
 * Console init hook.  Initializes the chosen unit as the kernel
 * console and records the unit, its first vty's scr_stat, and the
 * consdev pointer in the corresponding globals for later use.
 */
static void
sc_cninit(struct consdev *cp)
{
	int unit;
	int flags;

	sc_get_cons_priority(&unit, &flags);
	scinit(unit, flags | SC_KERNEL_CONSOLE);
	sc_console_unit = unit;
	sc_console = sc_get_stat(sc_get_softc(unit, SC_KERNEL_CONSOLE)->dev[0]);
	sc_consptr = cp;
}
/*
 * Console terminate hook: tear down the kernel-console state set up
 * by sc_cninit() and clear the globals that mark us as the console.
 */
static void
sc_cnterm(struct consdev *cp)
{
	/* we are not the kernel console any more, release everything */

	if (sc_console_unit < 0)
		return;			/* shouldn't happen */

#if 0 /* XXX */
	sc_clear_screen(sc_console);
	sccnupdate(sc_console);
#endif

	scterm(sc_console_unit, SC_KERNEL_CONSOLE);
	sc_console_unit = -1;
	sc_console = NULL;
}
/*
 * Console grab hook (e.g. entering the debugger or mountroot prompt).
 * Switches to the kernel console vty when safe (both vtys in VT_AUTO
 * mode), then on the first grab forces the keyboard into enabled,
 * polled, K_XLATE mode so input works without interrupts.  Nested
 * grabs are counted via scp->grabbed.
 */
static void
sc_cngrab(struct consdev *cp)
{
	scr_stat *scp;

	if (!cold &&
	    sc_console->sc->cur_scp->index != sc_console->index &&
	    sc_console->sc->cur_scp->smode.mode == VT_AUTO &&
	    sc_console->smode.mode == VT_AUTO)
		sc_switch_scr(sc_console->sc, sc_console->index);

	scp = sc_console->sc->cur_scp;

	if (scp->sc->kbd == NULL)
		return;

	if (scp->grabbed++ > 0)
		return;			/* already grabbed; just bump the count */

	/*
	 * Make sure the keyboard is accessible even when the kbd device
	 * driver is disabled.
	 */
	kbdd_enable(scp->sc->kbd);

	/* we shall always use the keyboard in the XLATE mode here */
	scp->kbd_prev_mode = scp->kbd_mode;
	scp->kbd_mode = K_XLATE;
	(void)kbdd_ioctl(scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);

	kbdd_poll(scp->sc->kbd, TRUE);
}
/*
 * Console ungrab hook: undo sc_cngrab() when the outermost grab is
 * released — stop polling, restore the saved keyboard mode, and
 * disable the keyboard again.
 */
static void
sc_cnungrab(struct consdev *cp)
{
	scr_stat *scp;

	scp = sc_console->sc->cur_scp;	/* XXX */
	if (scp->sc->kbd == NULL)
		return;

	if (--scp->grabbed > 0)
		return;			/* still grabbed by an outer caller */

	kbdd_poll(scp->sc->kbd, FALSE);

	scp->kbd_mode = scp->kbd_prev_mode;
	(void)kbdd_ioctl(scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
	kbdd_disable(scp->sc->kbd);
}
/*
 * Console output hook: emit one character on the kernel console vty.
 * If scroll lock is holding the screen, it is released first (and a
 * saved screen buffer restored) so kernel output is never blocked.
 */
static void
sc_cnputc(struct consdev *cd, int c)
{
	u_char buf[1];
	scr_stat *scp = sc_console;
#ifndef SC_NO_HISTORY
#if 0
	struct tty *tp;
#endif
#endif /* !SC_NO_HISTORY */
	int s;

	/* assert(sc_console != NULL) */

#ifndef SC_NO_HISTORY
	if (scp == scp->sc->cur_scp && scp->status & SLKED) {
		/* kernel output overrides scroll lock */
		scp->status &= ~SLKED;
		update_kbd_state(scp, scp->status, SLKED);
		if (scp->status & BUFFER_SAVED) {
			if (!sc_hist_restore(scp))
				sc_remove_cutmarking(scp);
			scp->status &= ~BUFFER_SAVED;
			scp->status |= CURSOR_ENABLED;
			sc_draw_cursor_image(scp);
		}
#if 0
		/*
		 * XXX: Now that TTY's have their own locks, we cannot process
		 * any data after disabling scroll lock. cnputs already holds a
		 * spinlock.
		 */
		tp = SC_DEV(scp->sc, scp->index);
		/* XXX "tp" can be NULL */
		tty_lock(tp);
		if (tty_opened(tp))
			sctty_outwakeup(tp);
		tty_unlock(tp);
#endif
	}
#endif /* !SC_NO_HISTORY */

	buf[0] = c;
	sc_puts(scp, buf, 1, 1);

	s = spltty();	/* block sckbdevent and scrn_timer */
	sccnupdate(scp);
	splx(s);
}
/*
 * Console input hook: poll the keyboard for one character, or -1 if
 * none is available.  Multi-character function-key strings are cached
 * in the static fkey buffer and doled out one byte per call.
 *
 * NOTE(review): the return paths after scgetc() do not call splx(s);
 * spl*() are no-ops in this kernel generation, but confirm before
 * restructuring.
 */
static int
sc_cngetc(struct consdev *cd)
{
	static struct fkeytab fkey;
	static int fkeycp;
	scr_stat *scp;
	const u_char *p;
	int s = spltty();	/* block sckbdevent and scrn_timer while we poll */
	int c;

	/* assert(sc_console != NULL) */

	/* 
	 * Stop the screen saver and update the screen if necessary.
	 * What if we have been running in the screen saver code... XXX
	 */
	sc_touch_scrn_saver();
	scp = sc_console->sc->cur_scp;	/* XXX */
	sccnupdate(scp);

	if (fkeycp < fkey.len) {
		/* drain a previously cached function-key string first */
		splx(s);
		return fkey.str[fkeycp++];
	}

	if (scp->sc->kbd == NULL) {
		splx(s);
		return -1;
	}

	c = scgetc(scp->sc, SCGETC_CN | SCGETC_NONBLOCK);

	switch (KEYFLAGS(c)) {
	case 0:	/* normal char */
		return KEYCHAR(c);
	case FKEY:	/* function key */
		/* prefer the terminal emulator's expansion, then the kbd's */
		p = (*scp->tsw->te_fkeystr)(scp, c);
		if (p != NULL) {
			fkey.len = strlen(p);
			bcopy(p, fkey.str, fkey.len);
			fkeycp = 1;
			return fkey.str[0];
		}
		p = kbdd_get_fkeystr(scp->sc->kbd, KEYCHAR(c), (size_t *)&fkeycp);
		fkey.len = fkeycp;
		if ((p != NULL) && (fkey.len > 0)) {
			bcopy(p, fkey.str, fkey.len);
			fkeycp = 1;
			return fkey.str[0];
		}
		return c;	/* XXX */
	case NOKEY:
	case ERRKEY:
	default:
		return -1;
	}
	/* NOT REACHED */
}
/*
 * Lightweight screen refresh used from the console putc/getc paths;
 * a cut-down version of scrn_timer() that skips keyboard allocation
 * and saver activation, and bails out while switches/blinks/font
 * loads are in progress.
 */
static void
sccnupdate(scr_stat *scp)
{
	/* this is a cut-down version of scrn_timer()... */

	if (suspend_in_progress || scp->sc->font_loading_in_progress)
		return;

	if (debugger > 0 || panicstr || shutdown_in_progress) {
		sc_touch_scrn_saver();
	} else if (scp != scp->sc->cur_scp) {
		return;
	}

	if (!run_scrn_saver)
		scp->sc->flags &= ~SC_SCRN_IDLE;
#ifdef DEV_SPLASH
	if ((saver_mode != CONS_LKM_SAVER) || !(scp->sc->flags & SC_SCRN_IDLE))
		if (scp->sc->flags & SC_SCRN_BLANKED)
			stop_scrn_saver(scp->sc, current_saver);
#endif

	if (scp != scp->sc->cur_scp || scp->sc->blink_in_progress
	    || scp->sc->switch_in_progress)
		return;
	/*
	 * FIXME: unlike scrn_timer(), we call scrn_update() from here even
	 * when write_in_progress is non-zero. XXX
	 */

	if (!ISGRAPHSC(scp) && !(scp->sc->flags & SC_SCRN_BLANKED))
		scrn_update(scp, TRUE);
}
/*
 * Periodic per-softc callout.  Auto-allocates a keyboard if one has
 * appeared (non-PC98), manages screen-saver idle accounting and
 * activation/deactivation, refreshes the current text-mode vty, and
 * reschedules itself (30 Hz normally, 2 Hz in graphics modes).
 * Called with arg == softc from the callout; arg == NULL means a
 * one-shot invocation for the kernel console with no rescheduling.
 */
static void
scrn_timer(void *arg)
{
#ifndef PC98
	static time_t kbd_time_stamp = 0;
#endif
	sc_softc_t *sc;
	scr_stat *scp;
	int again, rate;

	again = (arg != NULL);
	if (arg != NULL)
		sc = (sc_softc_t *)arg;
	else if (sc_console != NULL)
		sc = sc_console->sc;
	else
		return;

	/* find the vty to update */
	scp = sc->cur_scp;

	/* don't do anything when we are performing some I/O operations */
	if (suspend_in_progress || sc->font_loading_in_progress)
		goto done;

#ifndef PC98
	if ((sc->kbd == NULL) && (sc->config & SC_AUTODETECT_KBD)) {
		/* try to allocate a keyboard automatically */
		if (kbd_time_stamp != time_uptime) {
			/* rate-limit the probe to once per second */
			kbd_time_stamp = time_uptime;
			sc->keyboard = sc_allocate_keyboard(sc, -1);
			if (sc->keyboard >= 0) {
				sc->kbd = kbd_get_keyboard(sc->keyboard);
				(void)kbdd_ioctl(sc->kbd, KDSKBMODE,
				    (caddr_t)&sc->cur_scp->kbd_mode);
				update_kbd_state(sc->cur_scp, sc->cur_scp->status,
				    LOCK_MASK);
			}
		}
	}
#endif /* PC98 */

	/* should we stop the screen saver? */
	if (debugger > 0 || panicstr || shutdown_in_progress)
		sc_touch_scrn_saver();
	if (run_scrn_saver) {
		if (time_uptime > sc->scrn_time_stamp + scrn_blank_time)
			sc->flags |= SC_SCRN_IDLE;
		else
			sc->flags &= ~SC_SCRN_IDLE;
	} else {
		sc->scrn_time_stamp = time_uptime;
		sc->flags &= ~SC_SCRN_IDLE;
		if (scrn_blank_time > 0)
			run_scrn_saver = TRUE;
	}
#ifdef DEV_SPLASH
	if ((saver_mode != CONS_LKM_SAVER) || !(sc->flags & SC_SCRN_IDLE))
		if (sc->flags & SC_SCRN_BLANKED)
			stop_scrn_saver(sc, current_saver);
#endif

	/* should we just return ? */
	if (sc->blink_in_progress || sc->switch_in_progress
	    || sc->write_in_progress)
		goto done;

	/* Update the screen */
	scp = sc->cur_scp;		/* cur_scp may have changed... */
	if (!ISGRAPHSC(scp) && !(sc->flags & SC_SCRN_BLANKED))
		scrn_update(scp, TRUE);

#ifdef DEV_SPLASH
	/* should we activate the screen saver? */
	if ((saver_mode == CONS_LKM_SAVER) && (sc->flags & SC_SCRN_IDLE))
		if (!ISGRAPHSC(scp) || (sc->flags & SC_SCRN_BLANKED))
			(*current_saver)(sc, TRUE);
#endif

done:
	if (again) {
		/*
		 * Use reduced "refresh" rate if we are in graphics and that is not a
		 * graphical screen saver.  In such case we just have nothing to do.
		 */
		if (ISGRAPHSC(scp) && !(sc->flags & SC_SCRN_BLANKED))
			rate = 2;
		else
			rate = 30;
		callout_reset_sbt(&sc->ctimeout, SBT_1S / rate, 0,
		    scrn_timer, sc, C_PREL(1));
	}
}
/*
 * Intersect the closed interval [*s1, *e1] with [s2, e2] in place.
 * Returns nonzero (TRUE) and narrows *s1/*e1 to the overlap when the
 * intervals intersect; returns 0 (FALSE) and leaves them untouched
 * when they are disjoint.
 */
static int
and_region(int *s1, int *e1, int s2, int e2)
{
	/* disjoint intervals: nothing to clip */
	if (*e1 < s2 || e2 < *s1)
		return (0);
	if (*s1 < s2)
		*s1 = s2;
	if (*e1 > e2)
		*e1 = e2;
	return (1);
}
/*
 * Redraw the dirty region [scp->start, scp->end] of the screen `scp',
 * then (optionally) the text cursor and the "pseudo" mouse pointer.
 * On exit the dirty region is reset to empty (start > end).
 * Must only be called for the currently displayed vty
 * (assert(scp == scp->sc->cur_scp) below).
 */
static void
scrn_update(scr_stat *scp, int show_cursor)
{
int start;
int end;
int s;
int e;
/* assert(scp == scp->sc->cur_scp) */
SC_VIDEO_LOCK(scp->sc);
#ifndef SC_NO_CUTPASTE
/* remove the previous mouse pointer image if necessary */
if (scp->status & MOUSE_VISIBLE) {
s = scp->mouse_pos;
e = scp->mouse_pos + scp->xsize + 1;
/*
 * Remove the mouse image if it moved/was hidden, overlaps the
 * dirty region, or overlaps the (old or new) cursor position.
 */
if ((scp->status & (MOUSE_MOVED | MOUSE_HIDDEN))
|| and_region(&s, &e, scp->start, scp->end)
|| ((scp->status & CURSOR_ENABLED) &&
(scp->cursor_pos != scp->cursor_oldpos) &&
(and_region(&s, &e, scp->cursor_pos, scp->cursor_pos)
|| and_region(&s, &e, scp->cursor_oldpos, scp->cursor_oldpos)))) {
sc_remove_mouse_image(scp);
if (scp->end >= scp->xsize*scp->ysize)
scp->end = scp->xsize*scp->ysize - 1;
}
}
#endif /* !SC_NO_CUTPASTE */
#if 1
/* debug: XXX - clamp out-of-range dirty bounds and complain */
if (scp->end >= scp->xsize*scp->ysize) {
printf("scrn_update(): scp->end %d > size_of_screen!!\n", scp->end);
scp->end = scp->xsize*scp->ysize - 1;
}
if (scp->start < 0) {
printf("scrn_update(): scp->start %d < 0\n", scp->start);
scp->start = 0;
}
#endif
/* update screen image */
if (scp->start <= scp->end) {
if (scp->mouse_cut_end >= 0) {
/* there is a marked region for cut & paste */
if (scp->mouse_cut_start <= scp->mouse_cut_end) {
start = scp->mouse_cut_start;
end = scp->mouse_cut_end;
} else {
start = scp->mouse_cut_end;
end = scp->mouse_cut_start - 1;
}
s = start;
e = end;
/* does the cut-mark region overlap with the update region? */
if (and_region(&s, &e, scp->start, scp->end)) {
/* draw the marked part highlighted, the rest normally */
(*scp->rndr->draw)(scp, s, e - s + 1, TRUE);
s = 0;
e = start - 1;
if (and_region(&s, &e, scp->start, scp->end))
(*scp->rndr->draw)(scp, s, e - s + 1, FALSE);
s = end + 1;
e = scp->xsize*scp->ysize - 1;
if (and_region(&s, &e, scp->start, scp->end))
(*scp->rndr->draw)(scp, s, e - s + 1, FALSE);
} else {
(*scp->rndr->draw)(scp, scp->start,
scp->end - scp->start + 1, FALSE);
}
} else {
(*scp->rndr->draw)(scp, scp->start,
scp->end - scp->start + 1, FALSE);
}
}
/* we are not to show the cursor and the mouse pointer... */
if (!show_cursor) {
/* reset dirty region to "empty" (start past end) */
scp->end = 0;
scp->start = scp->xsize*scp->ysize - 1;
SC_VIDEO_UNLOCK(scp->sc);
return;
}
/* update cursor image */
if (scp->status & CURSOR_ENABLED) {
s = scp->start;
e = scp->end;
/* did cursor move since last time ? */
if (scp->cursor_pos != scp->cursor_oldpos) {
/* do we need to remove old cursor image ? */
if (!and_region(&s, &e, scp->cursor_oldpos, scp->cursor_oldpos))
sc_remove_cursor_image(scp);
sc_draw_cursor_image(scp);
} else {
if (and_region(&s, &e, scp->cursor_pos, scp->cursor_pos))
/* cursor didn't move, but has been overwritten */
sc_draw_cursor_image(scp);
else if (scp->curs_attr.flags & CONS_BLINK_CURSOR)
/* if it's a blinking cursor, update it */
(*scp->rndr->blink_cursor)(scp, scp->cursor_pos,
sc_inside_cutmark(scp,
scp->cursor_pos));
}
}
#ifndef SC_NO_CUTPASTE
/* update "pseudo" mouse pointer image */
if (scp->sc->flags & SC_MOUSE_ENABLED) {
if (!(scp->status & (MOUSE_VISIBLE | MOUSE_HIDDEN))) {
scp->status &= ~MOUSE_MOVED;
sc_draw_mouse_image(scp);
}
}
#endif /* SC_NO_CUTPASTE */
/* reset dirty region to "empty" (start past end) */
scp->end = 0;
scp->start = scp->xsize*scp->ysize - 1;
SC_VIDEO_UNLOCK(scp->sc);
}
#ifdef DEV_SPLASH
/*
 * splash(9) event callback.  On SPLASH_INIT, register the splash screen
 * as the active screen saver and, during a quiet boot, display it
 * immediately.  On SPLASH_TERM, unstick and deregister it.
 * Returns 0 on success or an errno value.
 */
static int
scsplash_callback(int event, void *arg)
{
sc_softc_t *sc;
int error;
sc = (sc_softc_t *)arg;
switch (event) {
case SPLASH_INIT:
if (add_scrn_saver(scsplash_saver) == 0) {
sc->flags &= ~SC_SAVER_FAILED;
run_scrn_saver = TRUE;
/* during a non-verbose cold boot, show the image right away */
if (cold && !(boothowto & RB_VERBOSE)) {
scsplash_stick(TRUE);
(*current_saver)(sc, TRUE);
}
}
return 0;
case SPLASH_TERM:
if (current_saver == scsplash_saver) {
scsplash_stick(FALSE);
error = remove_scrn_saver(scsplash_saver);
if (error)
return error;
}
return 0;
default:
return EINVAL;
}
}
/*
 * Screen saver entry for the splash image: `show' non-zero puts the
 * image up, zero takes it down (unless the splash is "sticky").
 * The static `busy' flag guards against reentry from scrn_timer().
 */
static void
scsplash_saver(sc_softc_t *sc, int show)
{
static int busy = FALSE;
scr_stat *scp;
if (busy)
return;
busy = TRUE;
scp = sc->cur_scp;
if (show) {
if (!(sc->flags & SC_SAVER_FAILED)) {
if (!(sc->flags & SC_SCRN_BLANKED))
set_scrn_saver_mode(scp, -1, NULL, 0);
switch (splash(sc->adp, TRUE)) {
case 0: /* succeeded */
break;
case EAGAIN: /* try later */
restore_scrn_saver_mode(scp, FALSE);
sc_touch_scrn_saver(); /* XXX */
break;
default:
/* give up for good; never retry this saver */
sc->flags |= SC_SAVER_FAILED;
scsplash_stick(FALSE);
restore_scrn_saver_mode(scp, TRUE);
printf("scsplash_saver(): failed to put up the image\n");
break;
}
}
} else if (!sticky_splash) {
if ((sc->flags & SC_SCRN_BLANKED) && (splash(sc->adp, FALSE) == 0))
restore_scrn_saver_mode(scp, TRUE);
}
busy = FALSE;
}
/*
 * Register `saver' as the active screen saver callback.
 * Only one saver may be installed at a time: fail with EBUSY when a
 * saver other than the default `none_saver' is already registered.
 * Returns 0 on success.
 */
static int
add_scrn_saver(void (*saver)(sc_softc_t *, int))
{
	if (current_saver != none_saver)
		return EBUSY;

	/* Stop any pending run before switching callbacks. */
	run_scrn_saver = FALSE;
	saver_mode = CONS_LKM_SAVER;
	current_saver = saver;
	return 0;
}
/*
 * Deregister the screen saver `this_saver' and fall back to
 * `none_saver'.  Returns EINVAL if it is not the active saver, EBUSY
 * if a screen is still blanked after waiting for the saver to stop,
 * 0 on success.
 */
static int
remove_scrn_saver(void (*this_saver)(sc_softc_t *, int))
{
if (current_saver != this_saver)
return EINVAL;
#if 0
/*
* In order to prevent `current_saver' from being called by
* the timeout routine `scrn_timer()' while we manipulate
* the saver list, we shall set `current_saver' to `none_saver'
* before stopping the current saver, rather than blocking by `splXX()'.
*/
current_saver = none_saver;
if (scrn_blanked)
stop_scrn_saver(this_saver);
#endif
/* unblank all blanked screens */
wait_scrn_saver_stop(NULL);
if (scrn_blanked)
return EBUSY;
current_saver = none_saver;
return 0;
}
/*
 * Put the current vty into screen-saver mode, optionally switching the
 * video mode to `mode' (a negative `mode' keeps the current one) and
 * loading palette `pal' and border color `border'.  The previous mode
 * and status are stashed in scp->splash_save_* for later restore by
 * restore_scrn_saver_mode().  Returns 0 on success, 1 if the mode
 * switch failed (state is rolled back).
 */
static int
set_scrn_saver_mode(scr_stat *scp, int mode, u_char *pal, int border)
{
int s;
/* assert(scp == scp->sc->cur_scp) */
s = spltty();
if (!ISGRAPHSC(scp))
sc_remove_cursor_image(scp);
/* save current mode/status so the restore path can undo this */
scp->splash_save_mode = scp->mode;
scp->splash_save_status = scp->status & (GRAPHICS_MODE | PIXEL_MODE);
scp->status &= ~(GRAPHICS_MODE | PIXEL_MODE);
scp->status |= (UNKNOWN_MODE | SAVER_RUNNING);
scp->sc->flags |= SC_SCRN_BLANKED;
++scrn_blanked;
splx(s);
if (mode < 0)
return 0;
scp->mode = mode;
if (set_mode(scp) == 0) {
if (scp->sc->adp->va_info.vi_flags & V_INFO_GRAPHICS)
scp->status |= GRAPHICS_MODE;
#ifndef SC_NO_PALETTE_LOADING
if (pal != NULL)
vidd_load_palette(scp->sc->adp, pal);
#endif
sc_set_border(scp, border);
return 0;
} else {
/* mode switch failed: roll back to the saved mode/status */
s = spltty();
scp->mode = scp->splash_save_mode;
scp->status &= ~(UNKNOWN_MODE | SAVER_RUNNING);
scp->status |= scp->splash_save_status;
splx(s);
return 1;
}
}
/*
 * Leave screen-saver mode, restoring the mode/status saved by
 * set_scrn_saver_mode().  If `changemode' is zero only the soft state
 * is restored; otherwise the video mode and palette are reprogrammed
 * as well.  Returns 0 on success, 1 if the mode switch back failed
 * (the saver-mode state is reinstated).
 */
static int
restore_scrn_saver_mode(scr_stat *scp, int changemode)
{
int mode;
int status;
int s;
/* assert(scp == scp->sc->cur_scp) */
s = spltty();
/* remember the saver's mode/status in case the restore fails */
mode = scp->mode;
status = scp->status;
scp->mode = scp->splash_save_mode;
scp->status &= ~(UNKNOWN_MODE | SAVER_RUNNING);
scp->status |= scp->splash_save_status;
scp->sc->flags &= ~SC_SCRN_BLANKED;
if (!changemode) {
if (!ISGRAPHSC(scp))
sc_draw_cursor_image(scp);
--scrn_blanked;
splx(s);
return 0;
}
if (set_mode(scp) == 0) {
#ifndef SC_NO_PALETTE_LOADING
#ifdef SC_PIXEL_MODE
if (scp->sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(scp->sc->adp, scp->sc->palette2);
else
#endif
vidd_load_palette(scp->sc->adp, scp->sc->palette);
#endif
--scrn_blanked;
splx(s);
return 0;
} else {
/* couldn't switch back: stay in the saver's mode */
scp->mode = mode;
scp->status = status;
splx(s);
return 1;
}
}
/*
 * Ask the given saver to stop, then refresh the screen and perform any
 * vty switch that was delayed while the saver was running.  Wakes up
 * threads sleeping in wait_scrn_saver_stop().
 */
static void
stop_scrn_saver(sc_softc_t *sc, void (*saver)(sc_softc_t *, int))
{
(*saver)(sc, FALSE);
run_scrn_saver = FALSE;
/* the screen saver may have chosen not to stop after all... */
if (sc->flags & SC_SCRN_BLANKED)
return;
/* force a full redraw now that the saver is gone */
mark_all(sc->cur_scp);
if (sc->delayed_next_scr)
sc_switch_scr(sc, sc->delayed_next_scr - 1);
/* see the XXX comment in sc_switch_scr() about wakeup vs. debugger */
if (debugger == 0)
wakeup(&scrn_blanked);
}
/*
 * Sleep until no screen is blanked by a saver anymore, or until the
 * given softc's screen (if `sc' is non-NULL) is unblanked.  Returns 0
 * on success, or the tsleep() error that aborted the wait.
 */
static int
wait_scrn_saver_stop(sc_softc_t *sc)
{
int error = 0;
while (scrn_blanked > 0) {
run_scrn_saver = FALSE;
/* waiting only for our own unit; it is already unblanked */
if (sc && !(sc->flags & SC_SCRN_BLANKED)) {
error = 0;
break;
}
error = tsleep(&scrn_blanked, PZERO | PCATCH, "scrsav", 0);
/* retry on spurious restart, abort on any other error */
if ((error != 0) && (error != ERESTART))
break;
}
run_scrn_saver = FALSE;
return error;
}
#endif /* DEV_SPLASH */
/*
 * Report user activity: unstick the splash screen and cancel any
 * pending screen-saver activation.
 */
void
sc_touch_scrn_saver(void)
{
scsplash_stick(FALSE);
run_scrn_saver = FALSE;
}
/*
 * Switch console `sc' to vty `next_scr' (zero-based index).
 *
 * Handles the whole vty switch protocol: the switch is delayed while
 * the screen is blanked or being written; if the outgoing vty is in
 * VT_PROCESS mode its controlling process is asked to release the
 * screen (SIGUSR-style relsig) and the incoming vty's process is asked
 * to acknowledge (acqsig); stuck or dead controlling processes are
 * detected and worked around.  Returns 0 on success or when the switch
 * has been queued, EPERM if vty switching is locked, EINVAL on invalid
 * arguments or an unfinishable in-progress switch.
 */
int
sc_switch_scr(sc_softc_t *sc, u_int next_scr)
{
scr_stat *cur_scp;
struct tty *tp;
struct proc *p;
int s;
DPRINTF(5, ("sc0: sc_switch_scr() %d ", next_scr + 1));
if (sc->cur_scp == NULL)
return (0);
/* prevent switch if previously requested */
if (sc->flags & SC_SCRN_VTYLOCK) {
sc_bell(sc->cur_scp, sc->cur_scp->bell_pitch,
sc->cur_scp->bell_duration);
return EPERM;
}
/* delay switch if the screen is blanked or being updated */
if ((sc->flags & SC_SCRN_BLANKED) || sc->write_in_progress
|| sc->blink_in_progress) {
/* remember the target (+1 so 0 means "none") for later */
sc->delayed_next_scr = next_scr + 1;
sc_touch_scrn_saver();
DPRINTF(5, ("switch delayed\n"));
return 0;
}
sc->delayed_next_scr = 0;
s = spltty();
cur_scp = sc->cur_scp;
/* we are in the middle of the vty switching process... */
if (sc->switch_in_progress
&& (cur_scp->smode.mode == VT_PROCESS)
&& cur_scp->proc) {
/* pfind() returns the proc locked; we only compare the pointer */
p = pfind(cur_scp->pid);
if (cur_scp->proc != p) {
if (p)
PROC_UNLOCK(p);
/*
* The controlling process has died!!. Do some clean up.
* NOTE:`cur_scp->proc' and `cur_scp->smode.mode'
* are not reset here yet; they will be cleared later.
*/
DPRINTF(5, ("cur_scp controlling process %d died, ",
cur_scp->pid));
if (cur_scp->status & SWITCH_WAIT_REL) {
/*
* Force the previous switch to finish, but return now
* with error.
*/
DPRINTF(5, ("reset WAIT_REL, "));
finish_vt_rel(cur_scp, TRUE, &s);
splx(s);
DPRINTF(5, ("finishing previous switch\n"));
return EINVAL;
} else if (cur_scp->status & SWITCH_WAIT_ACQ) {
/* let's assume screen switch has been completed. */
DPRINTF(5, ("reset WAIT_ACQ, "));
finish_vt_acq(cur_scp);
} else {
/*
* We are in between screen release and acquisition, and
* reached here via scgetc() or scrn_timer() which has
* interrupted exchange_scr(). Don't do anything stupid.
*/
DPRINTF(5, ("waiting nothing, "));
}
} else {
if (p)
PROC_UNLOCK(p);
/*
* The controlling process is alive, but not responding...
* It is either buggy or it may be just taking time.
* The following code is a gross kludge to cope with this
* problem for which there is no clean solution. XXX
*/
if (cur_scp->status & SWITCH_WAIT_REL) {
/* escalate with each repeated switch attempt */
switch (sc->switch_in_progress++) {
case 1:
break;
case 2:
DPRINTF(5, ("sending relsig again, "));
signal_vt_rel(cur_scp);
break;
case 3:
break;
case 4:
default:
/*
* Act as if the controlling program returned
* VT_FALSE.
*/
DPRINTF(5, ("force reset WAIT_REL, "));
finish_vt_rel(cur_scp, FALSE, &s);
splx(s);
DPRINTF(5, ("act as if VT_FALSE was seen\n"));
return EINVAL;
}
} else if (cur_scp->status & SWITCH_WAIT_ACQ) {
/* same escalation for a missing acknowledge */
switch (sc->switch_in_progress++) {
case 1:
break;
case 2:
DPRINTF(5, ("sending acqsig again, "));
signal_vt_acq(cur_scp);
break;
case 3:
break;
case 4:
default:
/* clear the flag and finish the previous switch */
DPRINTF(5, ("force reset WAIT_ACQ, "));
finish_vt_acq(cur_scp);
break;
}
}
}
}
/*
* Return error if an invalid argument is given, or vty switch
* is still in progress.
*/
if ((next_scr < sc->first_vty) || (next_scr >= sc->first_vty + sc->vtys)
|| sc->switch_in_progress) {
splx(s);
sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION);
DPRINTF(5, ("error 1\n"));
return EINVAL;
}
/*
* Don't allow switching away from the graphics mode vty
* if the switch mode is VT_AUTO, unless the next vty is the same
* as the current or the current vty has been closed (but showing).
*/
tp = SC_DEV(sc, cur_scp->index);
if ((cur_scp->index != next_scr)
&& tty_opened_ns(tp)
&& (cur_scp->smode.mode == VT_AUTO)
&& ISGRAPHSC(cur_scp)) {
splx(s);
sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION);
DPRINTF(5, ("error, graphics mode\n"));
return EINVAL;
}
/*
* Is the wanted vty open? Don't allow switching to a closed vty.
* If we are in DDB, don't switch to a vty in the VT_PROCESS mode.
* Note that we always allow the user to switch to the kernel
* console even if it is closed.
*/
if ((sc_console == NULL) || (next_scr != sc_console->index)) {
tp = SC_DEV(sc, next_scr);
if (!tty_opened_ns(tp)) {
splx(s);
sc_bell(cur_scp, bios_value.bell_pitch, BELL_DURATION);
DPRINTF(5, ("error 2, requested vty isn't open!\n"));
return EINVAL;
}
if ((debugger > 0) && (SC_STAT(tp)->smode.mode == VT_PROCESS)) {
splx(s);
DPRINTF(5, ("error 3, requested vty is in the VT_PROCESS mode\n"));
return EINVAL;
}
}
/* this is the start of vty switching process... */
++sc->switch_in_progress;
sc->old_scp = cur_scp;
sc->new_scp = sc_get_stat(SC_DEV(sc, next_scr));
if (sc->new_scp == sc->old_scp) {
/* switching to the vty already shown: nothing to do */
sc->switch_in_progress = 0;
/*
* XXX wakeup() locks the scheduler lock which will hang if
* the lock is in an in-between state, e.g., when we stop at
* a breakpoint at fork_exit. It has always been wrong to call
* wakeup() when the debugger is active. In RELENG_4, wakeup()
* is supposed to be locked by splhigh(), but the debugger may
* be invoked at splhigh().
*/
if (debugger == 0)
wakeup(VTY_WCHAN(sc,next_scr));
splx(s);
DPRINTF(5, ("switch done (new == old)\n"));
return 0;
}
/* has controlling process died? */
vt_proc_alive(sc->old_scp);
vt_proc_alive(sc->new_scp);
/* wait for the controlling process to release the screen, if necessary */
if (signal_vt_rel(sc->old_scp)) {
splx(s);
return 0;
}
/* go set up the new vty screen */
splx(s);
exchange_scr(sc);
s = spltty();
/* wake up processes waiting for this vty */
if (debugger == 0)
wakeup(VTY_WCHAN(sc,next_scr));
/* wait for the controlling process to acknowledge, if necessary */
if (signal_vt_acq(sc->cur_scp)) {
splx(s);
return 0;
}
sc->switch_in_progress = 0;
if (sc->unit == sc_console_unit)
cnavailable(sc_consptr, TRUE);
splx(s);
DPRINTF(5, ("switch done\n"));
return 0;
}
/*
 * Perform the actual screen exchange after the outgoing vty has
 * released the screen.  `s' is the caller's saved spl; the lock is
 * dropped around exchange_scr() and re-raised, and the (new) spl value
 * is returned for the caller to splx() later.
 */
static int
do_switch_scr(sc_softc_t *sc, int s)
{
vt_proc_alive(sc->new_scp);
splx(s);
exchange_scr(sc);
s = spltty();
/* sc->cur_scp == sc->new_scp */
wakeup(VTY_WCHAN(sc,sc->cur_scp->index));
/* wait for the controlling process to acknowledge, if necessary */
if (!signal_vt_acq(sc->cur_scp)) {
/* no VT_PROCESS acknowledge needed; the switch is complete */
sc->switch_in_progress = 0;
if (sc->unit == sc_console_unit)
cnavailable(sc_consptr, TRUE);
}
return s;
}
/*
 * Check whether the vty's controlling process (VT_PROCESS mode) is
 * still alive.  If it has died, fall back to VT_AUTO mode and clear
 * the stale proc pointer.  Returns TRUE if alive, FALSE otherwise.
 */
static int
vt_proc_alive(scr_stat *scp)
{
struct proc *p;
if (scp->proc) {
/* pfind() returns the proc locked; we only need the pointer */
if ((p = pfind(scp->pid)) != NULL)
PROC_UNLOCK(p);
if (scp->proc == p)
return TRUE;
/* process is gone (or the pid was reused): revert to auto mode */
scp->proc = NULL;
scp->smode.mode = VT_AUTO;
DPRINTF(5, ("vt controlling process %d died\n", scp->pid));
}
return FALSE;
}
/*
 * Ask the vty's controlling process to release the screen by sending
 * its configured release signal (smode.relsig) and marking the vty
 * SWITCH_WAIT_REL.  Returns TRUE if a signal was sent (VT_PROCESS
 * mode), FALSE if the vty is not process-controlled.
 */
static int
signal_vt_rel(scr_stat *scp)
{
if (scp->smode.mode != VT_PROCESS)
return FALSE;
scp->status |= SWITCH_WAIT_REL;
PROC_LOCK(scp->proc);
kern_psignal(scp->proc, scp->smode.relsig);
PROC_UNLOCK(scp->proc);
DPRINTF(5, ("sending relsig to %d\n", scp->pid));
return TRUE;
}
/*
 * Ask the vty's controlling process to acquire the screen by sending
 * its configured acquire signal (smode.acqsig) and marking the vty
 * SWITCH_WAIT_ACQ.  Also makes the kernel console unavailable while
 * the handshake is pending.  Returns TRUE if a signal was sent
 * (VT_PROCESS mode), FALSE otherwise.
 */
static int
signal_vt_acq(scr_stat *scp)
{
if (scp->smode.mode != VT_PROCESS)
return FALSE;
if (scp->sc->unit == sc_console_unit)
cnavailable(sc_consptr, FALSE);
scp->status |= SWITCH_WAIT_ACQ;
PROC_LOCK(scp->proc);
kern_psignal(scp->proc, scp->smode.acqsig);
PROC_UNLOCK(scp->proc);
DPRINTF(5, ("sending acqsig to %d\n", scp->pid));
return TRUE;
}
/*
 * Complete the "release" half of a vty switch for the outgoing vty.
 * If `release' is true the switch proceeds (via do_switch_scr(), which
 * may update *s); otherwise the switch is aborted.  Returns 0 on
 * success, EINVAL if `scp' is not the vty awaiting release.
 */
static int
finish_vt_rel(scr_stat *scp, int release, int *s)
{
if (scp == scp->sc->old_scp && scp->status & SWITCH_WAIT_REL) {
scp->status &= ~SWITCH_WAIT_REL;
if (release)
*s = do_switch_scr(scp->sc, *s);
else
scp->sc->switch_in_progress = 0;
return 0;
}
return EINVAL;
}
/*
 * Complete the "acquire" half of a vty switch for the incoming vty and
 * mark the switch finished.  Returns 0 on success, EINVAL if `scp' is
 * not the vty awaiting acquisition.
 */
static int
finish_vt_acq(scr_stat *scp)
{
if (scp == scp->sc->new_scp && scp->status & SWITCH_WAIT_ACQ) {
scp->status &= ~SWITCH_WAIT_ACQ;
scp->sc->switch_in_progress = 0;
return 0;
}
return EINVAL;
}
/*
 * Swap the display from sc->old_scp to sc->new_scp: save the outgoing
 * vty's cursor and keyboard state, reprogram the video mode, palette
 * and border for the incoming vty, switch the keyboard translation
 * mode, and mark the whole new screen dirty for redraw.
 */
static void
exchange_scr(sc_softc_t *sc)
{
scr_stat *scp;
/* save the current state of video and keyboard */
sc_move_cursor(sc->old_scp, sc->old_scp->xpos, sc->old_scp->ypos);
if (!ISGRAPHSC(sc->old_scp))
sc_remove_cursor_image(sc->old_scp);
if (sc->old_scp->kbd_mode == K_XLATE)
save_kbd_state(sc->old_scp);
/* set up the video for the new screen */
scp = sc->cur_scp = sc->new_scp;
/* only reprogram the hardware when the video mode actually differs */
#ifdef PC98
if (sc->old_scp->mode != scp->mode || ISUNKNOWNSC(sc->old_scp) || ISUNKNOWNSC(sc->new_scp))
#else
if (sc->old_scp->mode != scp->mode || ISUNKNOWNSC(sc->old_scp))
#endif
set_mode(scp);
#ifndef __sparc64__
else
sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize,
(void *)sc->adp->va_window, FALSE);
#endif
scp->status |= MOUSE_HIDDEN;
sc_move_cursor(scp, scp->xpos, scp->ypos);
if (!ISGRAPHSC(scp))
sc_set_cursor_image(scp);
#ifndef SC_NO_PALETTE_LOADING
/* coming out of a graphics vty: reload the text palette */
if (ISGRAPHSC(sc->old_scp)) {
#ifdef SC_PIXEL_MODE
if (sc->adp->va_info.vi_mem_model == V_INFO_MM_DIRECT)
vidd_load_palette(sc->adp, sc->palette2);
else
#endif
vidd_load_palette(sc->adp, sc->palette);
}
#endif
sc_set_border(scp, scp->border);
/* set up the keyboard for the new screen */
if (sc->old_scp->kbd_mode != scp->kbd_mode)
(void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
update_kbd_state(scp, scp->status, LOCK_MASK);
/* force a complete redraw of the new screen */
mark_all(scp);
}
/*
 * Feed `len' bytes from `buf' to the vty's terminal emulator.
 * `kernel' is passed through to the emulator's te_puts hook (kernel
 * vs. user output).  Takes the per-screen spin lock unless we are in
 * the debugger or already hold it; afterwards performs any vty switch
 * that was delayed during output.
 */
static void
sc_puts(scr_stat *scp, u_char *buf, int len, int kernel)
{
int need_unlock = 0;
#ifdef DEV_SPLASH
/* make screensaver happy */
if (!sticky_splash && scp == scp->sc->cur_scp && !sc_saver_keyb_only)
run_scrn_saver = FALSE;
#endif
if (scp->tsw) {
/* avoid recursing on the lock; skip locking in the debugger */
if (!kdb_active && !mtx_owned(&scp->scr_lock)) {
need_unlock = 1;
mtx_lock_spin(&scp->scr_lock);
}
(*scp->tsw->te_puts)(scp, buf, len, kernel);
if (need_unlock)
mtx_unlock_spin(&scp->scr_lock);
}
if (scp->sc->delayed_next_scr)
sc_switch_scr(scp->sc, scp->sc->delayed_next_scr - 1);
}
/*
 * Draw the text cursor at the current position via the renderer and
 * remember that position in cursor_oldpos for later removal.
 */
void
sc_draw_cursor_image(scr_stat *scp)
{
/* assert(scp == scp->sc->cur_scp); */
SC_VIDEO_LOCK(scp->sc);
(*scp->rndr->draw_cursor)(scp, scp->cursor_pos,
scp->curs_attr.flags & CONS_BLINK_CURSOR, TRUE,
sc_inside_cutmark(scp, scp->cursor_pos));
scp->cursor_oldpos = scp->cursor_pos;
SC_VIDEO_UNLOCK(scp->sc);
}
/*
 * Erase the text cursor from its previously drawn position
 * (cursor_oldpos) via the renderer.
 */
void
sc_remove_cursor_image(scr_stat *scp)
{
/* assert(scp == scp->sc->cur_scp); */
SC_VIDEO_LOCK(scp->sc);
(*scp->rndr->draw_cursor)(scp, scp->cursor_oldpos,
scp->curs_attr.flags & CONS_BLINK_CURSOR, FALSE,
sc_inside_cutmark(scp, scp->cursor_oldpos));
SC_VIDEO_UNLOCK(scp->sc);
}
/*
 * Re-render the cursor after a shape change: erase the old image,
 * recompute the cursor shape, then draw it again.  The call order is
 * significant.
 */
static void
update_cursor_image(scr_stat *scp)
{
/* assert(scp == scp->sc->cur_scp); */
sc_remove_cursor_image(scp);
sc_set_cursor_image(scp);
sc_draw_cursor_image(scp);
}
/*
 * Recompute the effective cursor shape (scp->curs_attr) from the
 * requested attributes (scp->curr_curs_attr) and the current font
 * size, then program it into the renderer.
 */
void
sc_set_cursor_image(scr_stat *scp)
{
scp->curs_attr.flags = scp->curr_curs_attr.flags;
if (scp->curs_attr.flags & CONS_HIDDEN_CURSOR) {
/* hidden cursor is internally represented as zero-height underline */
scp->curs_attr.flags = CONS_CHAR_CURSOR;
scp->curs_attr.base = scp->curs_attr.height = 0;
} else if (scp->curs_attr.flags & CONS_CHAR_CURSOR) {
/* clamp the underline cursor to fit within the font cell */
scp->curs_attr.base = imin(scp->curr_curs_attr.base,
scp->font_size - 1);
scp->curs_attr.height = imin(scp->curr_curs_attr.height,
scp->font_size - scp->curs_attr.base);
} else { /* block cursor */
scp->curs_attr.base = 0;
scp->curs_attr.height = scp->font_size;
}
/* assert(scp == scp->sc->cur_scp); */
SC_VIDEO_LOCK(scp->sc);
(*scp->rndr->set_cursor)(scp, scp->curs_attr.base, scp->curs_attr.height,
scp->curs_attr.flags & CONS_BLINK_CURSOR);
SC_VIDEO_UNLOCK(scp->sc);
}
/*
 * Apply a cursor shape change to a single vty.  Negative `base' or
 * `height' leaves that field unchanged; CONS_RESET_CURSOR in `flags'
 * restores the vty's default attributes.  Redraws the cursor only when
 * this vty is currently displayed in text mode.
 */
static void
change_cursor_shape(scr_stat *scp, int flags, int base, int height)
{
if ((scp == scp->sc->cur_scp) && !ISGRAPHSC(scp))
sc_remove_cursor_image(scp);
if (base >= 0)
scp->curr_curs_attr.base = base;
if (height >= 0)
scp->curr_curs_attr.height = height;
if (flags & CONS_RESET_CURSOR)
scp->curr_curs_attr = scp->dflt_curs_attr;
else
scp->curr_curs_attr.flags = flags & CONS_CURSOR_ATTRS;
if ((scp == scp->sc->cur_scp) && !ISGRAPHSC(scp)) {
sc_set_cursor_image(scp);
sc_draw_cursor_image(scp);
}
}
/*
 * Public cursor-shape interface.  With CONS_LOCAL_CURSOR set the
 * change applies to the given vty only; otherwise it updates the
 * softc-wide defaults and propagates them to every vty of the unit.
 * `flags' == -1 means "leave flags unchanged"; negative base/height
 * likewise keep the current values.
 */
void
sc_change_cursor_shape(scr_stat *scp, int flags, int base, int height)
{
sc_softc_t *sc;
struct tty *tp;
int s;
int i;
s = spltty();
if ((flags != -1) && (flags & CONS_LOCAL_CURSOR)) {
/* local (per vty) change */
change_cursor_shape(scp, flags, base, height);
splx(s);
return;
}
/* global change */
sc = scp->sc;
if (base >= 0)
sc->curs_attr.base = base;
if (height >= 0)
sc->curs_attr.height = height;
if (flags != -1) {
if (flags & CONS_RESET_CURSOR)
sc->curs_attr = sc->dflt_curs_attr;
else
sc->curs_attr.flags = flags & CONS_CURSOR_ATTRS;
}
/* push the new defaults to all vtys on this unit */
for (i = sc->first_vty; i < sc->first_vty + sc->vtys; ++i) {
if ((tp = SC_DEV(sc, i)) == NULL)
continue;
if ((scp = sc_get_stat(tp)) == NULL)
continue;
scp->dflt_curs_attr = sc->curs_attr;
change_cursor_shape(scp, CONS_RESET_CURSOR, -1, -1);
}
splx(s);
}
/*
 * Initialize (or re-initialize) syscons unit `unit'.  Allocates the
 * video adapter and keyboard, sets up the first vty (statically for
 * the kernel console, via malloc otherwise), derives the default
 * cursor shape from the BIOS values, loads/saves fonts and the
 * palette, and registers the splash screen.  May be called more than
 * once; resource allocation is redone each time, while the tail
 * (scr_map setup) runs only on the first call.
 */
static void
scinit(int unit, int flags)
{
/*
* When syscons is being initialized as the kernel console, malloc()
* is not yet functional, because various kernel structures has not been
* fully initialized yet. Therefore, we need to declare the following
* static buffers for the console. This is less than ideal,
* but is necessry evil for the time being. XXX
*/
#ifdef PC98
static u_short sc_buffer[ROW*COL*2];/* XXX */
#else
static u_short sc_buffer[ROW*COL]; /* XXX */
#endif
#ifndef SC_NO_FONT_LOADING
static u_char font_8[256*8];
static u_char font_14[256*14];
static u_char font_16[256*16];
#endif
sc_softc_t *sc;
scr_stat *scp;
video_adapter_t *adp;
int col;
int row;
int i;
/* one time initialization */
if (init_done == COLD)
sc_get_bios_values(&bios_value);
init_done = WARM;
/*
* Allocate resources. Even if we are being called for the second
* time, we must allocate them again, because they might have
* disappeared...
*/
sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE);
if ((sc->flags & SC_INIT_DONE) == 0)
SC_VIDEO_LOCKINIT(sc);
adp = NULL;
/* release previously held adapter/keyboard before re-allocating */
if (sc->adapter >= 0) {
vid_release(sc->adp, (void *)&sc->adapter);
adp = sc->adp;
sc->adp = NULL;
}
if (sc->keyboard >= 0) {
DPRINTF(5, ("sc%d: releasing kbd%d\n", unit, sc->keyboard));
i = kbd_release(sc->kbd, (void *)&sc->keyboard);
DPRINTF(5, ("sc%d: kbd_release returned %d\n", unit, i));
if (sc->kbd != NULL) {
DPRINTF(5, ("sc%d: kbd != NULL!, index:%d, unit:%d, flags:0x%x\n",
unit, sc->kbd->kb_index, sc->kbd->kb_unit, sc->kbd->kb_flags));
}
sc->kbd = NULL;
}
sc->adapter = vid_allocate("*", unit, (void *)&sc->adapter);
sc->adp = vid_get_adapter(sc->adapter);
/* assert((sc->adapter >= 0) && (sc->adp != NULL)) */
sc->keyboard = sc_allocate_keyboard(sc, unit);
DPRINTF(1, ("sc%d: keyboard %d\n", unit, sc->keyboard));
sc->kbd = kbd_get_keyboard(sc->keyboard);
if (sc->kbd != NULL) {
DPRINTF(1, ("sc%d: kbd index:%d, unit:%d, flags:0x%x\n",
unit, sc->kbd->kb_index, sc->kbd->kb_unit, sc->kbd->kb_flags));
}
/* full (re)initialization needed on first call or adapter change */
if (!(sc->flags & SC_INIT_DONE) || (adp != sc->adp)) {
sc->initial_mode = sc->adp->va_initial_mode;
#ifndef SC_NO_FONT_LOADING
if (flags & SC_KERNEL_CONSOLE) {
sc->font_8 = font_8;
sc->font_14 = font_14;
sc->font_16 = font_16;
} else if (sc->font_8 == NULL) {
/* assert(sc_malloc) */
sc->font_8 = malloc(sizeof(font_8), M_DEVBUF, M_WAITOK);
sc->font_14 = malloc(sizeof(font_14), M_DEVBUF, M_WAITOK);
sc->font_16 = malloc(sizeof(font_16), M_DEVBUF, M_WAITOK);
}
#endif
/* extract the hardware cursor location and hide the cursor for now */
vidd_read_hw_cursor(sc->adp, &col, &row);
vidd_set_hw_cursor(sc->adp, -1, -1);
/* set up the first console */
sc->first_vty = unit*MAXCONS;
sc->vtys = MAXCONS; /* XXX: should be configurable */
if (flags & SC_KERNEL_CONSOLE) {
/*
* Set up devs structure but don't use it yet, calling make_dev()
* might panic kernel. Wait for sc_attach_unit() to actually
* create the devices.
*/
sc->dev = main_devs;
scp = &main_console;
init_scp(sc, sc->first_vty, scp);
sc_vtb_init(&scp->vtb, VTB_MEMORY, scp->xsize, scp->ysize,
(void *)sc_buffer, FALSE);
/* move cursors to the initial positions */
if (col >= scp->xsize)
col = 0;
if (row >= scp->ysize)
row = scp->ysize - 1;
scp->xpos = col;
scp->ypos = row;
scp->cursor_pos = scp->cursor_oldpos = row*scp->xsize + col;
if (sc_init_emulator(scp, SC_DFLT_TERM))
sc_init_emulator(scp, "*");
(*scp->tsw->te_default_attr)(scp,
user_default.std_color,
user_default.rev_color);
} else {
/* assert(sc_malloc) */
sc->dev = malloc(sizeof(struct tty *)*sc->vtys, M_DEVBUF,
M_WAITOK|M_ZERO);
sc->dev[0] = sc_alloc_tty(0, unit * MAXCONS);
scp = alloc_scp(sc, sc->first_vty);
SC_STAT(sc->dev[0]) = scp;
}
sc->cur_scp = scp;
#ifndef __sparc64__
/* copy screen to temporary buffer */
sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize,
(void *)scp->sc->adp->va_window, FALSE);
if (ISTEXTSC(scp))
sc_vtb_copy(&scp->scr, 0, &scp->vtb, 0, scp->xsize*scp->ysize);
#endif
/*
 * Derive the default cursor shape from the BIOS cursor start/end
 * scanlines, clamped to the font height.
 * NOTE(review): assumes bios_value was filled in by
 * sc_get_bios_values() above -- confirm for non-BIOS platforms.
 */
if (bios_value.cursor_end < scp->font_size)
sc->dflt_curs_attr.base = scp->font_size -
bios_value.cursor_end - 1;
else
sc->dflt_curs_attr.base = 0;
i = bios_value.cursor_end - bios_value.cursor_start + 1;
sc->dflt_curs_attr.height = imin(i, scp->font_size);
sc->dflt_curs_attr.flags = 0;
sc->curs_attr = sc->dflt_curs_attr;
scp->curr_curs_attr = scp->dflt_curs_attr = sc->curs_attr;
#ifndef SC_NO_SYSMOUSE
sc_mouse_move(scp, scp->xpixel/2, scp->ypixel/2);
#endif
if (!ISGRAPHSC(scp)) {
sc_set_cursor_image(scp);
sc_draw_cursor_image(scp);
}
/* save font and palette */
#ifndef SC_NO_FONT_LOADING
sc->fonts_loaded = 0;
if (ISFONTAVAIL(sc->adp->va_flags)) {
#ifdef SC_DFLT_FONT
/* load the compiled-in default fonts into the adapter */
bcopy(dflt_font_8, sc->font_8, sizeof(dflt_font_8));
bcopy(dflt_font_14, sc->font_14, sizeof(dflt_font_14));
bcopy(dflt_font_16, sc->font_16, sizeof(dflt_font_16));
sc->fonts_loaded = FONT_16 | FONT_14 | FONT_8;
if (scp->font_size < 14) {
sc_load_font(scp, 0, 8, 8, sc->font_8, 0, 256);
} else if (scp->font_size >= 16) {
sc_load_font(scp, 0, 16, 8, sc->font_16, 0, 256);
} else {
sc_load_font(scp, 0, 14, 8, sc->font_14, 0, 256);
}
#else /* !SC_DFLT_FONT */
/* no built-in fonts: save whatever the adapter currently has */
if (scp->font_size < 14) {
sc_save_font(scp, 0, 8, 8, sc->font_8, 0, 256);
sc->fonts_loaded = FONT_8;
} else if (scp->font_size >= 16) {
sc_save_font(scp, 0, 16, 8, sc->font_16, 0, 256);
sc->fonts_loaded = FONT_16;
} else {
sc_save_font(scp, 0, 14, 8, sc->font_14, 0, 256);
sc->fonts_loaded = FONT_14;
}
#endif /* SC_DFLT_FONT */
/* FONT KLUDGE: always use the font page #0. XXX */
sc_show_font(scp, 0);
}
#endif /* !SC_NO_FONT_LOADING */
#ifndef SC_NO_PALETTE_LOADING
vidd_save_palette(sc->adp, sc->palette);
#ifdef SC_PIXEL_MODE
/* build a linear grayscale ramp as the secondary palette */
for (i = 0; i < sizeof(sc->palette2); i++)
sc->palette2[i] = i / 3;
#endif
#endif
#ifdef DEV_SPLASH
if (!(sc->flags & SC_SPLASH_SCRN)) {
/* we are ready to put up the splash image! */
splash_init(sc->adp, scsplash_callback, sc);
sc->flags |= SC_SPLASH_SCRN;
}
#endif
}
/* the rest is not necessary, if we have done it once */
if (sc->flags & SC_INIT_DONE)
return;
/* initialize mapscrn arrays to a one to one map */
for (i = 0; i < sizeof(sc->scr_map); i++)
sc->scr_map[i] = sc->scr_rmap[i] = i;
#ifdef PC98
sc->scr_map[0x5c] = (u_char)0xfc; /* for backslash */
#endif
sc->flags |= SC_INIT_DONE;
}
/*
 * Tear down syscons unit `unit': stop the splash screen, release the
 * keyboard and video adapter, terminate the terminal emulator of the
 * first vty, free per-unit allocations (non-kernel-console only), and
 * zero the softc, leaving keyboard/adapter marked unallocated (-1).
 */
static void
scterm(int unit, int flags)
{
sc_softc_t *sc;
scr_stat *scp;
sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE);
if (sc == NULL)
return; /* shouldn't happen */
#ifdef DEV_SPLASH
/* this console is no longer available for the splash screen */
if (sc->flags & SC_SPLASH_SCRN) {
splash_term(sc->adp);
sc->flags &= ~SC_SPLASH_SCRN;
}
#endif
#if 0 /* XXX */
/* move the hardware cursor to the upper-left corner */
vidd_set_hw_cursor(sc->adp, 0, 0);
#endif
/* release the keyboard and the video card */
if (sc->keyboard >= 0)
kbd_release(sc->kbd, &sc->keyboard);
if (sc->adapter >= 0)
vid_release(sc->adp, &sc->adapter);
/* stop the terminal emulator, if any */
scp = sc_get_stat(sc->dev[0]);
if (scp->tsw)
(*scp->tsw->te_term)(scp, &scp->ts);
if (scp->ts != NULL)
free(scp->ts, M_DEVBUF);
mtx_destroy(&scp->scr_lock);
/* clear the structure */
if (!(flags & SC_KERNEL_CONSOLE)) {
/* XXX: We need delete_dev() for this */
free(sc->dev, M_DEVBUF);
#if 0
/* XXX: We need a ttyunregister for this */
free(sc->tty, M_DEVBUF);
#endif
#ifndef SC_NO_FONT_LOADING
/* fonts were malloc'ed for non-kernel-console units in scinit() */
free(sc->font_8, M_DEVBUF);
free(sc->font_14, M_DEVBUF);
free(sc->font_16, M_DEVBUF);
#endif
/* XXX vtb, history */
}
/* wipe the softc; -1 marks keyboard/adapter as unallocated */
bzero(sc, sizeof(*sc));
sc->keyboard = -1;
sc->adapter = -1;
}
/*
 * Shutdown event handler: wake any screen saver and, when safe (both
 * current and console vtys in VT_AUTO mode), switch back to the kernel
 * console vty so shutdown messages are visible.
 */
static void
scshutdown(__unused void *arg, __unused int howto)
{
KASSERT(sc_console != NULL, ("sc_console != NULL"));
KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL"));
KASSERT(sc_console->sc->cur_scp != NULL,
("sc_console->sc->cur_scp != NULL"));
sc_touch_scrn_saver();
if (!cold &&
sc_console->sc->cur_scp->index != sc_console->index &&
sc_console->sc->cur_scp->smode.mode == VT_AUTO &&
sc_console->smode.mode == VT_AUTO)
sc_switch_scr(sc_console->sc, sc_console->index);
shutdown_in_progress = TRUE;
}
/*
 * Suspend event handler: remember the currently displayed vty in
 * sc_susp_scr and (unless disabled via sc_no_suspend_vtswitch) switch
 * to the kernel console vty, retrying up to 10 times while a switch
 * is still in progress.
 */
static void
scsuspend(__unused void *arg)
{
int retry;
KASSERT(sc_console != NULL, ("sc_console != NULL"));
KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL"));
KASSERT(sc_console->sc->cur_scp != NULL,
("sc_console->sc->cur_scp != NULL"));
sc_susp_scr = sc_console->sc->cur_scp->index;
if (sc_no_suspend_vtswitch ||
sc_susp_scr == sc_console->index) {
/* nothing to switch; -1 tells scresume() to skip the restore */
sc_touch_scrn_saver();
sc_susp_scr = -1;
return;
}
for (retry = 0; retry < 10; retry++) {
sc_switch_scr(sc_console->sc, sc_console->index);
if (!sc_console->sc->switch_in_progress)
break;
pause("scsuspend", hz);
}
suspend_in_progress = TRUE;
}
/*
 * Resume event handler: switch back to the vty recorded by
 * scsuspend(), or just refresh the font when no switch was made
 * (sc_susp_scr < 0).
 */
static void
scresume(__unused void *arg)
{
KASSERT(sc_console != NULL, ("sc_console != NULL"));
KASSERT(sc_console->sc != NULL, ("sc_console->sc != NULL"));
KASSERT(sc_console->sc->cur_scp != NULL,
("sc_console->sc->cur_scp != NULL"));
suspend_in_progress = FALSE;
if (sc_susp_scr < 0) {
update_font(sc_console->sc->cur_scp);
return;
}
sc_switch_scr(sc_console->sc, sc_susp_scr);
}
/*
 * Bring the vty into a clean visual state: stop any running screen
 * saver (waiting for it under DEV_SPLASH), hide the mouse pointer and
 * remove any cut marking.  Returns 0 on success or the error from
 * wait_scrn_saver_stop().
 */
int
sc_clean_up(scr_stat *scp)
{
#ifdef DEV_SPLASH
int error;
#endif
if (scp->sc->flags & SC_SCRN_BLANKED) {
sc_touch_scrn_saver();
#ifdef DEV_SPLASH
if ((error = wait_scrn_saver_stop(scp->sc)))
return error;
#endif
}
scp->status |= MOUSE_HIDDEN;
sc_remove_mouse_image(scp);
sc_remove_cutmarking(scp);
return 0;
}
/*
 * (Re)allocate the vty's screen buffer.  When `discard' is false and
 * the old buffer is valid, its contents and cursor position are
 * carried over into the new buffer; otherwise the old buffer is
 * destroyed.  `wait' selects blocking allocation.
 */
void
sc_alloc_scr_buffer(scr_stat *scp, int wait, int discard)
{
sc_vtb_t new;
sc_vtb_t old;
old = scp->vtb;
sc_vtb_init(&new, VTB_MEMORY, scp->xsize, scp->ysize, NULL, wait);
if (!discard && (old.vtb_flags & VTB_VALID)) {
/* retain the current cursor position and buffer contants */
scp->cursor_oldpos = scp->cursor_pos;
/*
* This works only if the old buffer has the same size as or larger
* than the new one. XXX
*/
sc_vtb_copy(&old, 0, &new, 0, scp->xsize*scp->ysize);
scp->vtb = new;
} else {
scp->vtb = new;
sc_vtb_destroy(&old);
}
#ifndef SC_NO_SYSMOUSE
/* move the mouse cursor at the center of the screen */
sc_mouse_move(scp, scp->xpixel / 2, scp->ypixel / 2);
#endif
}
/*
 * Allocate and fully initialize a new scr_stat for vty `vty': screen
 * buffer, terminal emulator (falling back to any available one),
 * cut buffer and history.  Caller owns the returned structure.
 */
static scr_stat
*alloc_scp(sc_softc_t *sc, int vty)
{
scr_stat *scp;
/* assert(sc_malloc) */
scp = (scr_stat *)malloc(sizeof(scr_stat), M_DEVBUF, M_WAITOK);
init_scp(sc, vty, scp);
sc_alloc_scr_buffer(scp, TRUE, TRUE);
/* prefer the configured default emulator, fall back to any match */
if (sc_init_emulator(scp, SC_DFLT_TERM))
sc_init_emulator(scp, "*");
#ifndef SC_NO_CUTPASTE
sc_alloc_cut_buffer(scp, TRUE);
#endif
#ifndef SC_NO_HISTORY
sc_alloc_history_buffer(scp, 0, 0, TRUE);
#endif
return scp;
}
/*
 * Initialize the scr_stat `scp' for vty `vty' to its default state:
 * geometry derived from the adapter's initial video mode, empty dirty
 * region, default cursor/bell/keyboard settings, VT_AUTO mode, and a
 * fresh per-screen spin lock.  Does not allocate the screen buffer
 * (see sc_alloc_scr_buffer()).
 */
static void
init_scp(sc_softc_t *sc, int vty, scr_stat *scp)
{
video_info_t info;
bzero(scp, sizeof(*scp));
scp->index = vty;
scp->sc = sc;
scp->status = 0;
scp->mode = sc->initial_mode;
vidd_get_info(sc->adp, scp->mode, &info);
if (info.vi_flags & V_INFO_GRAPHICS) {
scp->status |= GRAPHICS_MODE;
scp->xpixel = info.vi_width;
scp->ypixel = info.vi_height;
scp->xsize = info.vi_width/info.vi_cwidth;
scp->ysize = info.vi_height/info.vi_cheight;
scp->font_size = 0;
scp->font = NULL;
} else {
scp->xsize = info.vi_width;
scp->ysize = info.vi_height;
scp->xpixel = scp->xsize*info.vi_cwidth;
scp->ypixel = scp->ysize*info.vi_cheight;
}
/*
 * NOTE(review): this unconditionally overwrites the font_size = 0
 * set for graphics modes just above -- matches long-standing
 * upstream behavior, but looks suspicious; confirm intent.
 */
scp->font_size = info.vi_cheight;
scp->font_width = info.vi_cwidth;
#ifndef SC_NO_FONT_LOADING
/* pick the preloaded font matching the cell height */
if (info.vi_cheight < 14)
scp->font = sc->font_8;
else if (info.vi_cheight >= 16)
scp->font = sc->font_16;
else
scp->font = sc->font_14;
#else
scp->font = NULL;
#endif
sc_vtb_init(&scp->vtb, VTB_MEMORY, 0, 0, NULL, FALSE);
#ifndef __sparc64__
sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, 0, 0, NULL, FALSE);
#endif
scp->xoff = scp->yoff = 0;
scp->xpos = scp->ypos = 0;
/* dirty region starts out "empty" (start past end) */
scp->start = scp->xsize * scp->ysize - 1;
scp->end = 0;
scp->tsw = NULL;
scp->ts = NULL;
scp->rndr = NULL;
scp->border = (SC_NORM_ATTR >> 4) & 0x0f;
scp->curr_curs_attr = scp->dflt_curs_attr = sc->curs_attr;
/* no cut marking: start past the last cell, end negative */
scp->mouse_cut_start = scp->xsize*scp->ysize;
scp->mouse_cut_end = -1;
scp->mouse_signal = 0;
scp->mouse_pid = 0;
scp->mouse_proc = NULL;
scp->kbd_mode = K_XLATE;
scp->bell_pitch = bios_value.bell_pitch;
scp->bell_duration = BELL_DURATION;
scp->status |= (bios_value.shift_state & NLKED);
scp->status |= CURSOR_ENABLED | MOUSE_HIDDEN;
scp->pid = 0;
scp->proc = NULL;
scp->smode.mode = VT_AUTO;
scp->history = NULL;
scp->history_pos = 0;
scp->history_size = 0;
mtx_init(&scp->scr_lock, "scrlock", NULL, MTX_SPIN);
}
/*
 * Attach the terminal emulator named `name' (NULL keeps the current
 * one, "*" handled by sc_term_match) and a matching renderer to the
 * vty.  Re-initializes in place when the emulator is unchanged;
 * otherwise cold-starts the new emulator and tears down the old one.
 * Returns 0 on success, EINVAL for an unknown emulator, ENODEV when
 * no renderer matches, or the emulator's te_init error.
 */
int
sc_init_emulator(scr_stat *scp, char *name)
{
sc_term_sw_t *sw;
sc_rndr_sw_t *rndr;
void *p;
int error;
if (name == NULL) /* if no name is given, use the current emulator */
sw = scp->tsw;
else /* ...otherwise find the named emulator */
sw = sc_term_match(name);
if (sw == NULL)
return EINVAL;
rndr = NULL;
/* prefer the renderer requested by the emulator... */
if (strcmp(sw->te_renderer, "*") != 0) {
rndr = sc_render_match(scp, sw->te_renderer,
scp->status & (GRAPHICS_MODE | PIXEL_MODE));
}
/* ...fall back to the adapter's own renderer */
if (rndr == NULL) {
rndr = sc_render_match(scp, scp->sc->adp->va_name,
scp->status & (GRAPHICS_MODE | PIXEL_MODE));
if (rndr == NULL)
return ENODEV;
}
/* same emulator: warm re-init, keep the existing state */
if (sw == scp->tsw) {
error = (*sw->te_init)(scp, &scp->ts, SC_TE_WARM_INIT);
scp->rndr = rndr;
scp->rndr->init(scp);
sc_clear_screen(scp);
/* assert(error == 0); */
return error;
}
/* new emulator: allocate its private state and cold-start it */
if (sc_malloc && (sw->te_size > 0))
p = malloc(sw->te_size, M_DEVBUF, M_NOWAIT);
else
p = NULL;
error = (*sw->te_init)(scp, &p, SC_TE_COLD_INIT);
if (error)
return error;
/* shut down and free the previous emulator's state */
if (scp->tsw)
(*scp->tsw->te_term)(scp, &scp->ts);
if (scp->ts != NULL)
free(scp->ts, M_DEVBUF);
scp->tsw = sw;
scp->ts = p;
scp->rndr = rndr;
scp->rndr->init(scp);
/* XXX */
(*sw->te_default_attr)(scp, user_default.std_color, user_default.rev_color);
sc_clear_screen(scp);
return 0;
}
/*
* scgetc(flags) - get character from keyboard.
* If flags & SCGETC_CN, then avoid harmful side effects.
* If flags & SCGETC_NONBLOCK, then return NOKEY immediately if there is
* nothing there; otherwise block until a key is pressed.
*/
static u_int
scgetc(sc_softc_t *sc, u_int flags)
{
	scr_stat *scp;
#ifndef SC_NO_HISTORY
	struct tty *tp;
#endif
	u_int c;
	int this_scr;
	int f;
	int i;

	if (sc->kbd == NULL)
		return NOKEY;

next_code:
#if 1
	/* I don't like this, but... XXX */
	if (flags & SCGETC_CN)
		sccnupdate(sc->cur_scp);
#endif
	scp = sc->cur_scp;
	/* first see if there is something in the keyboard port */
	for (;;) {
		/* the second argument asks kbdd_read_char() to block */
		c = kbdd_read_char(sc->kbd, !(flags & SCGETC_NONBLOCK));
		if (c == ERRKEY) {
			if (!(flags & SCGETC_CN))
				sc_bell(scp, bios_value.bell_pitch, BELL_DURATION);
		} else if (c == NOKEY)
			return c;
		else
			break;
	}

	/* make screensaver happy */
	if (!(c & RELKEY))
		sc_touch_scrn_saver();

	if (!(flags & SCGETC_CN))
		random_harvest(&c, sizeof(c), 1, RANDOM_KEYBOARD);

	/* when not translating, hand the raw key code to the caller */
	if (scp->kbd_mode != K_XLATE)
		return KEYCHAR(c);

	/* if scroll-lock pressed allow history browsing */
	if (!ISGRAPHSC(scp) && scp->history && scp->status & SLKED) {

		scp->status &= ~CURSOR_ENABLED;
		sc_remove_cursor_image(scp);

#ifndef SC_NO_HISTORY
		/* snapshot the on-screen buffer once before scrolling */
		if (!(scp->status & BUFFER_SAVED)) {
			scp->status |= BUFFER_SAVED;
			sc_hist_save(scp);
		}
		switch (c) {
		/* FIXME: key codes */
		case SPCLKEY | FKEY | F(49):  /* home key */
			sc_remove_cutmarking(scp);
			sc_hist_home(scp);
			goto next_code;

		case SPCLKEY | FKEY | F(57):  /* end key */
			sc_remove_cutmarking(scp);
			sc_hist_end(scp);
			goto next_code;

		case SPCLKEY | FKEY | F(50):  /* up arrow key */
			sc_remove_cutmarking(scp);
			if (sc_hist_up_line(scp))
				if (!(flags & SCGETC_CN))
					sc_bell(scp, bios_value.bell_pitch, BELL_DURATION);
			goto next_code;

		case SPCLKEY | FKEY | F(58):  /* down arrow key */
			sc_remove_cutmarking(scp);
			if (sc_hist_down_line(scp))
				if (!(flags & SCGETC_CN))
					sc_bell(scp, bios_value.bell_pitch, BELL_DURATION);
			goto next_code;

		case SPCLKEY | FKEY | F(51):  /* page up key */
			sc_remove_cutmarking(scp);
			/* scroll a whole screenful; bell when the top is hit */
			for (i=0; i<scp->ysize; i++)
			if (sc_hist_up_line(scp)) {
				if (!(flags & SCGETC_CN))
					sc_bell(scp, bios_value.bell_pitch, BELL_DURATION);
				break;
			}
			goto next_code;

		case SPCLKEY | FKEY | F(59):  /* page down key */
			sc_remove_cutmarking(scp);
			/* scroll a whole screenful; bell when the bottom is hit */
			for (i=0; i<scp->ysize; i++)
			if (sc_hist_down_line(scp)) {
				if (!(flags & SCGETC_CN))
					sc_bell(scp, bios_value.bell_pitch, BELL_DURATION);
				break;
			}
			goto next_code;
		}
#endif /* SC_NO_HISTORY */
	}

	/*
	 * Process and consume special keys here.  Return a plain char code
	 * or a char code with the META flag or a function key code.
	 */
	if (c & RELKEY) {
		/* key released */
		/* goto next_code */
	} else {
		/* key pressed */
		if (c & SPCLKEY) {
			c &= ~SPCLKEY;
			switch (KEYCHAR(c)) {
			/* LOCKING KEYS */
			case NLK: case CLK: case ALK:
				break;
			case SLK:
				/* scroll-lock toggles history browsing mode */
				(void)kbdd_ioctl(sc->kbd, KDGKBSTATE, (caddr_t)&f);
				if (f & SLKED) {
					scp->status |= SLKED;
				} else {
					if (scp->status & SLKED) {
						scp->status &= ~SLKED;
#ifndef SC_NO_HISTORY
						/* leaving history: restore the saved screen */
						if (scp->status & BUFFER_SAVED) {
							if (!sc_hist_restore(scp))
								sc_remove_cutmarking(scp);
							scp->status &= ~BUFFER_SAVED;
							scp->status |= CURSOR_ENABLED;
							sc_draw_cursor_image(scp);
						}
						tp = SC_DEV(sc, scp->index);
						if (!kdb_active && tty_opened_ns(tp))
							sctty_outwakeup(tp);
#endif
					}
				}
				break;
			case PASTE:
#ifndef SC_NO_CUTPASTE
				sc_mouse_paste(scp);
#endif
				break;

			/* NON-LOCKING KEYS */
			case NOP:
			case LSH: case RSH: case LCTR: case RCTR:
			case LALT: case RALT: case ASH: case META:
				break;

			case BTAB:
				if (!(sc->flags & SC_SCRN_BLANKED))
					return c;
				break;

			case SPSC:
#ifdef DEV_SPLASH
				/* force activatation/deactivation of the screen saver */
				if (!(sc->flags & SC_SCRN_BLANKED)) {
					run_scrn_saver = TRUE;
					sc->scrn_time_stamp -= scrn_blank_time;
				}
				if (cold) {
					/*
					 * While devices are being probed, the screen saver need
					 * to be invoked explictly. XXX
					 */
					if (sc->flags & SC_SCRN_BLANKED) {
						scsplash_stick(FALSE);
						stop_scrn_saver(sc, current_saver);
					} else {
						if (!ISGRAPHSC(scp)) {
							scsplash_stick(TRUE);
							(*current_saver)(sc, TRUE);
						}
					}
				}
#endif /* DEV_SPLASH */
				break;

			case RBT:
#ifndef SC_DISABLE_REBOOT
				if (enable_reboot)
					shutdown_nice(0);
#endif
				break;

			case HALT:
#ifndef SC_DISABLE_REBOOT
				if (enable_reboot)
					shutdown_nice(RB_HALT);
#endif
				break;

			case PDWN:
#ifndef SC_DISABLE_REBOOT
				if (enable_reboot)
					shutdown_nice(RB_HALT|RB_POWEROFF);
#endif
				break;

			case SUSP:
				power_pm_suspend(POWER_SLEEP_STATE_SUSPEND);
				break;
			case STBY:
				power_pm_suspend(POWER_SLEEP_STATE_STANDBY);
				break;

			case DBG:
#ifndef SC_DISABLE_KDBKEY
				if (enable_kdbkey)
					kdb_break();
#endif
				break;

			case PNC:
				if (enable_panic_key)
					panic("Forced by the panic key");
				break;

			case NEXT:
				/* cycle forward to the next open vty, wrapping */
				this_scr = scp->index;
				for (i = (this_scr - sc->first_vty + 1)%sc->vtys;
				    sc->first_vty + i != this_scr;
				    i = (i + 1)%sc->vtys) {
					struct tty *tp = SC_DEV(sc, sc->first_vty + i);
					if (tty_opened_ns(tp)) {
						sc_switch_scr(scp->sc, sc->first_vty + i);
						break;
					}
				}
				break;

			case PREV:
				/* cycle backward to the previous open vty, wrapping */
				this_scr = scp->index;
				for (i = (this_scr - sc->first_vty + sc->vtys - 1)%sc->vtys;
				    sc->first_vty + i != this_scr;
				    i = (i + sc->vtys - 1)%sc->vtys) {
					struct tty *tp = SC_DEV(sc, sc->first_vty + i);
					if (tty_opened_ns(tp)) {
						sc_switch_scr(scp->sc, sc->first_vty + i);
						break;
					}
				}
				break;

			default:
				/* F_SCR..L_SCR select a vty directly */
				if (KEYCHAR(c) >= F_SCR && KEYCHAR(c) <= L_SCR) {
					sc_switch_scr(scp->sc, sc->first_vty + KEYCHAR(c) - F_SCR);
					break;
				}
				/* assert(c & FKEY) */
				if (!(sc->flags & SC_SCRN_BLANKED))
					return c;
				break;
			}
			/* goto next_code */
		} else {
			/* regular keys (maybe MKEY is set) */
#if !defined(SC_DISABLE_KDBKEY) && defined(KDB)
			if (enable_kdbkey)
				kdb_alt_break(c, &sc->sc_altbrk);
#endif
			if (!(sc->flags & SC_SCRN_BLANKED))
				return c;
		}
	}

	/* the key was consumed above (or the screen is blanked); try again */
	goto next_code;
}
/*
 * tty mmap handler: only the currently displayed vty may be mapped;
 * the actual mapping is delegated to the video adapter driver.
 */
static int
sctty_mmap(struct tty *tp, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	scr_stat *scp = sc_get_stat(tp);

	if (scp == scp->sc->cur_scp)
		return vidd_mmap(scp->sc->adp, offset, paddr, nprot, memattr);
	return -1;
}
/*
 * (Re)load the font matching the vty's current character-cell height
 * and mark the whole screen for redraw.  Does nothing in graphics mode
 * or when font loading is compiled out / unsupported by the adapter.
 */
static void
update_font(scr_stat *scp)
{
#ifndef SC_NO_FONT_LOADING
	/* load appropriate font */
	if (!(scp->status & GRAPHICS_MODE)) {
		if (!(scp->status & PIXEL_MODE) && ISFONTAVAIL(scp->sc->adp->va_flags)) {
			/* choose among the 8-, 14- and 16-line fonts, if loaded */
			if (scp->font_size < 14) {
				if (scp->sc->fonts_loaded & FONT_8)
					sc_load_font(scp, 0, 8, 8, scp->sc->font_8, 0, 256);
			} else if (scp->font_size >= 16) {
				if (scp->sc->fonts_loaded & FONT_16)
					sc_load_font(scp, 0, 16, 8, scp->sc->font_16, 0, 256);
			} else {
				if (scp->sc->fonts_loaded & FONT_14)
					sc_load_font(scp, 0, 14, 8, scp->sc->font_14, 0, 256);
			}
			/*
			 * FONT KLUDGE:
			 * This is an interim kludge to display correct font.
			 * Always use the font page #0 on the video plane 2.
			 * Somehow we cannot show the font in other font pages on
			 * some video cards... XXX
			 */
			sc_show_font(scp, 0);
		}
		mark_all(scp);
	}
#endif /* !SC_NO_FONT_LOADING */
}
/*
 * Read the keyboard's lock-key state and mirror it into the vty's
 * status bits.  Returns 0 on success or an errno value.
 */
static int
save_kbd_state(scr_stat *scp)
{
	int error, state;

	error = kbdd_ioctl(scp->sc->kbd, KDGKBSTATE, (caddr_t)&state);
	if (error == ENOIOCTL)
		error = ENODEV;
	if (error != 0)
		return error;
	scp->status = (scp->status & ~LOCK_MASK) | state;
	return 0;
}
/*
 * Set the keyboard's lock-key state.  Only the bits selected by `mask`
 * are taken from `new_bits`; when the mask covers all lock keys the
 * current state need not be read back first.  Returns 0 or an errno.
 */
static int
update_kbd_state(scr_stat *scp, int new_bits, int mask)
{
	int error, state;

	if (mask == LOCK_MASK) {
		/* full replacement: no read-modify-write needed */
		state = new_bits & LOCK_MASK;
	} else {
		error = kbdd_ioctl(scp->sc->kbd, KDGKBSTATE, (caddr_t)&state);
		if (error == ENOIOCTL)
			error = ENODEV;
		if (error)
			return error;
		state = (state & ~mask) | (new_bits & mask);
	}
	error = kbdd_ioctl(scp->sc->kbd, KDSKBSTATE, (caddr_t)&state);
	return (error == ENOIOCTL) ? ENODEV : error;
}
/*
 * Program the keyboard LEDs; bits outside LOCK_MASK are ignored.
 * Returns 0 or an errno value.
 */
static int
update_kbd_leds(scr_stat *scp, int which)
{
	int leds, error;

	leds = which & LOCK_MASK;
	error = kbdd_ioctl(scp->sc->kbd, KDSETLED, (caddr_t)&leds);
	return (error == ENOIOCTL) ? ENODEV : error;
}
/*
 * Program the video hardware for the vty's selected mode and rebuild
 * the renderer, frame buffer mapping, font, border and cursor.
 * Returns 1 if the adapter rejects the mode, 0 otherwise (including
 * the no-op case where the vty is not currently displayed).
 */
int
set_mode(scr_stat *scp)
{
	video_info_t info;

	/* reject unsupported mode */
	if (vidd_get_info(scp->sc->adp, scp->mode, &info))
		return 1;

	/* if this vty is not currently showing, do nothing */
	if (scp != scp->sc->cur_scp)
		return 0;

	/* setup video hardware for the given mode */
	vidd_set_mode(scp->sc->adp, scp->mode);

	scp->rndr->init(scp);

#ifndef __sparc64__
	/* re-map the screen buffer onto the adapter's window */
	sc_vtb_init(&scp->scr, VTB_FRAMEBUFFER, scp->xsize, scp->ysize,
	    (void *)scp->sc->adp->va_window, FALSE);
#endif

	update_font(scp);

	sc_set_border(scp, scp->border);
	sc_set_cursor_image(scp);

	return 0;
}
/*
 * Draw the screen border in the given color, serialized by the
 * per-softc video lock.
 */
void
sc_set_border(scr_stat *scp, int color)
{
	SC_VIDEO_LOCK(scp->sc);
	(*scp->rndr->draw_border)(scp, color);
	SC_VIDEO_UNLOCK(scp->sc);
}
#ifndef SC_NO_FONT_LOADING
/*
 * Download a font into the adapter.  The in-progress flag is raised
 * around the transfer so other parts of the driver can tell a font
 * load is underway.
 */
void
sc_load_font(scr_stat *scp, int page, int size, int width, u_char *buf,
    int base, int count)
{
	sc_softc_t *softc = scp->sc;

	softc->font_loading_in_progress = TRUE;
	vidd_load_font(softc->adp, page, size, width, buf, base, count);
	softc->font_loading_in_progress = FALSE;
}
/*
 * Read a font back from the adapter into `buf`.  The in-progress flag
 * is raised around the transfer, mirroring sc_load_font().
 */
void
sc_save_font(scr_stat *scp, int page, int size, int width, u_char *buf,
    int base, int count)
{
	sc_softc_t *softc = scp->sc;

	softc->font_loading_in_progress = TRUE;
	vidd_save_font(softc->adp, page, size, width, buf, base, count);
	softc->font_loading_in_progress = FALSE;
}
/*
 * Select which font page the adapter displays.
 */
void
sc_show_font(scr_stat *scp, int page)
{
	vidd_show_font(scp->sc->adp, page);
}
#endif /* !SC_NO_FONT_LOADING */
/*
 * Feed `count` bytes of paste data into the currently displayed vty's
 * tty input, translating each byte through the softc's scr_rmap table.
 * Silently dropped if that tty is not open.
 */
void
sc_paste(scr_stat *scp, const u_char *p, int count)
{
	struct tty *tp = SC_DEV(scp->sc, scp->sc->cur_scp->index);
	u_char *rmap;

	if (!tty_opened_ns(tp))
		return;
	rmap = scp->sc->scr_rmap;
	while (count-- > 0)
		ttydisc_rint(tp, rmap[*p++], 0);
	ttydisc_rint_done(tp);
}
/*
 * Feed `count` bytes into the input path of the currently displayed
 * vty's tty, optionally completing the input batch.  Silently dropped
 * if that tty is not open.
 */
void
sc_respond(scr_stat *scp, const u_char *p, int count, int wakeup)
{
	struct tty *tp = SC_DEV(scp->sc, scp->sc->cur_scp->index);

	if (!tty_opened_ns(tp))
		return;
	ttydisc_rint_simple(tp, p, count);
	if (wakeup) {
		/* XXX: we can't always call ttydisc_rint_done() here! */
		ttydisc_rint_done(tp);
	}
}
/*
 * Ring the bell for a vty: either a visual bell (screen blink via
 * blink_screen()) or an audible beep of the given pitch/duration.
 * Suppressed while cold-booting or shutting down, when bells are
 * disabled, and for background vtys when SC_QUIET_BELL is set.
 */
void
sc_bell(scr_stat *scp, int pitch, int duration)
{
	if (cold || shutdown_in_progress || !enable_bell)
		return;

	if (scp != scp->sc->cur_scp && (scp->sc->flags & SC_QUIET_BELL))
		return;

	if (scp->sc->flags & SC_VISUAL_BELL) {
		/* don't restart a blink sequence already in progress */
		if (scp->sc->blink_in_progress)
			return;
		scp->sc->blink_in_progress = 3;
		/* extra blinks when the bell is for a background vty */
		if (scp != scp->sc->cur_scp)
			scp->sc->blink_in_progress += 2;
		blink_screen(scp->sc->cur_scp);
	} else if (duration != 0 && pitch != 0) {
		/* presumably doubled so background-vty bells sound distinct */
		if (scp != scp->sc->cur_scp)
			pitch *= 2;
		/* 1193182 Hz is the i8254 timer input clock frequency */
		sysbeep(1193182 / pitch, duration);
	}
}
/*
 * Callout handler implementing the visual bell: redraws the whole
 * screen `blink_in_progress` times at roughly 15 Hz, passing the low
 * bit of the countdown to the renderer as the draw flag, then restores
 * the screen and performs any vty switch delayed during the blinking.
 * Blinking is not possible in graphics mode and finishes immediately.
 */
static void
blink_screen(void *arg)
{
	scr_stat *scp = arg;
	struct tty *tp;

	if (ISGRAPHSC(scp) || (scp->sc->blink_in_progress <= 1)) {
		/* sequence finished: force a full normal redraw */
		scp->sc->blink_in_progress = 0;
		mark_all(scp);
		tp = SC_DEV(scp->sc, scp->index);
		if (tty_opened_ns(tp))
			sctty_outwakeup(tp);
		/* delayed_next_scr is stored 1-based; 0 means "none" */
		if (scp->sc->delayed_next_scr)
			sc_switch_scr(scp->sc, scp->sc->delayed_next_scr - 1);
	}
	else {
		(*scp->rndr->draw)(scp, 0, scp->xsize*scp->ysize,
		    scp->sc->blink_in_progress & 1);
		scp->sc->blink_in_progress--;
		/* re-arm ourselves for the next phase of the blink */
		callout_reset_sbt(&scp->sc->cblink, SBT_1S / 15, 0,
		    blink_screen, scp, C_PREL(0));
	}
}
/*
* Until sc_attach_unit() gets called no dev structures will be available
* to store the per-screen current status. This is the case when the
* kernel is initially booting and needs access to its console. During
* this early phase of booting the console's current status is kept in
* one statically defined scr_stat structure, and any pointers to the
* dev structures will be NULL.
*/
/*
 * Map a tty back to its per-screen state.  Early in boot, before
 * sc_attach_unit() has created dev structures, the tty pointer is NULL
 * and the statically allocated console state is used instead.
 */
static scr_stat *
sc_get_stat(struct tty *tp)
{
	return (tp == NULL ? &main_console : SC_STAT(tp));
}
/*
* Allocate active keyboard. Try to allocate "kbdmux" keyboard first, and,
* if found, add all non-busy keyboards to "kbdmux". Otherwise look for
* any keyboard.
*/
static int
sc_allocate_keyboard(sc_softc_t *sc, int unit)
{
	int idx0, idx;
	keyboard_t *k0, *k;
	keyboard_info_t ki;

	/* try the keyboard multiplexer first */
	idx0 = kbd_allocate("kbdmux", -1, (void *)&sc->keyboard, sckbdevent, sc);
	if (idx0 != -1) {
		k0 = kbd_get_keyboard(idx0);

		/* walk every keyboard and fold the idle ones into the mux */
		for (idx = kbd_find_keyboard2("*", -1, 0);
		     idx != -1;
		     idx = kbd_find_keyboard2("*", -1, idx + 1)) {
			k = kbd_get_keyboard(idx);

			/* skip the mux itself and busy keyboards */
			if (idx == idx0 || KBD_IS_BUSY(k))
				continue;

			bzero(&ki, sizeof(ki));
			strcpy(ki.kb_name, k->kb_name);
			ki.kb_unit = k->kb_unit;

			(void)kbdd_ioctl(k0, KBADDKBD, (caddr_t) &ki);
		}
	} else
		/* no mux available: allocate any single keyboard */
		idx0 = kbd_allocate("*", unit, (void *)&sc->keyboard, sckbdevent, sc);

	return (idx0);
}
Index: projects/clang360-import/sys/dev/virtio/block/virtio_blk.c
===================================================================
--- projects/clang360-import/sys/dev/virtio/block/virtio_blk.c (revision 277808)
+++ projects/clang360-import/sys/dev/virtio/block/virtio_blk.c (revision 277809)
@@ -1,1410 +1,1400 @@
/*-
* Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Driver for VirtIO block devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <geom/geom_disk.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>
#include "virtio_if.h"
struct vtblk_request {
struct virtio_blk_outhdr vbr_hdr;
struct bio *vbr_bp;
uint8_t vbr_ack;
TAILQ_ENTRY(vtblk_request) vbr_link;
};
enum vtblk_cache_mode {
VTBLK_CACHE_WRITETHROUGH,
VTBLK_CACHE_WRITEBACK,
VTBLK_CACHE_MAX
};
struct vtblk_softc {
device_t vtblk_dev;
struct mtx vtblk_mtx;
uint64_t vtblk_features;
uint32_t vtblk_flags;
#define VTBLK_FLAG_INDIRECT 0x0001
#define VTBLK_FLAG_READONLY 0x0002
#define VTBLK_FLAG_DETACH 0x0004
#define VTBLK_FLAG_SUSPEND 0x0008
-#define VTBLK_FLAG_DUMPING 0x0010
-#define VTBLK_FLAG_BARRIER 0x0020
-#define VTBLK_FLAG_WC_CONFIG 0x0040
+#define VTBLK_FLAG_BARRIER 0x0010
+#define VTBLK_FLAG_WC_CONFIG 0x0020
struct virtqueue *vtblk_vq;
struct sglist *vtblk_sglist;
struct disk *vtblk_disk;
struct bio_queue_head vtblk_bioq;
TAILQ_HEAD(, vtblk_request)
vtblk_req_free;
TAILQ_HEAD(, vtblk_request)
vtblk_req_ready;
struct vtblk_request *vtblk_req_ordered;
int vtblk_max_nsegs;
int vtblk_request_count;
enum vtblk_cache_mode vtblk_write_cache;
+ struct bio_queue vtblk_dump_queue;
struct vtblk_request vtblk_dump_request;
};
static struct virtio_feature_desc vtblk_feature_desc[] = {
{ VIRTIO_BLK_F_BARRIER, "HostBarrier" },
{ VIRTIO_BLK_F_SIZE_MAX, "MaxSegSize" },
{ VIRTIO_BLK_F_SEG_MAX, "MaxNumSegs" },
{ VIRTIO_BLK_F_GEOMETRY, "DiskGeometry" },
{ VIRTIO_BLK_F_RO, "ReadOnly" },
{ VIRTIO_BLK_F_BLK_SIZE, "BlockSize" },
{ VIRTIO_BLK_F_SCSI, "SCSICmds" },
{ VIRTIO_BLK_F_WCE, "WriteCache" },
{ VIRTIO_BLK_F_TOPOLOGY, "Topology" },
{ VIRTIO_BLK_F_CONFIG_WCE, "ConfigWCE" },
{ 0, NULL }
};
static int vtblk_modevent(module_t, int, void *);
static int vtblk_probe(device_t);
static int vtblk_attach(device_t);
static int vtblk_detach(device_t);
static int vtblk_suspend(device_t);
static int vtblk_resume(device_t);
static int vtblk_shutdown(device_t);
static int vtblk_config_change(device_t);
static int vtblk_open(struct disk *);
static int vtblk_close(struct disk *);
static int vtblk_ioctl(struct disk *, u_long, void *, int,
struct thread *);
static int vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void vtblk_strategy(struct bio *);
static void vtblk_negotiate_features(struct vtblk_softc *);
static void vtblk_setup_features(struct vtblk_softc *);
static int vtblk_maximum_segments(struct vtblk_softc *,
struct virtio_blk_config *);
static int vtblk_alloc_virtqueue(struct vtblk_softc *);
static void vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void vtblk_alloc_disk(struct vtblk_softc *,
struct virtio_blk_config *);
static void vtblk_create_disk(struct vtblk_softc *);
static int vtblk_request_prealloc(struct vtblk_softc *);
static void vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *);
static void vtblk_request_enqueue(struct vtblk_softc *,
struct vtblk_request *);
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *);
static void vtblk_request_requeue_ready(struct vtblk_softc *,
struct vtblk_request *);
static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *);
static int vtblk_request_execute(struct vtblk_softc *,
struct vtblk_request *);
static int vtblk_request_error(struct vtblk_request *);
static void vtblk_queue_completed(struct vtblk_softc *,
struct bio_queue *);
static void vtblk_done_completed(struct vtblk_softc *,
struct bio_queue *);
-static void vtblk_drain_vq(struct vtblk_softc *, int);
+static void vtblk_drain_vq(struct vtblk_softc *);
static void vtblk_drain(struct vtblk_softc *);
static void vtblk_startio(struct vtblk_softc *);
static void vtblk_bio_done(struct vtblk_softc *, struct bio *, int);
static void vtblk_read_config(struct vtblk_softc *,
struct virtio_blk_config *);
static void vtblk_ident(struct vtblk_softc *);
static int vtblk_poll_request(struct vtblk_softc *,
struct vtblk_request *);
static int vtblk_quiesce(struct vtblk_softc *);
static void vtblk_vq_intr(void *);
static void vtblk_stop(struct vtblk_softc *);
-static void vtblk_dump_prepare(struct vtblk_softc *);
+static void vtblk_dump_quiesce(struct vtblk_softc *);
static int vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int vtblk_dump_flush(struct vtblk_softc *);
+static void vtblk_dump_complete(struct vtblk_softc *);
static void vtblk_set_write_cache(struct vtblk_softc *, int);
static int vtblk_write_cache_enabled(struct vtblk_softc *sc,
struct virtio_blk_config *);
static int vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void vtblk_setup_sysctl(struct vtblk_softc *);
static int vtblk_tunable_int(struct vtblk_softc *, const char *, int);
/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
(VIRTIO_BLK_F_BARRIER | \
VIRTIO_BLK_F_SIZE_MAX | \
VIRTIO_BLK_F_SEG_MAX | \
VIRTIO_BLK_F_GEOMETRY | \
VIRTIO_BLK_F_RO | \
VIRTIO_BLK_F_BLK_SIZE | \
VIRTIO_BLK_F_WCE | \
VIRTIO_BLK_F_CONFIG_WCE | \
VIRTIO_RING_F_INDIRECT_DESC)
#define VTBLK_MTX(_sc) &(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
mtx_init(VTBLK_MTX((_sc)), (_name), \
"VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc) mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc) mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc) mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc) mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
#define VTBLK_DISK_NAME "vtbd"
#define VTBLK_QUIESCE_TIMEOUT (30 * hz)
/*
* Each block request uses at least two segments - one for the header
* and one for the status.
*/
#define VTBLK_MIN_SEGMENTS 2
static device_method_t vtblk_methods[] = {
/* Device methods. */
DEVMETHOD(device_probe, vtblk_probe),
DEVMETHOD(device_attach, vtblk_attach),
DEVMETHOD(device_detach, vtblk_detach),
DEVMETHOD(device_suspend, vtblk_suspend),
DEVMETHOD(device_resume, vtblk_resume),
DEVMETHOD(device_shutdown, vtblk_shutdown),
/* VirtIO methods. */
DEVMETHOD(virtio_config_change, vtblk_config_change),
DEVMETHOD_END
};
static driver_t vtblk_driver = {
"vtblk",
vtblk_methods,
sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;
DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
/*
 * Module event handler.  No per-event work is needed; unrecognized
 * event types are rejected with EOPNOTSUPP.
 */
static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
/*
 * Probe: match any VirtIO device whose device type is "block".
 */
static int
vtblk_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Block Adapter");

	return (BUS_PROBE_DEFAULT);
}
static int
vtblk_attach(device_t dev)
{
struct vtblk_softc *sc;
struct virtio_blk_config blkcfg;
int error;
virtio_set_feature_desc(dev, vtblk_feature_desc);
sc = device_get_softc(dev);
sc->vtblk_dev = dev;
VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
bioq_init(&sc->vtblk_bioq);
+ TAILQ_INIT(&sc->vtblk_dump_queue);
TAILQ_INIT(&sc->vtblk_req_free);
TAILQ_INIT(&sc->vtblk_req_ready);
vtblk_setup_sysctl(sc);
vtblk_setup_features(sc);
vtblk_read_config(sc, &blkcfg);
/*
* With the current sglist(9) implementation, it is not easy
* for us to support a maximum segment size as adjacent
* segments are coalesced. For now, just make sure it's larger
* than the maximum supported transfer size.
*/
if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
if (blkcfg.size_max < MAXPHYS) {
error = ENOTSUP;
device_printf(dev, "host requires unsupported "
"maximum segment size feature\n");
goto fail;
}
}
sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
error = EINVAL;
device_printf(dev, "fewer than minimum number of segments "
"allowed: %d\n", sc->vtblk_max_nsegs);
goto fail;
}
sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
if (sc->vtblk_sglist == NULL) {
error = ENOMEM;
device_printf(dev, "cannot allocate sglist\n");
goto fail;
}
error = vtblk_alloc_virtqueue(sc);
if (error) {
device_printf(dev, "cannot allocate virtqueue\n");
goto fail;
}
error = vtblk_request_prealloc(sc);
if (error) {
device_printf(dev, "cannot preallocate requests\n");
goto fail;
}
vtblk_alloc_disk(sc, &blkcfg);
error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
if (error) {
device_printf(dev, "cannot setup virtqueue interrupt\n");
goto fail;
}
vtblk_create_disk(sc);
virtqueue_enable_intr(sc->vtblk_vq);
fail:
if (error)
vtblk_detach(dev);
return (error);
}
static int
vtblk_detach(device_t dev)
{
struct vtblk_softc *sc;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
sc->vtblk_flags |= VTBLK_FLAG_DETACH;
if (device_is_attached(dev))
vtblk_stop(sc);
VTBLK_UNLOCK(sc);
vtblk_drain(sc);
if (sc->vtblk_disk != NULL) {
disk_destroy(sc->vtblk_disk);
sc->vtblk_disk = NULL;
}
if (sc->vtblk_sglist != NULL) {
sglist_free(sc->vtblk_sglist);
sc->vtblk_sglist = NULL;
}
VTBLK_LOCK_DESTROY(sc);
return (0);
}
static int
vtblk_suspend(device_t dev)
{
struct vtblk_softc *sc;
int error;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
/* XXX BMV: virtio_stop(), etc needed here? */
error = vtblk_quiesce(sc);
if (error)
sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
VTBLK_UNLOCK(sc);
return (error);
}
static int
vtblk_resume(device_t dev)
{
struct vtblk_softc *sc;
sc = device_get_softc(dev);
VTBLK_LOCK(sc);
/* XXX BMV: virtio_reinit(), etc needed here? */
sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
vtblk_startio(sc);
VTBLK_UNLOCK(sc);
return (0);
}
static int
vtblk_shutdown(device_t dev)
{
return (0);
}
static int
vtblk_config_change(device_t dev)
{
struct vtblk_softc *sc;
struct virtio_blk_config blkcfg;
uint64_t capacity;
sc = device_get_softc(dev);
vtblk_read_config(sc, &blkcfg);
/* Capacity is always in 512-byte units. */
capacity = blkcfg.capacity * 512;
if (sc->vtblk_disk->d_mediasize != capacity)
vtblk_resize_disk(sc, capacity);
return (0);
}
/*
 * Disk open method: refuse while the driver is detaching.
 */
static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}
/*
 * Disk close method: nothing to do beyond validating the softc.
 */
static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}
/*
 * Disk ioctl method: no ioctls are implemented; the softc check only
 * guards against a vanished disk.
 */
static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
size_t length)
{
struct disk *dp;
struct vtblk_softc *sc;
int error;
dp = arg;
+ error = 0;
if ((sc = dp->d_drv1) == NULL)
return (ENXIO);
VTBLK_LOCK(sc);
- if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
- vtblk_dump_prepare(sc);
- sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
- }
+ vtblk_dump_quiesce(sc);
if (length > 0)
error = vtblk_dump_write(sc, virtual, offset, length);
- else if (virtual == NULL && offset == 0)
- error = vtblk_dump_flush(sc);
- else {
- error = EINVAL;
- sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
- }
+ if (error || (virtual == NULL && offset == 0))
+ vtblk_dump_complete(sc);
VTBLK_UNLOCK(sc);
return (error);
}
static void
vtblk_strategy(struct bio *bp)
{
struct vtblk_softc *sc;
if ((sc = bp->bio_disk->d_drv1) == NULL) {
vtblk_bio_done(NULL, bp, EINVAL);
return;
}
/*
* Fail any write if RO. Unfortunately, there does not seem to
* be a better way to report our readonly'ness to GEOM above.
*/
if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
(bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
vtblk_bio_done(sc, bp, EROFS);
return;
}
VTBLK_LOCK(sc);
if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
VTBLK_UNLOCK(sc);
vtblk_bio_done(sc, bp, ENXIO);
return;
}
bioq_insert_tail(&sc->vtblk_bioq, bp);
vtblk_startio(sc);
VTBLK_UNLOCK(sc);
}
static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
device_t dev;
uint64_t features;
dev = sc->vtblk_dev;
features = VTBLK_FEATURES;
sc->vtblk_features = virtio_negotiate_features(dev, features);
}
static void
vtblk_setup_features(struct vtblk_softc *sc)
{
device_t dev;
dev = sc->vtblk_dev;
vtblk_negotiate_features(sc);
if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
sc->vtblk_flags |= VTBLK_FLAG_READONLY;
if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
struct virtio_blk_config *blkcfg)
{
device_t dev;
int nsegs;
dev = sc->vtblk_dev;
nsegs = VTBLK_MIN_SEGMENTS;
if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
} else
nsegs += 1;
return (nsegs);
}
static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info;
dev = sc->vtblk_dev;
VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
vtblk_vq_intr, sc, &sc->vtblk_vq,
"%s request", device_get_nameunit(dev));
return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}
static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
device_t dev;
struct disk *dp;
int error;
dev = sc->vtblk_dev;
dp = sc->vtblk_disk;
dp->d_mediasize = new_capacity;
if (bootverbose) {
device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
(uintmax_t) dp->d_mediasize >> 20,
(uintmax_t) dp->d_mediasize / dp->d_sectorsize,
dp->d_sectorsize);
}
error = disk_resize(dp, M_NOWAIT);
if (error) {
device_printf(dev,
"disk_resize(9) failed, error: %d\n", error);
}
}
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
device_t dev;
struct disk *dp;
dev = sc->vtblk_dev;
sc->vtblk_disk = dp = disk_alloc();
dp->d_open = vtblk_open;
dp->d_close = vtblk_close;
dp->d_ioctl = vtblk_ioctl;
dp->d_strategy = vtblk_strategy;
dp->d_name = VTBLK_DISK_NAME;
dp->d_unit = device_get_unit(dev);
dp->d_drv1 = sc;
dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
DISKFLAG_DIRECT_COMPLETION;
dp->d_hba_vendor = virtio_get_vendor(dev);
dp->d_hba_device = virtio_get_device(dev);
dp->d_hba_subvendor = virtio_get_subvendor(dev);
dp->d_hba_subdevice = virtio_get_subdevice(dev);
if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
dp->d_dump = vtblk_dump;
/* Capacity is always in 512-byte units. */
dp->d_mediasize = blkcfg->capacity * 512;
if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
dp->d_sectorsize = blkcfg->blk_size;
else
dp->d_sectorsize = 512;
/*
* The VirtIO maximum I/O size is given in terms of segments.
* However, FreeBSD limits I/O size by logical buffer size, not
* by physically contiguous pages. Therefore, we have to assume
* no pages are contiguous. This may impose an artificially low
* maximum I/O size. But in practice, since QEMU advertises 128
* segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
* which is typically greater than MAXPHYS. Eventually we should
* just advertise MAXPHYS and split buffers that are too big.
*
* Note we must subtract one additional segment in case of non
* page aligned buffers.
*/
dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
PAGE_SIZE;
if (dp->d_maxsize < PAGE_SIZE)
dp->d_maxsize = PAGE_SIZE; /* XXX */
if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
dp->d_fwsectors = blkcfg->geometry.sectors;
dp->d_fwheads = blkcfg->geometry.heads;
}
if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
dp->d_stripesize = dp->d_sectorsize *
(1 << blkcfg->topology.physical_block_exp);
dp->d_stripeoffset = (dp->d_stripesize -
blkcfg->topology.alignment_offset * dp->d_sectorsize) %
dp->d_stripesize;
}
if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
else
sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}
static void
vtblk_create_disk(struct vtblk_softc *sc)
{
struct disk *dp;
dp = sc->vtblk_disk;
vtblk_ident(sc);
device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
(uintmax_t) dp->d_mediasize >> 20,
(uintmax_t) dp->d_mediasize / dp->d_sectorsize,
dp->d_sectorsize);
disk_create(dp, DISK_VERSION);
}
static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
struct vtblk_request *req;
int i, nreqs;
nreqs = virtqueue_size(sc->vtblk_vq);
/*
* Preallocate sufficient requests to keep the virtqueue full. Each
* request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
* the number allocated when indirect descriptors are not available.
*/
if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
nreqs /= VTBLK_MIN_SEGMENTS;
for (i = 0; i < nreqs; i++) {
req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
if (req == NULL)
return (ENOMEM);
MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);
sc->vtblk_request_count++;
vtblk_request_enqueue(sc, req);
}
return (0);
}
static void
vtblk_request_free(struct vtblk_softc *sc)
{
struct vtblk_request *req;
MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));
while ((req = vtblk_request_dequeue(sc)) != NULL) {
sc->vtblk_request_count--;
free(req, M_DEVBUF);
}
KASSERT(sc->vtblk_request_count == 0,
("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}
/*
 * Take a request off the free list, zeroed for reuse.  Returns NULL
 * when no free request is available.
 */
static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}
/*
 * Return a request to the free list.
 */
static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}
/*
 * Take the first request off the ready list, or NULL if it is empty.
 */
static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}
/*
 * Defer a request that could not be enqueued so it is retried first
 * on the next submission pass.
 */
static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}
/*
 * Select the next request to submit: a previously deferred request
 * takes priority over starting a new bio.
 */
static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	return (req != NULL ? req : vtblk_request_bio(sc));
}
/*
 * Pull the next bio off the bio queue and wrap it in a preallocated
 * request, translating the bio command into a VirtIO block header.
 * Returns NULL when there is no pending bio or no free request
 * (in the latter case the bio stays queued).
 */
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	/* The ack byte is filled in by the host; preset to an invalid value. */
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		/* VirtIO block sectors are a fixed 512 bytes. */
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}
/*
 * Build the scatter/gather list for 'req' and enqueue it on the
 * virtqueue. Returns EBUSY when an emulated barrier forces the request
 * to wait (either a barrier is outstanding, or this is a barrier and
 * the queue is not yet empty); otherwise the virtqueue_enqueue()
 * result.
 */
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	/* First segment: the request header, read by the host. */
	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	/* The trailing ack byte is always written by the host. */
	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}
/*
 * Translate the device's ack status byte into an errno value.
 */
static int
vtblk_request_error(struct vtblk_request *req)
{

	if (req->vbr_ack == VIRTIO_BLK_S_OK)
		return (0);
	if (req->vbr_ack == VIRTIO_BLK_S_UNSUPP)
		return (ENOTSUP);

	/* Anything else is a generic I/O failure. */
	return (EIO);
}
/*
 * Harvest all completed requests from the virtqueue: record each bio's
 * error status, collect the bios on 'queue' for later completion, and
 * return the request structures to the free list. Clears the emulated
 * barrier marker when its request completes.
 */
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			/* The barrier request is the only one in flight. */
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}
/*
 * Complete every bio gathered on 'queue', logging hard errors first.
 */
static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bio, *next;

	TAILQ_FOREACH_SAFE(bio, queue, bio_queue, next) {
		if (bio->bio_error != 0)
			disk_err(bio, "hard error", -1, 1);
		vtblk_bio_done(sc, bio, bio->bio_error);
	}
}
static void
-vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
+vtblk_drain_vq(struct vtblk_softc *sc)
{
struct virtqueue *vq;
struct vtblk_request *req;
int last;
vq = sc->vtblk_vq;
last = 0;
while ((req = virtqueue_drain(vq, &last)) != NULL) {
- if (!skip_done)
- vtblk_bio_done(sc, req->vbr_bp, ENXIO);
-
+ vtblk_bio_done(sc, req->vbr_bp, ENXIO);
vtblk_request_enqueue(sc, req);
}
sc->vtblk_req_ordered = NULL;
KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
static void
vtblk_drain(struct vtblk_softc *sc)
{
struct bio_queue queue;
struct bio_queue_head *bioq;
struct vtblk_request *req;
struct bio *bp;
bioq = &sc->vtblk_bioq;
TAILQ_INIT(&queue);
if (sc->vtblk_vq != NULL) {
vtblk_queue_completed(sc, &queue);
vtblk_done_completed(sc, &queue);
- vtblk_drain_vq(sc, 0);
+ vtblk_drain_vq(sc);
}
while ((req = vtblk_request_next_ready(sc)) != NULL) {
vtblk_bio_done(sc, req->vbr_bp, ENXIO);
vtblk_request_enqueue(sc, req);
}
while (bioq_first(bioq) != NULL) {
bp = bioq_takefirst(bioq);
vtblk_bio_done(sc, bp, ENXIO);
}
vtblk_request_free(sc);
}
/*
 * Submit as many pending requests as the virtqueue will hold, then
 * notify the host once if anything was enqueued. Suspended devices
 * submit nothing.
 */
static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enqueued;

	VTBLK_LOCK_ASSERT(sc);

	vq = sc->vtblk_vq;
	enqueued = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	for (; !virtqueue_full(vq); enqueued++) {
		if ((req = vtblk_request_next(sc)) == NULL)
			break;
		if (vtblk_request_execute(sc, req) != 0) {
			/* Keep the request around for the next attempt. */
			vtblk_request_requeue_ready(sc, req);
			break;
		}
	}

	if (enqueued > 0)
		virtqueue_notify(vq);
}
/*
 * Finish a bio, marking it failed when 'error' is non-zero. Must be
 * called without the softc lock held.
 */
static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error != 0) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}
/*
 * Helper: copy one device config field into *(_cfg), but only when
 * feature _feature was negotiated.
 */
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

/*
 * Populate 'blkcfg' from the device's config space. Fields guarded by
 * a feature bit are read only when that feature was negotiated; the
 * rest of the structure stays zeroed.
 */
static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG
/*
 * Issue a VIRTIO_BLK_T_GET_ID request to read the device identifier
 * into the disk's d_ident, using a stack-allocated bio and a polled
 * request. Failure is logged but not fatal. Can be disabled with the
 * per-device "no_ident" tunable.
 */
static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	/* The bio lives on the stack; the request is synchronously polled. */
	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));
	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
struct virtqueue *vq;
int error;
vq = sc->vtblk_vq;
if (!virtqueue_empty(vq))
return (EBUSY);
error = vtblk_request_execute(sc, req);
if (error)
return (error);
virtqueue_notify(vq);
virtqueue_poll(vq, NULL);
error = vtblk_request_error(req);
if (error && bootverbose) {
device_printf(sc->vtblk_dev,
"%s: IO error: %d\n", __func__, error);
}
return (error);
}
/*
 * Wait for all in-flight requests to complete, sleeping on the
 * virtqueue. Returns EBUSY if the timeout expires first.
 */
static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int rc;

	VTBLK_LOCK_ASSERT(sc);

	rc = 0;
	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			rc = EBUSY;
			break;
		}
	}

	return (rc);
}
/*
 * Virtqueue interrupt handler. Collects completed requests, restarts
 * queued I/O, then re-arms the interrupt; if completions raced with
 * re-enabling, loop and process them too. The gathered bios are only
 * completed after the lock is dropped, since biodone paths cannot hold
 * locks with GEOM direct dispatch (see vtblk_bio_done()).
 */
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	/* Non-zero means more entries arrived while re-enabling; retry. */
	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	/* Wake up a vtblk_quiesce() sleeper, if any. */
	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}
/*
 * Mask virtqueue interrupts and stop the VirtIO device. Requests still
 * sitting in the virtqueue are not drained here.
 */
static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}
static void
-vtblk_dump_prepare(struct vtblk_softc *sc)
+vtblk_dump_quiesce(struct vtblk_softc *sc)
{
- device_t dev;
- struct virtqueue *vq;
- dev = sc->vtblk_dev;
- vq = sc->vtblk_vq;
-
- vtblk_stop(sc);
-
/*
- * Drain all requests caught in-flight in the virtqueue,
- * skipping biodone(). When dumping, only one request is
- * outstanding at a time, and we just poll the virtqueue
- * for the response.
+ * Spin here until all the requests in-flight at the time of the
+ * dump are completed and queued. The queued requests will be
+ * biodone'd once the dump is finished.
*/
- vtblk_drain_vq(sc, 1);
-
- if (virtio_reinit(dev, sc->vtblk_features) != 0) {
- panic("%s: cannot reinit VirtIO block device during dump",
- device_get_nameunit(dev));
- }
-
- virtqueue_disable_intr(vq);
- virtio_reinit_complete(dev);
+ while (!virtqueue_empty(sc->vtblk_vq))
+ vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}
/*
 * Write one chunk of dump data at 'offset' using the dedicated dump
 * request and a stack bio, polled to completion.
 */
static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio bio;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &bio;
	bzero(&bio, sizeof(struct bio));
	bio.bio_cmd = BIO_WRITE;
	bio.bio_data = virtual;
	bio.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}
static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
struct bio buf;
struct vtblk_request *req;
req = &sc->vtblk_dump_request;
req->vbr_ack = -1;
req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
req->vbr_hdr.ioprio = 1;
req->vbr_hdr.sector = 0;
req->vbr_bp = &buf;
bzero(&buf, sizeof(struct bio));
buf.bio_cmd = BIO_FLUSH;
return (vtblk_poll_request(sc, req));
+}
+
+static void
+vtblk_dump_complete(struct vtblk_softc *sc)
+{
+
+ vtblk_dump_flush(sc);
+
+ VTBLK_UNLOCK(sc);
+ vtblk_done_completed(sc, &sc->vtblk_dump_queue);
+ VTBLK_LOCK(sc);
}
/*
 * Program the device's write cache mode via the config space.
 */
static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}
/*
 * Determine whether the write cache should operate in writeback mode.
 * With a configurable cache, the "writecache_mode" tunable overrides
 * the device's configured mode when it holds a valid value; otherwise
 * the WCE feature bit decides.
 */
static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE));

	wc = vtblk_tunable_int(sc, "writecache_mode",
	    vtblk_writecache_mode);
	if (wc >= 0 && wc < VTBLK_CACHE_MAX)
		vtblk_set_write_cache(sc, wc);
	else
		wc = blkcfg->writeback;

	return (wc);
}
static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
struct vtblk_softc *sc;
int wc, error;
sc = oidp->oid_arg1;
wc = sc->vtblk_write_cache;
error = sysctl_handle_int(oidp, &wc, 0, req);
if (error || req->newptr == NULL)
return (error);
if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
return (EPERM);
if (wc < 0 || wc >= VTBLK_CACHE_MAX)
return (EINVAL);
VTBLK_LOCK(sc);
sc->vtblk_write_cache = wc;
vtblk_set_write_cache(sc, sc->vtblk_write_cache);
VTBLK_UNLOCK(sc);
return (0);
}
/*
 * Register the per-device sysctl knobs under the device's tree.
 */
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid_list *children;
	device_t dev;

	dev = sc->vtblk_dev;
	sysctl_ctx = device_get_sysctl_ctx(dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}
/*
 * Fetch a per-unit integer tunable (hw.vtblk.<unit>.<knob>), falling
 * back to 'def' when the tunable is not set.
 */
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path), "hw.vtblk.%d.%s",
	    device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
Index: projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.c
===================================================================
--- projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.c (revision 277808)
+++ projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.c (revision 277809)
@@ -1,464 +1,466 @@
/*-
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Aleksandr Rybalko under sponsorship from the
* FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/fbio.h>
#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>
static struct vt_driver vt_fb_driver = {
.vd_name = "fb",
.vd_init = vt_fb_init,
.vd_blank = vt_fb_blank,
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
.vd_priority = VD_PRIORITY_GENERIC+10,
.vd_fb_ioctl = vt_fb_ioctl,
.vd_fb_mmap = vt_fb_mmap,
+ .vd_suspend = vt_fb_suspend,
+ .vd_resume = vt_fb_resume,
};
VT_DRIVER_DECLARE(vt_fb, vt_fb_driver);
/*
 * Write one byte into the mapped framebuffer at byte offset 'o'.
 */
static void
vt_fb_mem_wr1(struct fb_info *sc, uint32_t o, uint8_t v)
{

	KASSERT((o < sc->fb_size), ("Offset %#08x out of fb size", o));

	*(uint8_t *)(sc->fb_vbase + o) = v;
}
/*
 * Write a 16-bit value into the mapped framebuffer at byte offset 'o'.
 */
static void
vt_fb_mem_wr2(struct fb_info *sc, uint32_t o, uint16_t v)
{

	KASSERT((o < sc->fb_size), ("Offset %#08x out of fb size", o));

	*(uint16_t *)(sc->fb_vbase + o) = v;
}
/*
 * Write a 32-bit value into the mapped framebuffer at byte offset 'o'.
 */
static void
vt_fb_mem_wr4(struct fb_info *sc, uint32_t o, uint32_t v)
{

	KASSERT((o < sc->fb_size), ("Offset %#08x out of fb size", o));

	*(uint32_t *)(sc->fb_vbase + o) = v;
}
/*
 * Handle framebuffer ioctls for the generic fb backend. Unknown
 * commands return ENOIOCTL so the caller can try other handlers.
 */
int
vt_fb_ioctl(struct vt_device *vd, u_long cmd, caddr_t data, struct thread *td)
{
	struct fb_info *info;
	int error = 0;

	info = vd->vd_softc;

	switch (cmd) {
	case FBIOGTYPE:
		bcopy(info, (struct fbtype *)data, sizeof(struct fbtype));
		break;

	case FBIO_GETWINORG:	/* get frame buffer window origin */
		*(u_int *)data = 0;
		break;

	case FBIO_GETDISPSTART:	/* get display start address */
		((video_display_start_t *)data)->x = 0;
		((video_display_start_t *)data)->y = 0;
		break;

	case FBIO_GETLINEWIDTH:	/* get scan line width in bytes */
		*(u_int *)data = info->fb_stride;
		break;

	case FBIO_BLANK:	/* blank display */
		if (vd->vd_driver->vd_blank == NULL)
			return (ENODEV);
		vd->vd_driver->vd_blank(vd, TC_BLACK);
		break;

	default:
		error = ENOIOCTL;
		break;
	}

	return (error);
}
/*
 * Map a framebuffer page: translate 'offset' into a physical address.
 * Fails with ENODEV when the framebuffer cannot be mapped at all, and
 * EINVAL when the offset is out of range.
 */
int
vt_fb_mmap(struct vt_device *vd, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	struct fb_info *info;

	info = vd->vd_softc;

	if (info->fb_flags & FB_FLAG_NOMMAP)
		return (ENODEV);

	if (offset < 0 || offset >= info->fb_size)
		return (EINVAL);

	*paddr = info->fb_pbase + offset;
#ifdef VM_MEMATTR_WRITE_COMBINING
	*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
	return (0);
}
/*
 * Write one pixel at (x, y) in 'color', dispatching on the pixel size
 * in bytes. No-op when the framebuffer is write-protected.
 */
void
vt_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color)
{
	struct fb_info *info;
	uint32_t c;
	u_int o;

	info = vd->vd_softc;
	/* Translate the terminal color through the palette. */
	c = info->fb_cmap[color];
	o = info->fb_stride * y + x * FBTYPE_GET_BYTESPP(info);

	if (info->fb_flags & FB_FLAG_NOWRITE)
		return;

	KASSERT((info->fb_vbase != 0), ("Unmapped framebuffer"));

	switch (FBTYPE_GET_BYTESPP(info)) {
	case 1:
		vt_fb_mem_wr1(info, o, c);
		break;
	case 2:
		vt_fb_mem_wr2(info, o, c);
		break;
	case 3:
		/* 3-byte packed pixels must be written byte by byte. */
		vt_fb_mem_wr1(info, o, (c >> 16) & 0xff);
		vt_fb_mem_wr1(info, o + 1, (c >> 8) & 0xff);
		vt_fb_mem_wr1(info, o + 2, c & 0xff);
		break;
	case 4:
		vt_fb_mem_wr4(info, o, c);
		break;
	default:
		/* panic? */
		return;
	}
}
/*
 * Draw a rectangle between the inclusive corners (x1, y1) and
 * (x2, y2); 'fill' selects a solid rectangle, otherwise only the
 * outline is drawn.
 */
void
vt_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2, int fill,
    term_color_t color)
{
	int px, py;

	for (py = y1; py <= y2; py++) {
		if (!fill && py != y1 && py != y2) {
			/* Interior row of an outline: only the edges. */
			vt_fb_setpixel(vd, x1, py, color);
			vt_fb_setpixel(vd, x2, py, color);
		} else {
			for (px = x1; px <= x2; px++)
				vt_fb_setpixel(vd, px, py, color);
		}
	}
}
/*
 * Fill the whole framebuffer with 'color', with a dedicated loop per
 * pixel size. No-op when the framebuffer is write-protected.
 */
void
vt_fb_blank(struct vt_device *vd, term_color_t color)
{
	struct fb_info *info;
	uint32_t c;
	u_int o, h;

	info = vd->vd_softc;
	/* Translate the terminal color through the palette. */
	c = info->fb_cmap[color];

	if (info->fb_flags & FB_FLAG_NOWRITE)
		return;

	KASSERT((info->fb_vbase != 0), ("Unmapped framebuffer"));

	switch (FBTYPE_GET_BYTESPP(info)) {
	case 1:
		for (h = 0; h < info->fb_height; h++)
			for (o = 0; o < info->fb_stride; o++)
				vt_fb_mem_wr1(info, h*info->fb_stride + o, c);
		break;
	case 2:
		for (h = 0; h < info->fb_height; h++)
			for (o = 0; o < info->fb_stride; o += 2)
				vt_fb_mem_wr2(info, h*info->fb_stride + o, c);
		break;
	case 3:
		/* 3-byte packed pixels are written one byte at a time. */
		for (h = 0; h < info->fb_height; h++)
			for (o = 0; o < info->fb_stride; o += 3) {
				vt_fb_mem_wr1(info, h*info->fb_stride + o,
				    (c >> 16) & 0xff);
				vt_fb_mem_wr1(info, h*info->fb_stride + o + 1,
				    (c >> 8) & 0xff);
				vt_fb_mem_wr1(info, h*info->fb_stride + o + 2,
				    c & 0xff);
			}
		break;
	case 4:
		for (h = 0; h < info->fb_height; h++)
			for (o = 0; o < info->fb_stride; o += 4)
				vt_fb_mem_wr4(info, h*info->fb_stride + o, c);
		break;
	default:
		/* panic? */
		return;
	}
}
/*
 * Blit a 1-bit-per-pixel 'pattern' of width x height to the
 * framebuffer at pixel position (x, y): set bits become 'fg', clear
 * bits become 'bg'. An optional 'mask' (same layout as 'pattern')
 * suppresses writes where its bits are clear. Output is clipped to
 * the window's draw area; no-op when the framebuffer is
 * write-protected.
 */
void
vt_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
    const uint8_t *pattern, const uint8_t *mask,
    unsigned int width, unsigned int height,
    unsigned int x, unsigned int y, term_color_t fg, term_color_t bg)
{
	struct fb_info *info;
	uint32_t fgc, bgc, cc, o;
	int c, l, bpp, bpl;
	u_long line;
	uint8_t b, m;
	const uint8_t *ch;

	info = vd->vd_softc;
	bpp = FBTYPE_GET_BYTESPP(info);
	fgc = info->fb_cmap[fg];
	bgc = info->fb_cmap[bg];
	b = m = 0;
	bpl = (width + 7) >> 3; /* Bytes per source line. */

	if (info->fb_flags & FB_FLAG_NOWRITE)
		return;

	KASSERT((info->fb_vbase != 0), ("Unmapped framebuffer"));

	/* Byte offset of the destination's top-left pixel. */
	line = (info->fb_stride * y) + (x * bpp);
	for (l = 0;
	    l < height && y + l < vw->vw_draw_area.tr_end.tp_row;
	    l++) {
		ch = pattern;
		for (c = 0;
		    c < width && x + c < vw->vw_draw_area.tr_end.tp_col;
		    c++) {
			/* Refill the shift register every 8 pixels. */
			if (c % 8 == 0)
				b = *ch++;
			else
				b <<= 1;
			if (mask != NULL) {
				if (c % 8 == 0)
					m = *mask++;
				else
					m <<= 1;
				/* Skip pixel write, if mask has no bit set. */
				if ((m & 0x80) == 0)
					continue;
			}
			o = line + (c * bpp);
			/* MSB of the shift register selects fg vs bg. */
			cc = b & 0x80 ? fgc : bgc;

			switch(bpp) {
			case 1:
				vt_fb_mem_wr1(info, o, cc);
				break;
			case 2:
				vt_fb_mem_wr2(info, o, cc);
				break;
			case 3:
				/* Packed mode, so unaligned. Byte access. */
				vt_fb_mem_wr1(info, o, (cc >> 16) & 0xff);
				vt_fb_mem_wr1(info, o + 1, (cc >> 8) & 0xff);
				vt_fb_mem_wr1(info, o + 2, cc & 0xff);
				break;
			case 4:
				vt_fb_mem_wr4(info, o, cc);
				break;
			default:
				/* panic? */
				break;
			}
		}
		line += info->fb_stride;
		pattern += bpl;
	}
}
/*
 * Render the character cells of 'vw' covered by 'area' using the
 * window's font, then overdraw the mouse cursor bitmap when it is
 * shown and intersects the freshly drawn pixel region.
 */
void
vt_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
    const term_rect_t *area)
{
	unsigned int col, row, x, y;
	struct vt_font *vf;
	term_char_t c;
	term_color_t fg, bg;
	const uint8_t *pattern;

	vf = vw->vw_font;

	for (row = area->tr_begin.tp_row; row < area->tr_end.tp_row; ++row) {
		for (col = area->tr_begin.tp_col; col < area->tr_end.tp_col;
		    ++col) {
			/* Convert the cell position to pixel coordinates. */
			x = col * vf->vf_width +
			    vw->vw_draw_area.tr_begin.tp_col;
			y = row * vf->vf_height +
			    vw->vw_draw_area.tr_begin.tp_row;

			c = VTBUF_GET_FIELD(&vw->vw_buf, row, col);
			pattern = vtfont_lookup(vf, c);
			vt_determine_colors(c,
			    VTBUF_ISCURSOR(&vw->vw_buf, row, col), &fg, &bg);

			vt_fb_bitblt_bitmap(vd, vw,
			    pattern, NULL, vf->vf_width, vf->vf_height,
			    x, y, fg, bg);
		}
	}

#ifndef SC_NO_CUTPASTE
	if (!vd->vd_mshown)
		return;

	/* The drawn region in pixels, for cursor intersection testing. */
	term_rect_t drawn_area;

	drawn_area.tr_begin.tp_col = area->tr_begin.tp_col * vf->vf_width;
	drawn_area.tr_begin.tp_row = area->tr_begin.tp_row * vf->vf_height;
	drawn_area.tr_end.tp_col = area->tr_end.tp_col * vf->vf_width;
	drawn_area.tr_end.tp_row = area->tr_end.tp_row * vf->vf_height;

	if (vt_is_cursor_in_area(vd, &drawn_area)) {
		vt_fb_bitblt_bitmap(vd, vw,
		    vd->vd_mcursor->map, vd->vd_mcursor->mask,
		    vd->vd_mcursor->width, vd->vd_mcursor->height,
		    vd->vd_mx_drawn + vw->vw_draw_area.tr_begin.tp_col,
		    vd->vd_my_drawn + vw->vw_draw_area.tr_begin.tp_row,
		    vd->vd_mcursor_fg, vd->vd_mcursor_bg);
	}
#endif
}
/*
 * Notify the backing framebuffer that this console is now active by
 * invoking its 'enter' callback, if one is provided.
 */
void
vt_fb_postswitch(struct vt_device *vd)
{
	struct fb_info *info = vd->vd_softc;

	if (info->enter != NULL)
		info->enter(info->fb_priv);
}
/*
 * Build the console palette for the given color depth, encoding each
 * channel with the shown mask/shift pairs (R, G, B). Returns 1 for
 * unsupported depths, otherwise the result of
 * vt_generate_cons_palette().
 */
static int
vt_fb_init_cmap(uint32_t *cmap, int depth)
{

	switch (depth) {
	case 8:
		/* 3-3-2 RGB. */
		return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB,
		    0x7, 5, 0x7, 2, 0x3, 0));
	case 15:
		/* 5-5-5 RGB. */
		return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB,
		    0x1f, 10, 0x1f, 5, 0x1f, 0));
	case 16:
		/* 5-6-5 RGB. */
		return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB,
		    0x1f, 11, 0x3f, 5, 0x1f, 0));
	case 24:
	case 32: /* Ignore alpha. */
		/* 8-8-8 RGB. */
		return (vt_generate_cons_palette(cmap, COLOR_FORMAT_RGB,
		    0xff, 16, 0xff, 8, 0xff, 0));
	default:
		return (1);
	}
}
/*
 * Initialize the vt(4) fb backend for a device: adopt the
 * framebuffer's dimensions, set up the palette when one is not
 * already provided, clear the screen, and wake the display. Returns
 * CN_DEAD when the framebuffer is unusable.
 */
int
vt_fb_init(struct vt_device *vd)
{
	struct fb_info *info;
	int err;

	info = vd->vd_softc;
	vd->vd_height = info->fb_height;
	vd->vd_width = info->fb_width;

	if (info->fb_size == 0)
		return (CN_DEAD);

	/* Without a physical base address the buffer cannot be mmap'ed. */
	if (info->fb_pbase == 0)
		info->fb_flags |= FB_FLAG_NOMMAP;

	if (info->fb_cmsize <= 0) {
		err = vt_fb_init_cmap(info->fb_cmap, FBTYPE_GET_BPP(info));
		if (err)
			return (CN_DEAD);
		info->fb_cmsize = 16;
	}

	/* Clear the screen. */
	vd->vd_driver->vd_blank(vd, TC_BLACK);

	/* Wakeup screen. KMS need this. */
	vt_fb_postswitch(vd);

	return (CN_INTERNAL);
}
/*
 * Register a framebuffer with vt(4) using the generic fb driver.
 * Always succeeds.
 */
int
vt_fb_attach(struct fb_info *info)
{

	vt_allocate(&vt_fb_driver, info);

	return (0);
}
void
-vt_fb_resume(void)
+vt_fb_suspend(struct vt_device *vd)
{
- vt_resume();
+ vt_suspend(vd);
}
void
-vt_fb_suspend(void)
+vt_fb_resume(struct vt_device *vd)
{
- vt_suspend();
+ vt_resume(vd);
}
Index: projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.h
===================================================================
--- projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.h (revision 277808)
+++ projects/clang360-import/sys/dev/vt/hw/fb/vt_fb.h (revision 277809)
@@ -1,49 +1,49 @@
/*-
* Copyright (c) 2013 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Aleksandr Rybalko under sponsorship from the
* FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_VT_HW_FB_VT_FB_H_
#define _DEV_VT_HW_FB_VT_FB_H_
/* Generic framebuffer interface call vt_fb_attach to init VT(9) */
int vt_fb_attach(struct fb_info *info);
-void vt_fb_resume(void);
-void vt_fb_suspend(void);
+void vt_fb_resume(struct vt_device *vd);
+void vt_fb_suspend(struct vt_device *vd);
vd_init_t vt_fb_init;
vd_blank_t vt_fb_blank;
vd_bitblt_text_t vt_fb_bitblt_text;
vd_bitblt_bmp_t vt_fb_bitblt_bitmap;
vd_drawrect_t vt_fb_drawrect;
vd_setpixel_t vt_fb_setpixel;
vd_postswitch_t vt_fb_postswitch;
vd_fb_ioctl_t vt_fb_ioctl;
vd_fb_mmap_t vt_fb_mmap;
#endif /* _DEV_VT_HW_FB_VT_FB_H_ */
Index: projects/clang360-import/sys/dev/vt/vt.h
===================================================================
--- projects/clang360-import/sys/dev/vt/vt.h (revision 277808)
+++ projects/clang360-import/sys/dev/vt/vt.h (revision 277809)
@@ -1,423 +1,430 @@
/*-
* Copyright (c) 2009, 2013 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Ed Schouten under sponsorship from the
* FreeBSD Foundation.
*
* Portions of this software were developed by Oleksandr Rybalko
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_VT_VT_H_
#define _DEV_VT_VT_H_
#include <sys/param.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/consio.h>
#include <sys/kbio.h>
#include <sys/mouse.h>
#include <sys/terminal.h>
#include <sys/sysctl.h>
#include "opt_syscons.h"
#include "opt_splash.h"
#ifndef VT_MAXWINDOWS
#ifdef MAXCONS
#define VT_MAXWINDOWS MAXCONS
#else
#define VT_MAXWINDOWS 12
#endif
#endif
#ifndef VT_ALT_TO_ESC_HACK
#define VT_ALT_TO_ESC_HACK 1
#endif
#define VT_CONSWINDOW 0
#if defined(SC_TWOBUTTON_MOUSE) || defined(VT_TWOBUTTON_MOUSE)
#define VT_MOUSE_PASTEBUTTON MOUSE_BUTTON3DOWN /* right button */
#define VT_MOUSE_EXTENDBUTTON MOUSE_BUTTON2DOWN /* not really used */
#else
#define VT_MOUSE_PASTEBUTTON MOUSE_BUTTON2DOWN /* middle button */
#define VT_MOUSE_EXTENDBUTTON MOUSE_BUTTON3DOWN /* right button */
#endif /* defined(SC_TWOBUTTON_MOUSE) || defined(VT_TWOBUTTON_MOUSE) */
#define SC_DRIVER_NAME "vt"
#ifdef VT_DEBUG
#define DPRINTF(_l, ...) if (vt_debug > (_l)) printf( __VA_ARGS__ )
#define VT_CONSOLECTL_DEBUG
#define VT_SYSMOUSE_DEBUG
#else
#define DPRINTF(_l, ...) do {} while (0)
#endif
#define ISSIGVALID(sig) ((sig) > 0 && (sig) < NSIG)
#define VT_SYSCTL_INT(_name, _default, _descr) \
static int vt_##_name = _default; \
SYSCTL_INT(_kern_vt, OID_AUTO, _name, CTLFLAG_RWTUN, &vt_##_name, _default,\
_descr);
struct vt_driver;
void vt_allocate(struct vt_driver *, void *);
-void vt_resume(void);
-void vt_suspend(void);
typedef unsigned int vt_axis_t;
/*
* List of locks
* (d) locked by vd_lock
* (b) locked by vb_lock
* (G) locked by Giant
* (u) unlocked, locked by higher levels
* (c) const until freeing
* (?) yet to be determined
*/
/*
* Per-device datastructure.
*/
#ifndef SC_NO_CUTPASTE
struct vt_mouse_cursor;
#endif
struct vt_pastebuf {
term_char_t *vpb_buf; /* Copy-paste buffer. */
unsigned int vpb_bufsz; /* Buffer size. */
unsigned int vpb_len; /* Length of a last selection. */
};
struct vt_device {
struct vt_window *vd_windows[VT_MAXWINDOWS]; /* (c) Windows. */
struct vt_window *vd_curwindow; /* (d) Current window. */
struct vt_window *vd_savedwindow;/* (?) Saved for suspend. */
struct vt_pastebuf vd_pastebuf; /* (?) Copy/paste buf. */
const struct vt_driver *vd_driver; /* (c) Graphics driver. */
void *vd_softc; /* (u) Driver data. */
#ifndef SC_NO_CUTPASTE
struct vt_mouse_cursor *vd_mcursor; /* (?) Cursor bitmap. */
term_color_t vd_mcursor_fg; /* (?) Cursor fg color. */
term_color_t vd_mcursor_bg; /* (?) Cursor bg color. */
vt_axis_t vd_mx_drawn; /* (?) Mouse X and Y */
vt_axis_t vd_my_drawn; /* as of last redraw. */
int vd_mshown; /* (?) Mouse shown during */
#endif /* last redrawn. */
uint16_t vd_mx; /* (?) Current mouse X. */
uint16_t vd_my; /* (?) current mouse Y. */
uint32_t vd_mstate; /* (?) Mouse state. */
vt_axis_t vd_width; /* (?) Screen width. */
vt_axis_t vd_height; /* (?) Screen height. */
struct mtx vd_lock; /* Per-device lock. */
struct cv vd_winswitch; /* (d) Window switch notify. */
struct callout vd_timer; /* (d) Display timer. */
volatile unsigned int vd_timer_armed;/* (?) Display timer started.*/
int vd_flags; /* (d) Device flags. */
#define VDF_TEXTMODE 0x01 /* Do text mode rendering. */
#define VDF_SPLASH 0x02 /* Splash screen active. */
#define VDF_ASYNC 0x04 /* vt_timer() running. */
#define VDF_INVALID 0x08 /* Entire screen should be re-rendered. */
#define VDF_DEAD 0x10 /* Early probing found nothing. */
#define VDF_INITIALIZED 0x20 /* vtterm_cnprobe already done. */
#define VDF_MOUSECURSOR 0x40 /* Mouse cursor visible. */
#define VDF_QUIET_BELL 0x80 /* Disable bell. */
int vd_keyboard; /* (G) Keyboard index. */
unsigned int vd_kbstate; /* (?) Device unit. */
unsigned int vd_unit; /* (c) Device unit. */
int vd_altbrk; /* (?) Alt break seq. state */
};
#define VD_PASTEBUF(vd) ((vd)->vd_pastebuf.vpb_buf)
#define VD_PASTEBUFSZ(vd) ((vd)->vd_pastebuf.vpb_bufsz)
#define VD_PASTEBUFLEN(vd) ((vd)->vd_pastebuf.vpb_len)
+void vt_resume(struct vt_device *vd);
+void vt_suspend(struct vt_device *vd);
+
/*
* Per-window terminal screen buffer.
*
* Because redrawing is performed asynchronously, the buffer keeps track
* of a rectangle that needs to be redrawn (vb_dirtyrect). Because this
* approach seemed to cause suboptimal performance (when the top left
* and the bottom right of the screen are modified), it also uses a set
* of bitmasks to keep track of the rows and columns (mod 64) that have
* been modified.
*/
struct vt_buf {
struct mtx vb_lock; /* Buffer lock. */
term_pos_t vb_scr_size; /* (b) Screen dimensions. */
int vb_flags; /* (b) Flags. */
#define VBF_CURSOR 0x1 /* Cursor visible. */
#define VBF_STATIC 0x2 /* Buffer is statically allocated. */
#define VBF_MTX_INIT 0x4 /* Mutex initialized. */
#define VBF_SCROLL 0x8 /* scroll locked mode. */
#define VBF_HISTORY_FULL 0x10 /* All rows filled. */
unsigned int vb_history_size; /* Total rows kept, incl. scrollback. */
int vb_roffset; /* (b) History rows offset. */
int vb_curroffset; /* (b) Saved rows offset. */
term_pos_t vb_cursor; /* (u) Cursor position. */
term_pos_t vb_mark_start; /* (b) Copy region start. */
term_pos_t vb_mark_end; /* (b) Copy region end. */
int vb_mark_last; /* Last mouse event. */
term_rect_t vb_dirtyrect; /* (b) Dirty rectangle. */
term_char_t *vb_buffer; /* (u) Data buffer. */
term_char_t **vb_rows; /* (u) Array of rows */
};
#ifdef SC_HISTORY_SIZE
#define VBF_DEFAULT_HISTORY_SIZE SC_HISTORY_SIZE
#else
#define VBF_DEFAULT_HISTORY_SIZE 500
#endif
void vtbuf_copy(struct vt_buf *, const term_rect_t *, const term_pos_t *);
void vtbuf_fill_locked(struct vt_buf *, const term_rect_t *, term_char_t);
void vtbuf_init_early(struct vt_buf *);
void vtbuf_init(struct vt_buf *, const term_pos_t *);
void vtbuf_grow(struct vt_buf *, const term_pos_t *, unsigned int);
void vtbuf_putchar(struct vt_buf *, const term_pos_t *, term_char_t);
void vtbuf_cursor_position(struct vt_buf *, const term_pos_t *);
void vtbuf_scroll_mode(struct vt_buf *vb, int yes);
void vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area);
void vtbuf_undirty(struct vt_buf *, term_rect_t *);
void vtbuf_sethistory_size(struct vt_buf *, int);
int vtbuf_iscursor(const struct vt_buf *vb, int row, int col);
void vtbuf_cursor_visibility(struct vt_buf *, int);
#ifndef SC_NO_CUTPASTE
int vtbuf_set_mark(struct vt_buf *vb, int type, int col, int row);
int vtbuf_get_marked_len(struct vt_buf *vb);
void vtbuf_extract_marked(struct vt_buf *vb, term_char_t *buf, int sz);
#endif
#define VTB_MARK_NONE 0
#define VTB_MARK_END 1
#define VTB_MARK_START 2
#define VTB_MARK_WORD 3
#define VTB_MARK_ROW 4
#define VTB_MARK_EXTEND 5
#define VTB_MARK_MOVE 6
#define VTBUF_SLCK_ENABLE(vb) vtbuf_scroll_mode((vb), 1)
#define VTBUF_SLCK_DISABLE(vb) vtbuf_scroll_mode((vb), 0)
#define VTBUF_MAX_HEIGHT(vb) \
((vb)->vb_history_size)
#define VTBUF_GET_ROW(vb, r) \
((vb)->vb_rows[((vb)->vb_roffset + (r)) % VTBUF_MAX_HEIGHT(vb)])
#define VTBUF_GET_FIELD(vb, r, c) \
((vb)->vb_rows[((vb)->vb_roffset + (r)) % VTBUF_MAX_HEIGHT(vb)][(c)])
#define VTBUF_FIELD(vb, r, c) \
((vb)->vb_rows[((vb)->vb_curroffset + (r)) % VTBUF_MAX_HEIGHT(vb)][(c)])
#define VTBUF_ISCURSOR(vb, r, c) \
vtbuf_iscursor((vb), (r), (c))
#define VTBUF_DIRTYROW(mask, row) \
((mask)->vbm_row & ((uint64_t)1 << ((row) % 64)))
#define VTBUF_DIRTYCOL(mask, col) \
((mask)->vbm_col & ((uint64_t)1 << ((col) % 64)))
#define VTBUF_SPACE_CHAR(attr) (' ' | (attr))
#define VHS_SET 0
#define VHS_CUR 1
#define VHS_END 2
int vthistory_seek(struct vt_buf *, int offset, int whence);
void vthistory_addlines(struct vt_buf *vb, int offset);
void vthistory_getpos(const struct vt_buf *, unsigned int *offset);
/*
* Per-window datastructure.
*/
struct vt_window {
struct vt_device *vw_device; /* (c) Device. */
struct terminal *vw_terminal; /* (c) Terminal. */
struct vt_buf vw_buf; /* (u) Screen buffer. */
struct vt_font *vw_font; /* (d) Graphical font. */
term_rect_t vw_draw_area; /* (?) Drawable area. */
unsigned int vw_number; /* (c) Window number. */
int vw_kbdmode; /* (?) Keyboard mode. */
int vw_prev_kbdmode;/* (?) Previous mode. */
int vw_kbdstate; /* (?) Keyboard state. */
int vw_grabbed; /* (?) Grab count. */
char *vw_kbdsq; /* Escape sequence queue*/
unsigned int vw_flags; /* (d) Per-window flags. */
int vw_mouse_level;/* Mouse op mode. */
#define VWF_BUSY 0x1 /* Busy reconfiguring device. */
#define VWF_OPENED 0x2 /* TTY in use. */
#define VWF_SCROLL 0x4 /* Keys influence scrollback. */
#define VWF_CONSOLE 0x8 /* Kernel message console window. */
#define VWF_VTYLOCK 0x10 /* Prevent window switch. */
#define VWF_MOUSE_HIDE 0x20 /* Disable mouse events processing. */
#define VWF_READY 0x40 /* Window fully initialized. */
#define VWF_GRAPHICS 0x80 /* Window in graphics mode (KDSETMODE). */
#define VWF_SWWAIT_REL 0x10000 /* Program wait for VT acquire is done. */
#define VWF_SWWAIT_ACQ 0x20000 /* Program wait for VT release is done. */
pid_t vw_pid; /* Terminal holding process */
struct proc *vw_proc; /* Terminal holding process structure. */
struct vt_mode vw_smode; /* switch mode */
struct callout vw_proc_dead_timer; /* Forces switch if proc hangs. */
struct vt_window *vw_switch_to; /* Window awaiting proc's release ack. */
};
#define VT_AUTO 0 /* switching is automatic */
#define VT_PROCESS 1 /* switching controlled by prog */
#define VT_KERNEL 255 /* switching controlled in kernel */
#define IS_VT_PROC_MODE(vw) ((vw)->vw_smode.mode == VT_PROCESS)
/*
* Per-device driver routines.
*/
typedef int vd_init_t(struct vt_device *vd);
typedef int vd_probe_t(struct vt_device *vd);
typedef void vd_postswitch_t(struct vt_device *vd);
typedef void vd_blank_t(struct vt_device *vd, term_color_t color);
typedef void vd_bitblt_text_t(struct vt_device *vd, const struct vt_window *vw,
const term_rect_t *area);
typedef void vd_bitblt_bmp_t(struct vt_device *vd, const struct vt_window *vw,
const uint8_t *pattern, const uint8_t *mask,
unsigned int width, unsigned int height,
unsigned int x, unsigned int y, term_color_t fg, term_color_t bg);
typedef int vd_fb_ioctl_t(struct vt_device *, u_long, caddr_t, struct thread *);
typedef int vd_fb_mmap_t(struct vt_device *, vm_ooffset_t, vm_paddr_t *, int,
vm_memattr_t *);
typedef void vd_drawrect_t(struct vt_device *, int, int, int, int, int,
term_color_t);
typedef void vd_setpixel_t(struct vt_device *, int, int, term_color_t);
+typedef void vd_suspend_t(struct vt_device *);
+typedef void vd_resume_t(struct vt_device *);
struct vt_driver {
char vd_name[16]; /* Backend name (e.g. for identification). */
/* Console attachment. */
vd_probe_t *vd_probe;
vd_init_t *vd_init;
/* Drawing. */
vd_blank_t *vd_blank;
vd_drawrect_t *vd_drawrect;
vd_setpixel_t *vd_setpixel;
vd_bitblt_text_t *vd_bitblt_text;
vd_bitblt_bmp_t *vd_bitblt_bmp;
/* Framebuffer ioctls, if present. */
vd_fb_ioctl_t *vd_fb_ioctl;
/* Framebuffer mmap, if present. */
vd_fb_mmap_t *vd_fb_mmap;
/* Update display setting on vt switch. */
vd_postswitch_t *vd_postswitch;

/* Suspend/resume handlers. */
vd_suspend_t *vd_suspend;
vd_resume_t *vd_resume;
/* Priority to know which one can override */
int vd_priority;
#define VD_PRIORITY_DUMB 10
#define VD_PRIORITY_GENERIC 100
#define VD_PRIORITY_SPECIFIC 1000
};
/*
* Console device madness.
*
* Utility macro to make early vt(4) instances work.
*/
extern const struct terminal_class vt_termclass;
void vt_upgrade(struct vt_device *vd);
#define PIXEL_WIDTH(w) ((w) / 8)
#define PIXEL_HEIGHT(h) ((h) / 16)
#ifndef VT_FB_DEFAULT_WIDTH
#define VT_FB_DEFAULT_WIDTH 2048
#endif
#ifndef VT_FB_DEFAULT_HEIGHT
#define VT_FB_DEFAULT_HEIGHT 1200
#endif
/* name argument is not used yet. */
#define VT_DRIVER_DECLARE(name, drv) DATA_SET(vt_drv_set, drv)
/*
* Fonts.
*
* Remapping tables are used to map Unicode points to glyphs. They need
* to be sorted, because vtfont_lookup() performs a binary search. Each
* font has two remapping tables, for normal and bold. When a character
* is not present in bold, it uses a normal glyph. When no glyph is
* available, it uses glyph 0, which is normally equal to U+FFFD.
*/
/* One sorted run mapping Unicode points to consecutive glyph indices. */
struct vt_font_map {
uint32_t vfm_src; /* First Unicode code point of the run. */
uint16_t vfm_dst; /* Glyph index for vfm_src. */
uint16_t vfm_len; /* Additional consecutive entries in the run. */
};
/* Bitmap font; see comment above for remapping-table semantics. */
struct vt_font {
struct vt_font_map *vf_map[VFNT_MAPS]; /* Sorted maps (binary searched). */
uint8_t *vf_bytes; /* Glyph bitmap data. */
unsigned int vf_height, vf_width; /* Glyph cell size in pixels. */
unsigned int vf_map_count[VFNT_MAPS]; /* Entries per map. */
unsigned int vf_refcount; /* See vtfont_ref()/vtfont_unref(). */
};
#ifndef SC_NO_CUTPASTE
/* Mouse cursor image; map/mask sized for at most 64x64 1bpp pixels. */
struct vt_mouse_cursor {
uint8_t map[64 * 64 / 8]; /* Cursor pixel bitmap. */
uint8_t mask[64 * 64 / 8]; /* Transparency mask. */
uint8_t width; /* Used width, in pixels. */
uint8_t height; /* Used height, in pixels. */
};
#endif
const uint8_t *vtfont_lookup(const struct vt_font *vf, term_char_t c);
struct vt_font *vtfont_ref(struct vt_font *vf);
void vtfont_unref(struct vt_font *vf);
int vtfont_load(vfnt_t *f, struct vt_font **ret);
/* Sysmouse. */
void sysmouse_process_event(mouse_info_t *mi);
#ifndef SC_NO_CUTPASTE
void vt_mouse_event(int type, int x, int y, int event, int cnt, int mlevel);
void vt_mouse_state(int show);
#endif
#define VT_MOUSE_SHOW 1
#define VT_MOUSE_HIDE 0
/* Utilities. */
void vt_determine_colors(term_char_t c, int cursor,
term_color_t *fg, term_color_t *bg);
int vt_is_cursor_in_area(const struct vt_device *vd,
const term_rect_t *area);
#endif /* !_DEV_VT_VT_H_ */
Index: projects/clang360-import/sys/dev/vt/vt_core.c
===================================================================
--- projects/clang360-import/sys/dev/vt/vt_core.c (revision 277808)
+++ projects/clang360-import/sys/dev/vt/vt_core.c (revision 277809)
@@ -1,2680 +1,2716 @@
/*-
* Copyright (c) 2009, 2013 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Ed Schouten under sponsorship from the
* FreeBSD Foundation.
*
* Portions of this software were developed by Oleksandr Rybalko
* under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/consio.h>
#include <sys/eventhandler.h>
#include <sys/fbio.h>
#include <sys/kbio.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/power.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/terminal.h>
#include <dev/kbd/kbdreg.h>
#include <dev/vt/vt.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/psl.h>
#include <machine/frame.h>
#endif
static tc_bell_t vtterm_bell;
static tc_cursor_t vtterm_cursor;
static tc_putchar_t vtterm_putchar;
static tc_fill_t vtterm_fill;
static tc_copy_t vtterm_copy;
static tc_param_t vtterm_param;
static tc_done_t vtterm_done;
static tc_cnprobe_t vtterm_cnprobe;
static tc_cngetc_t vtterm_cngetc;
static tc_cngrab_t vtterm_cngrab;
static tc_cnungrab_t vtterm_cnungrab;
static tc_opened_t vtterm_opened;
static tc_ioctl_t vtterm_ioctl;
static tc_mmap_t vtterm_mmap;
const struct terminal_class vt_termclass = {
.tc_bell = vtterm_bell,
.tc_cursor = vtterm_cursor,
.tc_putchar = vtterm_putchar,
.tc_fill = vtterm_fill,
.tc_copy = vtterm_copy,
.tc_param = vtterm_param,
.tc_done = vtterm_done,
.tc_cnprobe = vtterm_cnprobe,
.tc_cngetc = vtterm_cngetc,
.tc_cngrab = vtterm_cngrab,
.tc_cnungrab = vtterm_cnungrab,
.tc_opened = vtterm_opened,
.tc_ioctl = vtterm_ioctl,
.tc_mmap = vtterm_mmap,
};
/*
* Use a constant timer of 25 Hz to redraw the screen.
*
* XXX: In theory we should only fire up the timer when there is really
* activity. Unfortunately we cannot always start timers. We really
* don't want to process kernel messages synchronously, because it
* really slows down the system.
*/
#define VT_TIMERFREQ 25
/* Bell pitch/duration. */
#define VT_BELLDURATION ((5 * hz + 99) / 100)
#define VT_BELLPITCH 800
#define VT_LOCK(vd) mtx_lock(&(vd)->vd_lock)
#define VT_UNLOCK(vd) mtx_unlock(&(vd)->vd_lock)
#define VT_LOCK_ASSERT(vd, what) mtx_assert(&(vd)->vd_lock, what)
#define VT_UNIT(vw) ((vw)->vw_device->vd_unit * VT_MAXWINDOWS + \
(vw)->vw_number)
static SYSCTL_NODE(_kern, OID_AUTO, vt, CTLFLAG_RD, 0, "vt(9) parameters");
VT_SYSCTL_INT(enable_altgr, 1, "Enable AltGr key (Do not assume R.Alt as Alt)");
VT_SYSCTL_INT(enable_bell, 1, "Enable bell");
VT_SYSCTL_INT(debug, 0, "vt(9) debug level");
VT_SYSCTL_INT(deadtimer, 15, "Time to wait busy process in VT_PROCESS mode");
VT_SYSCTL_INT(suspendswitch, 1, "Switch to VT0 before suspend");
/* Allow to disable some keyboard combinations. */
VT_SYSCTL_INT(kbd_halt, 1, "Enable halt keyboard combination. "
"See kbdmap(5) to configure.");
VT_SYSCTL_INT(kbd_poweroff, 1, "Enable Power Off keyboard combination. "
"See kbdmap(5) to configure.");
VT_SYSCTL_INT(kbd_reboot, 1, "Enable reboot keyboard combination. "
"See kbdmap(5) to configure (typically Ctrl-Alt-Delete).");
VT_SYSCTL_INT(kbd_debug, 1, "Enable key combination to enter debugger. "
"See kbdmap(5) to configure (typically Ctrl-Alt-Esc).");
VT_SYSCTL_INT(kbd_panic, 0, "Enable request to panic. "
"See kbdmap(5) to configure.");
static struct vt_device vt_consdev;
static unsigned int vt_unit = 0;
static MALLOC_DEFINE(M_VT, "vt", "vt device");
struct vt_device *main_vd = &vt_consdev;
/* Boot logo. */
extern unsigned int vt_logo_width;
extern unsigned int vt_logo_height;
extern unsigned int vt_logo_depth;
extern unsigned char vt_logo_image[];
/* Font. */
extern struct vt_font vt_font_default;
#ifndef SC_NO_CUTPASTE
extern struct vt_mouse_cursor vt_default_mouse_pointer;
#endif
static int signal_vt_rel(struct vt_window *);
static int signal_vt_acq(struct vt_window *);
static int finish_vt_rel(struct vt_window *, int, int *);
static int finish_vt_acq(struct vt_window *);
static int vt_window_switch(struct vt_window *);
static int vt_late_window_switch(struct vt_window *);
static int vt_proc_alive(struct vt_window *);
static void vt_resize(struct vt_device *);
static void vt_update_static(void *);
#ifndef SC_NO_CUTPASTE
static void vt_mouse_paste(void);
#endif
+static void vt_suspend_handler(void *priv);
+static void vt_resume_handler(void *priv);
SET_DECLARE(vt_drv_set, struct vt_driver);
#define _VTDEFH MAX(100, PIXEL_HEIGHT(VT_FB_DEFAULT_HEIGHT))
#define _VTDEFW MAX(200, PIXEL_WIDTH(VT_FB_DEFAULT_WIDTH))
static struct terminal vt_consterm;
static struct vt_window vt_conswindow;
static struct vt_device vt_consdev = {
.vd_driver = NULL,
.vd_softc = NULL,
.vd_flags = VDF_INVALID,
.vd_windows = { [VT_CONSWINDOW] = &vt_conswindow, },
.vd_curwindow = &vt_conswindow,
.vd_kbstate = 0,
#ifndef SC_NO_CUTPASTE
.vd_pastebuf = {
.vpb_buf = NULL,
.vpb_bufsz = 0,
.vpb_len = 0
},
.vd_mcursor = &vt_default_mouse_pointer,
.vd_mcursor_fg = TC_WHITE,
.vd_mcursor_bg = TC_BLACK,
#endif
};
static term_char_t vt_constextbuf[(_VTDEFW) * (VBF_DEFAULT_HISTORY_SIZE)];
static term_char_t *vt_constextbufrows[VBF_DEFAULT_HISTORY_SIZE];
static struct vt_window vt_conswindow = {
.vw_number = VT_CONSWINDOW,
.vw_flags = VWF_CONSOLE,
.vw_buf = {
.vb_buffer = &vt_constextbuf[0],
.vb_rows = &vt_constextbufrows[0],
.vb_history_size = VBF_DEFAULT_HISTORY_SIZE,
.vb_curroffset = 0,
.vb_roffset = 0,
.vb_flags = VBF_STATIC,
.vb_mark_start = {.tp_row = 0, .tp_col = 0,},
.vb_mark_end = {.tp_row = 0, .tp_col = 0,},
.vb_scr_size = {
.tp_row = _VTDEFH,
.tp_col = _VTDEFW,
},
},
.vw_device = &vt_consdev,
.vw_terminal = &vt_consterm,
.vw_kbdmode = K_XLATE,
.vw_grabbed = 0,
};
static struct terminal vt_consterm = {
.tm_class = &vt_termclass,
.tm_softc = &vt_conswindow,
.tm_flags = TF_CONS,
};
static struct consdev vt_consterm_consdev = {
.cn_ops = &termcn_cnops,
.cn_arg = &vt_consterm,
.cn_name = "ttyv0",
};
/* Add to set of consoles. */
DATA_SET(cons_set, vt_consterm_consdev);
/*
* Right after kmem is done to allow early drivers to use locking and allocate
* memory.
*/
SYSINIT(vt_update_static, SI_SUB_KMEM, SI_ORDER_ANY, vt_update_static,
&vt_consdev);
/* Delay until all devices attached, to not waste time. */
SYSINIT(vt_early_cons, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, vt_upgrade,
&vt_consdev);
/*
 * Initialize members that depend on locking/memory being available
 * (SYSINIT at SI_SUB_KMEM).  "dummy" receives &vt_consdev from the
 * SYSINIT declaration, but the function operates on main_vd.
 */
static void
vt_update_static(void *dummy)
{
if (!vty_enabled(VTY_VT))
return;
if (main_vd->vd_driver != NULL)
printf("VT: running with driver \"%s\".\n",
main_vd->vd_driver->vd_name);
else
printf("VT: init without driver.\n");
mtx_init(&main_vd->vd_lock, "vtdev", NULL, MTX_DEF);
cv_init(&main_vd->vd_winswitch, "vtwswt");
}
/*
 * Arm the per-device redraw callout to fire in roughly "ms"
 * milliseconds; ms <= 0 selects the default VT_TIMERFREQ period.
 */
static void
vt_schedule_flush(struct vt_device *vd, int ms)
{
if (ms <= 0)
/* Default to initial value. */
ms = 1000 / VT_TIMERFREQ;
callout_schedule(&vd->vd_timer, hz / (1000 / ms));
}
/*
 * (Re)start the redraw timer.  Only acts when the device runs in
 * async mode (VDF_ASYNC); the atomic 0->1 transition of
 * vd_timer_armed guarantees the callout is armed at most once.
 */
static void
vt_resume_flush_timer(struct vt_device *vd, int ms)
{
if (!(vd->vd_flags & VDF_ASYNC) ||
!atomic_cmpset_int(&vd->vd_timer_armed, 0, 1))
return;
vt_schedule_flush(vd, ms);
}
/*
 * Stop the redraw timer.  Counterpart of vt_resume_flush_timer();
 * the atomic 1->0 transition of vd_timer_armed makes the pair
 * idempotent.  Caller must hold the device lock (asserted below).
 */
static void
vt_suspend_flush_timer(struct vt_device *vd)
{
/*
 * As long as this function is called locked, callout_stop()
 * has the same effect like callout_drain() with regard to
 * preventing the callback function from executing.
 */
VT_LOCK_ASSERT(vd, MA_OWNED);
if (!(vd->vd_flags & VDF_ASYNC) ||
!atomic_cmpset_int(&vd->vd_timer_armed, 1, 0))
return;
callout_stop(&vd->vd_timer);
}
/*
 * Deadline callout handler: force the pending window switch when the
 * controlling process failed to respond in time (armed by
 * vt_window_preswitch()).
 */
static void
vt_switch_timer(void *arg)
{
	struct vt_window *vw;

	vw = arg;
	vt_late_window_switch(vw);
}
/*
 * Read the keyboard's current mode (KDGKBMODE) into vw->vw_kbdmode.
 * Returns 0 on success or an errno value (ENOIOCTL mapped to ENODEV).
 */
static int
vt_save_kbd_mode(struct vt_window *vw, keyboard_t *kbd)
{
int mode, ret;
mode = 0;
ret = kbdd_ioctl(kbd, KDGKBMODE, (caddr_t)&mode);
if (ret == ENOIOCTL)
ret = ENODEV;
if (ret != 0)
return (ret);
vw->vw_kbdmode = mode;
return (0);
}
/*
 * Push the window's saved keyboard mode to the hardware (KDSKBMODE).
 * Returns 0 on success or an errno value (ENOIOCTL mapped to ENODEV).
 */
static int
vt_update_kbd_mode(struct vt_window *vw, keyboard_t *kbd)
{
int ret;
ret = kbdd_ioctl(kbd, KDSKBMODE, (caddr_t)&vw->vw_kbdmode);
if (ret == ENOIOCTL)
ret = ENODEV;
return (ret);
}
/*
 * Read the keyboard lock-key state (KDGKBSTATE) and merge the
 * LOCK_MASK bits into vw->vw_kbdstate, preserving other bits.
 */
static int
vt_save_kbd_state(struct vt_window *vw, keyboard_t *kbd)
{
int state, ret;
state = 0;
ret = kbdd_ioctl(kbd, KDGKBSTATE, (caddr_t)&state);
if (ret == ENOIOCTL)
ret = ENODEV;
if (ret != 0)
return (ret);
vw->vw_kbdstate &= ~LOCK_MASK;
vw->vw_kbdstate |= state & LOCK_MASK;
return (0);
}
/*
 * Push the window's saved lock-key state (LOCK_MASK bits of
 * vw_kbdstate) to the hardware via KDSKBSTATE.
 */
static int
vt_update_kbd_state(struct vt_window *vw, keyboard_t *kbd)
{
int state, ret;
state = vw->vw_kbdstate & LOCK_MASK;
ret = kbdd_ioctl(kbd, KDSKBSTATE, (caddr_t)&state);
if (ret == ENOIOCTL)
ret = ENODEV;
return (ret);
}
/*
 * Read the keyboard LED state (KDGETLED) and merge the LED_MASK
 * bits into vw->vw_kbdstate, preserving other bits.
 */
static int
vt_save_kbd_leds(struct vt_window *vw, keyboard_t *kbd)
{
int leds, ret;
leds = 0;
ret = kbdd_ioctl(kbd, KDGETLED, (caddr_t)&leds);
if (ret == ENOIOCTL)
ret = ENODEV;
if (ret != 0)
return (ret);
vw->vw_kbdstate &= ~LED_MASK;
vw->vw_kbdstate |= leds & LED_MASK;
return (0);
}
/*
 * Push the window's saved LED state (LED_MASK bits of vw_kbdstate)
 * to the hardware via KDSETLED.
 */
static int
vt_update_kbd_leds(struct vt_window *vw, keyboard_t *kbd)
{
int leds, ret;
leds = vw->vw_kbdstate & LED_MASK;
ret = kbdd_ioctl(kbd, KDSETLED, (caddr_t)&leds);
if (ret == ENOIOCTL)
ret = ENODEV;
return (ret);
}
/*
 * Begin a VT_PROCESS-mode switch: record the target window, arm the
 * dead-process timeout, and signal the controlling process to
 * release the terminal.  Completion happens in
 * vt_late_window_switch() (via the process's ack or the timeout).
 */
static int
vt_window_preswitch(struct vt_window *vw, struct vt_window *curvw)
{
DPRINTF(40, "%s\n", __func__);
curvw->vw_switch_to = vw;
/* Set timer to allow the switch in case the process hangs. */
callout_reset(&vw->vw_proc_dead_timer, hz * vt_deadtimer,
vt_switch_timer, (void *)vw);
/* Notify process about vt switch attempt. */
DPRINTF(30, "%s: Notify process.\n", __func__);
signal_vt_rel(curvw);
return (0);
}
/* Notify the new window's controlling process that it acquired the VT. */
static int
vt_window_postswitch(struct vt_window *vw)
{
signal_vt_acq(vw);
return (0);
}
/*
 * Complete a VT switch: cancel the dead-process timeout, perform the
 * actual switch, and (for VT_PROCESS windows) notify the new owner
 * that the terminal is available.
 */
static int
vt_late_window_switch(struct vt_window *vw)
{
int ret;
callout_stop(&vw->vw_proc_dead_timer);
ret = vt_window_switch(vw);
if (ret)
return (ret);
/* Notify owner process about terminal availability. */
if (vw->vw_smode.mode == VT_PROCESS) {
ret = vt_window_postswitch(vw);
}
return (ret);
}
/*
 * Switch to window "vw", honoring VT_PROCESS handshaking: if the
 * current window is owned by a live process, ask it for permission
 * and return without switching (the switch finishes later from
 * vt_late_window_switch(), driven by the process or the timeout).
 * Returns 0 on success/initiation, EBUSY when the current window is
 * vty-locked, or an error from the switch itself.
 */
static int
vt_proc_window_switch(struct vt_window *vw)
{
struct vt_window *curvw;
struct vt_device *vd;
int ret;
vd = vw->vw_device;
curvw = vd->vd_curwindow;
if (curvw->vw_flags & VWF_VTYLOCK)
return (EBUSY);
/* Ask current process permission to switch away. */
if (curvw->vw_smode.mode == VT_PROCESS) {
DPRINTF(30, "%s: VT_PROCESS ", __func__);
if (vt_proc_alive(curvw) == FALSE) {
DPRINTF(30, "Dead. Cleaning.");
/* Dead; fall through to an immediate switch. */
} else {
DPRINTF(30, "%s: Signaling process.\n", __func__);
/* Alive; ask it to release the terminal. */
ret = vt_window_preswitch(vw, curvw);
/* Wait for process answer or timeout. */
return (ret);
}
DPRINTF(30, "\n");
}
ret = vt_late_window_switch(vw);
return (ret);
}
/*
 * Switch window ignoring process locking.  Marks the whole screen
 * invalid, wakes threads sleeping on vd_winswitch, lets the backend
 * adjust display settings, and restores the target window's
 * keyboard mode/state.  Returns 0, or EINVAL when the target window
 * was never opened and is not the console.
 */
static int
vt_window_switch(struct vt_window *vw)
{
struct vt_device *vd = vw->vw_device;
struct vt_window *curvw = vd->vd_curwindow;
keyboard_t *kbd;
VT_LOCK(vd);
if (curvw == vw) {
/* Nothing to do. */
VT_UNLOCK(vd);
return (0);
}
if (!(vw->vw_flags & (VWF_OPENED|VWF_CONSOLE))) {
VT_UNLOCK(vd);
return (EINVAL);
}
/* Stop the redraw timer while the device is reconfigured. */
vt_suspend_flush_timer(vd);
vd->vd_curwindow = vw;
vd->vd_flags |= VDF_INVALID;
cv_broadcast(&vd->vd_winswitch);
VT_UNLOCK(vd);
if (vd->vd_driver->vd_postswitch)
vd->vd_driver->vd_postswitch(vd);
vt_resume_flush_timer(vd, 0);
/* Restore per-window keyboard mode; keyboard KPI requires Giant. */
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL) {
if (curvw->vw_kbdmode == K_XLATE)
vt_save_kbd_state(curvw, kbd);
vt_update_kbd_mode(vw, kbd);
vt_update_kbd_state(vw, kbd);
}
mtx_unlock(&Giant);
DPRINTF(10, "%s(ttyv%d) done\n", __func__, vw->vw_number);
return (0);
}
/*
 * Report the terminal size: in character cells when a font is
 * loaded, otherwise (textmode) the raw device dimensions.
 */
static inline void
vt_termsize(struct vt_device *vd, struct vt_font *vf, term_pos_t *size)
{

	if (vf != NULL) {
		size->tp_row = vd->vd_height / vf->vf_height;
		size->tp_col = vd->vd_width / vf->vf_width;
	} else {
		size->tp_row = vd->vd_height;
		size->tp_col = vd->vd_width;
	}
}
/*
 * Fill a struct winsize: pixel fields always carry the device
 * dimensions; row/col are divided down to cells when a font is set.
 */
static inline void
vt_winsize(struct vt_device *vd, struct vt_font *vf, struct winsize *size)
{

	size->ws_ypixel = vd->vd_height;
	size->ws_xpixel = vd->vd_width;
	if (vf != NULL) {
		size->ws_row = vd->vd_height / vf->vf_height;
		size->ws_col = vd->vd_width / vf->vf_width;
	} else {
		size->ws_row = vd->vd_height;
		size->ws_col = vd->vd_width;
	}
}
/*
 * Compute vw_draw_area, the pixel rectangle actually used for
 * rendering.  Without a font the whole device surface is used.
 */
static inline void
vt_compute_drawable_area(struct vt_window *vw)
{
struct vt_device *vd;
struct vt_font *vf;
vd = vw->vw_device;
if (vw->vw_font == NULL) {
/* No font (textmode): draw on the full device area. */
vw->vw_draw_area.tr_begin.tp_col = 0;
vw->vw_draw_area.tr_begin.tp_row = 0;
vw->vw_draw_area.tr_end.tp_col = vd->vd_width;
vw->vw_draw_area.tr_end.tp_row = vd->vd_height;
return;
}
vf = vw->vw_font;
/*
 * Compute the drawable area, so that the text is centered on
 * the screen.
 */
vw->vw_draw_area.tr_begin.tp_col = (vd->vd_width % vf->vf_width) / 2;
vw->vw_draw_area.tr_begin.tp_row = (vd->vd_height % vf->vf_height) / 2;
vw->vw_draw_area.tr_end.tp_col = vw->vw_draw_area.tr_begin.tp_col +
vd->vd_width / vf->vf_width * vf->vf_width;
vw->vw_draw_area.tr_end.tp_row = vw->vw_draw_area.tr_begin.tp_row +
vd->vd_height / vf->vf_height * vf->vf_height;
}
/*
 * Move the scrollback view of "vw" by "offset" rows relative to
 * "whence" (VHS_SET/VHS_CUR/VHS_END).  No-op unless the window is in
 * scrollback mode (VWF_SCROLL).  Schedules a redraw when the view
 * actually moved.
 *
 * The previous version computed the terminal size into a local
 * "size" via vt_termsize() but never used it; that dead code is
 * removed here (vt_termsize() has no side effects).
 */
static void
vt_scroll(struct vt_window *vw, int offset, int whence)
{
	int diff;

	if ((vw->vw_flags & VWF_SCROLL) == 0)
		return;

	diff = vthistory_seek(&vw->vw_buf, offset, whence);
	if (diff)
		vw->vw_device->vd_flags |= VDF_INVALID;
	vt_resume_flush_timer(vw->vw_device, 0);
}
/*
 * Handle machine-level key combinations (debugger, halt, power-off,
 * panic, reboot, suspend, paste).  Returns 1 when the key was fully
 * consumed here and must not be processed further, 0 otherwise
 * (including PASTE, which falls through to normal processing).
 *
 * Changes vs. the previous version: removed the stray ';' after the
 * switch's closing brace and fixed the "activatation" typo; no
 * behavioral change.
 */
static int
vt_machine_kbdevent(int c)
{

	switch (c) {
	case SPCLKEY | DBG: /* kbdmap(5) keyword `debug`. */
		if (vt_kbd_debug)
			kdb_enter(KDB_WHY_BREAK, "manual escape to debugger");
		return (1);
	case SPCLKEY | HALT: /* kbdmap(5) keyword `halt`. */
		if (vt_kbd_halt)
			shutdown_nice(RB_HALT);
		return (1);
	case SPCLKEY | PASTE: /* kbdmap(5) keyword `paste`. */
#ifndef SC_NO_CUTPASTE
		/* Insert text from cut-paste buffer. */
		vt_mouse_paste();
#endif
		break;
	case SPCLKEY | PDWN: /* kbdmap(5) keyword `pdwn`. */
		if (vt_kbd_poweroff)
			shutdown_nice(RB_HALT|RB_POWEROFF);
		return (1);
	case SPCLKEY | PNC: /* kbdmap(5) keyword `panic`. */
		/*
		 * Request to immediate panic if sysctl
		 * kern.vt.enable_panic_key allow it.
		 */
		if (vt_kbd_panic)
			panic("Forced by the panic key");
		return (1);
	case SPCLKEY | RBT: /* kbdmap(5) keyword `boot`. */
		if (vt_kbd_reboot)
			shutdown_nice(RB_AUTOBOOT);
		return (1);
	case SPCLKEY | SPSC: /* kbdmap(5) keyword `spsc`. */
		/* Force activation/deactivation of the screen saver. */
		/* TODO */
		return (1);
	case SPCLKEY | STBY: /* XXX Not present in kbdcontrol parser. */
		/* Put machine into Stand-By mode. */
		power_pm_suspend(POWER_SLEEP_STATE_STANDBY);
		return (1);
	case SPCLKEY | SUSP: /* kbdmap(5) keyword `susp`. */
		/* Suspend machine. */
		power_pm_suspend(POWER_SLEEP_STATE_SUSPEND);
		return (1);
	}

	return (0);
}
/*
 * Key handling while the window is in ScrollLock (scrollback) mode.
 * Only special keys are honored: window-switch keys (when not called
 * from the console path), ScrollLock to leave the mode, and
 * Home/End/arrows/page keys to move through history.  "console"
 * nonzero means the caller already holds the required locks.
 */
static void
vt_scrollmode_kbdevent(struct vt_window *vw, int c, int console)
{
struct vt_device *vd;
term_pos_t size;
vd = vw->vw_device;
/* Only special keys handled in ScrollLock mode */
if ((c & SPCLKEY) == 0)
return;
c &= ~SPCLKEY;
if (console == 0) {
if (c >= F_SCR && c <= MIN(L_SCR, F_SCR + VT_MAXWINDOWS - 1)) {
vw = vd->vd_windows[c - F_SCR];
if (vw != NULL)
vt_proc_window_switch(vw);
return;
}
VT_LOCK(vd);
}
switch (c) {
case SLK: {
/* Turn scrolling off. */
vt_scroll(vw, 0, VHS_END);
VTBUF_SLCK_DISABLE(&vw->vw_buf);
vw->vw_flags &= ~VWF_SCROLL;
break;
}
case FKEY | F(49): /* Home key. */
vt_scroll(vw, 0, VHS_SET);
break;
case FKEY | F(50): /* Arrow up. */
vt_scroll(vw, -1, VHS_CUR);
break;
case FKEY | F(51): /* Page up. */
vt_termsize(vd, vw->vw_font, &size);
vt_scroll(vw, -size.tp_row, VHS_CUR);
break;
case FKEY | F(57): /* End key. */
vt_scroll(vw, 0, VHS_END);
break;
case FKEY | F(58): /* Arrow down. */
vt_scroll(vw, 1, VHS_CUR);
break;
case FKEY | F(59): /* Page down. */
vt_termsize(vd, vw->vw_font, &size);
vt_scroll(vw, size.tp_row, VHS_CUR);
break;
}
if (console == 0)
VT_UNLOCK(vd);
}
/*
 * Central key dispatch for the current window: tracks Alt state (for
 * the Alt-to-ESC hack), gives machine combinations and scrollback
 * mode first shot, handles window-switch and function/navigation
 * keys, and finally feeds ordinary characters to the terminal layer.
 * Always returns 0.
 */
static int
vt_processkey(keyboard_t *kbd, struct vt_device *vd, int c)
{
struct vt_window *vw = vd->vd_curwindow;
#if VT_ALT_TO_ESC_HACK
if (c & RELKEY) {
switch (c & ~RELKEY) {
case (SPCLKEY | RALT):
if (vt_enable_altgr != 0)
break;
/* FALLTHROUGH: R.Alt acts as Alt when AltGr is disabled. */
case (SPCLKEY | LALT):
vd->vd_kbstate &= ~ALKED;
}
/* Other keys ignored for RELKEY event. */
return (0);
} else {
switch (c & ~RELKEY) {
case (SPCLKEY | RALT):
if (vt_enable_altgr != 0)
break;
/* FALLTHROUGH: R.Alt acts as Alt when AltGr is disabled. */
case (SPCLKEY | LALT):
vd->vd_kbstate |= ALKED;
}
}
#else
if (c & RELKEY)
/* Other keys ignored for RELKEY event. */
return (0);
#endif
if (vt_machine_kbdevent(c))
return (0);
if (vw->vw_flags & VWF_SCROLL) {
vt_scrollmode_kbdevent(vw, c, 0/* Not a console */);
/* Scroll mode keys handled, nothing to do more. */
return (0);
}
if (c & SPCLKEY) {
c &= ~SPCLKEY;
/* Direct switch to window N via its assigned key. */
if (c >= F_SCR && c <= MIN(L_SCR, F_SCR + VT_MAXWINDOWS - 1)) {
vw = vd->vd_windows[c - F_SCR];
if (vw != NULL)
vt_proc_window_switch(vw);
return (0);
}
switch (c) {
case NEXT:
/* Switch to next VT. */
c = (vw->vw_number + 1) % VT_MAXWINDOWS;
vw = vd->vd_windows[c];
if (vw != NULL)
vt_proc_window_switch(vw);
return (0);
case PREV:
/* Switch to previous VT. */
c = (vw->vw_number - 1) % VT_MAXWINDOWS;
vw = vd->vd_windows[c];
if (vw != NULL)
vt_proc_window_switch(vw);
return (0);
case SLK: {
/* ScrollLock: enter/leave scrollback mode per lock state. */
vt_save_kbd_state(vw, kbd);
VT_LOCK(vd);
if (vw->vw_kbdstate & SLKED) {
/* Turn scrolling on. */
vw->vw_flags |= VWF_SCROLL;
VTBUF_SLCK_ENABLE(&vw->vw_buf);
} else {
/* Turn scrolling off. */
vw->vw_flags &= ~VWF_SCROLL;
VTBUF_SLCK_DISABLE(&vw->vw_buf);
vt_scroll(vw, 0, VHS_END);
}
VT_UNLOCK(vd);
break;
}
case FKEY | F(1): case FKEY | F(2): case FKEY | F(3):
case FKEY | F(4): case FKEY | F(5): case FKEY | F(6):
case FKEY | F(7): case FKEY | F(8): case FKEY | F(9):
case FKEY | F(10): case FKEY | F(11): case FKEY | F(12):
/* F1 through F12 keys. */
terminal_input_special(vw->vw_terminal,
TKEY_F1 + c - (FKEY | F(1)));
break;
case FKEY | F(49): /* Home key. */
terminal_input_special(vw->vw_terminal, TKEY_HOME);
break;
case FKEY | F(50): /* Arrow up. */
terminal_input_special(vw->vw_terminal, TKEY_UP);
break;
case FKEY | F(51): /* Page up. */
terminal_input_special(vw->vw_terminal, TKEY_PAGE_UP);
break;
case FKEY | F(53): /* Arrow left. */
terminal_input_special(vw->vw_terminal, TKEY_LEFT);
break;
case FKEY | F(55): /* Arrow right. */
terminal_input_special(vw->vw_terminal, TKEY_RIGHT);
break;
case FKEY | F(57): /* End key. */
terminal_input_special(vw->vw_terminal, TKEY_END);
break;
case FKEY | F(58): /* Arrow down. */
terminal_input_special(vw->vw_terminal, TKEY_DOWN);
break;
case FKEY | F(59): /* Page down. */
terminal_input_special(vw->vw_terminal, TKEY_PAGE_DOWN);
break;
case FKEY | F(60): /* Insert key. */
terminal_input_special(vw->vw_terminal, TKEY_INSERT);
break;
case FKEY | F(61): /* Delete key. */
terminal_input_special(vw->vw_terminal, TKEY_DELETE);
break;
}
} else if (KEYFLAGS(c) == 0) {
/* Don't do UTF-8 conversion when doing raw mode. */
if (vw->vw_kbdmode == K_XLATE) {
#if VT_ALT_TO_ESC_HACK
if (vd->vd_kbstate & ALKED) {
/*
 * Prepend ESC sequence if one of ALT keys down.
 */
terminal_input_char(vw->vw_terminal, 0x1b);
}
#endif
#if defined(KDB)
kdb_alt_break(c, &vd->vd_altbrk);
#endif
terminal_input_char(vw->vw_terminal, KEYCHAR(c));
} else
terminal_input_raw(vw->vw_terminal, c);
}
return (0);
}
/*
 * Keyboard event callback (registered via kbd_allocate()).  On key
 * input, drains all pending characters through vt_processkey(); on
 * keyboard unload, releases our reference.  Returns 0 or EINVAL for
 * unknown events.
 */
static int
vt_kbdevent(keyboard_t *kbd, int event, void *arg)
{
struct vt_device *vd = arg;
int c;
switch (event) {
case KBDIO_KEYINPUT:
break;
case KBDIO_UNLOADING:
/* Keyboard KPI requires Giant. */
mtx_lock(&Giant);
vd->vd_keyboard = -1;
kbd_release(kbd, (void *)vd);
mtx_unlock(&Giant);
return (0);
default:
return (EINVAL);
}
while ((c = kbdd_read_char(kbd, 0)) != NOKEY)
vt_processkey(kbd, vd, c);
return (0);
}
/*
 * Acquire a keyboard for the device.  Prefers kbdmux and folds every
 * idle physical keyboard into it (KBADDKBD); falls back to the first
 * free keyboard otherwise.  Returns the allocated keyboard index, or
 * -1 when none is available.
 */
static int
vt_allocate_keyboard(struct vt_device *vd)
{
int idx0, idx;
keyboard_t *k0, *k;
keyboard_info_t ki;
idx0 = kbd_allocate("kbdmux", -1, vd, vt_kbdevent, vd);
if (idx0 >= 0) {
DPRINTF(20, "%s: kbdmux allocated, idx = %d\n", __func__, idx0);
k0 = kbd_get_keyboard(idx0);
for (idx = kbd_find_keyboard2("*", -1, 0);
idx != -1;
idx = kbd_find_keyboard2("*", -1, idx + 1)) {
k = kbd_get_keyboard(idx);
/* Skip the mux itself and busy keyboards. */
if (idx == idx0 || KBD_IS_BUSY(k))
continue;
bzero(&ki, sizeof(ki));
strncpy(ki.kb_name, k->kb_name, sizeof(ki.kb_name));
/* strncpy may not terminate; force it. */
ki.kb_name[sizeof(ki.kb_name) - 1] = '\0';
ki.kb_unit = k->kb_unit;
kbdd_ioctl(k0, KBADDKBD, (caddr_t) &ki);
}
} else {
DPRINTF(20, "%s: no kbdmux allocated\n", __func__);
idx0 = kbd_allocate("*", -1, vd, vt_kbdevent, vd);
if (idx0 < 0) {
DPRINTF(10, "%s: No keyboard found.\n", __func__);
return (-1);
}
}
vd->vd_keyboard = idx0;
DPRINTF(20, "%s: vd_keyboard = %d\n", __func__, vd->vd_keyboard);
return (idx0);
}
/*
 * Terminal bell hook: beep at the default pitch/duration unless the
 * bell is disabled globally or quieted on this device.
 */
static void
vtterm_bell(struct terminal *tm)
{
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
if (!vt_enable_bell)
return;
if (vd->vd_flags & VDF_QUIET_BELL)
return;
sysbeep(1193182 / VT_BELLPITCH, VT_BELLDURATION);
}
/*
 * Parameterized beep: the low 16 bits of "param" encode the pitch
 * divisor, the high 16 bits the duration in milliseconds.  A zero
 * pitch field (which also covers param == 0) falls back to the
 * default bell.
 */
static void
vtterm_beep(struct terminal *tm, u_int param)
{
	u_int pitch, duration;

	if (!vt_enable_bell)
		return;

	pitch = param & 0xffff;
	if (pitch == 0) {
		vtterm_bell(tm);
		return;
	}

	duration = ((param >> 16) & 0xffff) * hz / 1000;
	sysbeep(1193182 / pitch, duration);
}
/* Terminal hook: move the text cursor and schedule a redraw. */
static void
vtterm_cursor(struct terminal *tm, const term_pos_t *p)
{
	struct vt_window *vw;

	vw = tm->tm_softc;
	vtbuf_cursor_position(&vw->vw_buf, p);
	vt_resume_flush_timer(vw->vw_device, 0);
}
/*
 * Terminal hook: store one character in the backing buffer; the
 * actual rendering is deferred to the flush timer.
 */
static void
vtterm_putchar(struct terminal *tm, const term_pos_t *p, term_char_t c)
{
	struct vt_window *vw;

	vw = tm->tm_softc;
	vtbuf_putchar(&vw->vw_buf, p, c);
	vt_resume_flush_timer(vw->vw_device, 0);
}
/*
 * Terminal hook: fill a rectangle of the backing buffer with one
 * character; rendering is deferred to the flush timer.
 */
static void
vtterm_fill(struct terminal *tm, const term_rect_t *r, term_char_t c)
{
	struct vt_window *vw;

	vw = tm->tm_softc;
	vtbuf_fill_locked(&vw->vw_buf, r, c);
	vt_resume_flush_timer(vw->vw_device, 0);
}
/*
 * Terminal hook: copy a rectangle within the backing buffer (used
 * for scrolling); rendering is deferred to the flush timer.
 */
static void
vtterm_copy(struct terminal *tm, const term_rect_t *r,
    const term_pos_t *p)
{
	struct vt_window *vw;

	vw = tm->tm_softc;
	vtbuf_copy(&vw->vw_buf, r, p);
	vt_resume_flush_timer(vw->vw_device, 0);
}
/*
 * Terminal parameter hook: toggles cursor visibility (TP_SHOWCURSOR)
 * or sets the window's mouse reporting level (TP_MOUSE).
 */
static void
vtterm_param(struct terminal *tm, int cmd, unsigned int arg)
{
struct vt_window *vw = tm->tm_softc;
switch (cmd) {
case TP_SHOWCURSOR:
vtbuf_cursor_visibility(&vw->vw_buf, arg);
vt_resume_flush_timer(vw->vw_device, 0);
break;
case TP_MOUSE:
vw->vw_mouse_level = arg;
break;
}
}
/*
 * Resolve the foreground/background colors of character "c"; bold
 * brightens the foreground, while reverse video and the cursor each
 * swap fg/bg (two swaps cancel out).
 */
void
vt_determine_colors(term_char_t c, int cursor,
    term_color_t *fg, term_color_t *bg)
{
	term_color_t nfg, nbg;
	int swap;

	nfg = TCHAR_FGCOLOR(c);
	if (TCHAR_FORMAT(c) & TF_BOLD)
		nfg = TCOLOR_LIGHT(nfg);
	nbg = TCHAR_BGCOLOR(c);

	swap = ((TCHAR_FORMAT(c) & TF_REVERSE) != 0) ^ (cursor != 0);
	if (swap) {
		*fg = nbg;
		*bg = nfg;
	} else {
		*fg = nfg;
		*bg = nbg;
	}
}
#ifndef SC_NO_CUTPASTE
/*
 * Return 1 when the mouse cursor (at its last-drawn position,
 * extended by its width/height) overlaps "area", 0 otherwise.  The
 * comparisons treat area->tr_end as exclusive (note the "- 1").
 */
int
vt_is_cursor_in_area(const struct vt_device *vd, const term_rect_t *area)
{
unsigned int mx, my, x1, y1, x2, y2;
/*
 * We use the cursor position saved during the current refresh,
 * in case the cursor moved since.
 */
mx = vd->vd_mx_drawn + vd->vd_curwindow->vw_draw_area.tr_begin.tp_col;
my = vd->vd_my_drawn + vd->vd_curwindow->vw_draw_area.tr_begin.tp_row;
x1 = area->tr_begin.tp_col;
y1 = area->tr_begin.tp_row;
x2 = area->tr_end.tp_col;
y2 = area->tr_end.tp_row;
if (((mx >= x1 && x2 - 1 >= mx) ||
(mx < x1 && mx + vd->vd_mcursor->width >= x1)) &&
((my >= y1 && y2 - 1 >= my) ||
(my < y1 && my + vd->vd_mcursor->height >= y1)))
return (1);
return (0);
}
/*
 * Mark the character cells currently covered by the mouse cursor as
 * dirty in the current window's buffer, so the next flush repaints
 * (or erases) the cursor there.
 */
static void
vt_mark_mouse_position_as_dirty(struct vt_device *vd)
{
term_rect_t area;
struct vt_window *vw;
struct vt_font *vf;
int x, y;
vw = vd->vd_curwindow;
vf = vw->vw_font;
/* Pixel position the cursor was last drawn at. */
x = vd->vd_mx_drawn;
y = vd->vd_my_drawn;
if (vf != NULL) {
/* Convert the cursor's pixel rectangle to character cells. */
area.tr_begin.tp_col = x / vf->vf_width;
area.tr_begin.tp_row = y / vf->vf_height;
area.tr_end.tp_col =
((x + vd->vd_mcursor->width) / vf->vf_width) + 1;
area.tr_end.tp_row =
((y + vd->vd_mcursor->height) / vf->vf_height) + 1;
} else {
/*
 * No font loaded (ie. vt_vga operating in textmode).
 *
 * FIXME: This fake area needs to be revisited once the
 * mouse cursor is supported in vt_vga's textmode.
 */
area.tr_begin.tp_col = x;
area.tr_begin.tp_row = y;
area.tr_end.tp_col = x + 2;
area.tr_end.tp_row = y + 2;
}
vtbuf_dirty(&vw->vw_buf, &area);
}
#endif
/*
 * Flush the dirty region of the current window's text buffer to the
 * display backend, handling mouse-cursor show/hide/move bookkeeping.
 * Returns 1 if anything was redrawn, 0 otherwise.
 */
static int
vt_flush(struct vt_device *vd)
{
struct vt_window *vw;
struct vt_font *vf;
term_rect_t tarea;
term_pos_t size;
#ifndef SC_NO_CUTPASTE
int cursor_was_shown, cursor_moved;
#endif
vw = vd->vd_curwindow;
if (vw == NULL)
return (0);
/* Nothing to draw while the splash is up or the window is busy. */
if (vd->vd_flags & VDF_SPLASH || vw->vw_flags & VWF_BUSY)
return (0);
vf = vw->vw_font;
/* Graphical modes need a font before anything can be rendered. */
if (((vd->vd_flags & VDF_TEXTMODE) == 0) && (vf == NULL))
return (0);
#ifndef SC_NO_CUTPASTE
cursor_was_shown = vd->vd_mshown;
cursor_moved = (vd->vd_mx != vd->vd_mx_drawn ||
vd->vd_my != vd->vd_my_drawn);
/* Check if the cursor should be displayed or not. */
if ((vd->vd_flags & VDF_MOUSECURSOR) && /* Mouse support enabled. */
!(vw->vw_flags & VWF_MOUSE_HIDE) && /* Cursor displayed. */
!kdb_active && panicstr == NULL) { /* DDB inactive. */
vd->vd_mshown = 1;
} else {
vd->vd_mshown = 0;
}
/*
 * If the cursor changed display state or moved, we must mark
 * the old position as dirty, so that it's erased.
 */
if (cursor_was_shown != vd->vd_mshown ||
(vd->vd_mshown && cursor_moved))
vt_mark_mouse_position_as_dirty(vd);
/*
 * Save position of the mouse cursor. It's used by backends to
 * know where to draw the cursor and during the next refresh to
 * erase the previous position.
 */
vd->vd_mx_drawn = vd->vd_mx;
vd->vd_my_drawn = vd->vd_my;
/*
 * If the cursor is displayed and has moved since last refresh,
 * mark the new position as dirty.
 */
if (vd->vd_mshown && cursor_moved)
vt_mark_mouse_position_as_dirty(vd);
#endif
/* Fetch-and-clear the dirty rectangle from the buffer. */
vtbuf_undirty(&vw->vw_buf, &tarea);
vt_termsize(vd, vf, &size);
/* Force a full redraw when the screen contents are invalid. */
if (vd->vd_flags & VDF_INVALID) {
tarea.tr_begin.tp_row = tarea.tr_begin.tp_col = 0;
tarea.tr_end = size;
vd->vd_flags &= ~VDF_INVALID;
}
/* A non-empty dirty area means there is something to repaint. */
if (tarea.tr_begin.tp_col < tarea.tr_end.tp_col) {
vd->vd_driver->vd_bitblt_text(vd, vw, &tarea);
return (1);
}
return (0);
}
static void
vt_timer(void *arg)
{
    struct vt_device *vd = arg;

    /*
     * Periodic refresh: redraw if anything was dirty, and keep the
     * timer running only while updates are actually happening.
     */
    if (vt_flush(vd))
        vt_schedule_flush(vd, 0);
    else
        vd->vd_timer_armed = 0;
}
/*
 * Called by the terminal layer when a batch of output is complete.
 * In the debugger/panic case we must switch to and repaint this window
 * synchronously; otherwise flush synchronously only when the async
 * timer has not been started yet.
 */
static void
vtterm_done(struct terminal *tm)
{
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
if (kdb_active || panicstr != NULL) {
/* Switch to the debugger. */
if (vd->vd_curwindow != vw) {
vd->vd_curwindow = vw;
vd->vd_flags |= VDF_INVALID;
if (vd->vd_driver->vd_postswitch)
vd->vd_driver->vd_postswitch(vd);
}
vd->vd_flags &= ~VDF_SPLASH;
vt_flush(vd);
} else if (!(vd->vd_flags & VDF_ASYNC)) {
/* No timer yet (early boot): flush directly. */
vt_flush(vd);
}
}
#ifdef DEV_SPLASH
/*
 * Draw the boot splash logo centered on the screen and flag the device
 * as showing the splash.  Only done in graphics mode with RB_MUTE set.
 */
static void
vtterm_splash(struct vt_device *vd)
{
vt_axis_t top, left;
/* Display a nice boot splash. */
if (!(vd->vd_flags & VDF_TEXTMODE) && (boothowto & RB_MUTE)) {
/* Center the logo. */
top = (vd->vd_height - vt_logo_height) / 2;
left = (vd->vd_width - vt_logo_width) / 2;
switch (vt_logo_depth) {
case 1:
/* XXX: Unhardcode colors! */
vd->vd_driver->vd_bitblt_bmp(vd, vd->vd_curwindow,
vt_logo_image, NULL, vt_logo_width, vt_logo_height,
left, top, TC_WHITE, TC_BLACK);
}
vd->vd_flags |= VDF_SPLASH;
}
}
#endif
/*
 * Console probe: pick the highest-priority vt backend driver, initialize
 * it, attach a default font (unless in text mode) and size the statically
 * allocated console window buffer to the real screen dimensions.
 */
static void
vtterm_cnprobe(struct terminal *tm, struct consdev *cp)
{
struct vt_driver *vtd, **vtdlist, *vtdbest = NULL;
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
struct winsize wsz;
term_attr_t attr;
term_char_t c;
if (!vty_enabled(VTY_VT))
return;
if (vd->vd_flags & VDF_INITIALIZED)
/* Initialization already done. */
return;
/* Probe all registered backends and keep the highest priority one. */
SET_FOREACH(vtdlist, vt_drv_set) {
vtd = *vtdlist;
if (vtd->vd_probe == NULL)
continue;
if (vtd->vd_probe(vd) == CN_DEAD)
continue;
if ((vtdbest == NULL) ||
(vtd->vd_priority > vtdbest->vd_priority))
vtdbest = vtd;
}
if (vtdbest == NULL) {
cp->cn_pri = CN_DEAD;
vd->vd_flags |= VDF_DEAD;
} else {
vd->vd_driver = vtdbest;
cp->cn_pri = vd->vd_driver->vd_init(vd);
}
/* Check if driver's vt_init return CN_DEAD. */
if (cp->cn_pri == CN_DEAD) {
vd->vd_flags |= VDF_DEAD;
}
/* Initialize any early-boot keyboard drivers */
kbd_configure(KB_CONF_PROBE_ONLY);
vd->vd_unit = atomic_fetchadd_int(&vt_unit, 1);
vd->vd_windows[VT_CONSWINDOW] = vw;
/* NOTE(review): "%r" is a kernel-printf radix conversion — confirm. */
sprintf(cp->cn_name, "ttyv%r", VT_UNIT(vw));
/* Attach default font if not in TEXTMODE. */
if ((vd->vd_flags & VDF_TEXTMODE) == 0) {
vw->vw_font = vtfont_ref(&vt_font_default);
vt_compute_drawable_area(vw);
}
/*
 * The original screen size was faked (_VTDEFW x _VTDEFH). Now
 * that we have the real viewable size, fix it in the static
 * buffer.
 */
if (vd->vd_width != 0 && vd->vd_height != 0)
vt_termsize(vd, vw->vw_font, &vw->vw_buf.vb_scr_size);
vtbuf_init_early(&vw->vw_buf);
vt_winsize(vd, vw->vw_font, &wsz);
/* Pick the console attributes: muted boots use the normal ones. */
c = (boothowto & RB_MUTE) == 0 ? TERMINAL_KERN_ATTR :
TERMINAL_NORM_ATTR;
attr.ta_format = TCHAR_FORMAT(c);
attr.ta_fgcolor = TCHAR_FGCOLOR(c);
attr.ta_bgcolor = TCHAR_BGCOLOR(c);
terminal_set_winsize_blank(tm, &wsz, 1, &attr);
if (vtdbest != NULL) {
#ifdef DEV_SPLASH
vtterm_splash(vd);
#endif
vd->vd_flags |= VDF_INITIALIZED;
}
}
/*
 * Low-level console getc: poll the keyboard directly (no interrupts),
 * translating scroll-lock and arrow keys locally.  Returns the next
 * character or -1 if none is available.
 */
static int
vtterm_cngetc(struct terminal *tm)
{
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
keyboard_t *kbd;
u_int c;
/* Drain a pending multi-byte escape sequence first. */
if (vw->vw_kbdsq && *vw->vw_kbdsq)
return (*vw->vw_kbdsq++);
/* Make sure the splash screen is not there. */
if (vd->vd_flags & VDF_SPLASH) {
/* Remove splash */
vd->vd_flags &= ~VDF_SPLASH;
/* Mark screen as invalid to force update */
vd->vd_flags |= VDF_INVALID;
vt_flush(vd);
}
/* Stripped down keyboard handler. */
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd == NULL)
return (-1);
/* Force keyboard input mode to K_XLATE */
vw->vw_kbdmode = K_XLATE;
vt_update_kbd_mode(vw, kbd);
/* Switch the keyboard to polling to make it work here. */
kbdd_poll(kbd, TRUE);
c = kbdd_read_char(kbd, 0);
kbdd_poll(kbd, FALSE);
/* Ignore key-release events. */
if (c & RELKEY)
return (-1);
/* In scroll mode, keys drive the scrollback instead of input. */
if (vw->vw_flags & VWF_SCROLL) {
vt_scrollmode_kbdevent(vw, c, 1/* Console mode */);
vt_flush(vd);
return (-1);
}
/* Stripped down handling of vt_kbdevent(), without locking, etc. */
if (c & SPCLKEY) {
switch (c) {
case SPCLKEY | SLK:
/* Scroll-lock toggles scrollback browsing. */
vt_save_kbd_state(vw, kbd);
if (vw->vw_kbdstate & SLKED) {
/* Turn scrolling on. */
vw->vw_flags |= VWF_SCROLL;
VTBUF_SLCK_ENABLE(&vw->vw_buf);
} else {
/* Turn scrolling off. */
vt_scroll(vw, 0, VHS_END);
vw->vw_flags &= ~VWF_SCROLL;
VTBUF_SLCK_DISABLE(&vw->vw_buf);
}
break;
/* XXX: KDB can handle history. */
case SPCLKEY | FKEY | F(50): /* Arrow up. */
vw->vw_kbdsq = "\x1b[A";
break;
case SPCLKEY | FKEY | F(58): /* Arrow down. */
vw->vw_kbdsq = "\x1b[B";
break;
case SPCLKEY | FKEY | F(55): /* Arrow right. */
vw->vw_kbdsq = "\x1b[C";
break;
case SPCLKEY | FKEY | F(53): /* Arrow left. */
vw->vw_kbdsq = "\x1b[D";
break;
}
/* Force refresh to make scrollback work. */
vt_flush(vd);
} else if (KEYFLAGS(c) == 0) {
/* Plain character: hand it back directly. */
return (KEYCHAR(c));
}
/* Return the first byte of a sequence queued above, if any. */
if (vw->vw_kbdsq && *vw->vw_kbdsq)
return (*vw->vw_kbdsq++);
return (-1);
}
/*
 * Grab the console for exclusive low-level input (e.g. mountroot
 * prompt, geli passphrase).  Grabs nest via vw_grabbed; only the first
 * grab reconfigures the keyboard.
 */
static void
vtterm_cngrab(struct terminal *tm)
{
struct vt_device *vd;
struct vt_window *vw;
keyboard_t *kbd;
vw = tm->tm_softc;
vd = vw->vw_device;
/* Bring this window to front, unless we are too early in boot. */
if (!cold)
vt_window_switch(vw);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd == NULL)
return;
/* Nested grab: keyboard already set up by the first one. */
if (vw->vw_grabbed++ > 0)
return;
/*
 * Make sure the keyboard is accessible even when the kbd device
 * driver is disabled.
 */
kbdd_enable(kbd);
/* We shall always use the keyboard in the XLATE mode here. */
vw->vw_prev_kbdmode = vw->vw_kbdmode;
vw->vw_kbdmode = K_XLATE;
vt_update_kbd_mode(vw, kbd);
kbdd_poll(kbd, TRUE);
}
/*
 * Release a console grab taken by vtterm_cngrab().  Only the outermost
 * ungrab restores the previous keyboard mode and disables the device.
 */
static void
vtterm_cnungrab(struct terminal *tm)
{
struct vt_device *vd;
struct vt_window *vw;
keyboard_t *kbd;
vw = tm->tm_softc;
vd = vw->vw_device;
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd == NULL)
return;
/* Still nested: keep current settings. */
if (--vw->vw_grabbed > 0)
return;
/* Undo vtterm_cngrab() in reverse order. */
kbdd_poll(kbd, FALSE);
vw->vw_kbdmode = vw->vw_prev_kbdmode;
vt_update_kbd_mode(vw, kbd);
kbdd_disable(kbd);
}
static void
vtterm_opened(struct terminal *tm, int opened)
{
    struct vt_window *vw = tm->tm_softc;
    struct vt_device *vd = vw->vw_device;

    VT_LOCK(vd);
    /* Any open/close activity dismisses the boot splash. */
    vd->vd_flags &= ~VDF_SPLASH;
    if (opened) {
        vw->vw_flags |= VWF_OPENED;
    } else {
        vw->vw_flags &= ~VWF_OPENED;
        /* TODO: finish ACQ/REL */
    }
    VT_UNLOCK(vd);
}
/*
 * Paint the four border bars that surround the drawable text area with
 * color 'c'.  tr_begin is inclusive and tr_end exclusive, so the right
 * and bottom bars start exactly at tr_end — starting one pixel earlier
 * would overdraw the last column/row of the drawable area.
 * Returns 0 on success or ENOTSUP when the backend cannot draw rects.
 */
static int
vt_set_border(struct vt_window *vw, term_color_t c)
{
    struct vt_device *vd = vw->vw_device;

    if (vd->vd_driver->vd_drawrect == NULL)
        return (ENOTSUP);

    /* Top bar. */
    if (vw->vw_draw_area.tr_begin.tp_row > 0)
        vd->vd_driver->vd_drawrect(vd,
            0, 0,
            vd->vd_width - 1, vw->vw_draw_area.tr_begin.tp_row - 1,
            1, c);

    /* Left bar. */
    if (vw->vw_draw_area.tr_begin.tp_col > 0)
        vd->vd_driver->vd_drawrect(vd,
            0, 0,
            vw->vw_draw_area.tr_begin.tp_col - 1, vd->vd_height - 1,
            1, c);

    /* Right bar: starts at the exclusive bound, not one pixel before. */
    if (vw->vw_draw_area.tr_end.tp_col < vd->vd_width)
        vd->vd_driver->vd_drawrect(vd,
            vw->vw_draw_area.tr_end.tp_col, 0,
            vd->vd_width - 1, vd->vd_height - 1,
            1, c);

    /* Bottom bar: likewise starts at the exclusive bound. */
    if (vw->vw_draw_area.tr_end.tp_row < vd->vd_height)
        vd->vd_driver->vd_drawrect(vd,
            0, vw->vw_draw_area.tr_end.tp_row,
            vd->vd_width - 1, vd->vd_height - 1,
            1, c);

    return (0);
}
/*
 * Switch window 'vw' to font 'vf' (also used with the current font to
 * resize after a mode change).  Serializes against other font changes
 * via VWF_BUSY, grows the history buffer and terminal to the new
 * character geometry, then swaps the font reference.
 * Returns 0 on success or EBUSY if another change is in progress.
 */
static int
vt_change_font(struct vt_window *vw, struct vt_font *vf)
{
struct vt_device *vd = vw->vw_device;
struct terminal *tm = vw->vw_terminal;
term_pos_t size;
struct winsize wsz;
/*
 * Changing fonts.
 *
 * Changing fonts is a little tricky. We must prevent
 * simultaneous access to the device, so we must stop
 * the display timer and the terminal from accessing.
 * We need to switch fonts and grow our screen buffer.
 *
 * XXX: Right now the code uses terminal_mute() to
 * prevent data from reaching the console driver while
 * resizing the screen buffer. This isn't elegant...
 */
VT_LOCK(vd);
if (vw->vw_flags & VWF_BUSY) {
/* Another process is changing the font. */
VT_UNLOCK(vd);
return (EBUSY);
}
vw->vw_flags |= VWF_BUSY;
VT_UNLOCK(vd);
/* Compute the new character geometry for this font. */
vt_termsize(vd, vf, &size);
vt_winsize(vd, vf, &wsz);
/* Grow the screen buffer and terminal. */
terminal_mute(tm, 1);
vtbuf_grow(&vw->vw_buf, &size, vw->vw_buf.vb_history_size);
terminal_set_winsize_blank(tm, &wsz, 0, NULL);
terminal_set_cursor(tm, &vw->vw_buf.vb_cursor);
terminal_mute(tm, 0);
/* Actually apply the font to the current window. */
VT_LOCK(vd);
if (vw->vw_font != vf && vw->vw_font != NULL && vf != NULL) {
/*
 * In case vt_change_font called to update size we don't need
 * to update font link.
 */
vtfont_unref(vw->vw_font);
vw->vw_font = vtfont_ref(vf);
}
/*
 * Compute the drawable area and move the mouse cursor inside
 * it, in case the new area is smaller than the previous one.
 */
vt_compute_drawable_area(vw);
vd->vd_mx = min(vd->vd_mx,
vw->vw_draw_area.tr_end.tp_col -
vw->vw_draw_area.tr_begin.tp_col - 1);
vd->vd_my = min(vd->vd_my,
vw->vw_draw_area.tr_end.tp_row -
vw->vw_draw_area.tr_begin.tp_row - 1);
/* Force a full redraw the next timer tick. */
if (vd->vd_curwindow == vw) {
vt_set_border(vw, TC_BLACK);
vd->vd_flags |= VDF_INVALID;
vt_resume_flush_timer(vw->vw_device, 0);
}
vw->vw_flags &= ~VWF_BUSY;
VT_UNLOCK(vd);
return (0);
}
/*
 * Check whether the process controlling this VT_PROCESS-mode window is
 * still alive.  If it has exited, fall back to VT_AUTO mode and clear
 * the stale process reference.  Returns TRUE if alive, FALSE otherwise.
 */
static int
vt_proc_alive(struct vt_window *vw)
{
struct proc *p;
if (vw->vw_smode.mode != VT_PROCESS)
return (FALSE);
if (vw->vw_proc) {
/* pfind() returns the process locked; drop the lock at once. */
if ((p = pfind(vw->vw_pid)) != NULL)
PROC_UNLOCK(p);
/* Same struct proc at the same pid: still the controller. */
if (vw->vw_proc == p)
return (TRUE);
/* Controller died or pid was reused: revert to auto mode. */
vw->vw_proc = NULL;
vw->vw_smode.mode = VT_AUTO;
DPRINTF(1, "vt controlling process %d died\n", vw->vw_pid);
vw->vw_pid = 0;
}
return (FALSE);
}
/*
 * Ask the process controlling this VT_PROCESS-mode window to release
 * the screen by sending it the configured release signal.  Returns TRUE
 * if the window is under process control (whether or not a signal was
 * actually sent), FALSE otherwise.
 */
static int
signal_vt_rel(struct vt_window *vw)
{
if (vw->vw_smode.mode != VT_PROCESS)
return (FALSE);
/* Controller is gone: clear state and let the switch proceed. */
if (vw->vw_proc == NULL || vt_proc_alive(vw) == FALSE) {
vw->vw_proc = NULL;
vw->vw_pid = 0;
return (TRUE);
}
/* Mark the handshake pending and notify the controller. */
vw->vw_flags |= VWF_SWWAIT_REL;
PROC_LOCK(vw->vw_proc);
kern_psignal(vw->vw_proc, vw->vw_smode.relsig);
PROC_UNLOCK(vw->vw_proc);
DPRINTF(1, "sending relsig to %d\n", vw->vw_pid);
return (TRUE);
}
/*
 * Notify the process controlling this VT_PROCESS-mode window that it
 * has acquired the screen, by sending the configured acquire signal.
 * Returns TRUE if the window is under process control, FALSE otherwise.
 */
static int
signal_vt_acq(struct vt_window *vw)
{
if (vw->vw_smode.mode != VT_PROCESS)
return (FALSE);
/* The console window is now owned by a process: mark it unavailable. */
if (vw == vw->vw_device->vd_windows[VT_CONSWINDOW])
cnavailable(vw->vw_terminal->consdev, FALSE);
/* Controller is gone: clear state and carry on without a handshake. */
if (vw->vw_proc == NULL || vt_proc_alive(vw) == FALSE) {
vw->vw_proc = NULL;
vw->vw_pid = 0;
return (TRUE);
}
/* Mark the handshake pending and notify the controller. */
vw->vw_flags |= VWF_SWWAIT_ACQ;
PROC_LOCK(vw->vw_proc);
kern_psignal(vw->vw_proc, vw->vw_smode.acqsig);
PROC_UNLOCK(vw->vw_proc);
DPRINTF(1, "sending acqsig to %d\n", vw->vw_pid);
return (TRUE);
}
static int
finish_vt_rel(struct vt_window *vw, int release, int *s)
{

    /* Only meaningful while a release handshake is pending. */
    if ((vw->vw_flags & VWF_SWWAIT_REL) == 0)
        return (EINVAL);

    vw->vw_flags &= ~VWF_SWWAIT_REL;
    if (release) {
        /* Controller agreed: complete the deferred window switch. */
        callout_drain(&vw->vw_proc_dead_timer);
        vt_late_window_switch(vw->vw_switch_to);
    }
    return (0);
}
static int
finish_vt_acq(struct vt_window *vw)
{

    /* Only meaningful while an acquire handshake is pending. */
    if ((vw->vw_flags & VWF_SWWAIT_ACQ) == 0)
        return (EINVAL);

    vw->vw_flags &= ~VWF_SWWAIT_ACQ;
    return (0);
}
#ifndef SC_NO_CUTPASTE
/*
 * Inject an xterm-style mouse button report ("\x1B[M" + button + x + y)
 * into the current window's terminal, using the current mouse position
 * converted to character cells.
 */
static void
vt_mouse_terminput_button(struct vt_device *vd, int button)
{
struct vt_window *vw;
struct vt_font *vf;
char mouseb[6] = "\x1B[M";
int i, x, y;
vw = vd->vd_curwindow;
vf = vw->vw_font;
/* Translate to char position. */
x = vd->vd_mx / vf->vf_width;
y = vd->vd_my / vf->vf_height;
/* Avoid overflow. */
x = MIN(x, 255 - '!');
y = MIN(y, 255 - '!');
/* xterm encoding: button offset by ' ', coordinates offset by '!'. */
mouseb[3] = ' ' + button;
mouseb[4] = '!' + x;
mouseb[5] = '!' + y;
for (i = 0; i < sizeof(mouseb); i++)
terminal_input_char(vw->vw_terminal, mouseb[i]);
}
/*
 * Forward a mouse event to the terminal as xterm-style input, when the
 * window has mouse reporting enabled.  Only button events are handled;
 * the motion/scroll branch is disabled (and refers to syscons helpers).
 */
static void
vt_mouse_terminput(struct vt_device *vd, int type, int x, int y, int event,
int cnt)
{
switch (type) {
case MOUSE_BUTTON_EVENT:
if (cnt > 0) {
/* Mouse button pressed. */
if (event & MOUSE_BUTTON1DOWN)
vt_mouse_terminput_button(vd, 0);
if (event & MOUSE_BUTTON2DOWN)
vt_mouse_terminput_button(vd, 1);
if (event & MOUSE_BUTTON3DOWN)
vt_mouse_terminput_button(vd, 2);
} else {
/* Mouse button released. */
vt_mouse_terminput_button(vd, 3);
}
break;
#ifdef notyet
case MOUSE_MOTION_EVENT:
if (mouse->u.data.z < 0) {
/* Scroll up. */
sc_mouse_input_button(vd, 64);
} else if (mouse->u.data.z > 0) {
/* Scroll down. */
sc_mouse_input_button(vd, 65);
}
break;
#endif
}
}
/*
 * Paste the current cut buffer into the active window's terminal,
 * feeding it character by character and skipping NUL padding.
 */
static void
vt_mouse_paste(void)
{
    term_char_t *buf;
    int i, len;

    /* Number of characters stored in the device's paste buffer. */
    len = VD_PASTEBUFLEN(main_vd);
    buf = VD_PASTEBUF(main_vd);
    len /= sizeof(term_char_t);
    for (i = 0; i < len; i++) {
        /* NUL cells are padding, not input. */
        if (buf[i] == '\0')
            continue;
        terminal_input_char(main_vd->vd_curwindow->vw_terminal,
            buf[i]);
    }
}
/*
 * Central mouse event handler for vt(4): tracks pointer position,
 * drives text selection (single/double/triple click, extend), paste,
 * and forwards events to the terminal when mouse reporting is on.
 */
void
vt_mouse_event(int type, int x, int y, int event, int cnt, int mlevel)
{
struct vt_device *vd;
struct vt_window *vw;
struct vt_font *vf;
term_pos_t size;
int len, mark;
vd = main_vd;
vw = vd->vd_curwindow;
vf = vw->vw_font;
mark = 0;
if (vw->vw_flags & (VWF_MOUSE_HIDE | VWF_GRAPHICS))
/*
 * Either the mouse is disabled, or the window is in
 * "graphics mode". The graphics mode is usually set by
 * an X server, using the KDSETMODE ioctl.
 */
return;
if (vf == NULL) /* Text mode. */
return;
/*
 * TODO: add flag about pointer position changed, to not redraw chars
 * under mouse pointer when nothing changed.
 */
if (vw->vw_mouse_level > 0)
vt_mouse_terminput(vd, type, x, y, event, cnt);
switch (type) {
case MOUSE_ACTION:
case MOUSE_MOTION_EVENT:
/* Movement */
x += vd->vd_mx;
y += vd->vd_my;
vt_termsize(vd, vf, &size);
/* Apply limits. */
x = MAX(x, 0);
y = MAX(y, 0);
x = MIN(x, (size.tp_col * vf->vf_width) - 1);
y = MIN(y, (size.tp_row * vf->vf_height) - 1);
vd->vd_mx = x;
vd->vd_my = y;
/* Dragging with button 1 held extends the selection. */
if (vd->vd_mstate & MOUSE_BUTTON1DOWN)
vtbuf_set_mark(&vw->vw_buf, VTB_MARK_MOVE,
vd->vd_mx / vf->vf_width,
vd->vd_my / vf->vf_height);
vt_resume_flush_timer(vw->vw_device, 0);
return; /* Done */
case MOUSE_BUTTON_EVENT:
/* Buttons */
break;
default:
return; /* Done */
}
/* Map the button event to a selection-mark action. */
switch (event) {
case MOUSE_BUTTON1DOWN:
switch (cnt % 4) {
case 0: /* up */
mark = VTB_MARK_END;
break;
case 1: /* single click: start cut operation */
mark = VTB_MARK_START;
break;
case 2: /* double click: cut a word */
mark = VTB_MARK_WORD;
break;
case 3: /* triple click: cut a line */
mark = VTB_MARK_ROW;
break;
}
break;
case VT_MOUSE_PASTEBUTTON:
switch (cnt) {
case 0: /* up */
break;
default:
vt_mouse_paste();
break;
}
return; /* Done */
case VT_MOUSE_EXTENDBUTTON:
switch (cnt) {
case 0: /* up */
if (!(vd->vd_mstate & MOUSE_BUTTON1DOWN))
mark = VTB_MARK_EXTEND;
else
mark = 0;
break;
default:
mark = VTB_MARK_EXTEND;
break;
}
break;
default:
return; /* Done */
}
/* Save buttons state. */
if (cnt > 0)
vd->vd_mstate |= event;
else
vd->vd_mstate &= ~event;
if (vtbuf_set_mark(&vw->vw_buf, mark, vd->vd_mx / vf->vf_width,
vd->vd_my / vf->vf_height) == 1) {
/*
 * We have something marked to copy, so update pointer to
 * window with selection.
 */
vt_resume_flush_timer(vw->vw_device, 0);
switch (mark) {
case VTB_MARK_END:
case VTB_MARK_WORD:
case VTB_MARK_ROW:
case VTB_MARK_EXTEND:
break;
default:
/* Other types of mark do not require to copy data. */
return;
}
/* Get current selection size in bytes. */
len = vtbuf_get_marked_len(&vw->vw_buf);
if (len <= 0)
return;
/* Reallocate buffer only if old one is too small. */
if (len > VD_PASTEBUFSZ(vd)) {
VD_PASTEBUF(vd) = realloc(VD_PASTEBUF(vd), len, M_VT,
M_WAITOK | M_ZERO);
/* Update buffer size. */
VD_PASTEBUFSZ(vd) = len;
}
/* Request copy/paste buffer data, no more than `len' */
vtbuf_extract_marked(&vw->vw_buf, VD_PASTEBUF(vd),
VD_PASTEBUFSZ(vd));
VD_PASTEBUFLEN(vd) = len;
/* XXX VD_PASTEBUF(vd) have to be freed on shutdown/unload. */
}
}
void
vt_mouse_state(int show)
{
    struct vt_device *vd = main_vd;
    struct vt_window *vw = vd->vd_curwindow;

    /* Toggle the per-window mouse-hide flag. */
    if (show == VT_MOUSE_HIDE)
        vw->vw_flags |= VWF_MOUSE_HIDE;
    else if (show == VT_MOUSE_SHOW)
        vw->vw_flags &= ~VWF_MOUSE_HIDE;

    /* Mark mouse position as dirty. */
    vt_mark_mouse_position_as_dirty(vd);
    vt_resume_flush_timer(vw->vw_device, 0);
}
#endif
static int
vtterm_mmap(struct terminal *tm, vm_ooffset_t offset, vm_paddr_t * paddr,
    int nprot, vm_memattr_t *memattr)
{
    struct vt_window *vw = tm->tm_softc;
    struct vt_device *vd = vw->vw_device;

    /* Only backends with framebuffer support can satisfy mmap. */
    if (vd->vd_driver->vd_fb_mmap == NULL)
        return (ENXIO);

    return (vd->vd_driver->vd_fb_mmap(vd, offset, paddr, nprot, memattr));
}
/*
 * Terminal ioctl handler for vt(4): keyboard state/mode/LED control,
 * framebuffer passthrough, mouse cursor control, font loading and the
 * vty switching (VT_*) protocol.  Returns ENOIOCTL for commands it
 * does not recognize so upper layers may try other handlers.
 */
static int
vtterm_ioctl(struct terminal *tm, u_long cmd, caddr_t data,
struct thread *td)
{
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
keyboard_t *kbd;
int error, i, s;
/* Translate old (pre-7.x) ioctl numbers and their by-value arguments. */
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
int ival;
switch (cmd) {
case _IO('v', 4):
cmd = VT_RELDISP;
break;
case _IO('v', 5):
cmd = VT_ACTIVATE;
break;
case _IO('v', 6):
cmd = VT_WAITACTIVE;
break;
case _IO('K', 20):
cmd = KDSKBSTATE;
break;
case _IO('K', 67):
cmd = KDSETRAD;
break;
case _IO('K', 7):
cmd = KDSKBMODE;
break;
case _IO('K', 8):
cmd = KDMKTONE;
break;
case _IO('K', 63):
cmd = KIOCSOUND;
break;
case _IO('K', 66):
cmd = KDSETLED;
break;
case _IO('c', 110):
cmd = CONS_SETKBD;
break;
default:
goto skip_thunk;
}
/* Old ioctls passed the int by value; point data at a local copy. */
ival = IOCPARM_IVAL(data);
data = (caddr_t)&ival;
skip_thunk:
#endif
switch (cmd) {
case KDSETRAD: /* set keyboard repeat & delay rates (old) */
if (*(int *)data & ~0x7f)
return (EINVAL);
/* FALLTHROUGH */
case GIO_KEYMAP:
case PIO_KEYMAP:
case GIO_DEADKEYMAP:
case PIO_DEADKEYMAP:
case GETFKEY:
case SETFKEY:
case KDGKBINFO:
case KDGKBTYPE:
case KDGETREPEAT: /* get keyboard repeat & delay rates */
case KDSETREPEAT: /* set keyboard repeat & delay rates (new) */
case KBADDKBD: /* add/remove keyboard to/from mux */
case KBRELKBD: {
/* Pass these through to the keyboard driver under Giant. */
error = 0;
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = kbdd_ioctl(kbd, cmd, data);
mtx_unlock(&Giant);
if (error == ENOIOCTL) {
if (cmd == KDGKBTYPE) {
/* always return something? XXX */
*(int *)data = 0;
} else {
return (ENODEV);
}
}
return (error);
}
case KDGKBSTATE: { /* get keyboard state (locks) */
error = 0;
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_save_kbd_state(vw, kbd);
mtx_unlock(&Giant);
if (error != 0)
return (error);
}
*(int *)data = vw->vw_kbdstate & LOCK_MASK;
return (error);
}
case KDSKBSTATE: { /* set keyboard state (locks) */
int state;
state = *(int *)data;
if (state & ~LOCK_MASK)
return (EINVAL);
vw->vw_kbdstate &= ~LOCK_MASK;
vw->vw_kbdstate |= state;
error = 0;
/* Push to hardware only when this window owns the keyboard. */
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_update_kbd_state(vw, kbd);
mtx_unlock(&Giant);
}
return (error);
}
case KDGETLED: { /* get keyboard LED status */
error = 0;
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_save_kbd_leds(vw, kbd);
mtx_unlock(&Giant);
if (error != 0)
return (error);
}
*(int *)data = vw->vw_kbdstate & LED_MASK;
return (error);
}
case KDSETLED: { /* set keyboard LED status */
int leds;
leds = *(int *)data;
if (leds & ~LED_MASK)
return (EINVAL);
vw->vw_kbdstate &= ~LED_MASK;
vw->vw_kbdstate |= leds;
error = 0;
/* Push to hardware only when this window owns the keyboard. */
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_update_kbd_leds(vw, kbd);
mtx_unlock(&Giant);
}
return (error);
}
case KDGKBMODE: {
error = 0;
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_save_kbd_mode(vw, kbd);
mtx_unlock(&Giant);
if (error != 0)
return (error);
}
*(int *)data = vw->vw_kbdmode;
return (error);
}
case KDSKBMODE: {
int mode;
mode = *(int *)data;
switch (mode) {
case K_XLATE:
case K_RAW:
case K_CODE:
vw->vw_kbdmode = mode;
error = 0;
if (vw == vd->vd_curwindow) {
mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
error = vt_update_kbd_mode(vw, kbd);
mtx_unlock(&Giant);
}
return (error);
default:
return (EINVAL);
}
}
case FBIOGTYPE:
case FBIO_GETWINORG: /* get frame buffer window origin */
case FBIO_GETDISPSTART: /* get display start address */
case FBIO_GETLINEWIDTH: /* get scan line width in bytes */
case FBIO_BLANK: /* blank display */
/* Framebuffer queries are delegated to the backend driver. */
if (vd->vd_driver->vd_fb_ioctl)
return (vd->vd_driver->vd_fb_ioctl(vd, cmd, data, td));
break;
case CONS_BLANKTIME:
/* XXX */
return (0);
case CONS_GET:
/* XXX */
*(int *)data = M_CG640x480;
return (0);
case CONS_BELLTYPE: /* set bell type sound */
if ((*(int *)data) & CONS_QUIET_BELL)
vd->vd_flags |= VDF_QUIET_BELL;
else
vd->vd_flags &= ~VDF_QUIET_BELL;
return (0);
case CONS_GETINFO: {
vid_info_t *vi = (vid_info_t *)data;
if (vi->size != sizeof(struct vid_info))
return (EINVAL);
if (vw == vd->vd_curwindow) {
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
vt_save_kbd_state(vw, kbd);
}
vi->m_num = vd->vd_curwindow->vw_number + 1;
vi->mk_keylock = vw->vw_kbdstate & LOCK_MASK;
/* XXX: other fields! */
return (0);
}
case CONS_GETVERS:
*(int *)data = 0x200;
return (0);
case CONS_MODEINFO:
/* XXX */
return (0);
case CONS_MOUSECTL: {
mouse_info_t *mouse = (mouse_info_t*)data;
/*
 * All the commands except MOUSE_SHOW nd MOUSE_HIDE
 * should not be applied to individual TTYs, but only to
 * consolectl.
 */
switch (mouse->operation) {
case MOUSE_HIDE:
if (vd->vd_flags & VDF_MOUSECURSOR) {
vd->vd_flags &= ~VDF_MOUSECURSOR;
#ifndef SC_NO_CUTPASTE
vt_mouse_state(VT_MOUSE_HIDE);
#endif
}
return (0);
case MOUSE_SHOW:
if (!(vd->vd_flags & VDF_MOUSECURSOR)) {
vd->vd_flags |= VDF_MOUSECURSOR;
/* Start with the cursor centered on screen. */
vd->vd_mx = vd->vd_width / 2;
vd->vd_my = vd->vd_height / 2;
#ifndef SC_NO_CUTPASTE
vt_mouse_state(VT_MOUSE_SHOW);
#endif
}
return (0);
default:
return (EINVAL);
}
}
case PIO_VFONT: {
struct vt_font *vf;
if (vd->vd_flags & VDF_TEXTMODE)
return (ENOTSUP);
error = vtfont_load((void *)data, &vf);
if (error != 0)
return (error);
/* vt_change_font takes its own reference; drop ours. */
error = vt_change_font(vw, vf);
vtfont_unref(vf);
return (error);
}
case PIO_VFONT_DEFAULT: {
/* Reset to default font. */
error = vt_change_font(vw, &vt_font_default);
return (error);
}
case GIO_SCRNMAP: {
scrmap_t *sm = (scrmap_t *)data;
/* We don't have screen maps, so return a handcrafted one. */
for (i = 0; i < 256; i++)
sm->scrmap[i] = i;
return (0);
}
case KDSETMODE:
/*
 * FIXME: This implementation is incomplete compared to
 * syscons.
 */
switch (*(int *)data) {
case KD_TEXT:
case KD_TEXT1:
case KD_PIXEL:
vw->vw_flags &= ~VWF_GRAPHICS;
break;
case KD_GRAPHICS:
vw->vw_flags |= VWF_GRAPHICS;
break;
}
return (0);
case KDENABIO: /* allow io operations */
/* Privileged: grants userland I/O port access. */
error = priv_check(td, PRIV_IO);
if (error != 0)
return (error);
error = securelevel_gt(td->td_ucred, 0);
if (error != 0)
return (error);
#if defined(__i386__)
td->td_frame->tf_eflags |= PSL_IOPL;
#elif defined(__amd64__)
td->td_frame->tf_rflags |= PSL_IOPL;
#endif
return (0);
case KDDISABIO: /* disallow io operations (default) */
#if defined(__i386__)
td->td_frame->tf_eflags &= ~PSL_IOPL;
#elif defined(__amd64__)
td->td_frame->tf_rflags &= ~PSL_IOPL;
#endif
return (0);
case KDMKTONE: /* sound the bell */
vtterm_beep(tm, *(u_int *)data);
return (0);
case KIOCSOUND: /* make tone (*data) hz */
/* TODO */
return (0);
case CONS_SETKBD: /* set the new keyboard */
mtx_lock(&Giant);
error = 0;
if (vd->vd_keyboard != *(int *)data) {
kbd = kbd_get_keyboard(*(int *)data);
if (kbd == NULL) {
mtx_unlock(&Giant);
return (EINVAL);
}
i = kbd_allocate(kbd->kb_name, kbd->kb_unit,
(void *)vd, vt_kbdevent, vd);
if (i >= 0) {
/* Release the old keyboard, keeping its state. */
if (vd->vd_keyboard != -1) {
vt_save_kbd_state(vd->vd_curwindow, kbd);
kbd_release(kbd, (void *)vd);
}
kbd = kbd_get_keyboard(i);
vd->vd_keyboard = i;
vt_update_kbd_mode(vd->vd_curwindow, kbd);
vt_update_kbd_state(vd->vd_curwindow, kbd);
} else {
error = EPERM; /* XXX */
}
}
mtx_unlock(&Giant);
return (error);
case CONS_RELKBD: /* release the current keyboard */
mtx_lock(&Giant);
error = 0;
if (vd->vd_keyboard != -1) {
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd == NULL) {
mtx_unlock(&Giant);
return (EINVAL);
}
vt_save_kbd_state(vd->vd_curwindow, kbd);
error = kbd_release(kbd, (void *)vd);
if (error == 0) {
vd->vd_keyboard = -1;
}
}
mtx_unlock(&Giant);
return (error);
case VT_ACTIVATE: {
int win;
win = *(int *)data - 1;
DPRINTF(5, "%s%d: VT_ACTIVATE ttyv%d ", SC_DRIVER_NAME,
VT_UNIT(vw), win);
if ((win >= VT_MAXWINDOWS) || (win < 0))
return (EINVAL);
return (vt_proc_window_switch(vd->vd_windows[win]));
}
case VT_GETACTIVE:
*(int *)data = vd->vd_curwindow->vw_number + 1;
return (0);
case VT_GETINDEX:
*(int *)data = vw->vw_number + 1;
return (0);
case VT_LOCKSWITCH:
/* TODO: Check current state, switching can be in progress. */
if ((*(int *)data) == 0x01)
vw->vw_flags |= VWF_VTYLOCK;
else if ((*(int *)data) == 0x02)
vw->vw_flags &= ~VWF_VTYLOCK;
else
return (EINVAL);
return (0);
case VT_OPENQRY:
/* Find the first window not yet opened. */
VT_LOCK(vd);
for (i = 0; i < VT_MAXWINDOWS; i++) {
vw = vd->vd_windows[i];
if (vw == NULL)
continue;
if (!(vw->vw_flags & VWF_OPENED)) {
*(int *)data = vw->vw_number + 1;
VT_UNLOCK(vd);
return (0);
}
}
VT_UNLOCK(vd);
return (EINVAL);
case VT_WAITACTIVE:
/* Sleep until the given window (or this one, if 0) is current. */
error = 0;
i = *(unsigned int *)data;
if (i > VT_MAXWINDOWS)
return (EINVAL);
if (i != 0)
vw = vd->vd_windows[i - 1];
VT_LOCK(vd);
while (vd->vd_curwindow != vw && error == 0)
error = cv_wait_sig(&vd->vd_winswitch, &vd->vd_lock);
VT_UNLOCK(vd);
return (error);
case VT_SETMODE: { /* set screen switcher mode */
struct vt_mode *mode;
struct proc *p1;
mode = (struct vt_mode *)data;
DPRINTF(5, "%s%d: VT_SETMODE ", SC_DRIVER_NAME, VT_UNIT(vw));
/* Only the current controller may change an owned window's mode. */
if (vw->vw_smode.mode == VT_PROCESS) {
p1 = pfind(vw->vw_pid);
if (vw->vw_proc == p1 && vw->vw_proc != td->td_proc) {
if (p1)
PROC_UNLOCK(p1);
DPRINTF(5, "error EPERM\n");
return (EPERM);
}
if (p1)
PROC_UNLOCK(p1);
}
if (mode->mode == VT_AUTO) {
vw->vw_smode.mode = VT_AUTO;
vw->vw_proc = NULL;
vw->vw_pid = 0;
DPRINTF(5, "VT_AUTO, ");
if (vw == vw->vw_device->vd_windows[VT_CONSWINDOW])
cnavailable(vw->vw_terminal->consdev, TRUE);
/* were we in the middle of the vty switching process? */
if (finish_vt_rel(vw, TRUE, &s) == 0)
DPRINTF(5, "reset WAIT_REL, ");
if (finish_vt_acq(vw) == 0)
DPRINTF(5, "reset WAIT_ACQ, ");
return (0);
} else if (mode->mode == VT_PROCESS) {
if (!ISSIGVALID(mode->relsig) ||
!ISSIGVALID(mode->acqsig) ||
!ISSIGVALID(mode->frsig)) {
DPRINTF(5, "error EINVAL\n");
return (EINVAL);
}
DPRINTF(5, "VT_PROCESS %d, ", td->td_proc->p_pid);
bcopy(data, &vw->vw_smode, sizeof(struct vt_mode));
vw->vw_proc = td->td_proc;
vw->vw_pid = vw->vw_proc->p_pid;
if (vw == vw->vw_device->vd_windows[VT_CONSWINDOW])
cnavailable(vw->vw_terminal->consdev, FALSE);
} else {
DPRINTF(5, "VT_SETMODE failed, unknown mode %d\n",
mode->mode);
return (EINVAL);
}
DPRINTF(5, "\n");
return (0);
}
case VT_GETMODE: /* get screen switcher mode */
bcopy(&vw->vw_smode, data, sizeof(struct vt_mode));
return (0);
case VT_RELDISP: /* screen switcher ioctl */
/*
 * This must be the current vty which is in the VT_PROCESS
 * switching mode...
 */
if ((vw != vd->vd_curwindow) || (vw->vw_smode.mode !=
VT_PROCESS)) {
return (EINVAL);
}
/* ...and this process is controlling it. */
if (vw->vw_proc != td->td_proc) {
return (EPERM);
}
error = EINVAL;
switch(*(int *)data) {
case VT_FALSE: /* user refuses to release screen, abort */
if ((error = finish_vt_rel(vw, FALSE, &s)) == 0)
DPRINTF(5, "%s%d: VT_RELDISP: VT_FALSE\n",
SC_DRIVER_NAME, VT_UNIT(vw));
break;
case VT_TRUE: /* user has released screen, go on */
/* finish_vt_rel(..., TRUE, ...) should not be locked */
if (vw->vw_flags & VWF_SWWAIT_REL) {
if ((error = finish_vt_rel(vw, TRUE, &s)) == 0)
DPRINTF(5, "%s%d: VT_RELDISP: VT_TRUE\n",
SC_DRIVER_NAME, VT_UNIT(vw));
} else {
error = EINVAL;
}
return (error);
case VT_ACKACQ: /* acquire acknowledged, switch completed */
if ((error = finish_vt_acq(vw)) == 0)
DPRINTF(5, "%s%d: VT_RELDISP: VT_ACKACQ\n",
SC_DRIVER_NAME, VT_UNIT(vw));
break;
default:
break;
}
return (error);
}
return (ENOIOCTL);
}
/*
 * Allocate and initialize vt window number 'window' on device 'vd':
 * default keyboard mode, default font (unless in text mode), screen
 * buffer sized to the device, and an attached terminal.  Returns the
 * new window, which is also stored in vd->vd_windows[window].
 */
static struct vt_window *
vt_allocate_window(struct vt_device *vd, unsigned int window)
{
struct vt_window *vw;
struct terminal *tm;
term_pos_t size;
struct winsize wsz;
vw = malloc(sizeof *vw, M_VT, M_WAITOK|M_ZERO);
vw->vw_device = vd;
vw->vw_number = window;
vw->vw_kbdmode = K_XLATE;
/* Graphics modes get the default font and a drawable area. */
if ((vd->vd_flags & VDF_TEXTMODE) == 0) {
vw->vw_font = vtfont_ref(&vt_font_default);
vt_compute_drawable_area(vw);
}
vt_termsize(vd, vw->vw_font, &size);
vt_winsize(vd, vw->vw_font, &wsz);
vtbuf_init(&vw->vw_buf, &size);
tm = vw->vw_terminal = terminal_alloc(&vt_termclass, vw);
terminal_set_winsize(tm, &wsz);
vd->vd_windows[window] = vw;
callout_init(&vw->vw_proc_dead_timer, 0);
return (vw);
}
void
vt_upgrade(struct vt_device *vd)
{
struct vt_window *vw;
unsigned int i;
if (!vty_enabled(VTY_VT))
return;
if (main_vd->vd_driver == NULL)
return;
for (i = 0; i < VT_MAXWINDOWS; i++) {
vw = vd->vd_windows[i];
if (vw == NULL) {
/* New window. */
vw = vt_allocate_window(vd, i);
}
if (!(vw->vw_flags & VWF_READY)) {
callout_init(&vw->vw_proc_dead_timer, 0);
terminal_maketty(vw->vw_terminal, "v%r", VT_UNIT(vw));
vw->vw_flags |= VWF_READY;
if (vw->vw_flags & VWF_CONSOLE) {
/* For existing console window. */
EVENTHANDLER_REGISTER(shutdown_pre_sync,
vt_window_switch, vw, SHUTDOWN_PRI_DEFAULT);
}
}
}
VT_LOCK(vd);
if (vd->vd_curwindow == NULL)
vd->vd_curwindow = vd->vd_windows[VT_CONSWINDOW];
if (!(vd->vd_flags & VDF_ASYNC)) {
/* Attach keyboard. */
vt_allocate_keyboard(vd);
/* Init 25 Hz timer. */
callout_init_mtx(&vd->vd_timer, &vd->vd_lock, 0);
/* Start timer when everything ready. */
vd->vd_flags |= VDF_ASYNC;
callout_reset(&vd->vd_timer, hz / VT_TIMERFREQ, vt_timer, vd);
vd->vd_timer_armed = 1;
+
+ /* Register suspend/resume handlers. */
+ EVENTHANDLER_REGISTER(power_suspend_early, vt_suspend_handler,
+ vd, EVENTHANDLER_PRI_ANY);
+ EVENTHANDLER_REGISTER(power_resume, vt_resume_handler, vd,
+ EVENTHANDLER_PRI_ANY);
}
VT_UNLOCK(vd);
/* Refill settings with new sizes. */
vt_resize(vd);
}
/*
 * Propagate the device's current dimensions to all windows: hand the
 * default font to any window still lacking one (non-textmode only) and
 * re-run vt_change_font() until every window has picked it up.
 */
static void
vt_resize(struct vt_device *vd)
{
	struct vt_window *win;
	int idx;

	for (idx = 0; idx < VT_MAXWINDOWS; idx++) {
		win = vd->vd_windows[idx];

		VT_LOCK(vd);
		/* Assign default font to window, if not textmode. */
		if (!(vd->vd_flags & VDF_TEXTMODE) && win->vw_font == NULL)
			win->vw_font = vtfont_ref(&vt_font_default);
		VT_UNLOCK(vd);

		/* Resize terminal windows */
		while (vt_change_font(win, win->vw_font) == EBUSY) {
			DPRINTF(100, "%s: vt_change_font() is busy, "
			    "window %d\n", __func__, idx);
		}
	}
}
/*
 * Attach (or replace) the vt driver for the main vt device.  A
 * replacement is only accepted when the new driver's priority exceeds
 * the current one's, so e.g. a KMS driver is not displaced by VGA.
 */
void
vt_allocate(struct vt_driver *drv, void *softc)
{
	struct vt_device *vd;

	if (!vty_enabled(VTY_VT))
		return;

	if (main_vd->vd_driver == NULL) {
		main_vd->vd_driver = drv;
		printf("VT: initialize with new VT driver \"%s\".\n",
		    drv->vd_name);
	} else {
		/*
		 * Check if have rights to replace current driver. For example:
		 * it is bad idea to replace KMS driver with generic VGA one.
		 */
		if (drv->vd_priority <= main_vd->vd_driver->vd_priority) {
			printf("VT: Driver priority %d too low. Current %d\n ",
			    drv->vd_priority, main_vd->vd_driver->vd_priority);
			return;
		}
		printf("VT: Replacing driver \"%s\" with new \"%s\".\n",
		    main_vd->vd_driver->vd_name, drv->vd_name);
	}
	vd = main_vd;

	if (vd->vd_flags & VDF_ASYNC) {
		/* Stop vt_flush periodic task. */
		VT_LOCK(vd);
		vt_suspend_flush_timer(vd);
		VT_UNLOCK(vd);
		/*
		 * Mute current terminal until we done. vt_change_font (called
		 * from vt_resize) will unmute it.
		 */
		terminal_mute(vd->vd_curwindow->vw_terminal, 1);
	}

	/*
	 * Reset VDF_TEXTMODE flag, driver who require that flag (vt_vga) will
	 * set it.
	 */
	VT_LOCK(vd);
	vd->vd_flags &= ~VDF_TEXTMODE;

	/* Hand the device to the new driver and let it initialize. */
	vd->vd_driver = drv;
	vd->vd_softc = softc;
	vd->vd_driver->vd_init(vd);
	VT_UNLOCK(vd);

	/* Update windows sizes and initialize last items. */
	vt_upgrade(vd);

#ifdef DEV_SPLASH
	if (vd->vd_flags & VDF_SPLASH)
		vtterm_splash(vd);
#endif

	if (vd->vd_flags & VDF_ASYNC) {
		/* Allow to put chars now. */
		terminal_mute(vd->vd_curwindow->vw_terminal, 0);
		/* Rerun timer for screen updates. */
		vt_resume_flush_timer(vd, 0);
	}

	/*
	 * Register as console. If it already registered, cnadd() will ignore
	 * it.
	 */
	termcn_cnregister(vd->vd_windows[VT_CONSWINDOW]->vw_terminal);
}
/*
 * power_suspend_early eventhandler: forward the event to the active
 * driver's vd_suspend method, when the driver provides one.
 */
+static void
+vt_suspend_handler(void *priv)
+{
+	struct vt_device *vd;
+
+	vd = priv;
+	if (vd->vd_driver != NULL && vd->vd_driver->vd_suspend != NULL)
+		vd->vd_driver->vd_suspend(vd);
+}
+
/*
 * power_resume eventhandler: forward the event to the active driver's
 * vd_resume method, when the driver provides one.
 */
+static void
+vt_resume_handler(void *priv)
+{
+	struct vt_device *vd;
+
+	vd = priv;
+	if (vd->vd_driver != NULL && vd->vd_driver->vd_resume != NULL)
+		vd->vd_driver->vd_resume(vd);
+}
+
/*
 * Before system suspend, save the active window and switch to the
 * console window, then (in the new revision) wait for the switch to
 * complete.  A no-op when vt_suspendswitch is disabled.
 */
void
-vt_suspend()
+vt_suspend(struct vt_device *vd)
{
+	int error;

	if (vt_suspendswitch == 0)
		return;
	/* Save current window. */
-	main_vd->vd_savedwindow = main_vd->vd_curwindow;
+	vd->vd_savedwindow = vd->vd_curwindow;
	/* Ask holding process to free window and switch to console window */
-	vt_proc_window_switch(main_vd->vd_windows[VT_CONSWINDOW]);
+	vt_proc_window_switch(vd->vd_windows[VT_CONSWINDOW]);
+
+	/* Wait for the window switch to complete. */
+	error = 0;
+	VT_LOCK(vd);
+	while (vd->vd_curwindow != vd->vd_windows[VT_CONSWINDOW] && error == 0)
+		error = cv_wait_sig(&vd->vd_winswitch, &vd->vd_lock);
+	VT_UNLOCK(vd);
}
/*
 * After resume, switch back to the window that was active when
 * vt_suspend() ran.  A no-op when vt_suspendswitch is disabled.
 */
void
-vt_resume()
+vt_resume(struct vt_device *vd)
{

	if (vt_suspendswitch == 0)
		return;
	/* Switch back to saved window */
-	if (main_vd->vd_savedwindow != NULL)
-		vt_proc_window_switch(main_vd->vd_savedwindow);
-	main_vd->vd_savedwindow = NULL;
+	if (vd->vd_savedwindow != NULL)
+		vt_proc_window_switch(vd->vd_savedwindow);
+	vd->vd_savedwindow = NULL;
}
Index: projects/clang360-import/sys/netipsec/key.c
===================================================================
--- projects/clang360-import/sys/netipsec/key.c (revision 277808)
+++ projects/clang360-import/sys/netipsec/key.c (revision 277809)
@@ -1,7823 +1,7821 @@
/* $FreeBSD$ */
/* $KAME: key.c,v 1.191 2001/06/27 10:46:49 sakane Exp $ */
/*-
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This code refers to RFC 2367
*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/raw_cb.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#if defined(INET) || defined(INET6)
#include <netinet/in_pcb.h>
#endif
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif /* INET6 */
#include <net/pfkeyv2.h>
#include <netipsec/keydb.h>
#include <netipsec/key.h>
#include <netipsec/keysock.h>
#include <netipsec/key_debug.h>
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/xform.h>
#include <machine/stdarg.h>
/* randomness */
#include <sys/random.h>
#define FULLMASK 0xff
#define _BITS(bytes) ((bytes) << 3)
/*
* Note on SA reference counting:
* - SAs that are not in DEAD state will have (total external reference + 1)
* following value in reference count field. they cannot be freed and are
* referenced from SA header.
* - SAs that are in DEAD state will have (total external reference)
* in reference count field. they are ready to be freed. reference from
* SA header will be removed in key_delsav(), when the reference count
* field hits 0 (= no external reference other than from SA header.
*/
VNET_DEFINE(u_int32_t, key_debug_level) = 0;
static VNET_DEFINE(u_int, key_spi_trycnt) = 1000;
static VNET_DEFINE(u_int32_t, key_spi_minval) = 0x100;
static VNET_DEFINE(u_int32_t, key_spi_maxval) = 0x0fffffff; /* XXX */
static VNET_DEFINE(u_int32_t, policy_id) = 0;
/* interval to initialize the random seed, 1 minute */
static VNET_DEFINE(u_int, key_int_random) = 60;
/* interval to expire acquiring, 30(s)*/
static VNET_DEFINE(u_int, key_larval_lifetime) = 30;
/* counter for blocking SADB_ACQUIRE.*/
static VNET_DEFINE(int, key_blockacq_count) = 10;
/* lifetime for blocking SADB_ACQUIRE.*/
static VNET_DEFINE(int, key_blockacq_lifetime) = 20;
/* preferred old sa rather than new sa.*/
static VNET_DEFINE(int, key_preferred_oldsa) = 1;
#define V_key_spi_trycnt VNET(key_spi_trycnt)
#define V_key_spi_minval VNET(key_spi_minval)
#define V_key_spi_maxval VNET(key_spi_maxval)
#define V_policy_id VNET(policy_id)
#define V_key_int_random VNET(key_int_random)
#define V_key_larval_lifetime VNET(key_larval_lifetime)
#define V_key_blockacq_count VNET(key_blockacq_count)
#define V_key_blockacq_lifetime VNET(key_blockacq_lifetime)
#define V_key_preferred_oldsa VNET(key_preferred_oldsa)
static VNET_DEFINE(u_int32_t, acq_seq) = 0;
#define V_acq_seq VNET(acq_seq)
/* SPD */
static VNET_DEFINE(TAILQ_HEAD(_sptree, secpolicy), sptree[IPSEC_DIR_MAX]);
static struct rmlock sptree_lock;
#define V_sptree VNET(sptree)
#define SPTREE_LOCK_INIT() rm_init(&sptree_lock, "sptree")
#define SPTREE_LOCK_DESTROY() rm_destroy(&sptree_lock)
#define SPTREE_RLOCK_TRACKER struct rm_priotracker sptree_tracker
#define SPTREE_RLOCK() rm_rlock(&sptree_lock, &sptree_tracker)
#define SPTREE_RUNLOCK() rm_runlock(&sptree_lock, &sptree_tracker)
#define SPTREE_RLOCK_ASSERT() rm_assert(&sptree_lock, RA_RLOCKED)
#define SPTREE_WLOCK() rm_wlock(&sptree_lock)
#define SPTREE_WUNLOCK() rm_wunlock(&sptree_lock)
#define SPTREE_WLOCK_ASSERT() rm_assert(&sptree_lock, RA_WLOCKED)
#define SPTREE_UNLOCK_ASSERT() rm_assert(&sptree_lock, RA_UNLOCKED)
static VNET_DEFINE(LIST_HEAD(_sahtree, secashead), sahtree); /* SAD */
#define V_sahtree VNET(sahtree)
static struct mtx sahtree_lock;
#define SAHTREE_LOCK_INIT() \
mtx_init(&sahtree_lock, "sahtree", \
"fast ipsec security association database", MTX_DEF)
#define SAHTREE_LOCK_DESTROY() mtx_destroy(&sahtree_lock)
#define SAHTREE_LOCK() mtx_lock(&sahtree_lock)
#define SAHTREE_UNLOCK() mtx_unlock(&sahtree_lock)
#define SAHTREE_LOCK_ASSERT() mtx_assert(&sahtree_lock, MA_OWNED)
/* registered list */
static VNET_DEFINE(LIST_HEAD(_regtree, secreg), regtree[SADB_SATYPE_MAX + 1]);
#define V_regtree VNET(regtree)
static struct mtx regtree_lock;
#define REGTREE_LOCK_INIT() \
mtx_init(&regtree_lock, "regtree", "fast ipsec regtree", MTX_DEF)
#define REGTREE_LOCK_DESTROY() mtx_destroy(&regtree_lock)
#define REGTREE_LOCK() mtx_lock(&regtree_lock)
#define REGTREE_UNLOCK() mtx_unlock(&regtree_lock)
#define REGTREE_LOCK_ASSERT() mtx_assert(&regtree_lock, MA_OWNED)
static VNET_DEFINE(LIST_HEAD(_acqtree, secacq), acqtree); /* acquiring list */
#define V_acqtree VNET(acqtree)
static struct mtx acq_lock;
#define ACQ_LOCK_INIT() \
mtx_init(&acq_lock, "acqtree", "fast ipsec acquire list", MTX_DEF)
#define ACQ_LOCK_DESTROY() mtx_destroy(&acq_lock)
#define ACQ_LOCK() mtx_lock(&acq_lock)
#define ACQ_UNLOCK() mtx_unlock(&acq_lock)
#define ACQ_LOCK_ASSERT() mtx_assert(&acq_lock, MA_OWNED)
/* SP acquiring list */
static VNET_DEFINE(LIST_HEAD(_spacqtree, secspacq), spacqtree);
#define V_spacqtree VNET(spacqtree)
static struct mtx spacq_lock;
#define SPACQ_LOCK_INIT() \
mtx_init(&spacq_lock, "spacqtree", \
"fast ipsec security policy acquire list", MTX_DEF)
#define SPACQ_LOCK_DESTROY() mtx_destroy(&spacq_lock)
#define SPACQ_LOCK() mtx_lock(&spacq_lock)
#define SPACQ_UNLOCK() mtx_unlock(&spacq_lock)
#define SPACQ_LOCK_ASSERT() mtx_assert(&spacq_lock, MA_OWNED)
/* search order for SAs */
static const u_int saorder_state_valid_prefer_old[] = {
SADB_SASTATE_DYING, SADB_SASTATE_MATURE,
};
static const u_int saorder_state_valid_prefer_new[] = {
SADB_SASTATE_MATURE, SADB_SASTATE_DYING,
};
static const u_int saorder_state_alive[] = {
/* except DEAD */
SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL
};
static const u_int saorder_state_any[] = {
SADB_SASTATE_MATURE, SADB_SASTATE_DYING,
SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD
};
static const int minsize[] = {
sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */
sizeof(struct sadb_sa), /* SADB_EXT_SA */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */
sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */
sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */
sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */
sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */
sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */
sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */
sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */
sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */
sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */
sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */
sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */
sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */
0, /* SADB_X_EXT_KMPRIVATE */
sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */
sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */
sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */
sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */
sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */
sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAI */
sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAR */
sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */
};
static const int maxsize[] = {
sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */
sizeof(struct sadb_sa), /* SADB_EXT_SA */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */
sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */
0, /* SADB_EXT_ADDRESS_SRC */
0, /* SADB_EXT_ADDRESS_DST */
0, /* SADB_EXT_ADDRESS_PROXY */
0, /* SADB_EXT_KEY_AUTH */
0, /* SADB_EXT_KEY_ENCRYPT */
0, /* SADB_EXT_IDENTITY_SRC */
0, /* SADB_EXT_IDENTITY_DST */
0, /* SADB_EXT_SENSITIVITY */
0, /* SADB_EXT_PROPOSAL */
0, /* SADB_EXT_SUPPORTED_AUTH */
0, /* SADB_EXT_SUPPORTED_ENCRYPT */
sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */
0, /* SADB_X_EXT_KMPRIVATE */
0, /* SADB_X_EXT_POLICY */
sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */
sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */
sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */
sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */
0, /* SADB_X_EXT_NAT_T_OAI */
0, /* SADB_X_EXT_NAT_T_OAR */
sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */
};
static VNET_DEFINE(int, ipsec_esp_keymin) = 256;
static VNET_DEFINE(int, ipsec_esp_auth) = 0;
static VNET_DEFINE(int, ipsec_ah_keymin) = 128;
#define V_ipsec_esp_keymin VNET(ipsec_esp_keymin)
#define V_ipsec_esp_auth VNET(ipsec_esp_auth)
#define V_ipsec_ah_keymin VNET(ipsec_ah_keymin)
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_key);
#endif
SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_debug_level), 0, "");
/* max count of trial for the decision of spi value */
SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_trycnt), 0, "");
/* minimum spi value to allocate automatically. */
SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_minval), 0, "");
/* maximum spi value to allocate automatically. */
SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_spi_maxval), 0, "");
/* interval to initialize randseed */
SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_int_random), 0, "");
/* lifetime for larval SA */
SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_larval_lifetime), 0, "");
/* counter for blocking to send SADB_ACQUIRE to IKEd */
SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_blockacq_count), 0, "");
/* lifetime for blocking to send SADB_ACQUIRE to IKEd */
SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_blockacq_lifetime), 0, "");
/* ESP auth */
SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_esp_auth), 0, "");
/* minimum ESP key length */
SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_esp_keymin), 0, "");
/* minimum AH key length */
SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_ah_keymin), 0, "");
/* preferred old SA rather than new SA */
SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, preferred_oldsa,
CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(key_preferred_oldsa), 0, "");
#define __LIST_CHAINED(elm) \
(!((elm)->chain.le_next == NULL && (elm)->chain.le_prev == NULL))
#define LIST_INSERT_TAIL(head, elm, type, field) \
do {\
struct type *curelm = LIST_FIRST(head); \
if (curelm == NULL) {\
LIST_INSERT_HEAD(head, elm, field); \
} else { \
while (LIST_NEXT(curelm, field)) \
curelm = LIST_NEXT(curelm, field);\
LIST_INSERT_AFTER(curelm, elm, field);\
}\
} while (0)
#define KEY_CHKSASTATE(head, sav, name) \
do { \
if ((head) != (sav)) { \
ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \
(name), (head), (sav))); \
continue; \
} \
} while (0)
#define KEY_CHKSPDIR(head, sp, name) \
do { \
if ((head) != (sp)) { \
ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \
"anyway continue.\n", \
(name), (head), (sp))); \
} \
} while (0)
MALLOC_DEFINE(M_IPSEC_SA, "secasvar", "ipsec security association");
MALLOC_DEFINE(M_IPSEC_SAH, "sahead", "ipsec sa head");
MALLOC_DEFINE(M_IPSEC_SP, "ipsecpolicy", "ipsec security policy");
MALLOC_DEFINE(M_IPSEC_SR, "ipsecrequest", "ipsec security request");
MALLOC_DEFINE(M_IPSEC_MISC, "ipsec-misc", "ipsec miscellaneous");
MALLOC_DEFINE(M_IPSEC_SAQ, "ipsec-saq", "ipsec sa acquire");
MALLOC_DEFINE(M_IPSEC_SAR, "ipsec-reg", "ipsec sa acquire");
/*
* set parameters into secpolicyindex buffer.
* Must allocate secpolicyindex buffer passed to this function.
*/
#define KEY_SETSECSPIDX(_dir, s, d, ps, pd, ulp, idx) \
do { \
bzero((idx), sizeof(struct secpolicyindex)); \
(idx)->dir = (_dir); \
(idx)->prefs = (ps); \
(idx)->prefd = (pd); \
(idx)->ul_proto = (ulp); \
bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \
bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \
} while (0)
/*
* set parameters into secasindex buffer.
* Must allocate secasindex buffer before calling this function.
*/
#define KEY_SETSECASIDX(p, m, r, s, d, idx) \
do { \
bzero((idx), sizeof(struct secasindex)); \
(idx)->proto = (p); \
(idx)->mode = (m); \
(idx)->reqid = (r); \
bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \
bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \
} while (0)
/* key statistics */
struct _keystat {
u_long getspi_count; /* the avarage of count to try to get new SPI */
} keystat;
struct sadb_msghdr {
struct sadb_msg *msg;
struct sadb_ext *ext[SADB_EXT_MAX + 1];
int extoff[SADB_EXT_MAX + 1];
int extlen[SADB_EXT_MAX + 1];
};
#ifndef IPSEC_DEBUG2
static struct callout key_timer;
#endif
static struct secasvar *key_allocsa_policy(const struct secasindex *);
static void key_freesp_so(struct secpolicy **);
static struct secasvar *key_do_allocsa_policy(struct secashead *, u_int);
static void key_unlink(struct secpolicy *);
static struct secpolicy *key_getsp(struct secpolicyindex *);
static struct secpolicy *key_getspbyid(u_int32_t);
static u_int32_t key_newreqid(void);
static struct mbuf *key_gather_mbuf(struct mbuf *,
const struct sadb_msghdr *, int, int, ...);
static int key_spdadd(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static u_int32_t key_getnewspid(void);
static int key_spddelete(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_spddelete2(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_spdget(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_spdflush(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_spddump(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static struct mbuf *key_setdumpsp(struct secpolicy *,
u_int8_t, u_int32_t, u_int32_t);
static u_int key_getspreqmsglen(struct secpolicy *);
static int key_spdexpire(struct secpolicy *);
static struct secashead *key_newsah(struct secasindex *);
static void key_delsah(struct secashead *);
static struct secasvar *key_newsav(struct mbuf *,
const struct sadb_msghdr *, struct secashead *, int *,
const char*, int);
#define KEY_NEWSAV(m, sadb, sah, e) \
key_newsav(m, sadb, sah, e, __FILE__, __LINE__)
static void key_delsav(struct secasvar *);
static struct secashead *key_getsah(struct secasindex *);
static struct secasvar *key_checkspidup(struct secasindex *, u_int32_t);
static struct secasvar *key_getsavbyspi(struct secashead *, u_int32_t);
static int key_setsaval(struct secasvar *, struct mbuf *,
const struct sadb_msghdr *);
static int key_mature(struct secasvar *);
static struct mbuf *key_setdumpsa(struct secasvar *, u_int8_t,
u_int8_t, u_int32_t, u_int32_t);
static struct mbuf *key_setsadbmsg(u_int8_t, u_int16_t, u_int8_t,
u_int32_t, pid_t, u_int16_t);
static struct mbuf *key_setsadbsa(struct secasvar *);
static struct mbuf *key_setsadbaddr(u_int16_t,
const struct sockaddr *, u_int8_t, u_int16_t);
#ifdef IPSEC_NAT_T
static struct mbuf *key_setsadbxport(u_int16_t, u_int16_t);
static struct mbuf *key_setsadbxtype(u_int16_t);
#endif
static void key_porttosaddr(struct sockaddr *, u_int16_t);
#define KEY_PORTTOSADDR(saddr, port) \
key_porttosaddr((struct sockaddr *)(saddr), (port))
static struct mbuf *key_setsadbxsa2(u_int8_t, u_int32_t, u_int32_t);
static struct mbuf *key_setsadbxpolicy(u_int16_t, u_int8_t,
u_int32_t);
static struct seckey *key_dup_keymsg(const struct sadb_key *, u_int,
struct malloc_type *);
static struct seclifetime *key_dup_lifemsg(const struct sadb_lifetime *src,
struct malloc_type *type);
#ifdef INET6
static int key_ismyaddr6(struct sockaddr_in6 *);
#endif
/* flags for key_cmpsaidx() */
#define CMP_HEAD 1 /* protocol, addresses. */
#define CMP_MODE_REQID 2 /* additionally HEAD, reqid, mode. */
#define CMP_REQID 3 /* additionally HEAD, reaid. */
#define CMP_EXACTLY 4 /* all elements. */
static int key_cmpsaidx(const struct secasindex *,
const struct secasindex *, int);
static int key_cmpspidx_exactly(struct secpolicyindex *,
struct secpolicyindex *);
static int key_cmpspidx_withmask(struct secpolicyindex *,
struct secpolicyindex *);
static int key_sockaddrcmp(const struct sockaddr *,
const struct sockaddr *, int);
static int key_bbcmp(const void *, const void *, u_int);
static u_int16_t key_satype2proto(u_int8_t);
static u_int8_t key_proto2satype(u_int16_t);
static int key_getspi(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static u_int32_t key_do_getnewspi(struct sadb_spirange *,
struct secasindex *);
static int key_update(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
#ifdef IPSEC_DOSEQCHECK
static struct secasvar *key_getsavbyseq(struct secashead *, u_int32_t);
#endif
static int key_add(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_setident(struct secashead *, struct mbuf *,
const struct sadb_msghdr *);
static struct mbuf *key_getmsgbuf_x1(struct mbuf *,
const struct sadb_msghdr *);
static int key_delete(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_delete_all(struct socket *, struct mbuf *,
const struct sadb_msghdr *, u_int16_t);
static int key_get(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static void key_getcomb_setlifetime(struct sadb_comb *);
static struct mbuf *key_getcomb_esp(void);
static struct mbuf *key_getcomb_ah(void);
static struct mbuf *key_getcomb_ipcomp(void);
static struct mbuf *key_getprop(const struct secasindex *);
static int key_acquire(const struct secasindex *, struct secpolicy *);
static struct secacq *key_newacq(const struct secasindex *);
static struct secacq *key_getacq(const struct secasindex *);
static struct secacq *key_getacqbyseq(u_int32_t);
static struct secspacq *key_newspacq(struct secpolicyindex *);
static struct secspacq *key_getspacq(struct secpolicyindex *);
static int key_acquire2(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_register(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_expire(struct secasvar *);
static int key_flush(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_dump(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_promisc(struct socket *, struct mbuf *,
const struct sadb_msghdr *);
static int key_senderror(struct socket *, struct mbuf *, int);
static int key_validate_ext(const struct sadb_ext *, int);
static int key_align(struct mbuf *, struct sadb_msghdr *);
static struct mbuf *key_setlifetime(struct seclifetime *src,
u_int16_t exttype);
static struct mbuf *key_setkey(struct seckey *src, u_int16_t exttype);
#if 0
static const char *key_getfqdn(void);
static const char *key_getuserfqdn(void);
#endif
static void key_sa_chgstate(struct secasvar *, u_int8_t);
/* Set the SA's reference count to 1 (the SA header's own reference). */
static __inline void
sa_initref(struct secasvar *sav)
{

	refcount_init(&sav->refcnt, 1);
}
/* Take an additional reference on the SA; asserts on wraparound. */
static __inline void
sa_addref(struct secasvar *sav)
{

	refcount_acquire(&sav->refcnt);
	IPSEC_ASSERT(sav->refcnt != 0, ("SA refcnt overflow"));
}
/*
 * Drop one reference to the SA.  Returns non-zero when the last
 * reference was released, meaning the caller may free the SA.
 */
static __inline int
sa_delref(struct secasvar *sav)
{

	IPSEC_ASSERT(sav->refcnt > 0, ("SA refcnt underflow"));
	return (refcount_release(&sav->refcnt));
}
#define SP_ADDREF(p) refcount_acquire(&(p)->refcnt)
#define SP_DELREF(p) refcount_release(&(p)->refcnt)
/*
* Update the refcnt while holding the SPTREE lock.
*/
void
key_addref(struct secpolicy *sp)
{

	/* Take an additional reference on the policy. */
	SP_ADDREF(sp);
}
/*
* Return 0 when there are known to be no SP's for the specified
* direction. Otherwise return 1. This is used by IPsec code
* to optimize performance.
*/
/*
 * Cheap SPD emptiness probe: report 0 only when the chain for the
 * given direction is known to be empty; any other direction counts
 * as "may have policies" and reports 1.
 */
int
key_havesp(u_int dir)
{

	if (dir != IPSEC_DIR_INBOUND && dir != IPSEC_DIR_OUTBOUND)
		return (1);
	return (TAILQ_FIRST(&V_sptree[dir]) != NULL);
}
/* %%% IPsec policy management */
/*
* allocating a SP for OUTBOUND or INBOUND packet.
* Must call key_freesp() later.
* OUT: NULL: not found
* others: found and return the pointer.
*/
struct secpolicy *
key_allocsp(struct secpolicyindex *spidx, u_int dir, const char* where,
    int tag)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;

	IPSEC_ASSERT(spidx != NULL, ("null spidx"));
	IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
	    ("invalid direction %u", dir));

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s from %s:%u\n", __func__, where, tag));

	/* get a SP entry */
	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("*** objects\n");
	    kdebug_secpolicyindex(spidx));

	/* Scan the SPD chain for this direction under a shared lock. */
	SPTREE_RLOCK();
	TAILQ_FOREACH(sp, &V_sptree[dir], chain) {
		KEYDEBUG(KEYDEBUG_IPSEC_DATA,
		    printf("*** in SPD\n");
		    kdebug_secpolicyindex(&sp->spidx));
		if (key_cmpspidx_withmask(&sp->spidx, spidx))
			goto found;
	}
	sp = NULL;
found:
	if (sp) {
		/* sanity check */
		KEY_CHKSPDIR(sp->spidx.dir, dir, __func__);

		/* found a SPD entry; caller must release it via key_freesp() */
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	SPTREE_RUNLOCK();

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
	    sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}
/*
* allocating a SP for OUTBOUND or INBOUND packet.
* Must call key_freesp() later.
* OUT: NULL: not found
* others: found and return the pointer.
*/
struct secpolicy *
key_allocsp2(u_int32_t spi, union sockaddr_union *dst, u_int8_t proto,
    u_int dir, const char* where, int tag)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;

	IPSEC_ASSERT(dst != NULL, ("null dst"));
	IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
	    ("invalid direction %u", dir));

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s from %s:%u\n", __func__, where, tag));

	/* get a SP entry */
	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
	    printf("*** objects\n");
	    printf("spi %u proto %u dir %u\n", spi, proto, dir);
	    kdebug_sockaddr(&dst->sa));

	/* Scan the SPD chain under a shared lock, matching on
	 * upper-layer protocol, the first request's SA SPI, and dst. */
	SPTREE_RLOCK();
	TAILQ_FOREACH(sp, &V_sptree[dir], chain) {
		KEYDEBUG(KEYDEBUG_IPSEC_DATA,
		    printf("*** in SPD\n");
		    kdebug_secpolicyindex(&sp->spidx));
		/* compare simple values, then dst address */
		if (sp->spidx.ul_proto != proto)
			continue;
		/* NB: spi's must exist and match */
		if (!sp->req || !sp->req->sav || sp->req->sav->spi != spi)
			continue;
		if (key_sockaddrcmp(&sp->spidx.dst.sa, &dst->sa, 1) == 0)
			goto found;
	}
	sp = NULL;
found:
	if (sp) {
		/* sanity check */
		KEY_CHKSPDIR(sp->spidx.dir, dir, __func__);

		/* found a SPD entry; caller must release it via key_freesp() */
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	SPTREE_RUNLOCK();

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
	    sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}
#if 0
/*
* return a policy that matches this particular inbound packet.
* XXX slow
*/
/* NOTE(review): compiled out (#if 0) and uses the older SPTREE_LOCK API. */
struct secpolicy *
key_gettunnel(const struct sockaddr *osrc,
    const struct sockaddr *odst,
    const struct sockaddr *isrc,
    const struct sockaddr *idst,
    const char* where, int tag)
{
	struct secpolicy *sp;
	const int dir = IPSEC_DIR_INBOUND;
	struct ipsecrequest *r1, *r2, *p;
	struct secpolicyindex spidx;

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s from %s:%u\n", __func__, where, tag));

	if (isrc->sa_family != idst->sa_family) {
		ipseclog((LOG_ERR, "%s: protocol family mismatched %d != %d\n.",
		    __func__, isrc->sa_family, idst->sa_family));
		sp = NULL;
		goto done;
	}

	SPTREE_LOCK();
	LIST_FOREACH(sp, &V_sptree[dir], chain) {
		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;

		/* r1/r2 track consecutive tunnel-mode requests in the bundle:
		 * r2 is the current one, r1 the previous tunnel request. */
		r1 = r2 = NULL;
		for (p = sp->req; p; p = p->next) {
			if (p->saidx.mode != IPSEC_MODE_TUNNEL)
				continue;

			r1 = r2;
			r2 = p;

			if (!r1) {
				/* here we look at address matches only */
				spidx = sp->spidx;
				if (isrc->sa_len > sizeof(spidx.src) ||
				    idst->sa_len > sizeof(spidx.dst))
					continue;
				bcopy(isrc, &spidx.src, isrc->sa_len);
				bcopy(idst, &spidx.dst, idst->sa_len);
				if (!key_cmpspidx_withmask(&sp->spidx, &spidx))
					continue;
			} else {
				/* inner addresses must match the previous
				 * tunnel request's endpoints */
				if (key_sockaddrcmp(&r1->saidx.src.sa, isrc, 0) ||
				    key_sockaddrcmp(&r1->saidx.dst.sa, idst, 0))
					continue;
			}

			/* outer addresses must match this request's endpoints */
			if (key_sockaddrcmp(&r2->saidx.src.sa, osrc, 0) ||
			    key_sockaddrcmp(&r2->saidx.dst.sa, odst, 0))
				continue;

			goto found;
		}
	}
	sp = NULL;
found:
	if (sp) {
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	SPTREE_UNLOCK();
done:
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
	    sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}
#endif
/*
* allocating an SA entry for an *OUTBOUND* packet.
* checking each request entries in SP, and acquire an SA if need.
* OUT: 0: there are valid requests.
* ENOENT: policy may be valid, but SA with REQUIRE is on acquiring.
*/
int
key_checkrequest(struct ipsecrequest *isr, const struct secasindex *saidx)
{
	u_int level;
	int error;
	struct secasvar *sav;

	IPSEC_ASSERT(isr != NULL, ("null isr"));
	IPSEC_ASSERT(saidx != NULL, ("null saidx"));
	IPSEC_ASSERT(saidx->mode == IPSEC_MODE_TRANSPORT ||
	    saidx->mode == IPSEC_MODE_TUNNEL,
	    ("unexpected policy %u", saidx->mode));

	/*
	 * XXX guard against protocol callbacks from the crypto
	 * thread as they reference ipsecrequest.sav which we
	 * temporarily null out below. Need to rethink how we
	 * handle bundled SA's in the callback thread.
	 */
	IPSECREQUEST_LOCK_ASSERT(isr);

	/* get current level */
	level = ipsec_get_reqlevel(isr);

	/*
	 * We check new SA in the IPsec request because a different
	 * SA may be involved each time this request is checked, either
	 * because new SAs are being configured, or this request is
	 * associated with an unconnected datagram socket, or this request
	 * is associated with a system default policy.
	 *
	 * key_allocsa_policy should allocate the oldest SA available.
	 * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt.
	 */
	sav = key_allocsa_policy(saidx);
	if (sav != isr->sav) {
		/* SA need to be updated: take the write lock first. */
		if (!IPSECREQUEST_UPGRADE(isr)) {
			/* Kick everyone off. */
			IPSECREQUEST_UNLOCK(isr);
			IPSECREQUEST_WLOCK(isr);
		}
		if (isr->sav != NULL)
			KEY_FREESAV(&isr->sav);
		isr->sav = sav;
		IPSECREQUEST_DOWNGRADE(isr);
	} else if (sav != NULL)
		/* Same SA as before; drop the extra reference we took. */
		KEY_FREESAV(&sav);

	/* When there is SA. */
	if (isr->sav != NULL) {
		if (isr->sav->state != SADB_SASTATE_MATURE &&
		    isr->sav->state != SADB_SASTATE_DYING)
			return EINVAL;
		return 0;
	}

	/* there is no SA; ask the key manager (IKEd) to create one */
	error = key_acquire(saidx, isr->sp);
	if (error != 0) {
		/* XXX What should I do ? */
		ipseclog((LOG_DEBUG, "%s: error %d returned from key_acquire\n",
		    __func__, error));
		return error;
	}

	if (level != IPSEC_LEVEL_REQUIRE) {
		/* XXX sigh, the interface to this routine is botched */
		IPSEC_ASSERT(isr->sav == NULL, ("unexpected SA"));
		return 0;
	} else {
		/* SA is required but still being acquired. */
		return ENOENT;
	}
}
/*
* allocating a SA for policy entry from SAD.
* NOTE: searches the SAD for SAs in a living (non-DEAD) state.
* OUT: NULL: not found.
* others: found and return the pointer.
*/
static struct secasvar *
key_allocsa_policy(const struct secasindex *saidx)
{
#define N(a) _ARRAYLEN(a)
	struct secashead *sah;
	struct secasvar *sav;
	u_int stateidx, arraysize;
	const u_int *state_valid;
	state_valid = NULL; /* silence gcc */
	arraysize = 0; /* silence gcc */
	/*
	 * Find the first non-DEAD SA header whose index matches the
	 * policy (mode/reqid comparison), and select the SA-state
	 * preference order according to V_key_preferred_oldsa.
	 */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE_REQID)) {
			if (V_key_preferred_oldsa) {
				state_valid = saorder_state_valid_prefer_old;
				arraysize = N(saorder_state_valid_prefer_old);
			} else {
				state_valid = saorder_state_valid_prefer_new;
				arraysize = N(saorder_state_valid_prefer_new);
			}
			break;
		}
	}
	/*
	 * NOTE(review): sah is used below after the lock is dropped and
	 * nothing visible here pins it; presumably a concurrent flush
	 * could free it -- confirm against the SAH reclamation rules
	 * elsewhere in this file.
	 */
	SAHTREE_UNLOCK();
	if (sah == NULL)
		return NULL;
	/* Try each acceptable SA state in preference order. */
	for (stateidx = 0; stateidx < arraysize; stateidx++) {
		sav = key_do_allocsa_policy(sah, state_valid[stateidx]);
		if (sav != NULL)
			return sav;
	}
	return NULL;
#undef N
}
/*
* searching SAD with direction, protocol, mode and state.
* called by key_allocsa_policy().
* OUT:
* NULL : not found
* others : found, pointer to a SA.
*/
static struct secasvar *
key_do_allocsa_policy(struct secashead *sah, u_int state)
{
	struct secasvar *sav, *nextsav, *candidate, *d;
	/* initialize */
	candidate = NULL;
	SAHTREE_LOCK();
	for (sav = LIST_FIRST(&sah->savtree[state]);
	     sav != NULL;
	     sav = nextsav) {
		nextsav = LIST_NEXT(sav, chain);
		/* sanity check */
		KEY_CHKSASTATE(sav->state, state, __func__);
		/* first match becomes the initial candidate */
		if (candidate == NULL) {
			candidate = sav;
			continue;
		}
		/* Which SA is the better ? */
		IPSEC_ASSERT(candidate->lft_c != NULL,
			("null candidate lifetime"));
		IPSEC_ASSERT(sav->lft_c != NULL, ("null sav lifetime"));
		/* What the best method is to compare ? */
		if (V_key_preferred_oldsa) {
			/* keep whichever SA has the older creation time */
			if (candidate->lft_c->addtime >
			    sav->lft_c->addtime) {
				candidate = sav;
			}
			continue;
			/*NOTREACHED*/
		}
		/* preferred new sa rather than old sa */
		if (candidate->lft_c->addtime <
		    sav->lft_c->addtime) {
			d = candidate;
			candidate = sav;
		} else
			d = sav;
		/*
		 * prepared to delete the SA when there is more
		 * suitable candidate and the lifetime of the SA is not
		 * permanent.
		 */
		if (d->lft_h->addtime != 0) {
			struct mbuf *m, *result;
			u_int8_t satype;
			/* kill the loser and announce the deletion via PF_KEY */
			key_sa_chgstate(d, SADB_SASTATE_DEAD);
			IPSEC_ASSERT(d->refcnt > 0, ("bogus ref count"));
			satype = key_proto2satype(d->sah->saidx.proto);
			if (satype == 0)
				goto msgfail;
			m = key_setsadbmsg(SADB_DELETE, 0,
			    satype, 0, 0, d->refcnt - 1);
			if (!m)
				goto msgfail;
			result = m;
			/* set sadb_address for saidx's. */
			m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
				&d->sah->saidx.src.sa,
				d->sah->saidx.src.sa.sa_len << 3,
				IPSEC_ULPROTO_ANY);
			/*
			 * NOTE(review): the gotos below appear to leak the
			 * partially built 'result' chain (no m_freem before
			 * msgfail) -- confirm against mbuf ownership rules
			 * for the key_setsadb* helpers.
			 */
			if (!m)
				goto msgfail;
			m_cat(result, m);
			/* set sadb_address for saidx's. */
			m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
				&d->sah->saidx.dst.sa,
				d->sah->saidx.dst.sa.sa_len << 3,
				IPSEC_ULPROTO_ANY);
			if (!m)
				goto msgfail;
			m_cat(result, m);
			/* create SA extension */
			m = key_setsadbsa(d);
			if (!m)
				goto msgfail;
			m_cat(result, m);
			/* m_pullup frees the chain itself on failure */
			if (result->m_len < sizeof(struct sadb_msg)) {
				result = m_pullup(result,
				    sizeof(struct sadb_msg));
				if (result == NULL)
					goto msgfail;
			}
			result->m_pkthdr.len = 0;
			for (m = result; m; m = m->m_next)
				result->m_pkthdr.len += m->m_len;
			mtod(result, struct sadb_msg *)->sadb_msg_len =
				PFKEY_UNIT64(result->m_pkthdr.len);
			/* key_sendup_mbuf consumes 'result' either way */
			if (key_sendup_mbuf(NULL, result,
			    KEY_SENDUP_REGISTERED))
				goto msgfail;
			/* deliberate fallthrough: drop our ref on 'd' */
 msgfail:
			KEY_FREESAV(&d);
		}
	}
	/* hand a referenced winner (if any) back to the caller */
	if (candidate) {
		sa_addref(candidate);
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
			printf("DP %s cause refcnt++:%d SA:%p\n",
				__func__, candidate->refcnt, candidate));
	}
	SAHTREE_UNLOCK();
	return candidate;
}
/*
* allocating a usable SA entry for a *INBOUND* packet.
* Must call key_freesav() later.
* OUT: positive: pointer to a usable sav (i.e. MATURE or DYING state).
* NULL: not found, or error occurred.
*
* In the comparison, no source address is used--for RFC2401 conformance.
* To quote, from section 4.1:
* A security association is uniquely identified by a triple consisting
* of a Security Parameter Index (SPI), an IP Destination Address, and a
* security protocol (AH or ESP) identifier.
* Note that, however, we do need to keep source address in IPsec SA.
* IKE specification and PF_KEY specification do assume that we
* keep source address in IPsec SA. We see a tricky situation here.
*/
struct secasvar *
key_allocsa(union sockaddr_union *dst, u_int proto, u_int32_t spi,
	const char* where, int tag)
{
	struct secashead *sah;
	struct secasvar *sav;
	u_int stateidx, arraysize, state;
	const u_int *saorder_state_valid;
#ifdef IPSEC_NAT_T
	int natt_chkport;
#endif
	IPSEC_ASSERT(dst != NULL, ("null dst address"));
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s from %s:%u\n", __func__, where, tag));
#ifdef IPSEC_NAT_T
	/* Ports are only comparable for IPv4 destinations with a port set. */
	natt_chkport = (dst->sa.sa_family == AF_INET &&
	    dst->sa.sa_len == sizeof(struct sockaddr_in) &&
	    dst->sin.sin_port != 0);
#endif
	/*
	 * searching SAD.
	 * XXX: to be checked internal IP header somewhere. Also when
	 * IPsec tunnel packet is received. But ESP tunnel mode is
	 * encrypted so we can't check internal IP header.
	 */
	SAHTREE_LOCK();
	/* Pick the SA-state preference order (old-SA vs new-SA first). */
	if (V_key_preferred_oldsa) {
		saorder_state_valid = saorder_state_valid_prefer_old;
		arraysize = _ARRAYLEN(saorder_state_valid_prefer_old);
	} else {
		saorder_state_valid = saorder_state_valid_prefer_new;
		arraysize = _ARRAYLEN(saorder_state_valid_prefer_new);
	}
	LIST_FOREACH(sah, &V_sahtree, chain) {
		int checkport;
		/* search valid state */
		for (stateidx = 0; stateidx < arraysize; stateidx++) {
			state = saorder_state_valid[stateidx];
			LIST_FOREACH(sav, &sah->savtree[state], chain) {
				/* sanity check */
				KEY_CHKSASTATE(sav->state, state, __func__);
				/* do not return entries w/ unusable state */
				if (sav->state != SADB_SASTATE_MATURE &&
				    sav->state != SADB_SASTATE_DYING)
					continue;
				/* match on the (SPI, dst, proto) triple */
				if (proto != sav->sah->saidx.proto)
					continue;
				if (spi != sav->spi)
					continue;
				checkport = 0;
#ifdef IPSEC_NAT_T
				/*
				 * Really only check ports when this is a NAT-T
				 * SA. Otherwise other lookups providing ports
				 * might suffer.
				 */
				if (sav->natt_type && natt_chkport)
					checkport = 1;
#endif
#if 0 /* don't check src */
				/* check src address */
				if (key_sockaddrcmp(&src->sa,
				    &sav->sah->saidx.src.sa, checkport) != 0)
					continue;
#endif
				/* check dst address */
				if (key_sockaddrcmp(&dst->sa,
				    &sav->sah->saidx.dst.sa, checkport) != 0)
					continue;
				/* reference is handed to the caller */
				sa_addref(sav);
				goto done;
			}
		}
	}
	sav = NULL;
done:
	SAHTREE_UNLOCK();
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s return SA:%p; refcnt %u\n", __func__,
			sav, sav ? sav->refcnt : 0));
	return sav;
}
/*
* Must be called after calling key_allocsp().
* For both the packet without socket and key_freeso().
*/
/*
 * Drop one reference on *spp.  When the last reference goes away the
 * whole policy is destroyed: every ipsecrequest on the chain releases
 * its SA reference and is freed, then the policy itself is freed.
 * *spp is cleared only when the policy is actually destroyed.
 */
void
_key_freesp(struct secpolicy **spp, const char* where, int tag)
{
	struct secpolicy *sp = *spp;
	struct ipsecrequest *cur, *next;

	IPSEC_ASSERT(sp != NULL, ("null sp"));
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s SP:%p (ID=%u) from %s:%u; refcnt now %u\n",
			__func__, sp, sp->id, where, tag, sp->refcnt));
	/* Other references remain: nothing more to do. */
	if (SP_DELREF(sp) == 0)
		return;
	*spp = NULL;
	/* Last reference gone: tear down the request chain. */
	cur = sp->req;
	while (cur != NULL) {
		next = cur->next;
		if (cur->sav != NULL) {
			KEY_FREESAV(&cur->sav);
			cur->sav = NULL;
		}
		ipsec_delisr(cur);
		cur = next;
	}
	free(sp, M_IPSEC_SP);
}
/*
 * Remove sp from its per-direction SPD list.  The entry is only
 * unlinked, not freed; the caller still holds (and must later drop)
 * its reference via KEY_FREESP().
 */
static void
key_unlink(struct secpolicy *sp)
{
	IPSEC_ASSERT(sp != NULL, ("null sp"));
	IPSEC_ASSERT(sp->spidx.dir == IPSEC_DIR_INBOUND ||
	    sp->spidx.dir == IPSEC_DIR_OUTBOUND,
	    ("invalid direction %u", sp->spidx.dir));
	/* Must not already hold the tree lock; we take it exclusively. */
	SPTREE_UNLOCK_ASSERT();
	SPTREE_WLOCK();
	TAILQ_REMOVE(&V_sptree[sp->spidx.dir], sp, chain);
	SPTREE_WUNLOCK();
}
/*
* Must be called after calling key_allocsp().
* For the packet with socket.
*/
/*
 * Release the cached inbound/outbound policies attached to a socket's
 * PCB.  Sockets of unsupported address families are logged and ignored;
 * sockets without a PCB are silently ignored.
 */
void
key_freeso(struct socket *so)
{
	IPSEC_ASSERT(so != NULL, ("null so"));
	switch (so->so_proto->pr_domain->dom_family) {
#if defined(INET) || defined(INET6)
#ifdef INET
	case PF_INET:
#endif
#ifdef INET6
	case PF_INET6:
#endif
	    {
		struct inpcb *pcb = sotoinpcb(so);
		/* Does it have a PCB ? */
		if (pcb == NULL)
			return;
		/* Drop both directions' cached policies. */
		key_freesp_so(&pcb->inp_sp->sp_in);
		key_freesp_so(&pcb->inp_sp->sp_out);
	    }
		break;
#endif /* INET || INET6 */
	default:
		ipseclog((LOG_DEBUG, "%s: unknown address family=%d.\n",
		    __func__, so->so_proto->pr_domain->dom_family));
		return;
	}
}
/*
 * Helper for key_freeso(): release one cached per-socket policy slot.
 * ENTRUST and BYPASS policies are left untouched; only IPSEC policies
 * are reference-dropped via KEY_FREESP().
 */
static void
key_freesp_so(struct secpolicy **sp)
{
	struct secpolicy *p;

	IPSEC_ASSERT(sp != NULL && *sp != NULL, ("null sp"));
	p = *sp;
	if (p->policy == IPSEC_POLICY_ENTRUST ||
	    p->policy == IPSEC_POLICY_BYPASS)
		return;
	IPSEC_ASSERT(p->policy == IPSEC_POLICY_IPSEC,
	    ("invalid policy %u", p->policy));
	KEY_FREESP(sp);
}
/*
 * Take an additional reference on an SA that must already be
 * referenced (refcnt > 0).  'where'/'tag' identify the caller for
 * debugging; they are otherwise unused here.
 */
void
key_addrefsa(struct secasvar *sav, const char* where, int tag)
{
	IPSEC_ASSERT(sav != NULL, ("null sav"));
	IPSEC_ASSERT(sav->refcnt > 0, ("refcount must exist"));
	sa_addref(sav);
}
/*
* Must be called after calling key_allocsa().
* This function is called by key_freesp() to free some SA allocated
* for a policy.
*/
/*
 * Drop one reference on *psav.  If that was the last reference the SA
 * is destroyed via key_delsav() and *psav is cleared; otherwise only
 * the count decreases.  Either way the event is logged under
 * KEYDEBUG_IPSEC_STAMP.
 */
void
key_freesav(struct secasvar **psav, const char* where, int tag)
{
	struct secasvar *sav = *psav;

	IPSEC_ASSERT(sav != NULL, ("null sav"));
	if (!sa_delref(sav)) {
		/* References remain: just log the drop. */
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n",
			__func__, sav, ntohl(sav->spi), where, tag, sav->refcnt));
		return;
	}
	/* Last reference: log, clear the caller's pointer, destroy. */
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n",
		__func__, sav, ntohl(sav->spi), where, tag, sav->refcnt));
	*psav = NULL;
	key_delsav(sav);
}
/* %%% SPD management */
/*
* search SPD
* OUT: NULL : not found
* others : found, pointer to a SP.
*/
/*
 * Look up a security policy by exact policy index match in the
 * per-direction SPD list.  On success a reference is added for the
 * caller; returns NULL when no entry matches.
 */
static struct secpolicy *
key_getsp(struct secpolicyindex *spidx)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;

	IPSEC_ASSERT(spidx != NULL, ("null spidx"));
	SPTREE_RLOCK();
	TAILQ_FOREACH(sp, &V_sptree[spidx->dir], chain) {
		if (!key_cmpspidx_exactly(spidx, &sp->spidx))
			continue;
		/* Reference handed to the caller. */
		SP_ADDREF(sp);
		break;
	}
	SPTREE_RUNLOCK();
	return (sp);
}
/*
* get SP by index.
* OUT: NULL : not found
* others : found, pointer to a SP.
*/
/*
 * Look up a security policy by its id, searching the inbound list
 * first and then the outbound list.  On success a reference is added
 * for the caller; returns NULL when no entry matches.
 */
static struct secpolicy *
key_getspbyid(u_int32_t id)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;

	SPTREE_RLOCK();
	TAILQ_FOREACH(sp, &V_sptree[IPSEC_DIR_INBOUND], chain) {
		if (sp->id == id)
			break;
	}
	if (sp == NULL) {
		TAILQ_FOREACH(sp, &V_sptree[IPSEC_DIR_OUTBOUND], chain) {
			if (sp->id == id)
				break;
		}
	}
	if (sp != NULL)
		SP_ADDREF(sp);	/* reference handed to the caller */
	SPTREE_RUNLOCK();
	return (sp);
}
/*
 * Allocate a zeroed security policy with an initial reference count
 * of one.  May return NULL under memory pressure (M_NOWAIT).
 */
struct secpolicy *
key_newsp(const char* where, int tag)
{
	struct secpolicy *newsp;

	newsp = malloc(sizeof(struct secpolicy), M_IPSEC_SP,
	    M_NOWAIT | M_ZERO);
	if (newsp != NULL)
		refcount_init(&newsp->refcnt, 1);
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s from %s:%u return SP:%p\n", __func__,
			where, tag, newsp));
	return (newsp);
}
/*
* create secpolicy structure from sadb_x_policy structure.
* NOTE: `state', `secpolicyindex' in secpolicy structure are not set,
* so must be set properly later.
*/
/*
 * Convert a PF_KEY sadb_x_policy extension (with any trailing
 * sadb_x_ipsecrequest records) into a freshly allocated secpolicy.
 *
 * xpl0:  the policy extension; len must equal PFKEY_EXTLEN(xpl0).
 * len:   bytes available at xpl0 (caller guarantees >= sizeof(*xpl0)).
 * error: set to 0 on success, else EINVAL/ENOBUFS/EPROTONOSUPPORT.
 *
 * Returns the new policy (caller owns the single reference) or NULL.
 * Note: may rewrite xisr->sadb_x_ipsecrequest_reqid in the caller's
 * buffer when a kernel reqid is assigned (IPSEC_LEVEL_UNIQUE).
 */
struct secpolicy *
key_msg2sp(struct sadb_x_policy *xpl0, size_t len, int *error)
{
	struct secpolicy *newsp;
	IPSEC_ASSERT(xpl0 != NULL, ("null xpl0"));
	IPSEC_ASSERT(len >= sizeof(*xpl0), ("policy too short: %zu", len));
	if (len != PFKEY_EXTLEN(xpl0)) {
		ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n", __func__));
		*error = EINVAL;
		return NULL;
	}
	if ((newsp = KEY_NEWSP()) == NULL) {
		*error = ENOBUFS;
		return NULL;
	}
	newsp->spidx.dir = xpl0->sadb_x_policy_dir;
	newsp->policy = xpl0->sadb_x_policy_type;
	/* check policy */
	switch (xpl0->sadb_x_policy_type) {
	case IPSEC_POLICY_DISCARD:
	case IPSEC_POLICY_NONE:
	case IPSEC_POLICY_ENTRUST:
	case IPSEC_POLICY_BYPASS:
		/* these carry no request records */
		newsp->req = NULL;
		break;
	case IPSEC_POLICY_IPSEC:
	    {
		int tlen;
		struct sadb_x_ipsecrequest *xisr;
		struct ipsecrequest **p_isr = &newsp->req;
		/* validity check */
		if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) {
			ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n",
				__func__));
			KEY_FREESP(&newsp);
			*error = EINVAL;
			return NULL;
		}
		/* walk the trailing ipsecrequest records */
		tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0);
		xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1);
		while (tlen > 0) {
			/* length check */
			if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) {
				ipseclog((LOG_DEBUG, "%s: invalid ipsecrequest "
					"length.\n", __func__));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			/* allocate request buffer */
			/* NB: data structure is zero'd */
			*p_isr = ipsec_newisr();
			if ((*p_isr) == NULL) {
				ipseclog((LOG_DEBUG,
				    "%s: No more memory.\n", __func__));
				KEY_FREESP(&newsp);
				*error = ENOBUFS;
				return NULL;
			}
			/* set values */
			switch (xisr->sadb_x_ipsecrequest_proto) {
			case IPPROTO_ESP:
			case IPPROTO_AH:
			case IPPROTO_IPCOMP:
				break;
			default:
				ipseclog((LOG_DEBUG,
				    "%s: invalid proto type=%u\n", __func__,
				    xisr->sadb_x_ipsecrequest_proto));
				KEY_FREESP(&newsp);
				*error = EPROTONOSUPPORT;
				return NULL;
			}
			(*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto;
			switch (xisr->sadb_x_ipsecrequest_mode) {
			case IPSEC_MODE_TRANSPORT:
			case IPSEC_MODE_TUNNEL:
				break;
			case IPSEC_MODE_ANY:
			default:
				ipseclog((LOG_DEBUG,
				    "%s: invalid mode=%u\n", __func__,
				    xisr->sadb_x_ipsecrequest_mode));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			(*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode;
			switch (xisr->sadb_x_ipsecrequest_level) {
			case IPSEC_LEVEL_DEFAULT:
			case IPSEC_LEVEL_USE:
			case IPSEC_LEVEL_REQUIRE:
				break;
			case IPSEC_LEVEL_UNIQUE:
				/* validity check */
				/*
				 * If range violation of reqid, kernel will
				 * update it, don't refuse it.
				 */
				if (xisr->sadb_x_ipsecrequest_reqid
						> IPSEC_MANUAL_REQID_MAX) {
					ipseclog((LOG_DEBUG,
					    "%s: reqid=%d range "
					    "violation, updated by kernel.\n",
					    __func__,
					    xisr->sadb_x_ipsecrequest_reqid));
					xisr->sadb_x_ipsecrequest_reqid = 0;
				}
				/* allocate new reqid id if reqid is zero. */
				if (xisr->sadb_x_ipsecrequest_reqid == 0) {
					u_int32_t reqid;
					if ((reqid = key_newreqid()) == 0) {
						KEY_FREESP(&newsp);
						*error = ENOBUFS;
						return NULL;
					}
					(*p_isr)->saidx.reqid = reqid;
					/* NB: writes back into caller's buffer */
					xisr->sadb_x_ipsecrequest_reqid = reqid;
				} else {
				/* set it for manual keying. */
					(*p_isr)->saidx.reqid =
					    xisr->sadb_x_ipsecrequest_reqid;
				}
				break;
			default:
				ipseclog((LOG_DEBUG, "%s: invalid level=%u\n",
					__func__,
					xisr->sadb_x_ipsecrequest_level));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			(*p_isr)->level = xisr->sadb_x_ipsecrequest_level;
			/* set IP addresses if there */
			if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) {
				struct sockaddr *paddr;
				/* src sockaddr follows the request header */
				paddr = (struct sockaddr *)(xisr + 1);
				/* validity check */
				if (paddr->sa_len
				    > sizeof((*p_isr)->saidx.src)) {
					ipseclog((LOG_DEBUG, "%s: invalid "
						"request address length.\n",
						__func__));
					KEY_FREESP(&newsp);
					*error = EINVAL;
					return NULL;
				}
				bcopy(paddr, &(*p_isr)->saidx.src,
					paddr->sa_len);
				/* dst sockaddr follows immediately after src */
				paddr = (struct sockaddr *)((caddr_t)paddr
							+ paddr->sa_len);
				/* validity check */
				if (paddr->sa_len
				    > sizeof((*p_isr)->saidx.dst)) {
					ipseclog((LOG_DEBUG, "%s: invalid "
						"request address length.\n",
						__func__));
					KEY_FREESP(&newsp);
					*error = EINVAL;
					return NULL;
				}
				bcopy(paddr, &(*p_isr)->saidx.dst,
					paddr->sa_len);
			}
			(*p_isr)->sp = newsp;
			/* initialization for the next. */
			p_isr = &(*p_isr)->next;
			tlen -= xisr->sadb_x_ipsecrequest_len;
			/* validity check */
			if (tlen < 0) {
				ipseclog((LOG_DEBUG, "%s: becoming tlen < 0.\n",
					__func__));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr
			                 + xisr->sadb_x_ipsecrequest_len);
		}
	    }
		break;
	default:
		ipseclog((LOG_DEBUG, "%s: invalid policy type.\n", __func__));
		KEY_FREESP(&newsp);
		*error = EINVAL;
		return NULL;
	}
	*error = 0;
	return newsp;
}
/*
 * Return the next automatic (kernel-assigned) request ID.
 *
 * Automatic IDs live strictly above IPSEC_MANUAL_REQID_MAX; when the
 * 32-bit counter would pass ~0 it wraps back to
 * IPSEC_MANUAL_REQID_MAX + 1.  No uniqueness check is performed (XXX).
 *
 * Note: the old-style "()" definition provided no prototype; declare
 * the empty parameter list explicitly as (void).
 */
static u_int32_t
key_newreqid(void)
{
	static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1;

	if (auto_reqid == ~0)
		auto_reqid = IPSEC_MANUAL_REQID_MAX + 1;
	else
		auto_reqid++;
	/* XXX should be unique check */
	return auto_reqid;
}
/*
* copy secpolicy struct to sadb_x_policy structure indicated.
*/
/*
 * Encode a secpolicy into a single mbuf holding a sadb_x_policy
 * extension followed by one sadb_x_ipsecrequest record (plus the
 * src/dst sockaddrs) per request on the policy's chain.
 * Returns NULL when the mbuf cannot be allocated.
 */
struct mbuf *
key_sp2msg(struct secpolicy *sp)
{
	struct sadb_x_policy *xpl;
	int tlen;
	caddr_t p;
	struct mbuf *m;
	IPSEC_ASSERT(sp != NULL, ("null policy"));
	tlen = key_getspreqmsglen(sp);
	m = m_get2(tlen, M_NOWAIT, MT_DATA, 0);
	if (m == NULL)
		return (NULL);
	m_align(m, tlen);
	m->m_len = tlen;
	xpl = mtod(m, struct sadb_x_policy *);
	bzero(xpl, tlen);
	xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen);
	xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
	xpl->sadb_x_policy_type = sp->policy;
	xpl->sadb_x_policy_dir = sp->spidx.dir;
	xpl->sadb_x_policy_id = sp->id;
	p = (caddr_t)xpl + sizeof(*xpl);
	/* if is the policy for ipsec ? */
	if (sp->policy == IPSEC_POLICY_IPSEC) {
		struct sadb_x_ipsecrequest *xisr;
		struct ipsecrequest *isr;
		for (isr = sp->req; isr != NULL; isr = isr->next) {
			xisr = (struct sadb_x_ipsecrequest *)p;
			xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto;
			xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode;
			xisr->sadb_x_ipsecrequest_level = isr->level;
			xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid;
			p += sizeof(*xisr);
			bcopy(&isr->saidx.src, p, isr->saidx.src.sa.sa_len);
			p += isr->saidx.src.sa.sa_len;
			bcopy(&isr->saidx.dst, p, isr->saidx.dst.sa.sa_len);
			/*
			 * Advance by the destination length.  The previous
			 * code advanced by the source length a second time
			 * (copy/paste bug, masked because src/dst families
			 * -- and hence sa_len -- normally match).
			 */
			p += isr->saidx.dst.sa.sa_len;
			/*
			 * NOTE(review): the record length below is 8-byte
			 * aligned but p is not advanced over the padding;
			 * presumably key_getspreqmsglen() accounts for this
			 * -- confirm the two stay consistent.
			 */
			xisr->sadb_x_ipsecrequest_len =
				PFKEY_ALIGN8(sizeof(*xisr)
					+ isr->saidx.src.sa.sa_len
					+ isr->saidx.dst.sa.sa_len);
		}
	}
	return m;
}
/* m will not be freed nor modified */
/*
 * Build a reply mbuf chain by gathering 'nitem' extensions (given as
 * variadic SADB_EXT_* indices) out of the request mbuf 'm'.  The first
 * 'ndeep' items are deep-copied into fresh mbufs; the rest are shared
 * via m_copym().  SADB_EXT_RESERVED selects the base sadb_msg header.
 * The input mbuf is neither freed nor modified.  Returns NULL on any
 * allocation failure or bad index.
 */
static struct mbuf *
key_gather_mbuf(struct mbuf *m, const struct sadb_msghdr *mhp,
	int ndeep, int nitem, ...)
{
	va_list ap;
	int idx;
	int i;
	struct mbuf *result = NULL, *n;
	int len;
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	va_start(ap, nitem);
	for (i = 0; i < nitem; i++) {
		idx = va_arg(ap, int);
		if (idx < 0 || idx > SADB_EXT_MAX)
			goto fail;
		/* don't attempt to pull empty extension */
		if (idx == SADB_EXT_RESERVED && mhp->msg == NULL)
			continue;
		if (idx != SADB_EXT_RESERVED &&
		    (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0))
			continue;
		if (idx == SADB_EXT_RESERVED) {
			/* base header: always deep-copied into a pkthdr mbuf */
			len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
			IPSEC_ASSERT(len <= MHLEN, ("header too big %u", len));
			MGETHDR(n, M_NOWAIT, MT_DATA);
			if (!n)
				goto fail;
			n->m_len = len;
			n->m_next = NULL;
			m_copydata(m, 0, sizeof(struct sadb_msg),
			    mtod(n, caddr_t));
		} else if (i < ndeep) {
			/* deep copy of the extension payload */
			len = mhp->extlen[idx];
			n = m_get2(len, M_NOWAIT, MT_DATA, 0);
			if (n == NULL)
				goto fail;
			m_align(n, len);
			n->m_len = len;
			m_copydata(m, mhp->extoff[idx], mhp->extlen[idx],
			    mtod(n, caddr_t));
		} else {
			/* shallow, copy-on-write reference */
			n = m_copym(m, mhp->extoff[idx], mhp->extlen[idx],
			    M_NOWAIT);
		}
		if (n == NULL)
			goto fail;
		if (result)
			m_cat(result, n);
		else
			result = n;
	}
	va_end(ap);
	/*
	 * Every requested extension was absent: nothing was gathered.
	 * Return NULL instead of dereferencing a NULL 'result' below.
	 */
	if (result == NULL)
		return NULL;
	if ((result->m_flags & M_PKTHDR) != 0) {
		result->m_pkthdr.len = 0;
		for (n = result; n; n = n->m_next)
			result->m_pkthdr.len += n->m_len;
	}
	return result;
fail:
	m_freem(result);
	va_end(ap);
	return NULL;
}
/*
* SADB_X_SPDADD, SADB_X_SPDSETIDX or SADB_X_SPDUPDATE processing
* add an entry to SP database, when received
* <base, address(SD), (lifetime(H),) policy>
* from the user(?).
* Adding to SP database,
* and send
* <base, address(SD), (lifetime(H),) policy>
* to the socket which was send.
*
* SPDADD set a unique policy entry.
* SPDSETIDX like SPDADD without a part of policy requests.
* SPDUPDATE replace a unique policy entry.
*
* m will always be freed.
*/
/*
 * Handler for SADB_X_SPDADD / SADB_X_SPDSETIDX / SADB_X_SPDUPDATE.
 * Validates the message, builds and installs a new SP entry, and
 * replies to all PF_KEY listeners.  'm' is consumed on every path
 * except where noted below.
 */
static int
key_spdadd(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_address *src0, *dst0;
	struct sadb_x_policy *xpl0, *xpl;
	struct sadb_lifetime *lft = NULL;
	struct secpolicyindex spidx;
	struct secpolicy *newsp;
	int error;
	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
	/* mandatory extensions: src/dst addresses and the policy itself */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    mhp->ext[SADB_X_EXT_POLICY] == NULL) {
		ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	/* optional hard lifetime */
	if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) {
		if (mhp->extlen[SADB_EXT_LIFETIME_HARD]
			< sizeof(struct sadb_lifetime)) {
			ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
				__func__));
			return key_senderror(so, m, EINVAL);
		}
		lft = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD];
	}
	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
	xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY];
	/*
	 * Note: do not parse SADB_X_EXT_NAT_T_* here:
	 * we are processing traffic endpoints.
	 */
	/* make secindex */
	/* XXX boundary check against sa_len */
	KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
	                src0 + 1,
	                dst0 + 1,
	                src0->sadb_address_prefixlen,
	                dst0->sadb_address_prefixlen,
	                src0->sadb_address_proto,
	                &spidx);
	/* checking the direction. */
	switch (xpl0->sadb_x_policy_dir) {
	case IPSEC_DIR_INBOUND:
	case IPSEC_DIR_OUTBOUND:
		break;
	default:
		/*
		 * NOTE(review): unlike every other error path in this
		 * function, this returns without freeing m and without a
		 * reply, contradicting "m will always be freed" above;
		 * looks like it should be key_senderror() -- confirm.
		 */
		ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__));
		mhp->msg->sadb_msg_errno = EINVAL;
		return 0;
	}
	/* check policy */
	/* key_spdadd() accepts DISCARD, NONE and IPSEC. */
	if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST
	 || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
		ipseclog((LOG_DEBUG, "%s: Invalid policy type.\n", __func__));
		return key_senderror(so, m, EINVAL);
	}
	/* policy requests are mandatory when action is ipsec. */
	if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX
	 && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC
	 && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) {
		ipseclog((LOG_DEBUG, "%s: some policy requests part required\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	/*
	 * checking there is SP already or not.
	 * SPDUPDATE doesn't depend on whether there is a SP or not.
	 * If the type is either SPDADD or SPDSETIDX AND a SP is found,
	 * then error.
	 */
	newsp = key_getsp(&spidx);
	if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
		/* update: drop any existing entry before inserting */
		if (newsp) {
			key_unlink(newsp);
			KEY_FREESP(&newsp);
		}
	} else {
		if (newsp != NULL) {
			KEY_FREESP(&newsp);
			ipseclog((LOG_DEBUG, "%s: a SP entry exists already.\n",
				__func__));
			return key_senderror(so, m, EEXIST);
		}
	}
	/* XXX: there is race between key_getsp and key_msg2sp. */
	/* allocation new SP entry */
	if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) {
		return key_senderror(so, m, error);
	}
	if ((newsp->id = key_getnewspid()) == 0) {
		KEY_FREESP(&newsp);
		return key_senderror(so, m, ENOBUFS);
	}
	/* XXX boundary check against sa_len */
	KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
	                src0 + 1,
	                dst0 + 1,
	                src0->sadb_address_prefixlen,
	                dst0->sadb_address_prefixlen,
	                src0->sadb_address_proto,
	                &newsp->spidx);
	/* sanity check on addr pair */
	if (((struct sockaddr *)(src0 + 1))->sa_family !=
			((struct sockaddr *)(dst0+ 1))->sa_family) {
		KEY_FREESP(&newsp);
		return key_senderror(so, m, EINVAL);
	}
	if (((struct sockaddr *)(src0 + 1))->sa_len !=
			((struct sockaddr *)(dst0+ 1))->sa_len) {
		KEY_FREESP(&newsp);
		return key_senderror(so, m, EINVAL);
	}
#if 1
	/* tunnel endpoints, when given, must share an address family */
	if (newsp->req && newsp->req->saidx.src.sa.sa_family &&
	    newsp->req->saidx.dst.sa.sa_family) {
		if (newsp->req->saidx.src.sa.sa_family !=
		    newsp->req->saidx.dst.sa.sa_family) {
			KEY_FREESP(&newsp);
			return key_senderror(so, m, EINVAL);
		}
	}
#endif
	newsp->created = time_second;
	newsp->lastused = newsp->created;
	newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0;
	newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0;
	/* publish the entry on the per-direction SPD list */
	SPTREE_WLOCK();
	TAILQ_INSERT_TAIL(&V_sptree[newsp->spidx.dir], newsp, chain);
	SPTREE_WUNLOCK();
	/* delete the entry in spacqtree */
	if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
		struct secspacq *spacq = key_getspacq(&spidx);
		if (spacq != NULL) {
			/* reset counter in order to deletion by timehandler. */
			spacq->created = time_second;
			spacq->count = 0;
			/* key_getspacq() returned with the lock held */
			SPACQ_UNLOCK();
		}
	}
    {
	struct mbuf *n, *mpolicy;
	struct sadb_msg *newmsg;
	int off;
	/*
	 * Note: do not send SADB_X_EXT_NAT_T_* here:
	 * we are sending traffic endpoints.
	 */
	/* create new sadb_msg to reply. */
	if (lft) {
		n = key_gather_mbuf(m, mhp, 2, 5, SADB_EXT_RESERVED,
		    SADB_X_EXT_POLICY, SADB_EXT_LIFETIME_HARD,
		    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	} else {
		n = key_gather_mbuf(m, mhp, 2, 4, SADB_EXT_RESERVED,
		    SADB_X_EXT_POLICY,
		    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	}
	if (!n)
		return key_senderror(so, m, ENOBUFS);
	if (n->m_len < sizeof(*newmsg)) {
		n = m_pullup(n, sizeof(*newmsg));
		if (!n)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
	/* patch the assigned policy id into the reply's policy extension */
	off = 0;
	mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)),
	    sizeof(*xpl), &off);
	if (mpolicy == NULL) {
		/* n is already freed */
		return key_senderror(so, m, ENOBUFS);
	}
	xpl = (struct sadb_x_policy *)(mtod(mpolicy, caddr_t) + off);
	if (xpl->sadb_x_policy_exttype != SADB_X_EXT_POLICY) {
		m_freem(n);
		return key_senderror(so, m, EINVAL);
	}
	xpl->sadb_x_policy_id = newsp->id;
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* get new policy id.
* OUT:
* 0: failure.
* others: success.
*/
/*
 * Allocate a new, currently unused policy id.
 *
 * Probes up to V_key_spi_trycnt candidate ids from the rolling
 * V_policy_id counter (skipping 0, which means failure).  Returns the
 * first id with no SP attached, or 0 when every try was taken.
 *
 * The previous loop tested "count == 0" after "while (count--)": on
 * exhaustion count ended at -1 so an in-use id could be returned, and
 * a success on the very last try was misreported as failure.  The
 * rewritten loop returns directly on success and falls through only
 * on genuine exhaustion.
 */
static u_int32_t
key_getnewspid(void)
{
	struct secpolicy *sp;
	u_int32_t newid = 0;
	int count = V_key_spi_trycnt;	/* XXX */

	while (count-- > 0) {
		newid = (V_policy_id = (V_policy_id == ~0 ? 1 : V_policy_id + 1));
		if ((sp = key_getspbyid(newid)) == NULL)
			return newid;	/* id is unused */
		KEY_FREESP(&sp);	/* in use; drop lookup ref, retry */
	}
	ipseclog((LOG_DEBUG, "%s: to allocate policy id is failed.\n",
	    __func__));
	return 0;
}
/*
* SADB_SPDDELETE processing
* receive
* <base, address(SD), policy(*)>
* from the user(?), and set SADB_SASTATE_DEAD,
* and send,
* <base, address(SD), policy(*)>
* to the ikmpd.
* policy(*) including direction of policy.
*
* m will always be freed.
*/
/*
 * Handler for SADB_SPDDELETE: look up the SP by its policy index,
 * unlink it from the SPD, and echo the request (with the policy id
 * filled in) to all PF_KEY listeners.  'm' is consumed on all paths.
 */
static int
key_spddelete(struct socket *so, struct mbuf *m,
    const struct sadb_msghdr *mhp)
{
	struct sadb_address *src0, *dst0;
	struct sadb_x_policy *xpl0;
	struct secpolicyindex spidx;
	struct secpolicy *sp;
	IPSEC_ASSERT(so != NULL, ("null so"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
	/* mandatory extensions: src/dst addresses and the policy */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    mhp->ext[SADB_X_EXT_POLICY] == NULL) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
	xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY];
	/*
	 * Note: do not parse SADB_X_EXT_NAT_T_* here:
	 * we are processing traffic endpoints.
	 */
	/* make secindex */
	/* XXX boundary check against sa_len */
	KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
	                src0 + 1,
	                dst0 + 1,
	                src0->sadb_address_prefixlen,
	                dst0->sadb_address_prefixlen,
	                src0->sadb_address_proto,
	                &spidx);
	/* checking the direction. */
	switch (xpl0->sadb_x_policy_dir) {
	case IPSEC_DIR_INBOUND:
	case IPSEC_DIR_OUTBOUND:
		break;
	default:
		ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__));
		return key_senderror(so, m, EINVAL);
	}
	/* Is there SP in SPD ? */
	if ((sp = key_getsp(&spidx)) == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SP found.\n", __func__));
		return key_senderror(so, m, EINVAL);
	}
	/* save policy id to buffer to be returned. */
	xpl0->sadb_x_policy_id = sp->id;
	/* unlink from the SPD and drop the lookup reference */
	key_unlink(sp);
	KEY_FREESP(&sp);
    {
	struct mbuf *n;
	struct sadb_msg *newmsg;
	/*
	 * Note: do not send SADB_X_EXT_NAT_T_* here:
	 * we are sending traffic endpoints.
	 */
	/* create new sadb_msg to reply. */
	n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED,
	    SADB_X_EXT_POLICY, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (!n)
		return key_senderror(so, m, ENOBUFS);
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* SADB_SPDDELETE2 processing
* receive
* <base, policy(*)>
* from the user(?), and set SADB_SASTATE_DEAD,
* and send,
* <base, policy(*)>
* to the ikmpd.
* policy(*) including direction of policy.
*
* m will always be freed.
*/
/*
 * Handler for SADB_X_SPDDELETE2: delete an SP by its policy id and
 * reply with the base header plus the policy extension.  'm' is
 * consumed on all paths.
 */
static int
key_spddelete2(struct socket *so, struct mbuf *m,
    const struct sadb_msghdr *mhp)
{
	u_int32_t id;
	struct secpolicy *sp;
	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
	if (mhp->ext[SADB_X_EXT_POLICY] == NULL ||
	    mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__));
		return key_senderror(so, m, EINVAL);
	}
	id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id;
	/* Is there SP in SPD ? */
	if ((sp = key_getspbyid(id)) == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n", __func__, id));
		return key_senderror(so, m, EINVAL);
	}
	/* unlink from the SPD and drop the lookup reference */
	key_unlink(sp);
	KEY_FREESP(&sp);
    {
	struct mbuf *n, *nn;
	struct sadb_msg *newmsg;
	int off, len;
	/* create new sadb_msg to reply. */
	len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
	MGETHDR(n, M_NOWAIT, MT_DATA);
	/* defensively grab a cluster if the header would not fit */
	if (n && len > MHLEN) {
		if (!(MCLGET(n, M_NOWAIT))) {
			m_freem(n);
			n = NULL;
		}
	}
	if (!n)
		return key_senderror(so, m, ENOBUFS);
	n->m_len = len;
	n->m_next = NULL;
	off = 0;
	/* copy the base header, then chain a copy of the policy ext */
	m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
	off += PFKEY_ALIGN8(sizeof(struct sadb_msg));
	IPSEC_ASSERT(off == len, ("length inconsistency (off %u len %u)",
		off, len));
	n->m_next = m_copym(m, mhp->extoff[SADB_X_EXT_POLICY],
	    mhp->extlen[SADB_X_EXT_POLICY], M_NOWAIT);
	if (!n->m_next) {
		m_freem(n);
		return key_senderror(so, m, ENOBUFS);
	}
	n->m_pkthdr.len = 0;
	for (nn = n; nn; nn = nn->m_next)
		n->m_pkthdr.len += nn->m_len;
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* SADB_X_GET processing
* receive
* <base, policy(*)>
* from the user(?),
* and send,
* <base, address(SD), policy>
* to the ikmpd.
* policy(*) including direction of policy.
*
* m will always be freed.
*/
/*
 * Handler for SADB_X_SPDGET: look up an SP by id and send a dump of it
 * back to the requesting socket only.  'm' is consumed on all paths.
 */
static int
key_spdget(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct secpolicy *sp;
	struct mbuf *n;
	u_int32_t id;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
	if (mhp->ext[SADB_X_EXT_POLICY] == NULL ||
	    mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	id = ((struct sadb_x_policy *)
	    mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id;
	/* Is there SP in SPD ? */
	sp = key_getspbyid(id);
	if (sp == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n", __func__, id));
		return key_senderror(so, m, ENOENT);
	}
	n = key_setdumpsp(sp, SADB_X_SPDGET, 0, mhp->msg->sadb_msg_pid);
	KEY_FREESP(&sp);
	if (n == NULL)
		return key_senderror(so, m, ENOBUFS);
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
}
/*
* SADB_X_SPDACQUIRE processing.
* Acquire policy and SA(s) for a *OUTBOUND* packet.
* send
* <base, policy(*)>
* to KMD, and expect to receive
* <base> with SADB_X_SPDACQUIRE if error occurred,
* or
* <base, policy>
* with SADB_X_SPDUPDATE from KMD by PF_KEY.
* policy(*) is without policy requests.
*
* 0 : succeed
* others: error number
*/
/*
 * Send an SADB_X_SPDACQUIRE for an outbound packet that matched an
 * IPSEC policy with no requests attached.  A secspacq entry rate-limits
 * repeat messages for the same policy index.
 * Returns 0 on success (or while suppressed), else an error number.
 */
int
key_spdacquire(struct secpolicy *sp)
{
	struct mbuf *result = NULL, *m;
	struct secspacq *newspacq;
	IPSEC_ASSERT(sp != NULL, ("null secpolicy"));
	IPSEC_ASSERT(sp->req == NULL, ("policy exists"));
	IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC,
		("policy not IPSEC %u", sp->policy));
	/* Get an entry to check whether sent message or not. */
	newspacq = key_getspacq(&sp->spidx);
	if (newspacq != NULL) {
		/* NB: key_getspacq() returns with the SPACQ lock held. */
		if (V_key_blockacq_count < newspacq->count) {
			/* reset counter and do send message. */
			newspacq->count = 0;
		} else {
			/* increment counter and do nothing. */
			newspacq->count++;
			SPACQ_UNLOCK();
			return (0);
		}
		SPACQ_UNLOCK();
	} else {
		/* make new entry for blocking to send SADB_ACQUIRE. */
		newspacq = key_newspacq(&sp->spidx);
		if (newspacq == NULL)
			return ENOBUFS;
	}
	/* create new sadb_msg to reply. */
	m = key_setsadbmsg(SADB_X_SPDACQUIRE, 0, 0, 0, 0, 0);
	if (!m)
		return ENOBUFS;
	result = m;
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;
	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);
	/*
	 * Send the head of the chain.  The previous code passed 'm',
	 * which is always NULL here (it is the terminating value of the
	 * loop above), so no message was ever delivered.
	 */
	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
}
/*
* SADB_SPDFLUSH processing
* receive
* <base>
* from the user, and free all entries in secpctree.
* and send,
* <base>
* to the user.
* NOTE: what to do is only marking SADB_SASTATE_DEAD.
*
* m will always be freed.
*/
static int
key_spdflush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	TAILQ_HEAD(, secpolicy) drainq;
	struct sadb_msg *newmsg;
	struct secpolicy *sp, *nextsp;
	u_int dir;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* Request must be exactly the bare, 8-byte-aligned base header. */
	if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg)))
		return key_senderror(so, m, EINVAL);

	/*
	 * Unlink every policy in every direction onto a private drain
	 * queue under the write lock; drop the references (which may
	 * free the policies) only after the lock has been released.
	 */
	TAILQ_INIT(&drainq);
	SPTREE_WLOCK();
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		TAILQ_CONCAT(&drainq, &V_sptree[dir], chain);
	}
	SPTREE_WUNLOCK();
	sp = TAILQ_FIRST(&drainq);
	while (sp != NULL) {
		nextsp = TAILQ_NEXT(sp, chain);
		KEY_FREESP(&sp);
		sp = nextsp;
	}

	/* Reuse the request mbuf as the reply; need room for the header. */
	if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return key_senderror(so, m, ENOBUFS);
	}

	/* Trim the chain to one mbuf and rewrite the base header. */
	if (m->m_next)
		m_freem(m->m_next);
	m->m_next = NULL;
	m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
	newmsg = mtod(m, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);

	/* Notify every PF_KEY listener of the flush. */
	return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
}
/*
* SADB_SPDDUMP processing
* receive
* <base>
* from the user, and dump all SP leaves
* and send,
* <base> .....
* to the ikmpd.
*
* m will always be freed.
*/
static int
key_spddump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;
	int cnt;
	u_int dir;
	struct mbuf *n;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* First pass: count policies; an empty SPD is reported as ENOENT. */
	cnt = 0;
	SPTREE_RLOCK();
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		TAILQ_FOREACH(sp, &V_sptree[dir], chain) {
			cnt++;
		}
	}
	if (cnt == 0) {
		SPTREE_RUNLOCK();
		return key_senderror(so, m, ENOENT);
	}
	/*
	 * Second pass: emit one SADB_X_SPDDUMP message per policy.  cnt
	 * counts down and is sent as the sequence number, reaching 0 in
	 * the final message so the receiver can detect end-of-dump.
	 */
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		TAILQ_FOREACH(sp, &V_sptree[dir], chain) {
			--cnt;
			n = key_setdumpsp(sp, SADB_X_SPDDUMP, cnt,
			    mhp->msg->sadb_msg_pid);
			/* Per-policy send failures are silently skipped. */
			if (n)
				key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
		}
	}
	SPTREE_RUNLOCK();
	m_freem(m);
	return 0;
}
/*
 * Build a PF_KEY message (base header, traffic-endpoint addresses,
 * policy extension and, if configured, lifetimes) describing one
 * security policy.  Used by SADB_X_SPDGET and SADB_X_SPDDUMP replies.
 * Returns the assembled mbuf chain, or NULL on allocation failure.
 *
 * NOTE: this version removes two leaked unified-diff artifact lines
 * ("- SPTREE_RLOCK_ASSERT();" / "-") that made the function invalid C;
 * callers (e.g. key_spdget) invoke this without the SPTREE lock held,
 * so the assertion itself was also obsolete.
 */
static struct mbuf *
key_setdumpsp(struct secpolicy *sp, u_int8_t type, u_int32_t seq,
    u_int32_t pid)
{
	struct mbuf *result = NULL, *m;
	struct seclifetime lt;

	m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt);
	if (!m)
		goto fail;
	result = m;

	/*
	 * Note: do not send SADB_X_EXT_NAT_T_* here:
	 * we are sending traffic endpoints.
	 */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &sp->spidx.src.sa, sp->spidx.prefs,
	    sp->spidx.ul_proto);
	if (!m)
		goto fail;
	m_cat(result, m);

	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &sp->spidx.dst.sa, sp->spidx.prefd,
	    sp->spidx.ul_proto);
	if (!m)
		goto fail;
	m_cat(result, m);

	/* Policy extension (sadb_x_policy plus its requests). */
	m = key_sp2msg(sp);
	if (!m)
		goto fail;
	m_cat(result, m);

	/* Lifetimes are only reported for policies that have one. */
	if(sp->lifetime){
		lt.addtime=sp->created;
		lt.usetime= sp->lastused;
		m = key_setlifetime(&lt, SADB_EXT_LIFETIME_CURRENT);
		if (!m)
			goto fail;
		m_cat(result, m);

		lt.addtime=sp->lifetime;
		lt.usetime= sp->validtime;
		m = key_setlifetime(&lt, SADB_EXT_LIFETIME_HARD);
		if (!m)
			goto fail;
		m_cat(result, m);
	}

	/* First mbuf must be a packet header for key_sendup_mbuf(). */
	if ((result->m_flags & M_PKTHDR) == 0)
		goto fail;

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL)
			goto fail;
	}

	/* Recompute the total length and store it in the base header. */
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return result;

fail:
	m_freem(result);
	return NULL;
}
/*
* get PFKEY message length for security policy and request.
*/
/*
 * Compute the PF_KEY wire length needed for a policy's sadb_x_policy
 * extension plus one sadb_x_ipsecrequest per attached ipsec request.
 */
static u_int
key_getspreqmsglen(struct secpolicy *sp)
{
	struct ipsecrequest *isr;
	u_int tlen;

	/* Fixed part: the sadb_x_policy header itself. */
	tlen = sizeof(struct sadb_x_policy);

	/* Non-IPsec policies carry no request list. */
	if (sp->policy != IPSEC_POLICY_IPSEC)
		return tlen;

	/* One 8-byte-aligned request record per entry in sp->req. */
	for (isr = sp->req; isr != NULL; isr = isr->next) {
		tlen += PFKEY_ALIGN8(sizeof(struct sadb_x_ipsecrequest)
		    + isr->saidx.src.sa.sa_len
		    + isr->saidx.dst.sa.sa_len);
	}

	return tlen;
}
/*
* SADB_SPDEXPIRE processing
* send
* <base, address(SD), lifetime(CH), policy>
* to KMD by PF_KEY.
*
* OUT: 0 : succeed
* others : error number
*/
static int
key_spdexpire(struct secpolicy *sp)
{
	struct mbuf *result = NULL, *m;
	int len;
	int error = -1;
	struct sadb_lifetime *lt;

	/* XXX: Why do we lock ? */

	IPSEC_ASSERT(sp != NULL, ("null secpolicy"));

	/* set msg header */
	m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	result = m;

	/*
	 * create lifetime extension (current and hard): both records are
	 * carved out of one mbuf — CURRENT at the start, HARD at len / 2.
	 */
	len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
	m = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (m == NULL) {
		error = ENOBUFS;
		goto fail;
	}
	m_align(m, len);
	m->m_len = len;
	bzero(mtod(m, caddr_t), len);
	lt = mtod(m, struct sadb_lifetime *);
	lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	lt->sadb_lifetime_allocations = 0;
	lt->sadb_lifetime_bytes = 0;
	lt->sadb_lifetime_addtime = sp->created;
	lt->sadb_lifetime_usetime = sp->lastused;
	lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2);
	lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
	lt->sadb_lifetime_allocations = 0;
	lt->sadb_lifetime_bytes = 0;
	lt->sadb_lifetime_addtime = sp->lifetime;
	lt->sadb_lifetime_usetime = sp->validtime;
	m_cat(result, m);

	/*
	 * Note: do not send SADB_X_EXT_NAT_T_* here:
	 * we are sending traffic endpoints.
	 */

	/* set sadb_address for source */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &sp->spidx.src.sa,
	    sp->spidx.prefs, sp->spidx.ul_proto);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* set sadb_address for destination */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &sp->spidx.dst.sa,
	    sp->spidx.prefd, sp->spidx.ul_proto);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* set secpolicy */
	m = key_sp2msg(sp);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* First mbuf must carry a packet header for key_sendup_mbuf(). */
	if ((result->m_flags & M_PKTHDR) == 0) {
		error = EINVAL;
		goto fail;
	}

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	/* Recompute the total length and store it in the base header. */
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);

 fail:
	if (result)
		m_freem(result);
	return error;
}
/* %%% SAD management */
/*
* allocating a memory for new SA head, and copy from the values of mhp.
* OUT: NULL : failure due to the lack of memory.
* others : pointer to new SA head.
*/
/*
 * Allocate and initialize a new SA head for the given index, insert
 * it into the global SAH tree, and return it (NULL if out of memory).
 */
static struct secashead *
key_newsah(struct secasindex *saidx)
{
	struct secashead *newsah;
	int i;

	IPSEC_ASSERT(saidx != NULL, ("null saidx"));

	newsah = malloc(sizeof(struct secashead), M_IPSEC_SAH, M_NOWAIT|M_ZERO);
	if (newsah == NULL)
		return NULL;

	/* Empty per-state SA lists. */
	for (i = 0; i < sizeof(newsah->savtree)/sizeof(newsah->savtree[0]); i++)
		LIST_INIT(&newsah->savtree[i]);

	newsah->saidx = *saidx;
	newsah->state = SADB_SASTATE_MATURE;

	/* add to saidxtree */
	SAHTREE_LOCK();
	LIST_INSERT_HEAD(&V_sahtree, newsah, chain);
	SAHTREE_UNLOCK();

	return newsah;
}
/*
 * delete SA index and all SA registered.
*/
static void
key_delsah(struct secashead *sah)
{
	struct secasvar *sav, *nextsav;
	u_int stateidx;
	int zombie = 0;

	IPSEC_ASSERT(sah != NULL, ("NULL sah"));
	SAHTREE_LOCK_ASSERT();

	/* searching all SA registered in the secindex. */
	for (stateidx = 0;
	     stateidx < _ARRAYLEN(saorder_state_any);
	     stateidx++) {
		u_int state = saorder_state_any[stateidx];
		LIST_FOREACH_SAFE(sav, &sah->savtree[state], chain, nextsav) {
			if (sav->refcnt == 0) {
				/* sanity check */
				KEY_CHKSASTATE(state, sav->state, __func__);
				/*
				 * do NOT call KEY_FREESAV here:
				 * it will only delete the sav if refcnt == 1,
				 * where we already know that refcnt == 0
				 */
				key_delsav(sav);
			} else {
				/* give up to delete this sa */
				zombie++;
			}
		}
	}
	/* Free the head only if no referenced SA remains under it. */
	if (!zombie) {		/* delete only if there are savs */
		/* remove from tree of SA index */
		if (__LIST_CHAINED(sah))
			LIST_REMOVE(sah, chain);
		free(sah, M_IPSEC_SAH);
	}
}
/*
* allocating a new SA with LARVAL state. key_add() and key_getspi() call,
* and copy the values of mhp into new buffer.
* When SAD message type is GETSPI:
* to set sequence number from acq_seq++,
* to set zero to SPI.
* not to call key_setsava().
* OUT: NULL : fail
* others : pointer to new secasvar.
*
* does not modify mbuf. does not free mbuf on error.
*/
static struct secasvar *
key_newsav(struct mbuf *m, const struct sadb_msghdr *mhp,
    struct secashead *sah, int *errp, const char *where, int tag)
{
	struct secasvar *newsav;
	const struct sadb_sa *xsa;

	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
	IPSEC_ASSERT(sah != NULL, ("null secashead"));

	newsav = malloc(sizeof(struct secasvar), M_IPSEC_SA, M_NOWAIT|M_ZERO);
	if (newsav == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		*errp = ENOBUFS;
		goto done;
	}

	/* SPI and sequence number depend on the requesting message type. */
	switch (mhp->msg->sadb_msg_type) {
	case SADB_GETSPI:
		/* SPI is assigned later; start with zero. */
		newsav->spi = 0;

#ifdef IPSEC_DOSEQCHECK
		/* sync sequence number */
		if (mhp->msg->sadb_msg_seq == 0)
			newsav->seq =
				(V_acq_seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq));
		else
#endif
			newsav->seq = mhp->msg->sadb_msg_seq;
		break;

	case SADB_ADD:
		/* sanity check: SADB_ADD must carry an SA extension. */
		if (mhp->ext[SADB_EXT_SA] == NULL) {
			free(newsav, M_IPSEC_SA);
			newsav = NULL;
			ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
				__func__));
			*errp = EINVAL;
			goto done;
		}
		xsa = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
		newsav->spi = xsa->sadb_sa_spi;
		newsav->seq = mhp->msg->sadb_msg_seq;
		break;
	default:
		free(newsav, M_IPSEC_SA);
		newsav = NULL;
		*errp = EINVAL;
		goto done;
	}

	/* copy sav values */
	if (mhp->msg->sadb_msg_type != SADB_GETSPI) {
		*errp = key_setsaval(newsav, m, mhp);
		if (*errp) {
			free(newsav, M_IPSEC_SA);
			newsav = NULL;
			goto done;
		}
	}

	SECASVAR_LOCK_INIT(newsav);

	/* reset created */
	newsav->created = time_second;
	newsav->pid = mhp->msg->sadb_msg_pid;

	/* add to satree: new SAs always start out LARVAL. */
	newsav->sah = sah;
	sa_initref(newsav);
	newsav->state = SADB_SASTATE_LARVAL;

	SAHTREE_LOCK();
	LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav,
			secasvar, chain);
	SAHTREE_UNLOCK();
done:
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s from %s:%u return SP:%p\n", __func__,
			where, tag, newsav));

	return newsav;
}
/*
* free() SA variable entry.
*/
static void
key_cleansav(struct secasvar *sav)
{
	/*
	 * Cleanup xform state.  Note that zeroize'ing causes the
	 * keys to be cleared; otherwise we must do it ourself.
	 */
	if (sav->tdb_xform != NULL) {
		sav->tdb_xform->xf_zeroize(sav);
		sav->tdb_xform = NULL;
	} else {
		KASSERT(sav->iv == NULL, ("iv but no xform"));
		/* Wipe key material before releasing the buffers. */
		if (sav->key_auth != NULL)
			bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth));
		if (sav->key_enc != NULL)
			bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc));
	}
	/* Free key containers (key_data first, then the struct). */
	if (sav->key_auth != NULL) {
		if (sav->key_auth->key_data != NULL)
			free(sav->key_auth->key_data, M_IPSEC_MISC);
		free(sav->key_auth, M_IPSEC_MISC);
		sav->key_auth = NULL;
	}
	if (sav->key_enc != NULL) {
		if (sav->key_enc->key_data != NULL)
			free(sav->key_enc->key_data, M_IPSEC_MISC);
		free(sav->key_enc, M_IPSEC_MISC);
		sav->key_enc = NULL;
	}
	/* Expanded key schedule is wiped as well before freeing. */
	if (sav->sched) {
		bzero(sav->sched, sav->schedlen);
		free(sav->sched, M_IPSEC_MISC);
		sav->sched = NULL;
	}
	if (sav->replay != NULL) {
		free(sav->replay, M_IPSEC_MISC);
		sav->replay = NULL;
	}
	/* Current / hard / soft lifetime records. */
	if (sav->lft_c != NULL) {
		free(sav->lft_c, M_IPSEC_MISC);
		sav->lft_c = NULL;
	}
	if (sav->lft_h != NULL) {
		free(sav->lft_h, M_IPSEC_MISC);
		sav->lft_h = NULL;
	}
	if (sav->lft_s != NULL) {
		free(sav->lft_s, M_IPSEC_MISC);
		sav->lft_s = NULL;
	}
}
/*
* free() SA variable entry.
*/
static void
key_delsav(struct secasvar *sav)
{
	IPSEC_ASSERT(sav != NULL, ("null sav"));
	/* Caller guarantees there are no outstanding references. */
	IPSEC_ASSERT(sav->refcnt == 0, ("reference count %u > 0", sav->refcnt));

	/* remove from SA header */
	if (__LIST_CHAINED(sav))
		LIST_REMOVE(sav, chain);
	/* Release keys, replay state and lifetimes before the lock/struct. */
	key_cleansav(sav);
	SECASVAR_LOCK_DESTROY(sav);
	free(sav, M_IPSEC_SA);
}
/*
* search SAD.
* OUT:
* NULL : not found
* others : found, pointer to a SA.
*/
/*
 * Look up the SA head whose index matches saidx (CMP_REQID semantics),
 * skipping DEAD entries.  Returns the match, or NULL (sah ends NULL
 * when the list is exhausted).
 */
static struct secashead *
key_getsah(struct secasindex *saidx)
{
	struct secashead *sah;

	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state != SADB_SASTATE_DEAD &&
		    key_cmpsaidx(&sah->saidx, saidx, CMP_REQID))
			break;
	}
	SAHTREE_UNLOCK();

	return sah;
}
/*
* check not to be duplicated SPI.
* NOTE: this function is too slow due to searching all SAD.
* OUT:
* NULL : not found
* others : found, pointer to a SA.
*/
/*
 * Scan the whole SAD for an SA with the given SPI whose destination
 * is one of our own addresses.  Returns the SA if the SPI is already
 * in use, NULL otherwise.  (Slow: walks every SA head.)
 */
static struct secasvar *
key_checkspidup(struct secasindex *saidx, u_int32_t spi)
{
	struct secashead *sah;
	struct secasvar *sav = NULL;

	/* Source and destination must share an address family. */
	if (saidx->src.sa.sa_family != saidx->dst.sa.sa_family) {
		ipseclog((LOG_DEBUG, "%s: address family mismatched.\n",
			__func__));
		return NULL;
	}

	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		/* Only heads whose destination is a local address count. */
		if (!key_ismyaddr((struct sockaddr *)&sah->saidx.dst))
			continue;
		if ((sav = key_getsavbyspi(sah, spi)) != NULL)
			break;
	}
	SAHTREE_UNLOCK();

	return sav;
}
/*
 * search SAD limited to alive SAs, by protocol and SPI.
* OUT:
* NULL : not found
* others : found, pointer to a SA.
*/
/*
 * Find an alive SA with the given SPI under one SA head.  Walks the
 * per-state lists in saorder_state_alive order; returns the first
 * match or NULL.  SAHTREE lock must be held by the caller.
 */
static struct secasvar *
key_getsavbyspi(struct secashead *sah, u_int32_t spi)
{
	struct secasvar *sav = NULL;
	u_int idx, state;

	SAHTREE_LOCK_ASSERT();

	for (idx = 0; idx < _ARRAYLEN(saorder_state_alive); idx++) {
		state = saorder_state_alive[idx];
		LIST_FOREACH(sav, &sah->savtree[state], chain) {
			/* sanity: entry must match the state of its queue */
			if (sav->state != state) {
				ipseclog((LOG_DEBUG, "%s: "
				    "invalid sav->state (queue: %d SA: %d)\n",
				    __func__, state, sav->state));
				continue;
			}

			if (sav->spi == spi)
				return sav;
		}
	}

	return NULL;
}
/*
* copy SA values from PF_KEY message except *SPI, SEQ, PID, STATE and TYPE*.
* You must update these if need.
* OUT: 0: success.
* !0: failure.
*
* does not modify mbuf. does not free mbuf on error.
*/
static int
key_setsaval(struct secasvar *sav, struct mbuf *m,
    const struct sadb_msghdr *mhp)
{
	int error = 0;

	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* initialization: all optional state starts empty. */
	sav->replay = NULL;
	sav->key_auth = NULL;
	sav->key_enc = NULL;
	sav->sched = NULL;
	sav->schedlen = 0;
	sav->iv = NULL;
	sav->lft_c = NULL;
	sav->lft_h = NULL;
	sav->lft_s = NULL;
	sav->tdb_xform = NULL;		/* transform */
	sav->tdb_encalgxform = NULL;	/* encoding algorithm */
	sav->tdb_authalgxform = NULL;	/* authentication algorithm */
	sav->tdb_compalgxform = NULL;	/* compression algorithm */
	/* Initialize even if NAT-T not compiled in: */
	sav->natt_type = 0;
	sav->natt_esp_frag_len = 0;

	/* SA extension: algorithms, flags and replay window. */
	if (mhp->ext[SADB_EXT_SA] != NULL) {
		const struct sadb_sa *sa0;

		sa0 = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
		if (mhp->extlen[SADB_EXT_SA] < sizeof(*sa0)) {
			error = EINVAL;
			goto fail;
		}

		sav->alg_auth = sa0->sadb_sa_auth;
		sav->alg_enc = sa0->sadb_sa_encrypt;
		sav->flags = sa0->sadb_sa_flags;

		/*
		 * replay window: only new-style SAs get one; the bitmap
		 * is allocated immediately after the secreplay struct.
		 */
		if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) {
			sav->replay = (struct secreplay *)
				malloc(sizeof(struct secreplay)+sa0->sadb_sa_replay, M_IPSEC_MISC, M_NOWAIT|M_ZERO);
			if (sav->replay == NULL) {
				ipseclog((LOG_DEBUG, "%s: No more memory.\n",
					__func__));
				error = ENOBUFS;
				goto fail;
			}
			if (sa0->sadb_sa_replay != 0)
				sav->replay->bitmap = (caddr_t)(sav->replay+1);
			sav->replay->wsize = sa0->sadb_sa_replay;
		}
	}

	/* Authentication keys */
	if (mhp->ext[SADB_EXT_KEY_AUTH] != NULL) {
		const struct sadb_key *key0;
		int len;

		key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_AUTH];
		len = mhp->extlen[SADB_EXT_KEY_AUTH];

		error = 0;
		if (len < sizeof(*key0)) {
			error = EINVAL;
			goto fail;
		}
		/*
		 * A bare header (zero-length key) is only acceptable for
		 * the NULL auth algorithm; IPCOMP never carries auth keys.
		 */
		switch (mhp->msg->sadb_msg_satype) {
		case SADB_SATYPE_AH:
		case SADB_SATYPE_ESP:
		case SADB_X_SATYPE_TCPSIGNATURE:
			if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
			    sav->alg_auth != SADB_X_AALG_NULL)
				error = EINVAL;
			break;
		case SADB_X_SATYPE_IPCOMP:
		default:
			error = EINVAL;
			break;
		}
		if (error) {
			ipseclog((LOG_DEBUG, "%s: invalid key_auth values.\n",
				__func__));
			goto fail;
		}

		sav->key_auth = (struct seckey *)key_dup_keymsg(key0, len,
								M_IPSEC_MISC);
		if (sav->key_auth == NULL ) {
			ipseclog((LOG_DEBUG, "%s: No more memory.\n",
				  __func__));
			error = ENOBUFS;
			goto fail;
		}
	}

	/* Encryption key */
	if (mhp->ext[SADB_EXT_KEY_ENCRYPT] != NULL) {
		const struct sadb_key *key0;
		int len;

		key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_ENCRYPT];
		len = mhp->extlen[SADB_EXT_KEY_ENCRYPT];

		error = 0;
		if (len < sizeof(*key0)) {
			error = EINVAL;
			goto fail;
		}
		/*
		 * ESP may carry a key (empty only for the NULL cipher);
		 * IPCOMP takes a bare header; AH/TCP-MD5 take none.
		 */
		switch (mhp->msg->sadb_msg_satype) {
		case SADB_SATYPE_ESP:
			if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
			    sav->alg_enc != SADB_EALG_NULL) {
				error = EINVAL;
				break;
			}
			sav->key_enc = (struct seckey *)key_dup_keymsg(key0,
								       len,
								       M_IPSEC_MISC);
			if (sav->key_enc == NULL) {
				ipseclog((LOG_DEBUG, "%s: No more memory.\n",
					__func__));
				error = ENOBUFS;
				goto fail;
			}
			break;
		case SADB_X_SATYPE_IPCOMP:
			if (len != PFKEY_ALIGN8(sizeof(struct sadb_key)))
				error = EINVAL;
			sav->key_enc = NULL;	/*just in case*/
			break;
		case SADB_SATYPE_AH:
		case SADB_X_SATYPE_TCPSIGNATURE:
		default:
			error = EINVAL;
			break;
		}
		if (error) {
			ipseclog((LOG_DEBUG, "%s: invalid key_enc value.\n",
				__func__));
			goto fail;
		}
	}

	/* set iv: attach the transform matching the SA type. */
	sav->ivlen = 0;

	switch (mhp->msg->sadb_msg_satype) {
	case SADB_SATYPE_AH:
		error = xform_init(sav, XF_AH);
		break;
	case SADB_SATYPE_ESP:
		error = xform_init(sav, XF_ESP);
		break;
	case SADB_X_SATYPE_IPCOMP:
		error = xform_init(sav, XF_IPCOMP);
		break;
	case SADB_X_SATYPE_TCPSIGNATURE:
		error = xform_init(sav, XF_TCPSIGNATURE);
		break;
	}
	if (error) {
		ipseclog((LOG_DEBUG, "%s: unable to initialize SA type %u.\n",
		        __func__, mhp->msg->sadb_msg_satype));
		goto fail;
	}

	/* reset created */
	sav->created = time_second;

	/* make lifetime for CURRENT */
	sav->lft_c = malloc(sizeof(struct seclifetime), M_IPSEC_MISC, M_NOWAIT);
	if (sav->lft_c == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		error = ENOBUFS;
		goto fail;
	}

	sav->lft_c->allocations = 0;
	sav->lft_c->bytes = 0;
	sav->lft_c->addtime = time_second;
	sav->lft_c->usetime = 0;

	/* lifetimes for HARD and SOFT: copied from the message if present. */
    {
	const struct sadb_lifetime *lft0;

	lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD];
	if (lft0 != NULL) {
		if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) {
			error = EINVAL;
			goto fail;
		}
		sav->lft_h = key_dup_lifemsg(lft0, M_IPSEC_MISC);
		if (sav->lft_h == NULL) {
			ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
			error = ENOBUFS;
			goto fail;
		}
		/* to be initialize ? */
	}

	lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_SOFT];
	if (lft0 != NULL) {
		if (mhp->extlen[SADB_EXT_LIFETIME_SOFT] < sizeof(*lft0)) {
			error = EINVAL;
			goto fail;
		}
		sav->lft_s = key_dup_lifemsg(lft0, M_IPSEC_MISC);
		if (sav->lft_s == NULL) {
			ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
			error = ENOBUFS;
			goto fail;
		}
		/* to be initialize ? */
	}
    }

	return 0;

 fail:
	/* initialization */
	key_cleansav(sav);

	return error;
}
/*
* validation with a secasvar entry, and set SADB_SATYPE_MATURE.
* OUT: 0: valid
* other: errno
*/
static int
key_mature(struct secasvar *sav)
{
	int error;

	/* check SPI value */
	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
	case IPPROTO_AH:
		/*
		 * RFC 4302, 2.4. Security Parameters Index (SPI), SPI values
		 * 1-255 reserved by IANA for future use,
		 * 0 for implementation specific, local use.
		 */
		if (ntohl(sav->spi) <= 255) {
			ipseclog((LOG_DEBUG, "%s: illegal range of SPI %u.\n",
			    __func__, (u_int32_t)ntohl(sav->spi)));
			return EINVAL;
		}
		break;
	}

	/* check satype: per-protocol flag/algorithm consistency. */
	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
		/* check flags: old-style ESP cannot use derived IVs. */
		if ((sav->flags & (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) ==
		    (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) {
			ipseclog((LOG_DEBUG, "%s: invalid flag (derived) "
				"given to old-esp.\n", __func__));
			return EINVAL;
		}
		error = xform_init(sav, XF_ESP);
		break;
	case IPPROTO_AH:
		/* check flags */
		if (sav->flags & SADB_X_EXT_DERIV) {
			ipseclog((LOG_DEBUG, "%s: invalid flag (derived) "
				"given to AH SA.\n", __func__));
			return EINVAL;
		}
		/* AH carries no encryption algorithm. */
		if (sav->alg_enc != SADB_EALG_NONE) {
			ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
				"mismated.\n", __func__));
			return(EINVAL);
		}
		error = xform_init(sav, XF_AH);
		break;
	case IPPROTO_IPCOMP:
		/* IPCOMP carries no authentication algorithm. */
		if (sav->alg_auth != SADB_AALG_NONE) {
			ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
				"mismated.\n", __func__));
			return(EINVAL);
		}
		/* Without RAWCPI, the CPI must fit in 16 bits. */
		if ((sav->flags & SADB_X_EXT_RAWCPI) == 0
		 && ntohl(sav->spi) >= 0x10000) {
			ipseclog((LOG_DEBUG, "%s: invalid cpi for IPComp.\n",
				__func__));
			return(EINVAL);
		}
		error = xform_init(sav, XF_IPCOMP);
		break;
	case IPPROTO_TCP:
		/* TCP-MD5 signature SAs take no encryption algorithm. */
		if (sav->alg_enc != SADB_EALG_NONE) {
			ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
				"mismated.\n", __func__));
			return(EINVAL);
		}
		error = xform_init(sav, XF_TCPSIGNATURE);
		break;
	default:
		ipseclog((LOG_DEBUG, "%s: Invalid satype.\n", __func__));
		error = EPROTONOSUPPORT;
		break;
	}
	/* Only a fully validated SA transitions to MATURE. */
	if (error == 0) {
		SAHTREE_LOCK();
		key_sa_chgstate(sav, SADB_SASTATE_MATURE);
		SAHTREE_UNLOCK();
	}
	return (error);
}
/*
* subroutine for SADB_GET and SADB_DUMP.
*/
static struct mbuf *
key_setdumpsa(struct secasvar *sav, u_int8_t type, u_int8_t satype,
    u_int32_t seq, u_int32_t pid)
{
	struct mbuf *result = NULL, *tres = NULL, *m;
	int i;
	/*
	 * Extensions in the order they must appear on the wire.  The
	 * loop below iterates this table BACKWARDS, prepending each new
	 * extension to tres, so the final chain comes out in table order.
	 */
	int dumporder[] = {
		SADB_EXT_SA, SADB_X_EXT_SA2,
		SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
		SADB_EXT_LIFETIME_CURRENT, SADB_EXT_ADDRESS_SRC,
		SADB_EXT_ADDRESS_DST, SADB_EXT_ADDRESS_PROXY, SADB_EXT_KEY_AUTH,
		SADB_EXT_KEY_ENCRYPT, SADB_EXT_IDENTITY_SRC,
		SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY,
#ifdef IPSEC_NAT_T
		SADB_X_EXT_NAT_T_TYPE,
		SADB_X_EXT_NAT_T_SPORT, SADB_X_EXT_NAT_T_DPORT,
		SADB_X_EXT_NAT_T_OAI, SADB_X_EXT_NAT_T_OAR,
		SADB_X_EXT_NAT_T_FRAG,
#endif
	};

	m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt);
	if (m == NULL)
		goto fail;
	result = m;

	for (i = sizeof(dumporder)/sizeof(dumporder[0]) - 1; i >= 0; i--) {
		m = NULL;
		switch (dumporder[i]) {
		case SADB_EXT_SA:
			m = key_setsadbsa(sav);
			if (!m)
				goto fail;
			break;

		case SADB_X_EXT_SA2:
			m = key_setsadbxsa2(sav->sah->saidx.mode,
					sav->replay ? sav->replay->count : 0,
					sav->sah->saidx.reqid);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_ADDRESS_SRC:
			m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
			    &sav->sah->saidx.src.sa,
			    FULLMASK, IPSEC_ULPROTO_ANY);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_ADDRESS_DST:
			m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
			    &sav->sah->saidx.dst.sa,
			    FULLMASK, IPSEC_ULPROTO_ANY);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_KEY_AUTH:
			/* Optional extensions are skipped when absent. */
			if (!sav->key_auth)
				continue;
			m = key_setkey(sav->key_auth, SADB_EXT_KEY_AUTH);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_KEY_ENCRYPT:
			if (!sav->key_enc)
				continue;
			m = key_setkey(sav->key_enc, SADB_EXT_KEY_ENCRYPT);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_LIFETIME_CURRENT:
			if (!sav->lft_c)
				continue;
			m = key_setlifetime(sav->lft_c,
					    SADB_EXT_LIFETIME_CURRENT);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_LIFETIME_HARD:
			if (!sav->lft_h)
				continue;
			m = key_setlifetime(sav->lft_h,
					    SADB_EXT_LIFETIME_HARD);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_LIFETIME_SOFT:
			if (!sav->lft_s)
				continue;
			m = key_setlifetime(sav->lft_s,
					    SADB_EXT_LIFETIME_SOFT);
			if (!m)
				goto fail;
			break;

#ifdef IPSEC_NAT_T
		case SADB_X_EXT_NAT_T_TYPE:
			m = key_setsadbxtype(sav->natt_type);
			if (!m)
				goto fail;
			break;

		case SADB_X_EXT_NAT_T_DPORT:
			m = key_setsadbxport(
			    KEY_PORTFROMSADDR(&sav->sah->saidx.dst),
			    SADB_X_EXT_NAT_T_DPORT);
			if (!m)
				goto fail;
			break;

		case SADB_X_EXT_NAT_T_SPORT:
			m = key_setsadbxport(
			    KEY_PORTFROMSADDR(&sav->sah->saidx.src),
			    SADB_X_EXT_NAT_T_SPORT);
			if (!m)
				goto fail;
			break;

		case SADB_X_EXT_NAT_T_OAI:
		case SADB_X_EXT_NAT_T_OAR:
		case SADB_X_EXT_NAT_T_FRAG:
			/* We do not (yet) support those. */
			continue;
#endif

		case SADB_EXT_ADDRESS_PROXY:
		case SADB_EXT_IDENTITY_SRC:
		case SADB_EXT_IDENTITY_DST:
			/* XXX: should these be brought from the SPD ? */
		case SADB_EXT_SENSITIVITY:
		default:
			continue;
		}

		if (!m)
			goto fail;
		/* Prepend to the extension chain being accumulated. */
		if (tres)
			m_cat(m, tres);
		tres = m;
	}

	m_cat(result, tres);
	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL)
			goto fail;
	}

	/* Recompute the total length and store it in the base header. */
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return result;

fail:
	m_freem(result);
	m_freem(tres);
	return NULL;
}
/*
* set data into sadb_msg.
*/
/*
 * Allocate a packet-header mbuf holding a zeroed, 8-byte-aligned
 * sadb_msg base header populated from the arguments.  Note that
 * sadb_msg_len is set from the caller-supplied tlen, not from the
 * mbuf length.  Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbmsg(u_int8_t type, u_int16_t tlen, u_int8_t satype, u_int32_t seq,
    pid_t pid, u_int16_t reserved)
{
	struct sadb_msg *msg;
	struct mbuf *n;
	int len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
	if (len > MCLBYTES)
		return NULL;

	/* Packet-header mbuf; attach a cluster when MHLEN is too small. */
	MGETHDR(n, M_NOWAIT, MT_DATA);
	if (n != NULL && len > MHLEN) {
		if (!(MCLGET(n, M_NOWAIT))) {
			m_freem(n);
			n = NULL;
		}
	}
	if (n == NULL)
		return NULL;

	n->m_pkthdr.len = n->m_len = len;
	n->m_next = NULL;

	msg = mtod(n, struct sadb_msg *);
	bzero(msg, len);
	msg->sadb_msg_version = PF_KEY_V2;
	msg->sadb_msg_type = type;
	msg->sadb_msg_errno = 0;
	msg->sadb_msg_satype = satype;
	msg->sadb_msg_len = PFKEY_UNIT64(tlen);
	msg->sadb_msg_reserved = reserved;
	msg->sadb_msg_seq = seq;
	msg->sadb_msg_pid = (u_int32_t)pid;

	return n;
}
/*
 * copy secasvar data into sadb_sa.
 */
/*
 * Build an SADB_EXT_SA extension describing the given SA (SPI, replay
 * window size, state, algorithms, flags) in a freshly allocated mbuf.
 * Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbsa(struct secasvar *sav)
{
	struct sadb_sa *sa;
	struct mbuf *n;
	int len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_sa));
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	sa = mtod(n, struct sadb_sa *);
	bzero(sa, len);
	sa->sadb_sa_len = PFKEY_UNIT64(len);
	sa->sadb_sa_exttype = SADB_EXT_SA;
	sa->sadb_sa_spi = sav->spi;
	sa->sadb_sa_state = sav->state;
	sa->sadb_sa_auth = sav->alg_auth;
	sa->sadb_sa_encrypt = sav->alg_enc;
	sa->sadb_sa_flags = sav->flags;
	/* Old-style SAs (no replay struct) report a zero window. */
	sa->sadb_sa_replay = (sav->replay != NULL ? sav->replay->wsize : 0);

	return n;
}
/*
* set data into sadb_address.
*/
/*
 * Build a sadb_address extension: the aligned extension header
 * followed by a copy of the sockaddr itself.  A FULLMASK prefix
 * length is rewritten to the full host-mask width of the family.
 * Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbaddr(u_int16_t exttype, const struct sockaddr *saddr,
    u_int8_t prefixlen, u_int16_t ul_proto)
{
	struct sadb_address *ap;
	struct mbuf *n;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_address)) +
	    PFKEY_ALIGN8(saddr->sa_len);
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	/* Translate FULLMASK into the family's host-mask bit count. */
	if (prefixlen == FULLMASK) {
		if (saddr->sa_family == AF_INET)
			prefixlen = sizeof(struct in_addr) << 3;
		else if (saddr->sa_family == AF_INET6)
			prefixlen = sizeof(struct in6_addr) << 3;
		/* other families: left as-is XXX */
	}

	ap = mtod(n, struct sadb_address *);
	bzero(ap, len);
	ap->sadb_address_len = PFKEY_UNIT64(len);
	ap->sadb_address_exttype = exttype;
	ap->sadb_address_proto = ul_proto;
	ap->sadb_address_prefixlen = prefixlen;
	ap->sadb_address_reserved = 0;

	/* The sockaddr follows the aligned extension header. */
	bcopy(saddr,
	    mtod(n, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)),
	    saddr->sa_len);

	return n;
}
/*
* set data into sadb_x_sa2.
*/
/*
 * Build a SADB_X_EXT_SA2 extension carrying the SA mode, sequence
 * number and request id.  Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbxsa2(u_int8_t mode, u_int32_t seq, u_int32_t reqid)
{
	struct sadb_x_sa2 *sa2;
	struct mbuf *n;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2));
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	sa2 = mtod(n, struct sadb_x_sa2 *);
	bzero(sa2, len);
	sa2->sadb_x_sa2_len = PFKEY_UNIT64(len);
	sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
	sa2->sadb_x_sa2_mode = mode;
	sa2->sadb_x_sa2_reserved1 = 0;
	sa2->sadb_x_sa2_reserved2 = 0;
	sa2->sadb_x_sa2_sequence = seq;
	sa2->sadb_x_sa2_reqid = reqid;

	return n;
}
#ifdef IPSEC_NAT_T
/*
* Set a type in sadb_x_nat_t_type.
*/
/*
 * Build a SADB_X_EXT_NAT_T_TYPE extension carrying the NAT-T
 * encapsulation type.  Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbxtype(u_int16_t type)
{
	struct sadb_x_nat_t_type *ntt;
	struct mbuf *n;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_type));
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	ntt = mtod(n, struct sadb_x_nat_t_type *);
	bzero(ntt, len);
	ntt->sadb_x_nat_t_type_len = PFKEY_UNIT64(len);
	ntt->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE;
	ntt->sadb_x_nat_t_type_type = type;

	return (n);
}
/*
* Set a port in sadb_x_nat_t_port.
* In contrast to default RFC 2367 behaviour, port is in network byte order.
*/
/*
 * Build a NAT-T port extension (SADB_X_EXT_NAT_T_SPORT/DPORT).
 * In contrast to default RFC 2367 behaviour, port is in network
 * byte order.  Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbxport(u_int16_t port, u_int16_t type)
{
	struct sadb_x_nat_t_port *ntp;
	struct mbuf *n;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_port));
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	ntp = mtod(n, struct sadb_x_nat_t_port *);
	bzero(ntp, len);
	ntp->sadb_x_nat_t_port_len = PFKEY_UNIT64(len);
	ntp->sadb_x_nat_t_port_exttype = type;
	ntp->sadb_x_nat_t_port_port = port;

	return (n);
}
/*
* Get port from sockaddr. Port is in network byte order.
*/
/*
 * Extract the port from a sockaddr; 0 for unsupported families.
 * The port is returned in network byte order.
 */
u_int16_t
key_portfromsaddr(struct sockaddr *sa)
{
#ifdef INET
	if (sa->sa_family == AF_INET)
		return ((struct sockaddr_in *)sa)->sin_port;
#endif
#ifdef INET6
	if (sa->sa_family == AF_INET6)
		return ((struct sockaddr_in6 *)sa)->sin6_port;
#endif
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP %s unexpected address family %d\n",
			__func__, sa->sa_family));
	return (0);
}
#endif /* IPSEC_NAT_T */
/*
* Set port in struct sockaddr. Port is in network byte order.
*/
/*
 * Store a port (network byte order) into a sockaddr; logs and does
 * nothing for unsupported address families.
 */
static void
key_porttosaddr(struct sockaddr *sa, u_int16_t port)
{
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		satosin(sa)->sin_port = port;
		return;
#endif
#ifdef INET6
	case AF_INET6:
		((struct sockaddr_in6 *)sa)->sin6_port = port;
		return;
#endif
	default:
		ipseclog((LOG_DEBUG, "%s: unexpected address family %d.\n",
			__func__, sa->sa_family));
		return;
	}
}
/*
* set data into sadb_x_policy
*/
/*
 * Build a SADB_X_EXT_POLICY extension carrying the policy type,
 * direction and id.  Returns NULL on allocation failure.
 */
static struct mbuf *
key_setsadbxpolicy(u_int16_t type, u_int8_t dir, u_int32_t id)
{
	struct sadb_x_policy *xpl;
	struct mbuf *n;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy));
	n = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (n == NULL)
		return (NULL);
	m_align(n, len);
	n->m_len = len;

	xpl = mtod(n, struct sadb_x_policy *);
	bzero(xpl, len);
	xpl->sadb_x_policy_len = PFKEY_UNIT64(len);
	xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
	xpl->sadb_x_policy_type = type;
	xpl->sadb_x_policy_dir = dir;
	xpl->sadb_x_policy_id = id;

	return n;
}
/* %%% utilities */
/* Take a key message (sadb_key) from the socket and turn it into one
* of the kernel's key structures (seckey).
*
* IN: pointer to the src
* OUT: NULL no more memory
*/
/*
 * Turn a PF_KEY sadb_key message into a kernel seckey: allocates the
 * container and a copy of the key material that follows the header.
 * The caller owns the result and must free both allocations.
 * Returns NULL (with a log message) when memory is exhausted.
 */
struct seckey *
key_dup_keymsg(const struct sadb_key *src, u_int len,
    struct malloc_type *type)
{
	struct seckey *dst;

	dst = (struct seckey *)malloc(sizeof(struct seckey), type, M_NOWAIT);
	if (dst == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n",
			  __func__));
		return NULL;
	}

	dst->bits = src->sadb_key_bits;
	dst->key_data = (char *)malloc(len, type, M_NOWAIT);
	if (dst->key_data == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n",
			  __func__));
		free(dst, type);
		return NULL;
	}

	/* Key bytes sit immediately after the sadb_key header. */
	bcopy((const char *)src + sizeof(struct sadb_key),
	      dst->key_data, len);

	return dst;
}
/* Take a lifetime message (sadb_lifetime) passed in on a socket and
* turn it into one of the kernel's lifetime structures (seclifetime).
*
* IN: pointer to the destination, source and malloc type
* OUT: NULL, no more memory
*/
/*
 * Turn a PF_KEY sadb_lifetime message into a kernel seclifetime.
 * Returns NULL (with a log message) when memory is exhausted.
 */
static struct seclifetime *
key_dup_lifemsg(const struct sadb_lifetime *src, struct malloc_type *type)
{
	struct seclifetime *dst;

	dst = (struct seclifetime *)malloc(sizeof(struct seclifetime),
					   type, M_NOWAIT);
	if (dst == NULL) {
		/* XXX counter */
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return NULL;
	}

	dst->allocations = src->sadb_lifetime_allocations;
	dst->bytes = src->sadb_lifetime_bytes;
	dst->addtime = src->sadb_lifetime_addtime;
	dst->usetime = src->sadb_lifetime_usetime;

	return dst;
}
/* compare my own address
* OUT: 1: true, i.e. my address.
* 0: false
*/
/*
 * Is the given sockaddr one of this host's own addresses?
 * Returns 1 if so, 0 otherwise (including unsupported families).
 */
int
key_ismyaddr(struct sockaddr *sa)
{
	IPSEC_ASSERT(sa != NULL, ("null sockaddr"));

#ifdef INET
	if (sa->sa_family == AF_INET)
		return (in_localip(satosin(sa)->sin_addr));
#endif
#ifdef INET6
	if (sa->sa_family == AF_INET6)
		return key_ismyaddr6((struct sockaddr_in6 *)sa);
#endif

	return 0;
}
#ifdef INET6
/*
* compare my own address for IPv6.
* 1: ours
* 0: other
* NOTE: derived ip6_input() in KAME. This is necessary to modify more.
*/
#include <netinet6/in6_var.h>
static int
key_ismyaddr6(struct sockaddr_in6 *sin6)
{
	struct in6_ifaddr *ia;
#if 0
	struct in6_multi *in6m;
#endif

	IN6_IFADDR_RLOCK();
	TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
		/*
		 * BUG FIX: the original passed &sin6 — the address of the
		 * pointer variable itself — so key_sockaddrcmp() compared
		 * stack bytes instead of the sockaddr and the match never
		 * worked.  Pass the sockaddr the pointer refers to.
		 */
		if (key_sockaddrcmp((struct sockaddr *)sin6,
		    (struct sockaddr *)&ia->ia_addr, 0) == 0) {
			IN6_IFADDR_RUNLOCK();
			return 1;
		}

#if 0
		/*
		 * XXX Multicast
		 * XXX why do we care about multlicast here while we don't care
		 * about IPv4 multicast??
		 * XXX scope
		 */
		in6m = NULL;
		IN6_LOOKUP_MULTI(sin6->sin6_addr, ia->ia_ifp, in6m);
		if (in6m) {
			IN6_IFADDR_RUNLOCK();
			return 1;
		}
#endif
	}
	IN6_IFADDR_RUNLOCK();

	/* loopback, just for safety */
	if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))
		return 1;

	return 0;
}
#endif /*INET6*/
/*
 * compare two secasindex structures.
 * flag selects how strict the comparison is:
 *   CMP_EXACTLY      - also compare mode, reqid and the raw addresses;
 *   CMP_MODE_REQID   - match reqid (when saidx1's reqid is non-zero)
 *                      and mode (unless saidx0's mode is IPSEC_MODE_ANY);
 *   CMP_REQID        - match reqid (when saidx1's reqid is non-zero);
 *   CMP_HEAD         - protocol and addresses only.
 * Ports are not compared, except when NAT-T requires it.
 * IN:
 *	saidx0: source, it can be in SAD.
 *	saidx1: object.
 * OUT:
 *	1 : equal
 *	0 : not equal
 */
static int
key_cmpsaidx(const struct secasindex *saidx0, const struct secasindex *saidx1,
    int flag)
{
	/* Compare ports only when NAT-T decides they matter (see below). */
	int chkport = 0;

	/* sanity */
	if (saidx0 == NULL && saidx1 == NULL)
		return 1;
	if (saidx0 == NULL || saidx1 == NULL)
		return 0;

	if (saidx0->proto != saidx1->proto)
		return 0;

	if (flag == CMP_EXACTLY) {
		/* Strict comparison: mode, reqid and raw address bytes. */
		if (saidx0->mode != saidx1->mode)
			return 0;
		if (saidx0->reqid != saidx1->reqid)
			return 0;
		if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.sa.sa_len) != 0 ||
		    bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.sa.sa_len) != 0)
			return 0;
	} else {

		/* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */
		if (flag == CMP_MODE_REQID
		  ||flag == CMP_REQID) {
			/*
			 * If reqid of SPD is non-zero, unique SA is required.
			 * The result must be of same reqid in this case.
			 */
			if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid)
				return 0;
		}

		if (flag == CMP_MODE_REQID) {
			/* IPSEC_MODE_ANY in saidx0 acts as a wildcard. */
			if (saidx0->mode != IPSEC_MODE_ANY
			 && saidx0->mode != saidx1->mode)
				return 0;
		}

#ifdef IPSEC_NAT_T
		/*
		 * If NAT-T is enabled, check ports for tunnel mode.
		 * Do not check ports if they are set to zero in the SPD.
		 * Also do not do it for native transport mode, as there
		 * is no port information available in the SP.
		 */
		if ((saidx1->mode == IPSEC_MODE_TUNNEL ||
		     (saidx1->mode == IPSEC_MODE_TRANSPORT &&
		      saidx1->proto == IPPROTO_ESP)) &&
		    saidx1->src.sa.sa_family == AF_INET &&
		    saidx1->dst.sa.sa_family == AF_INET &&
		    ((const struct sockaddr_in *)(&saidx1->src))->sin_port &&
		    ((const struct sockaddr_in *)(&saidx1->dst))->sin_port)
			chkport = 1;
#endif /* IPSEC_NAT_T */

		/* Addresses must match (with or without ports, see chkport). */
		if (key_sockaddrcmp(&saidx0->src.sa, &saidx1->src.sa, chkport) != 0) {
			return 0;
		}
		if (key_sockaddrcmp(&saidx0->dst.sa, &saidx1->dst.sa, chkport) != 0) {
			return 0;
		}
	}

	return 1;
}
/*
* compare two secindex structure exactly.
* IN:
* spidx0: source, it is often in SPD.
* spidx1: object, it is often from PFKEY message.
* OUT:
* 1 : equal
* 0 : not equal
*/
static int
key_cmpspidx_exactly(struct secpolicyindex *spidx0,
    struct secpolicyindex *spidx1)
{

	/* Two NULL indexes are considered equal; one NULL is not. */
	if (spidx0 == NULL && spidx1 == NULL)
		return (1);
	if (spidx0 == NULL || spidx1 == NULL)
		return (0);

	/* Prefix lengths and upper-layer protocol must match exactly. */
	if (spidx0->prefs != spidx1->prefs ||
	    spidx0->prefd != spidx1->prefd ||
	    spidx0->ul_proto != spidx1->ul_proto)
		return (0);

	/* Both endpoint addresses (ports included) must match. */
	if (key_sockaddrcmp(&spidx0->src.sa, &spidx1->src.sa, 1) != 0)
		return (0);
	if (key_sockaddrcmp(&spidx0->dst.sa, &spidx1->dst.sa, 1) != 0)
		return (0);

	return (1);
}
/*
* compare two secindex structure with mask.
* IN:
* spidx0: source, it is often in SPD.
* spidx1: object, it is often from IP header.
* OUT:
* 1 : equal
* 0 : not equal
*/
static int
key_cmpspidx_withmask(struct secpolicyindex *spidx0,
    struct secpolicyindex *spidx1)
{
	/* sanity */
	if (spidx0 == NULL && spidx1 == NULL)
		return 1;
	if (spidx0 == NULL || spidx1 == NULL)
		return 0;

	/* Families and lengths must agree before per-family checks. */
	if (spidx0->src.sa.sa_family != spidx1->src.sa.sa_family ||
	    spidx0->dst.sa.sa_family != spidx1->dst.sa.sa_family ||
	    spidx0->src.sa.sa_len != spidx1->src.sa.sa_len ||
	    spidx0->dst.sa.sa_len != spidx1->dst.sa.sa_len)
		return 0;

	/* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. */
	if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY
	 && spidx0->ul_proto != spidx1->ul_proto)
		return 0;

	/*
	 * Source side: IPSEC_PORT_ANY in spidx0 is a port wildcard; the
	 * address is compared only up to spidx0's prefix length (prefs).
	 */
	switch (spidx0->src.sa.sa_family) {
	case AF_INET:
		if (spidx0->src.sin.sin_port != IPSEC_PORT_ANY
		 && spidx0->src.sin.sin_port != spidx1->src.sin.sin_port)
			return 0;
		if (!key_bbcmp(&spidx0->src.sin.sin_addr,
		    &spidx1->src.sin.sin_addr, spidx0->prefs))
			return 0;
		break;
	case AF_INET6:
		if (spidx0->src.sin6.sin6_port != IPSEC_PORT_ANY
		 && spidx0->src.sin6.sin6_port != spidx1->src.sin6.sin6_port)
			return 0;
		/*
		 * scope_id check. if sin6_scope_id is 0, we regard it
		 * as a wildcard scope, which matches any scope zone ID.
		 */
		if (spidx0->src.sin6.sin6_scope_id &&
		    spidx1->src.sin6.sin6_scope_id &&
		    spidx0->src.sin6.sin6_scope_id != spidx1->src.sin6.sin6_scope_id)
			return 0;
		if (!key_bbcmp(&spidx0->src.sin6.sin6_addr,
		    &spidx1->src.sin6.sin6_addr, spidx0->prefs))
			return 0;
		break;
	default:
		/* XXX */
		if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.sa.sa_len) != 0)
			return 0;
		break;
	}

	/* Destination side: same rules, masked by prefd. */
	switch (spidx0->dst.sa.sa_family) {
	case AF_INET:
		if (spidx0->dst.sin.sin_port != IPSEC_PORT_ANY
		 && spidx0->dst.sin.sin_port != spidx1->dst.sin.sin_port)
			return 0;
		if (!key_bbcmp(&spidx0->dst.sin.sin_addr,
		    &spidx1->dst.sin.sin_addr, spidx0->prefd))
			return 0;
		break;
	case AF_INET6:
		if (spidx0->dst.sin6.sin6_port != IPSEC_PORT_ANY
		 && spidx0->dst.sin6.sin6_port != spidx1->dst.sin6.sin6_port)
			return 0;
		/*
		 * scope_id check. if sin6_scope_id is 0, we regard it
		 * as a wildcard scope, which matches any scope zone ID.
		 */
		if (spidx0->dst.sin6.sin6_scope_id &&
		    spidx1->dst.sin6.sin6_scope_id &&
		    spidx0->dst.sin6.sin6_scope_id != spidx1->dst.sin6.sin6_scope_id)
			return 0;
		if (!key_bbcmp(&spidx0->dst.sin6.sin6_addr,
		    &spidx1->dst.sin6.sin6_addr, spidx0->prefd))
			return 0;
		break;
	default:
		/* XXX */
		if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.sa.sa_len) != 0)
			return 0;
		break;
	}

	/* XXX Do we check other field ? e.g. flowinfo */
	return 1;
}
/*
 * Compare two sockaddrs: family, length, then the per-family address
 * (and scope zone for IPv6); ports are compared only when 'port' != 0.
 * returns 0 on match, non-zero on mismatch.
 */
static int
key_sockaddrcmp(const struct sockaddr *sa1, const struct sockaddr *sa2,
    int port)
{
	/*
	 * Shadow the global satosin/satosin6 with const-qualified casts
	 * for the duration of this function only (undone at the bottom).
	 */
#ifdef satosin
#undef satosin
#endif
#define satosin(s) ((const struct sockaddr_in *)s)
#ifdef satosin6
#undef satosin6
#endif
#define satosin6(s) ((const struct sockaddr_in6 *)s)
	if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len)
		return 1;

	switch (sa1->sa_family) {
	case AF_INET:
		if (sa1->sa_len != sizeof(struct sockaddr_in))
			return 1;
		if (satosin(sa1)->sin_addr.s_addr !=
		    satosin(sa2)->sin_addr.s_addr) {
			return 1;
		}
		if (port && satosin(sa1)->sin_port != satosin(sa2)->sin_port)
			return 1;
		break;
	case AF_INET6:
		if (sa1->sa_len != sizeof(struct sockaddr_in6))
			return 1;	/*EINVAL*/
		if (satosin6(sa1)->sin6_scope_id !=
		    satosin6(sa2)->sin6_scope_id) {
			return 1;
		}
		if (!IN6_ARE_ADDR_EQUAL(&satosin6(sa1)->sin6_addr,
		    &satosin6(sa2)->sin6_addr)) {
			return 1;
		}
		if (port &&
		    satosin6(sa1)->sin6_port != satosin6(sa2)->sin6_port) {
			return 1;
		}
		break;
	default:
		/* Unknown family: byte-wise comparison of the whole sockaddr. */
		if (bcmp(sa1, sa2, sa1->sa_len) != 0)
			return 1;
		break;
	}

	return 0;
#undef satosin
#undef satosin6
}
/*
* compare two buffers with mask.
* IN:
* addr1: source
* addr2: object
* bits: Number of bits to compare
* OUT:
* 1 : equal
* 0 : not equal
*/
static int
key_bbcmp(const void *a1, const void *a2, unsigned int bits)
{
	const unsigned char *q1 = a1;
	const unsigned char *q2 = a2;
	unsigned int nbytes, rest;

	/* XXX: This could be considerably faster if we compare a word
	 * at a time, but it is complicated on LSB Endian machines */

	/* Null buffers match only each other. */
	if (q1 == NULL || q2 == NULL)
		return (q1 == q2);

	/* Whole bytes first. */
	for (nbytes = bits / 8; nbytes > 0; nbytes--) {
		if (*q1++ != *q2++)
			return 0;
	}

	/* Then the leftover high-order bits of the final byte, if any. */
	rest = bits & 7;
	if (rest != 0) {
		unsigned char mask = (unsigned char)(0xffU << (8 - rest));

		if ((*q1 & mask) != (*q2 & mask))
			return 0;
	}

	return 1;	/* Match! */
}
/*
 * Expire security policies: for every direction, unlink and announce
 * (key_spdexpire) policies whose lifetime (since creation) or validtime
 * (since last use) has elapsed.  A zero lifetime/validtime means "never
 * expires" and is skipped.
 */
static void
key_flush_spd(time_t now)
{
	SPTREE_RLOCK_TRACKER;
	struct secpolicy *sp;
	u_int dir;

	/* SPD */
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
restart:
		SPTREE_RLOCK();
		TAILQ_FOREACH(sp, &V_sptree[dir], chain) {
			if (sp->lifetime == 0 && sp->validtime == 0)
				continue;
			if ((sp->lifetime &&
			    now - sp->created > sp->lifetime) ||
			    (sp->validtime &&
			    now - sp->lastused > sp->validtime)) {
				/*
				 * Unlinking modifies the tree, so drop the
				 * read lock first and restart the scan of
				 * this direction from the beginning.
				 */
				SPTREE_RUNLOCK();
				key_unlink(sp);
				key_spdexpire(sp);
				KEY_FREESP(&sp);
				goto restart;
			}
		}
		SPTREE_RUNLOCK();
	}
}
/*
 * Walk the SAD and advance SA lifecycle states based on lifetimes:
 * LARVAL entries that never matured are freed, MATURE entries past
 * their soft lifetime become DYING (with an expire message), DYING
 * entries past their hard lifetime become DEAD, and DEAD headers are
 * removed.  Runs with the SAHTREE lock held throughout.
 */
static void
key_flush_sad(time_t now)
{
	struct secashead *sah, *nextsah;
	struct secasvar *sav, *nextsav;

	/* SAD */
	SAHTREE_LOCK();
	LIST_FOREACH_SAFE(sah, &V_sahtree, chain, nextsah) {
		/* if sah has been dead, then delete it and process next sah. */
		if (sah->state == SADB_SASTATE_DEAD) {
			key_delsah(sah);
			continue;
		}

		/* if LARVAL entry doesn't become MATURE, delete it. */
		LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_LARVAL], chain, nextsav) {
			/* Need to also check refcnt for a larval SA ??? */
			if (now - sav->created > V_key_larval_lifetime)
				KEY_FREESAV(&sav);
		}

		/*
		 * check MATURE entry to start to send expire message
		 * whether or not.
		 */
		LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_MATURE], chain, nextsav) {
			/* we don't need to check. */
			if (sav->lft_s == NULL)
				continue;

			/* sanity check */
			if (sav->lft_c == NULL) {
				ipseclog((LOG_DEBUG,"%s: there is no CURRENT "
					"time, why?\n", __func__));
				continue;
			}

			/* check SOFT lifetime */
			if (sav->lft_s->addtime != 0 &&
			    now - sav->created > sav->lft_s->addtime) {
				key_sa_chgstate(sav, SADB_SASTATE_DYING);
				/*
				 * Actually, only send expire message if
				 * SA has been used, as it was done before,
				 * but should we always send such message,
				 * and let IKE daemon decide if it should be
				 * renegotiated or not ?
				 * XXX expire message will actually NOT be
				 * sent if SA is only used after soft
				 * lifetime has been reached, see below
				 * (DYING state)
				 */
				if (sav->lft_c->usetime != 0)
					key_expire(sav);
			}
			/* check SOFT lifetime by bytes */
			/*
			 * XXX I don't know the way to delete this SA
			 * when new SA is installed.  Caution when it's
			 * installed too big lifetime by time.
			 */
			else if (sav->lft_s->bytes != 0 &&
			    sav->lft_s->bytes < sav->lft_c->bytes) {
				key_sa_chgstate(sav, SADB_SASTATE_DYING);
				/*
				 * XXX If we keep to send expire
				 * message in the status of
				 * DYING. Do remove below code.
				 */
				key_expire(sav);
			}
		}

		/* check DYING entry to change status to DEAD. */
		LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DYING], chain, nextsav) {
			/* we don't need to check. */
			if (sav->lft_h == NULL)
				continue;

			/* sanity check */
			if (sav->lft_c == NULL) {
				ipseclog((LOG_DEBUG, "%s: there is no CURRENT "
					"time, why?\n", __func__));
				continue;
			}

			if (sav->lft_h->addtime != 0 &&
			    now - sav->created > sav->lft_h->addtime) {
				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
#if 0	/* XXX Should we keep to send expire message until HARD lifetime ? */
			else if (sav->lft_s != NULL
			      && sav->lft_s->addtime != 0
			      && now - sav->created > sav->lft_s->addtime) {
				/*
				 * XXX: should be checked to be
				 * installed the valid SA.
				 */

				/*
				 * If there is no SA then sending
				 * expire message.
				 */
				key_expire(sav);
			}
#endif
			/* check HARD lifetime by bytes */
			else if (sav->lft_h->bytes != 0 &&
			    sav->lft_h->bytes < sav->lft_c->bytes) {
				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
		}

		/* delete entry in DEAD */
		LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DEAD], chain, nextsav) {
			/* sanity check */
			if (sav->state != SADB_SASTATE_DEAD) {
				ipseclog((LOG_DEBUG, "%s: invalid sav->state "
					"(queue: %d SA: %d): kill it anyway\n",
					__func__,
					SADB_SASTATE_DEAD, sav->state));
			}
			/*
			 * do not call key_freesav() here.
			 * sav should already be freed, and sav->refcnt
			 * shows other references to sav
			 * (such as from SPD).
			 */
		}
	}
	SAHTREE_UNLOCK();
}
static void
key_flush_acq(time_t now)
{
	struct secacq *acq, *nextacq;

	/* ACQ tree: drop blocked-acquire entries past their lifetime. */
	ACQ_LOCK();
	LIST_FOREACH_SAFE(acq, &V_acqtree, chain, nextacq) {
		if (now - acq->created > V_key_blockacq_lifetime &&
		    __LIST_CHAINED(acq)) {
			LIST_REMOVE(acq, chain);
			free(acq, M_IPSEC_SAQ);
		}
	}
	ACQ_UNLOCK();
}
static void
key_flush_spacq(time_t now)
{
	struct secspacq *acq, *nextacq;

	/* SP ACQ tree: drop entries past the block-acquire lifetime. */
	SPACQ_LOCK();
	LIST_FOREACH_SAFE(acq, &V_spacqtree, chain, nextacq) {
		if (now - acq->created > V_key_blockacq_lifetime &&
		    __LIST_CHAINED(acq)) {
			LIST_REMOVE(acq, chain);
			free(acq, M_IPSEC_SAQ);
		}
	}
	SPACQ_UNLOCK();
}
/*
* time handler.
* scanning SPD and SAD to check status for each entries,
* and do to remove or to expire.
* XXX: year 2038 problem may remain.
*/
static void
key_timehandler(void *arg)
{
	VNET_ITERATOR_DECL(vnet_iter);
	/* One timestamp for the whole sweep, so all checks are consistent. */
	time_t now = time_second;

	/* Run the SPD/SAD/ACQ expiry sweeps in every vnet. */
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		key_flush_spd(now);
		key_flush_sad(now);
		key_flush_acq(now);
		key_flush_spacq(now);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();

#ifndef IPSEC_DEBUG2
	/* Re-arm ourselves to run again in one second (hz ticks). */
	callout_schedule(&key_timer, hz);
#endif /* IPSEC_DEBUG2 */
}
/*
 * Return a u_long filled with random bytes via key_randomfill().
 * (Fixed: declare the parameter list as (void) — an empty list is an
 * obsolescent non-prototype declaration.)
 */
u_long
key_random(void)
{
	u_long value;

	key_randomfill(&value, sizeof(value));
	return value;
}
/*
 * Fill l bytes at p with random data.  Prefers read_random(); if it
 * returns fewer than l bytes, the remainder is padded with random()
 * output (and a one-time warning is printed, since random() is only a
 * pseudo-random fallback).
 */
void
key_randomfill(void *p, size_t l)
{
	size_t n;
	u_long v;
	static int warn = 1;

	/* Fixed: removed the dead store "n = 0;" that was immediately
	 * overwritten by the read_random() result. */
	n = (size_t)read_random(p, (u_int)l);
	/* last resort */
	while (n < l) {
		v = random();
		bcopy(&v, (u_int8_t *)p + n,
		    l - n < sizeof(v) ? l - n : sizeof(v));
		n += sizeof(v);

		if (warn) {
			printf("WARNING: pseudo-random number generator "
			    "used for IPsec processing\n");
			warn = 0;
		}
	}
}
/*
* map SADB_SATYPE_* to IPPROTO_*.
* if satype == SADB_SATYPE then satype is mapped to ~0.
* OUT:
* 0: invalid satype.
*/
static u_int16_t
key_satype2proto(u_int8_t satype)
{
	u_int16_t proto;

	/* Translate the PF_KEY SA type into the matching IP protocol;
	 * 0 means the satype is not recognized. */
	switch (satype) {
	case SADB_SATYPE_UNSPEC:
		proto = IPSEC_PROTO_ANY;
		break;
	case SADB_SATYPE_AH:
		proto = IPPROTO_AH;
		break;
	case SADB_SATYPE_ESP:
		proto = IPPROTO_ESP;
		break;
	case SADB_X_SATYPE_IPCOMP:
		proto = IPPROTO_IPCOMP;
		break;
	case SADB_X_SATYPE_TCPSIGNATURE:
		proto = IPPROTO_TCP;
		break;
	default:
		proto = 0;
		break;
	}
	return (proto);
}
/*
* map IPPROTO_* to SADB_SATYPE_*
* OUT:
* 0: invalid protocol type.
*/
static u_int8_t
key_proto2satype(u_int16_t proto)
{
	u_int8_t satype;

	/* Inverse of key_satype2proto(); 0 means the protocol has no
	 * corresponding PF_KEY SA type. */
	switch (proto) {
	case IPPROTO_AH:
		satype = SADB_SATYPE_AH;
		break;
	case IPPROTO_ESP:
		satype = SADB_SATYPE_ESP;
		break;
	case IPPROTO_IPCOMP:
		satype = SADB_X_SATYPE_IPCOMP;
		break;
	case IPPROTO_TCP:
		satype = SADB_X_SATYPE_TCPSIGNATURE;
		break;
	default:
		satype = 0;
		break;
	}
	return (satype);
}
/* %%% PF_KEY */
/*
* SADB_GETSPI processing is to receive
* <base, (SA2), src address, dst address, (SPI range)>
* from the IKMPd, to assign a unique spi value, to hang on the INBOUND
* tree with the status of LARVAL, and send
* <base, SA(*), address(SD)>
* to the IKMPd.
*
* IN: mhp: pointer to the pointer to each header.
* OUT: NULL if fail.
* other if success, return pointer to the message to send.
*/
static int
key_getspi(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *newsah;
	struct secasvar *newsav;
	u_int8_t proto;
	u_int32_t spi;
	u_int8_t mode;
	u_int32_t reqid;
	int error;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* Both source and destination address extensions are mandatory. */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	/* SA2 is optional; without it, mode/reqid act as wildcards. */
	if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
		mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
		reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
	} else {
		mode = IPSEC_MODE_ANY;
		reqid = 0;
	}

	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 * The sockaddr follows the sadb_address header, hence src0 + 1.
	 */
	switch (((struct sockaddr *)(src0 + 1))->sa_family) {
	case AF_INET:
		if (((struct sockaddr *)(src0 + 1))->sa_len !=
		    sizeof(struct sockaddr_in))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in *)(src0 + 1))->sin_port = 0;
		break;
	case AF_INET6:
		if (((struct sockaddr *)(src0 + 1))->sa_len !=
		    sizeof(struct sockaddr_in6))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in6 *)(src0 + 1))->sin6_port = 0;
		break;
	default:
		; /*???*/
	}
	switch (((struct sockaddr *)(dst0 + 1))->sa_family) {
	case AF_INET:
		if (((struct sockaddr *)(dst0 + 1))->sa_len !=
		    sizeof(struct sockaddr_in))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in *)(dst0 + 1))->sin_port = 0;
		break;
	case AF_INET6:
		if (((struct sockaddr *)(dst0 + 1))->sa_len !=
		    sizeof(struct sockaddr_in6))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in6 *)(dst0 + 1))->sin6_port = 0;
		break;
	default:
		; /*???*/
	}

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 * We made sure the port numbers are zero above, so we do
	 * not have to worry in case we do not update them.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL)
		ipseclog((LOG_DEBUG, "%s: NAT-T OAi present\n", __func__));
	if (mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL)
		ipseclog((LOG_DEBUG, "%s: NAT-T OAr present\n", __func__));

	if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_type *type;
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid nat-t message "
			    "passed.\n", __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		/* Install NAT-T ports into the secasindex we just built. */
		if (sport)
			KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port);
	}
#endif

	/* SPI allocation */
	spi = key_do_getnewspi((struct sadb_spirange *)mhp->ext[SADB_EXT_SPIRANGE],
	    &saidx);
	if (spi == 0)
		return key_senderror(so, m, EINVAL);

	/* get a SA index */
	if ((newsah = key_getsah(&saidx)) == NULL) {
		/* create a new SA index */
		if ((newsah = key_newsah(&saidx)) == NULL) {
			ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
			return key_senderror(so, m, ENOBUFS);
		}
	}

	/* get a new SA */
	/* XXX rewrite */
	newsav = KEY_NEWSAV(m, mhp, newsah, &error);
	if (newsav == NULL) {
		/* XXX don't free new SA index allocated in above. */
		return key_senderror(so, m, error);
	}

	/* set spi */
	newsav->spi = htonl(spi);

	/* delete the entry in acqtree */
	if (mhp->msg->sadb_msg_seq != 0) {
		struct secacq *acq;
		if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) != NULL) {
			/* reset counter in order to deletion by timehandler. */
			acq->created = time_second;
			acq->count = 0;
		}
	}

    {
	struct mbuf *n, *nn;
	struct sadb_sa *m_sa;
	struct sadb_msg *newmsg;
	int off, len;

	/* create new sadb_msg to reply. */
	len = PFKEY_ALIGN8(sizeof(struct sadb_msg)) +
	    PFKEY_ALIGN8(sizeof(struct sadb_sa));

	/*
	 * NOTE(review): if MGETHDR fails here, n is NULL and the
	 * MCLGET below would dereference it when len > MHLEN —
	 * confirm against the MCLGET macro definition.
	 */
	MGETHDR(n, M_NOWAIT, MT_DATA);
	if (len > MHLEN) {
		if (!(MCLGET(n, M_NOWAIT))) {
			m_freem(n);
			n = NULL;
		}
	}
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	n->m_len = len;
	n->m_next = NULL;
	off = 0;

	/* Copy the request header, then append the SA extension with
	 * the freshly allocated SPI. */
	m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
	off += PFKEY_ALIGN8(sizeof(struct sadb_msg));

	m_sa = (struct sadb_sa *)(mtod(n, caddr_t) + off);
	m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa));
	m_sa->sadb_sa_exttype = SADB_EXT_SA;
	m_sa->sadb_sa_spi = htonl(spi);
	off += PFKEY_ALIGN8(sizeof(struct sadb_sa));

	IPSEC_ASSERT(off == len,
		("length inconsistency (off %u len %u)", off, len));

	/* Echo the src/dst address extensions back to the requester. */
	n->m_next = key_gather_mbuf(m, mhp, 0, 2, SADB_EXT_ADDRESS_SRC,
	    SADB_EXT_ADDRESS_DST);
	if (!n->m_next) {
		m_freem(n);
		return key_senderror(so, m, ENOBUFS);
	}

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			/* NOTE(review): on pullup failure this sends the
			 * original request mbuf m back up — confirm this
			 * is the intended fallback. */
			return key_sendup_mbuf(so, m, KEY_SENDUP_ONE);
	}

	n->m_pkthdr.len = 0;
	for (nn = n; nn; nn = nn->m_next)
		n->m_pkthdr.len += nn->m_len;

	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_seq = newsav->seq;
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
    }
}
/*
* allocating new SPI
* called by key_getspi().
* OUT:
* 0: failure.
* others: success.
*/
/*
 * Allocate a new SPI for saidx, honoring the optional requested range.
 * Returns 0 on failure, the new SPI (host byte order) on success.
 *
 * Fixed: the retry loop used "while (count--)", so exhausting the retry
 * budget left count == -1 and the "count == 0" failure test never fired
 * — a still-duplicate SPI could be returned; conversely, success on the
 * very last attempt left count == 0 and was misreported as failure.
 * The bounded for-loop below makes count == 0 mean exactly "budget
 * exhausted without finding a free SPI".
 */
static u_int32_t
key_do_getnewspi(struct sadb_spirange *spirange, struct secasindex *saidx)
{
	u_int32_t newspi;
	u_int32_t min, max;
	int count = V_key_spi_trycnt;

	/* set spi range to allocate */
	if (spirange != NULL) {
		min = spirange->sadb_spirange_min;
		max = spirange->sadb_spirange_max;
	} else {
		min = V_key_spi_minval;
		max = V_key_spi_maxval;
	}
	/* IPCOMP needs 2-byte SPI */
	if (saidx->proto == IPPROTO_IPCOMP) {
		u_int32_t t;
		if (min >= 0x10000)
			min = 0xffff;
		if (max >= 0x10000)
			max = 0xffff;
		if (min > max) {
			t = min; min = max; max = t;
		}
	}

	if (min == max) {
		/* Degenerate range: the single candidate must be free. */
		if (key_checkspidup(saidx, min) != NULL) {
			ipseclog((LOG_DEBUG, "%s: SPI %u exists already.\n",
				__func__, min));
			return 0;
		}

		count--; /* taking one cost. */
		newspi = min;

	} else {

		/* init SPI */
		newspi = 0;

		/* Try pseudo-random SPIs from [min, max] until one is not
		 * already in use, or the retry budget runs out. */
		for (; count > 0; count--) {
			newspi = min + (key_random() % (max - min + 1));
			if (key_checkspidup(saidx, newspi) == NULL)
				break;
		}

		/* count == 0 here means every attempt hit a duplicate. */
		if (count == 0 || newspi == 0) {
			ipseclog((LOG_DEBUG, "%s: to allocate spi is failed.\n",
				__func__));
			return 0;
		}
	}

	/* statistics */
	keystat.getspi_count =
		(keystat.getspi_count + V_key_spi_trycnt - count) / 2;

	return newspi;
}
/*
* SADB_UPDATE processing
* receive
* <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
* key(AE), (identity(SD),) (sensitivity)>
* from the ikmpd, and update a secasvar entry whose status is SADB_SASTATE_LARVAL.
* and send
* <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
* (identity(SD),) (sensitivity)>
* to the ikmpd.
*
* m will always be freed.
*/
static int
key_update(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
#ifdef IPSEC_NAT_T
	struct sadb_x_nat_t_type *type;
	struct sadb_x_nat_t_port *sport, *dport;
	struct sadb_address *iaddr, *raddr;
	struct sadb_x_nat_t_frag *frag;
#endif
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav;
	u_int16_t proto;
	u_int8_t mode;
	u_int32_t reqid;
	int error;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * Mandatory extensions: SA, both addresses, the key matching the
	 * satype (encrypt for ESP, auth for AH), and lifetime HARD/SOFT
	 * must be given as a pair or not at all.
	 */
	if (mhp->ext[SADB_EXT_SA] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
	     mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
	     mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
	    mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	/* SA2 is optional; without it, mode/reqid act as wildcards. */
	if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
		mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
		reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
	} else {
		mode = IPSEC_MODE_ANY;
		reqid = 0;
	}
	/* XXX boundary checking for other extensions */

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 * (The sizeof(*type) etc. below only use the pointers' types,
	 * not their values, so the unassigned pointers are safe here.)
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {

		if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		type = (struct sadb_x_nat_t_type *)
		    mhp->ext[SADB_X_EXT_NAT_T_TYPE];
		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];
	} else {
		type = 0;
		sport = dport = 0;
	}
	if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) {
		if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) {
			ipseclog((LOG_DEBUG, "%s: invalid message\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}
		iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI];
		raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR];
		ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__));
	} else {
		iaddr = raddr = NULL;
	}
	if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) {
		if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) {
			ipseclog((LOG_DEBUG, "%s: invalid message\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}
		frag = (struct sadb_x_nat_t_frag *)
		    mhp->ext[SADB_X_EXT_NAT_T_FRAG];
	} else {
		frag = 0;
	}
#endif

	/* get a SA header */
	if ((sah = key_getsah(&saidx)) == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SA index found.\n", __func__));
		return key_senderror(so, m, ENOENT);
	}

	/* set spidx if there */
	/* XXX rewrite */
	error = key_setident(sah, m, mhp);
	if (error)
		return key_senderror(so, m, error);

	/* find a SA with sequence number. */
#ifdef IPSEC_DOSEQCHECK
	if (mhp->msg->sadb_msg_seq != 0
	 && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) {
		ipseclog((LOG_DEBUG, "%s: no larval SA with sequence %u "
			"exists.\n", __func__, mhp->msg->sadb_msg_seq));
		return key_senderror(so, m, ENOENT);
	}
#else
	SAHTREE_LOCK();
	sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
	SAHTREE_UNLOCK();
	if (sav == NULL) {
		ipseclog((LOG_DEBUG, "%s: no such a SA found (spi:%u)\n",
			__func__, (u_int32_t)ntohl(sa0->sadb_sa_spi)));
		return key_senderror(so, m, EINVAL);
	}
#endif

	/* validity check: the SA found must agree with the request. */
	if (sav->sah->saidx.proto != proto) {
		ipseclog((LOG_DEBUG, "%s: protocol mismatched "
			"(DB=%u param=%u)\n", __func__,
			sav->sah->saidx.proto, proto));
		return key_senderror(so, m, EINVAL);
	}
#ifdef IPSEC_DOSEQCHECK
	if (sav->spi != sa0->sadb_sa_spi) {
		ipseclog((LOG_DEBUG, "%s: SPI mismatched (DB:%u param:%u)\n",
			__func__,
			(u_int32_t)ntohl(sav->spi),
			(u_int32_t)ntohl(sa0->sadb_sa_spi)));
		return key_senderror(so, m, EINVAL);
	}
#endif
	if (sav->pid != mhp->msg->sadb_msg_pid) {
		ipseclog((LOG_DEBUG, "%s: pid mismatched (DB:%u param:%u)\n",
			__func__, sav->pid, mhp->msg->sadb_msg_pid));
		return key_senderror(so, m, EINVAL);
	}

	/* copy sav values */
	error = key_setsaval(sav, m, mhp);
	if (error) {
		KEY_FREESAV(&sav);
		return key_senderror(so, m, error);
	}

#ifdef IPSEC_NAT_T
	/*
	 * Handle more NAT-T info if present,
	 * now that we have a sav to fill.
	 */
	if (type)
		sav->natt_type = type->sadb_x_nat_t_type_type;
	if (sport)
		KEY_PORTTOSADDR(&sav->sah->saidx.src,
		    sport->sadb_x_nat_t_port_port);
	if (dport)
		KEY_PORTTOSADDR(&sav->sah->saidx.dst,
		    dport->sadb_x_nat_t_port_port);

#if 0
	/*
	 * In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0.
	 * We should actually check for a minimum MTU here, if we
	 * want to support it in ip_output.
	 */
	if (frag)
		sav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen;
#endif
#endif

	/* check SA values to be mature. */
	if ((mhp->msg->sadb_msg_errno = key_mature(sav)) != 0) {
		KEY_FREESAV(&sav);
		return key_senderror(so, m, 0);
	}

    {
	struct mbuf *n;

	/* set msg buf from mhp */
	n = key_getmsgbuf_x1(m, mhp);
	if (n == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return key_senderror(so, m, ENOBUFS);
	}

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* search SAD with sequence for a SA which state is SADB_SASTATE_LARVAL.
* only called by key_update().
* OUT:
* NULL : not found
* others : found, pointer to a SA.
*/
#ifdef IPSEC_DOSEQCHECK
static struct secasvar *
key_getsavbyseq(struct secashead *sah, u_int32_t seq)
{
	struct secasvar *sav;
	u_int state;

	/* Only LARVAL SAs are eligible for a sequence-number match. */
	state = SADB_SASTATE_LARVAL;

	/* search SAD with sequence number ? */
	LIST_FOREACH(sav, &sah->savtree[state], chain) {
		KEY_CHKSASTATE(state, sav->state, __func__);

		if (sav->seq == seq) {
			/* Caller receives a referenced SA. */
			sa_addref(sav);
			KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
				printf("DP %s cause refcnt++:%d SA:%p\n",
					__func__, sav->refcnt, sav));
			return sav;
		}
	}

	return NULL;
}
#endif
/*
* SADB_ADD processing
* add an entry to SA database, when received
* <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
* key(AE), (identity(SD),) (sensitivity)>
* from the ikmpd,
* and send
* <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
* (identity(SD),) (sensitivity)>
* to the ikmpd.
*
* IGNORE identity and sensitivity messages.
*
* m will always be freed.
*/
static int
key_add(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
struct sadb_sa *sa0;
struct sadb_address *src0, *dst0;
#ifdef IPSEC_NAT_T
struct sadb_x_nat_t_type *type;
struct sadb_address *iaddr, *raddr;
struct sadb_x_nat_t_frag *frag;
#endif
struct secasindex saidx;
struct secashead *newsah;
struct secasvar *newsav;
u_int16_t proto;
u_int8_t mode;
u_int32_t reqid;
int error;
IPSEC_ASSERT(so != NULL, ("null socket"));
IPSEC_ASSERT(m != NULL, ("null mbuf"));
IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
/* map satype to proto */
if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
__func__));
return key_senderror(so, m, EINVAL);
}
if (mhp->ext[SADB_EXT_SA] == NULL ||
mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
(mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
(mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
(mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
(mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
__func__));
return key_senderror(so, m, EINVAL);
}
if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
/* XXX need more */
ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
__func__));
return key_senderror(so, m, EINVAL);
}
if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
} else {
mode = IPSEC_MODE_ANY;
reqid = 0;
}
sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
/* XXX boundary check against sa_len */
KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);
/*
* Make sure the port numbers are zero.
* In case of NAT-T we will update them later if needed.
*/
KEY_PORTTOSADDR(&saidx.src, 0);
KEY_PORTTOSADDR(&saidx.dst, 0);
#ifdef IPSEC_NAT_T
/*
* Handle NAT-T info if present.
*/
if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
struct sadb_x_nat_t_port *sport, *dport;
if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
ipseclog((LOG_DEBUG, "%s: invalid message.\n",
__func__));
return key_senderror(so, m, EINVAL);
}
type = (struct sadb_x_nat_t_type *)
mhp->ext[SADB_X_EXT_NAT_T_TYPE];
sport = (struct sadb_x_nat_t_port *)
mhp->ext[SADB_X_EXT_NAT_T_SPORT];
dport = (struct sadb_x_nat_t_port *)
mhp->ext[SADB_X_EXT_NAT_T_DPORT];
if (sport)
KEY_PORTTOSADDR(&saidx.src,
sport->sadb_x_nat_t_port_port);
if (dport)
KEY_PORTTOSADDR(&saidx.dst,
dport->sadb_x_nat_t_port_port);
} else {
type = 0;
}
if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL &&
mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) {
if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) ||
mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) {
ipseclog((LOG_DEBUG, "%s: invalid message\n",
__func__));
return key_senderror(so, m, EINVAL);
}
iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI];
raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR];
ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__));
} else {
iaddr = raddr = NULL;
}
if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) {
if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) {
ipseclog((LOG_DEBUG, "%s: invalid message\n",
__func__));
return key_senderror(so, m, EINVAL);
}
frag = (struct sadb_x_nat_t_frag *)
mhp->ext[SADB_X_EXT_NAT_T_FRAG];
} else {
frag = 0;
}
#endif
/* get a SA header */
if ((newsah = key_getsah(&saidx)) == NULL) {
/* create a new SA header */
if ((newsah = key_newsah(&saidx)) == NULL) {
ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
return key_senderror(so, m, ENOBUFS);
}
}
/* set spidx if there */
/* XXX rewrite */
error = key_setident(newsah, m, mhp);
if (error) {
return key_senderror(so, m, error);
}
/* create new SA entry. */
/* We can create new SA only if SPI is differenct. */
SAHTREE_LOCK();
newsav = key_getsavbyspi(newsah, sa0->sadb_sa_spi);
SAHTREE_UNLOCK();
if (newsav != NULL) {
ipseclog((LOG_DEBUG, "%s: SA already exists.\n", __func__));
return key_senderror(so, m, EEXIST);
}
newsav = KEY_NEWSAV(m, mhp, newsah, &error);
if (newsav == NULL) {
return key_senderror(so, m, error);
}
#ifdef IPSEC_NAT_T
/*
* Handle more NAT-T info if present,
* now that we have a sav to fill.
*/
if (type)
newsav->natt_type = type->sadb_x_nat_t_type_type;
#if 0
/*
* In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0.
* We should actually check for a minimum MTU here, if we
* want to support it in ip_output.
*/
if (frag)
newsav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen;
#endif
#endif
/* check SA values to be mature. */
if ((error = key_mature(newsav)) != 0) {
KEY_FREESAV(&newsav);
return key_senderror(so, m, error);
}
/*
* don't call key_freesav() here, as we would like to keep the SA
* in the database on success.
*/
{
struct mbuf *n;
/* set msg buf from mhp */
n = key_getmsgbuf_x1(m, mhp);
if (n == NULL) {
ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
return key_senderror(so, m, ENOBUFS);
}
m_freem(m);
return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
}
}
/* m is retained */
/*
 * Copy identity extensions (SADB_EXT_IDENTITY_{SRC,DST}) from the
 * request into the SA header.  Both identities must be supplied
 * together or not at all.
 *
 * Returns 0 on success, EINVAL on malformed identities, ENOBUFS on
 * allocation failure.  m is retained (not freed here).
 */
static int
key_setident(struct secashead *sah, struct mbuf *m,
	const struct sadb_msghdr *mhp)
{
	const struct sadb_ident *idsrc, *iddst;
	int idsrclen, iddstlen;

	IPSEC_ASSERT(sah != NULL, ("null secashead"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* don't make buffer if not there */
	if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL &&
	    mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
		sah->idents = NULL;
		sah->identd = NULL;
		return 0;
	}

	/* Exactly one identity present is a protocol error. */
	if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL ||
	    mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
		ipseclog((LOG_DEBUG, "%s: invalid identity.\n", __func__));
		return EINVAL;
	}

	idsrc = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_SRC];
	iddst = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_DST];
	/* NB: lengths are captured but only used by the dead code below. */
	idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC];
	iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST];

	/* validity check */
	if (idsrc->sadb_ident_type != iddst->sadb_ident_type) {
		ipseclog((LOG_DEBUG, "%s: ident type mismatch.\n", __func__));
		return EINVAL;
	}

	/*
	 * NOTE: every identity type -- including the explicitly listed
	 * PREFIX/FQDN/USERFQDN cases -- falls into "default" and returns
	 * here with the identities cleared.  Everything after this switch
	 * is currently unreachable dead code kept for a future
	 * implementation ("XXX do nothing").
	 */
	switch (idsrc->sadb_ident_type) {
	case SADB_IDENTTYPE_PREFIX:
	case SADB_IDENTTYPE_FQDN:
	case SADB_IDENTTYPE_USERFQDN:
	default:
		/* XXX do nothing */
		sah->idents = NULL;
		sah->identd = NULL;
		return 0;
	}

	/* make structure */
	sah->idents = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT);
	if (sah->idents == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return ENOBUFS;
	}
	sah->identd = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT);
	if (sah->identd == NULL) {
		/* Unwind the first allocation before failing. */
		free(sah->idents, M_IPSEC_MISC);
		sah->idents = NULL;
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return ENOBUFS;
	}
	sah->idents->type = idsrc->sadb_ident_type;
	sah->idents->id = idsrc->sadb_ident_id;
	sah->identd->type = iddst->sadb_ident_type;
	sah->identd->id = iddst->sadb_ident_id;
	return 0;
}
/*
* m will not be freed on return.
* it is caller's responsibility to free the result.
*/
static struct mbuf *
key_getmsgbuf_x1(struct mbuf *m, const struct sadb_msghdr *mhp)
{
struct mbuf *n;
IPSEC_ASSERT(m != NULL, ("null mbuf"));
IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
/* create new sadb_msg to reply. */
n = key_gather_mbuf(m, mhp, 1, 9, SADB_EXT_RESERVED,
SADB_EXT_SA, SADB_X_EXT_SA2,
SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST,
SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST);
if (!n)
return NULL;
if (n->m_len < sizeof(struct sadb_msg)) {
n = m_pullup(n, sizeof(struct sadb_msg));
if (n == NULL)
return NULL;
}
mtod(n, struct sadb_msg *)->sadb_msg_errno = 0;
mtod(n, struct sadb_msg *)->sadb_msg_len =
PFKEY_UNIT64(n->m_pkthdr.len);
return n;
}
/*
* SADB_DELETE processing
* receive
* <base, SA(*), address(SD)>
* from the ikmpd, and set SADB_SASTATE_DEAD,
* and send,
* <base, SA(*), address(SD)>
* to the ikmpd.
*
* m will always be freed.
*/
static int
key_delete(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav = NULL;
	u_int16_t proto;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/* Source and destination address extensions are mandatory. */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_SA] == NULL) {
		/*
		 * Caller wants us to delete all non-LARVAL SAs
		 * that match the src/dst.  This is used during
		 * IKE INITIAL-CONTACT.
		 */
		ipseclog((LOG_DEBUG, "%s: doing delete all.\n", __func__));
		return key_delete_all(so, m, mhp, proto);
	} else if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	/* The sockaddrs immediately follow the sadb_address headers (+1). */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		if (sport)
			KEY_PORTTOSADDR(&saidx.src,
			    sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst,
			    dport->sadb_x_nat_t_port_port);
	}
#endif

	/* get a SA header */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* get a SA with SPI. */
		sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
		if (sav)
			break;	/* loop exits early only with sav != NULL */
	}
	if (sah == NULL) {
		SAHTREE_UNLOCK();
		ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__));
		return key_senderror(so, m, ENOENT);
	}

	/* Mark dead and drop our reference while still holding the lock. */
	key_sa_chgstate(sav, SADB_SASTATE_DEAD);
	KEY_FREESAV(&sav);
	SAHTREE_UNLOCK();

    {
	struct mbuf *n;
	struct sadb_msg *newmsg;

	/* create new sadb_msg to reply. */
	/* XXX-BZ NAT-T extensions? */
	n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED,
	    SADB_EXT_SA, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	/* Request consumed; reply broadcast to all PF_KEY listeners. */
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* delete all SAs for src/dst. Called from key_delete().
*/
static int
key_delete_all(struct socket *so, struct mbuf *m,
	const struct sadb_msghdr *mhp, u_int16_t proto)
{
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav, *nextsav;
	u_int stateidx, state;

	/* Presence/length of these extensions was validated by key_delete(). */
	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		if (sport)
			KEY_PORTTOSADDR(&saidx.src,
			    sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst,
			    dport->sadb_x_nat_t_port_port);
	}
#endif

	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* Delete all non-LARVAL SAs. */
		for (stateidx = 0;
		     stateidx < _ARRAYLEN(saorder_state_alive);
		     stateidx++) {
			state = saorder_state_alive[stateidx];
			if (state == SADB_SASTATE_LARVAL)
				continue;
			/*
			 * Manual safe iteration: nextsav is captured before
			 * KEY_FREESAV() may unlink sav from the list.
			 */
			for (sav = LIST_FIRST(&sah->savtree[state]);
			     sav != NULL; sav = nextsav) {
				nextsav = LIST_NEXT(sav, chain);
				/* sanity check */
				if (sav->state != state) {
					ipseclog((LOG_DEBUG, "%s: invalid "
					    "sav->state (queue %d SA %d)\n",
					    __func__, state, sav->state));
					continue;
				}
				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
		}
	}
	SAHTREE_UNLOCK();
    {
	struct mbuf *n;
	struct sadb_msg *newmsg;

	/* create new sadb_msg to reply. */
	/* XXX-BZ NAT-T extensions? */
	n = key_gather_mbuf(m, mhp, 1, 3, SADB_EXT_RESERVED,
	    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	/* Request consumed; reply broadcast to all PF_KEY listeners. */
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}
/*
* SADB_GET processing
* receive
* <base, SA(*), address(SD)>
* from the ikmpd, and get a SP and a SA to respond,
* and send,
* <base, SA, (lifetime(HSC),) address(SD), (address(P),) key(AE),
* (identity(SD),) (sensitivity)>
* to the ikmpd.
*
* m will always be freed.
*/
static int
key_get(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav = NULL;
	u_int16_t proto;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/* SA, source and destination address extensions are all mandatory. */
	if (mhp->ext[SADB_EXT_SA] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
	    mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	/* The sockaddrs immediately follow the sadb_address headers (+1). */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		if (sport)
			KEY_PORTTOSADDR(&saidx.src,
			    sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst,
			    dport->sadb_x_nat_t_port_port);
	}
#endif

	/* get a SA header */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* get a SA with SPI. */
		sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
		if (sav)
			break;	/* loop exits early only with sav != NULL */
	}
	SAHTREE_UNLOCK();
	if (sah == NULL) {
		ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__));
		return key_senderror(so, m, ENOENT);
	}

	/*
	 * NOTE(review): sav is dereferenced below after SAHTREE_UNLOCK();
	 * whether a reference from key_getsavbyspi() keeps it alive here is
	 * not visible in this function -- confirm against that helper.
	 */
    {
	struct mbuf *n;
	u_int8_t satype;

	/* map proto to satype */
	if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
		ipseclog((LOG_DEBUG, "%s: there was invalid proto in SAD.\n",
		    __func__));
		return key_senderror(so, m, EINVAL);
	}

	/* create new sadb_msg to reply. */
	n = key_setdumpsa(sav, SADB_GET, satype, mhp->msg->sadb_msg_seq,
	    mhp->msg->sadb_msg_pid);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	/* Request consumed; reply goes only to the requesting socket. */
	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
    }
}
/* XXX make it sysctl-configurable? */
/*
 * Fill in the default proposal lifetimes for a sadb_comb entry.
 * The soft limits are set to 80% of the corresponding hard limits so
 * that renegotiation starts before the SA actually expires.
 *
 * The caller is expected to have zeroed *comb (all three key_getcomb_*
 * builders bzero() the entry before calling here).
 */
static void
key_getcomb_setlifetime(struct sadb_comb *comb)
{

	comb->sadb_comb_soft_allocations = 1;
	comb->sadb_comb_hard_allocations = 1;
	comb->sadb_comb_soft_bytes = 0;
	comb->sadb_comb_hard_bytes = 0;
	comb->sadb_comb_hard_addtime = 86400;	/* 1 day */
	/*
	 * Derive soft from HARD.  The previous code computed soft_addtime
	 * from its own (zeroed) value, always yielding 0, and derived
	 * hard_usetime from soft_usetime -- hard/soft inverted.
	 */
	comb->sadb_comb_soft_addtime = comb->sadb_comb_hard_addtime * 80 / 100;
	comb->sadb_comb_hard_usetime = 28800;	/* 8 hours */
	comb->sadb_comb_soft_usetime = comb->sadb_comb_hard_usetime * 80 / 100;
}
/*
* XXX reorder combinations by preference
* XXX no idea if the user wants ESP authentication or not
*/
/*
 * Build a chain of sadb_comb entries describing every usable ESP
 * encryption algorithm (optionally paired with AH authentication
 * combinations when V_ipsec_esp_auth is set).
 *
 * Returns an mbuf chain of combs, or NULL on allocation failure or if
 * no algorithm qualifies.  The chain length is always a multiple of
 * the aligned sadb_comb size.
 */
static struct mbuf *
key_getcomb_esp(void)
{
	struct sadb_comb *comb;
	struct enc_xform *algo;
	struct mbuf *result = NULL, *m, *n;
	int encmin;
	int i, off, o;
	int totlen;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));

	m = NULL;
	for (i = 1; i <= SADB_EALG_MAX; i++) {
		algo = esp_algorithm_lookup(i);
		if (algo == NULL)
			continue;

		/* discard algorithms with key size smaller than system min */
		if (_BITS(algo->maxkey) < V_ipsec_esp_keymin)
			continue;
		/* Clamp the advertised minimum to the system minimum. */
		if (_BITS(algo->minkey) < V_ipsec_esp_keymin)
			encmin = V_ipsec_esp_keymin;
		else
			encmin = _BITS(algo->minkey);

		/* With ESP auth, start from the AH comb list; else one comb. */
		if (V_ipsec_esp_auth)
			m = key_getcomb_ah();
		else {
			IPSEC_ASSERT(l <= MLEN,
				("l=%u > MLEN=%lu", l, (u_long) MLEN));
			MGET(m, M_NOWAIT, MT_DATA);
			if (m) {
				M_ALIGN(m, l);
				m->m_len = l;
				m->m_next = NULL;
				bzero(mtod(m, caddr_t), m->m_len);
			}
		}
		if (!m)
			goto fail;

		totlen = 0;
		for (n = m; n; n = n->m_next)
			totlen += n->m_len;
		IPSEC_ASSERT((totlen % l) == 0, ("totlen=%u, l=%u", totlen, l));

		/* Stamp this cipher's parameters into every comb in m. */
		for (off = 0; off < totlen; off += l) {
			n = m_pulldown(m, off, l, &o);
			if (!n) {
				/* m is already freed */
				goto fail;
			}
			comb = (struct sadb_comb *)(mtod(n, caddr_t) + o);
			bzero(comb, sizeof(*comb));
			key_getcomb_setlifetime(comb);
			comb->sadb_comb_encrypt = i;
			comb->sadb_comb_encrypt_minbits = encmin;
			comb->sadb_comb_encrypt_maxbits = _BITS(algo->maxkey);
		}

		if (!result)
			result = m;
		else
			m_cat(result, m);
	}

	return result;

 fail:
	if (result)
		m_freem(result);
	return NULL;
}
/*
 * Report the minimum/maximum key sizes (in bytes) for AH algorithm
 * 'alg'.  Transforms advertising keysize 0 accept arbitrary key sizes,
 * so the per-algorithm restriction is enforced here instead.
 */
static void
key_getsizes_ah(const struct auth_hash *ah, int alg, u_int16_t *min,
	u_int16_t *max)
{

	*min = *max = ah->keysize;
	if (ah->keysize != 0)
		return;

	/*
	 * Transform takes arbitrary key size but algorithm
	 * key size is restricted.  Enforce this here.
	 */
	switch (alg) {
	case SADB_X_AALG_MD5:
		*min = *max = 16;
		break;
	case SADB_X_AALG_SHA:
		*min = *max = 20;
		break;
	case SADB_X_AALG_NULL:
		*min = 1;
		*max = 256;
		break;
	case SADB_X_AALG_SHA2_256:
		*min = *max = 32;
		break;
	case SADB_X_AALG_SHA2_384:
		*min = *max = 48;
		break;
	case SADB_X_AALG_SHA2_512:
		*min = *max = 64;
		break;
	default:
		DPRINTF(("%s: unknown AH algorithm %u\n",
		    __func__, alg));
		break;
	}
}
/*
* XXX reorder combinations by preference
*/
/*
 * Build a chain of sadb_comb entries describing every usable AH
 * authentication algorithm.  New combs are prepended, one aligned
 * sadb_comb per algorithm.  Returns NULL on allocation failure or if
 * no algorithm qualifies.
 */
static struct mbuf *
key_getcomb_ah(void)
{
	struct sadb_comb *comb;
	struct auth_hash *algo;
	struct mbuf *m;
	u_int16_t minkeysize, maxkeysize;
	int i;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));

	m = NULL;
	for (i = 1; i <= SADB_AALG_MAX; i++) {
#if 1
		/* we prefer HMAC algorithms, not old algorithms */
		if (i != SADB_AALG_SHA1HMAC &&
		    i != SADB_AALG_MD5HMAC &&
		    i != SADB_X_AALG_SHA2_256 &&
		    i != SADB_X_AALG_SHA2_384 &&
		    i != SADB_X_AALG_SHA2_512)
			continue;
#endif
		algo = ah_algorithm_lookup(i);
		if (!algo)
			continue;
		key_getsizes_ah(algo, i, &minkeysize, &maxkeysize);
		/* discard algorithms with key size smaller than system min */
		if (_BITS(minkeysize) < V_ipsec_ah_keymin)
			continue;

		/* First comb gets a fresh mbuf; later ones are prepended. */
		if (!m) {
			IPSEC_ASSERT(l <= MLEN,
				("l=%u > MLEN=%lu", l, (u_long) MLEN));
			MGET(m, M_NOWAIT, MT_DATA);
			if (m) {
				M_ALIGN(m, l);
				m->m_len = l;
				m->m_next = NULL;
			}
		} else
			M_PREPEND(m, l, M_NOWAIT);
		if (!m)
			return NULL;

		comb = mtod(m, struct sadb_comb *);
		bzero(comb, sizeof(*comb));
		key_getcomb_setlifetime(comb);
		comb->sadb_comb_auth = i;
		comb->sadb_comb_auth_minbits = _BITS(minkeysize);
		comb->sadb_comb_auth_maxbits = _BITS(maxkeysize);
	}

	return m;
}
/*
* not really an official behavior. discussed in pf_key@inner.net in Sep2000.
* XXX reorder combinations by preference
*/
/*
 * Build a chain of sadb_comb entries for every registered IPcomp
 * compression algorithm.  Returns NULL on allocation failure or if no
 * algorithm is available.  Key-size bits are left at zero since they
 * have no meaning for compression.
 */
static struct mbuf *
key_getcomb_ipcomp(void)
{
	struct sadb_comb *comb;
	struct comp_algo *algo;
	struct mbuf *m;
	int i;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));

	m = NULL;
	for (i = 1; i <= SADB_X_CALG_MAX; i++) {
		algo = ipcomp_algorithm_lookup(i);
		if (!algo)
			continue;

		/* First comb gets a fresh mbuf; later ones are prepended. */
		if (!m) {
			IPSEC_ASSERT(l <= MLEN,
				("l=%u > MLEN=%lu", l, (u_long) MLEN));
			MGET(m, M_NOWAIT, MT_DATA);
			if (m) {
				M_ALIGN(m, l);
				m->m_len = l;
				m->m_next = NULL;
			}
		} else
			M_PREPEND(m, l, M_NOWAIT);
		if (!m)
			return NULL;

		comb = mtod(m, struct sadb_comb *);
		bzero(comb, sizeof(*comb));
		key_getcomb_setlifetime(comb);
		comb->sadb_comb_encrypt = i;
		/* what should we set into sadb_comb_*_{min,max}bits? */
	}

	return m;
}
/*
* XXX no way to pass mode (transport/tunnel) to userland
* XXX replay checking?
* XXX sysctl interface to ipsec_{ah,esp}_keymin
*/
/*
 * Build a SADB_EXT_PROPOSAL extension holding the algorithm
 * combinations for the given SA index.  Returns NULL when the protocol
 * is not handled or an allocation fails.
 */
static struct mbuf *
key_getprop(const struct secasindex *saidx)
{
	struct sadb_prop *prop;
	struct mbuf *m, *t;
	const int hdrlen = PFKEY_ALIGN8(sizeof(struct sadb_prop));
	int len;

	/* Gather the per-protocol combination chain. */
	if (saidx->proto == IPPROTO_ESP)
		m = key_getcomb_esp();
	else if (saidx->proto == IPPROTO_AH)
		m = key_getcomb_ah();
	else if (saidx->proto == IPPROTO_IPCOMP)
		m = key_getcomb_ipcomp();
	else
		return NULL;
	if (m == NULL)
		return NULL;

	/* Make room for the proposal header in front of the combs. */
	M_PREPEND(m, hdrlen, M_NOWAIT);
	if (m == NULL)
		return NULL;

	/* Total extension length, header included. */
	len = 0;
	for (t = m; t != NULL; t = t->m_next)
		len += t->m_len;

	prop = mtod(m, struct sadb_prop *);
	bzero(prop, sizeof(*prop));
	prop->sadb_prop_len = PFKEY_UNIT64(len);
	prop->sadb_prop_exttype = SADB_EXT_PROPOSAL;
	prop->sadb_prop_replay = 32;		/* XXX */

	return m;
}
/*
* SADB_ACQUIRE processing called by key_checkrequest() and key_acquire2().
* send
* <base, SA, address(SD), (address(P)), x_policy,
* (identity(SD),) (sensitivity,) proposal>
* to KMD, and expect to receive
* <base> with SADB_ACQUIRE if error occured,
* or
* <base, src address, dst address, (SPI range)> with SADB_GETSPI
* from KMD by PF_KEY.
*
* XXX x_policy is outside of RFC2367 (KAME extension).
* XXX sensitivity is not supported.
* XXX for ipcomp, RFC2367 does not define how to fill in proposal.
* see comment for key_getcomb_ipcomp().
*
* OUT:
* 0 : succeed
* others: error number
*/
static int
key_acquire(const struct secasindex *saidx, struct secpolicy *sp)
{
	struct mbuf *result = NULL, *m;
	struct secacq *newacq;
	u_int8_t satype;
	int error = -1;
	u_int32_t seq;

	IPSEC_ASSERT(saidx != NULL, ("null saidx"));
	satype = key_proto2satype(saidx->proto);
	IPSEC_ASSERT(satype != 0, ("null satype, protocol %u", saidx->proto));

	/*
	 * We never do anything about acquiring the SA ourselves.  An
	 * alternative design would have the kernel block the SADB_ACQUIRE
	 * message until it hears back from IKEd, managed via an ACQUIRING
	 * list.
	 */
	/* Get an entry to check whether sending message or not. */
	if ((newacq = key_getacq(saidx)) != NULL) {
		if (V_key_blockacq_count < newacq->count) {
			/* reset counter and do send message. */
			newacq->count = 0;
		} else {
			/* increment counter and do nothing. */
			newacq->count++;
			return 0;
		}
	} else {
		/* make new entry for blocking to send SADB_ACQUIRE. */
		if ((newacq = key_newacq(saidx)) == NULL)
			return ENOBUFS;
	}

	seq = newacq->seq;
	/* Base header first; extensions are concatenated below. */
	m = key_setsadbmsg(SADB_ACQUIRE, 0, satype, seq, 0, 0);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	result = m;

	/*
	 * No SADB_X_EXT_NAT_T_* here: we do not know
	 * anything related to NAT-T at this time.
	 */

	/* set sadb_address for saidx's. */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &saidx->src.sa, FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &saidx->dst.sa, FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* XXX proxy address (optional) */

	/* set sadb_x_policy */
	if (sp) {
		m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id);
		if (!m) {
			error = ENOBUFS;
			goto fail;
		}
		m_cat(result, m);
	}

	/* XXX identity (optional) */
#if 0
	if (idexttype && fqdn) {
		/* create identity extension (FQDN) */
		struct sadb_ident *id;
		int fqdnlen;

		fqdnlen = strlen(fqdn) + 1;	/* +1 for terminating-NUL */
		id = (struct sadb_ident *)p;
		bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
		id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
		id->sadb_ident_exttype = idexttype;
		id->sadb_ident_type = SADB_IDENTTYPE_FQDN;
		bcopy(fqdn, id + 1, fqdnlen);
		p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen);
	}

	if (idexttype) {
		/* create identity extension (USERFQDN) */
		struct sadb_ident *id;
		int userfqdnlen;

		if (userfqdn) {
			/* +1 for terminating-NUL */
			userfqdnlen = strlen(userfqdn) + 1;
		} else
			userfqdnlen = 0;
		id = (struct sadb_ident *)p;
		bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
		id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
		id->sadb_ident_exttype = idexttype;
		id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN;
		/* XXX is it correct? */
		if (curproc && curproc->p_cred)
			id->sadb_ident_id = curproc->p_cred->p_ruid;
		if (userfqdn && userfqdnlen)
			bcopy(userfqdn, id + 1, userfqdnlen);
		p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen);
	}
#endif

	/* XXX sensitivity (optional) */

	/* create proposal/combination extension */
	m = key_getprop(saidx);
#if 0
	/*
	 * spec conformant: always attach proposal/combination extension,
	 * the problem is that we have no way to attach it for ipcomp,
	 * due to the way sadb_comb is declared in RFC2367.
	 */
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);
#else
	/*
	 * outside of spec; make proposal/combination extension optional.
	 */
	if (m)
		m_cat(result, m);
#endif

	if ((result->m_flags & M_PKTHDR) == 0) {
		error = EINVAL;
		goto fail;
	}

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	/* Recompute the packet-header length over the whole chain. */
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);

 fail:
	/* Free the partially built chain on any failure. */
	if (result)
		m_freem(result);
	return error;
}
/*
 * Create a new ACQUIRE bookkeeping entry for the given SA index and
 * insert it on the global acquire list.  Returns NULL when memory
 * cannot be allocated without sleeping.
 */
static struct secacq *
key_newacq(const struct secasindex *saidx)
{
	struct secacq *acq;

	/* Zeroed, non-sleeping allocation. */
	acq = malloc(sizeof(struct secacq), M_IPSEC_SAQ, M_NOWAIT | M_ZERO);
	if (acq == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return NULL;
	}

	/* Record the SA index this ACQUIRE is for. */
	bcopy(saidx, &acq->saidx, sizeof(acq->saidx));
	/* Sequence numbers wrap back to 1, skipping 0. */
	acq->seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq);
	acq->created = time_second;
	acq->count = 0;

	/* Publish the entry at the head of the acquire list. */
	ACQ_LOCK();
	LIST_INSERT_HEAD(&V_acqtree, acq, chain);
	ACQ_UNLOCK();

	return acq;
}
static struct secacq *
key_getacq(const struct secasindex *saidx)
{
struct secacq *acq;
ACQ_LOCK();
LIST_FOREACH(acq, &V_acqtree, chain) {
if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY))
break;
}
ACQ_UNLOCK();
return acq;
}
/*
 * Look up a pending ACQUIRE entry by its PF_KEY sequence number.
 * Returns NULL when no matching entry exists.
 */
static struct secacq *
key_getacqbyseq(u_int32_t seq)
{
	struct secacq *entry;

	ACQ_LOCK();
	LIST_FOREACH(entry, &V_acqtree, chain)
		if (entry->seq == seq)
			break;
	ACQ_UNLOCK();

	return entry;
}
/*
 * Create a new security-policy ACQUIRE entry for the given policy
 * index and insert it on the SP acquire list.  Returns NULL when
 * memory cannot be allocated without sleeping.
 */
static struct secspacq *
key_newspacq(struct secpolicyindex *spidx)
{
	struct secspacq *spacq;

	/* Zeroed, non-sleeping allocation. */
	spacq = malloc(sizeof(struct secspacq), M_IPSEC_SAQ,
	    M_NOWAIT | M_ZERO);
	if (spacq == NULL) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return NULL;
	}

	/* Record the policy index this request is for. */
	bcopy(spidx, &spacq->spidx, sizeof(spacq->spidx));
	spacq->created = time_second;
	spacq->count = 0;

	/* Publish the entry at the head of the SP acquire list. */
	SPACQ_LOCK();
	LIST_INSERT_HEAD(&V_spacqtree, spacq, chain);
	SPACQ_UNLOCK();

	return spacq;
}
/*
 * Look up a pending SP-acquire entry by exact policy index match.
 *
 * LOCKING: on success this returns with the SPACQ lock still HELD and
 * the caller is responsible for releasing it.  On failure the lock is
 * dropped and NULL is returned.
 */
static struct secspacq *
key_getspacq(struct secpolicyindex *spidx)
{
	struct secspacq *acq;

	SPACQ_LOCK();
	LIST_FOREACH(acq, &V_spacqtree, chain) {
		if (key_cmpspidx_exactly(spidx, &acq->spidx)) {
			/* NB: return holding spacq_lock */
			return acq;
		}
	}
	SPACQ_UNLOCK();
	return NULL;
}
/*
* SADB_ACQUIRE processing,
* in first situation, is receiving
* <base>
* from the ikmpd, and clear sequence of its secasvar entry.
*
* In second situation, is receiving
* <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
* from a user land process, and return
* <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
* to the socket.
*
* m will always be freed.
*/
static int
key_acquire2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	const struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	u_int16_t proto;
	int error;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/*
	 * Error message from KMd.
	 * We assume that if an error occurred in IKEd, the length of the
	 * PFKEY message is equal to the size of the sadb_msg structure.
	 * We do not raise an error even if one occurs in this function.
	 */
	if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) {
		struct secacq *acq;

		/* check sequence number */
		if (mhp->msg->sadb_msg_seq == 0) {
			ipseclog((LOG_DEBUG, "%s: must specify sequence "
				"number.\n", __func__));
			m_freem(m);
			return 0;
		}

		if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) == NULL) {
			/*
			 * the specified larval SA is already gone, or we got
			 * a bogus sequence number.  we can silently ignore it.
			 */
			m_freem(m);
			return 0;
		}

		/* Reset the acq entry so the timer handler deletes it. */
		acq->created = time_second;
		acq->count = 0;
		m_freem(m);
		return 0;
	}

	/*
	 * This message is from user land.
	 */

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/* Addresses and the proposal extension are mandatory. */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    mhp->ext[SADB_EXT_PROPOSAL] == NULL) {
		/* error */
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_PROPOSAL] < sizeof(struct sadb_prop)) {
		/* error */
		ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/*
	 * Make sure the port numbers are zero.
	 * In case of NAT-T we will update them later if needed.
	 */
	KEY_PORTTOSADDR(&saidx.src, 0);
	KEY_PORTTOSADDR(&saidx.dst, 0);

#ifdef IPSEC_NAT_T
	/*
	 * Handle NAT-T info if present.
	 * NB: was "#ifndef IPSEC_NAT_T", which compiled this handling only
	 * when NAT-T support was disabled; every sibling handler in this
	 * file guards the same code with #ifdef.
	 */
	if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
	    mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
		struct sadb_x_nat_t_port *sport, *dport;

		if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
		    mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
			ipseclog((LOG_DEBUG, "%s: invalid message.\n",
			    __func__));
			return key_senderror(so, m, EINVAL);
		}

		sport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_SPORT];
		dport = (struct sadb_x_nat_t_port *)
		    mhp->ext[SADB_X_EXT_NAT_T_DPORT];

		if (sport)
			KEY_PORTTOSADDR(&saidx.src,
			    sport->sadb_x_nat_t_port_port);
		if (dport)
			KEY_PORTTOSADDR(&saidx.dst,
			    dport->sadb_x_nat_t_port_port);
	}
#endif

	/* get a SA index */
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE_REQID))
			break;
	}
	SAHTREE_UNLOCK();
	if (sah != NULL) {
		ipseclog((LOG_DEBUG, "%s: a SA exists already.\n", __func__));
		return key_senderror(so, m, EEXIST);
	}

	error = key_acquire(&saidx, NULL);
	if (error != 0) {
		/*
		 * Log the value key_acquire() actually returned (previously
		 * this printed the request's sadb_msg_errno field instead).
		 */
		ipseclog((LOG_DEBUG, "%s: error %d returned from key_acquire\n",
			__func__, error));
		return key_senderror(so, m, error);
	}

	return key_sendup_mbuf(so, m, KEY_SENDUP_REGISTERED);
}
/*
* SADB_REGISTER processing.
* If SATYPE_UNSPEC has been passed as satype, only return sabd_supported.
* receive
* <base>
* from the ikmpd, and register a socket to send PF_KEY messages,
* and send
* <base, supported>
* to KMD by PF_KEY.
* If socket is detached, must free from regnode.
*
* m will always be freed.
*/
static int
key_register(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct secreg *reg, *newreg = NULL;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* check for invalid register message */
	if (mhp->msg->sadb_msg_satype >= sizeof(V_regtree)/sizeof(V_regtree[0]))
		return key_senderror(so, m, EINVAL);

	/* When SATYPE_UNSPEC is specified, only return sabd_supported. */
	if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC)
		goto setmsg;

	/* check whether this socket is already registered for the satype */
	REGTREE_LOCK();
	LIST_FOREACH(reg, &V_regtree[mhp->msg->sadb_msg_satype], chain) {
		if (reg->so == so) {
			REGTREE_UNLOCK();
			ipseclog((LOG_DEBUG, "%s: socket exists already.\n",
			    __func__));
			return key_senderror(so, m, EEXIST);
		}
	}

	/* create regnode */
	newreg = malloc(sizeof(struct secreg), M_IPSEC_SAR, M_NOWAIT|M_ZERO);
	if (newreg == NULL) {
		REGTREE_UNLOCK();
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return key_senderror(so, m, ENOBUFS);
	}
	newreg->so = so;
	((struct keycb *)sotorawcb(so))->kp_registered++;

	/* add regnode to regtree. */
	LIST_INSERT_HEAD(&V_regtree[mhp->msg->sadb_msg_satype], newreg, chain);
	REGTREE_UNLOCK();

  setmsg:
    {
	struct mbuf *n;
	struct sadb_msg *newmsg;
	struct sadb_supported *sup;
	u_int len, alen, elen;
	int off;
	int i;
	struct sadb_alg *alg;

	/*
	 * Create the reply: a sadb_msg followed by one sadb_supported
	 * block per non-empty algorithm class (auth, then encrypt),
	 * each holding one sadb_alg entry per registered algorithm.
	 */
	alen = 0;
	for (i = 1; i <= SADB_AALG_MAX; i++) {
		if (ah_algorithm_lookup(i))
			alen += sizeof(struct sadb_alg);
	}
	if (alen)
		alen += sizeof(struct sadb_supported);
	elen = 0;
	for (i = 1; i <= SADB_EALG_MAX; i++) {
		if (esp_algorithm_lookup(i))
			elen += sizeof(struct sadb_alg);
	}
	if (elen)
		elen += sizeof(struct sadb_supported);

	len = sizeof(struct sadb_msg) + alen + elen;

	if (len > MCLBYTES)
		return key_senderror(so, m, ENOBUFS);

	/*
	 * MGETHDR with M_NOWAIT may fail; guard the cluster attach with
	 * a NULL check (the same pattern key_parse() uses) instead of
	 * passing a NULL mbuf to MCLGET.
	 */
	MGETHDR(n, M_NOWAIT, MT_DATA);
	if (n != NULL && len > MHLEN) {
		if (!(MCLGET(n, M_NOWAIT))) {
			m_freem(n);
			n = NULL;
		}
	}
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	n->m_pkthdr.len = n->m_len = len;
	n->m_next = NULL;
	off = 0;

	/* echo the request's message header, then fix up error/length */
	m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(len);
	off += PFKEY_ALIGN8(sizeof(struct sadb_msg));

	/* for authentication algorithm */
	if (alen) {
		sup = (struct sadb_supported *)(mtod(n, caddr_t) + off);
		sup->sadb_supported_len = PFKEY_UNIT64(alen);
		sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH;
		off += PFKEY_ALIGN8(sizeof(*sup));

		for (i = 1; i <= SADB_AALG_MAX; i++) {
			struct auth_hash *aalgo;
			u_int16_t minkeysize, maxkeysize;

			aalgo = ah_algorithm_lookup(i);
			if (!aalgo)
				continue;
			alg = (struct sadb_alg *)(mtod(n, caddr_t) + off);
			alg->sadb_alg_id = i;
			alg->sadb_alg_ivlen = 0;
			key_getsizes_ah(aalgo, i, &minkeysize, &maxkeysize);
			alg->sadb_alg_minbits = _BITS(minkeysize);
			alg->sadb_alg_maxbits = _BITS(maxkeysize);
			off += PFKEY_ALIGN8(sizeof(*alg));
		}
	}

	/* for encryption algorithm */
	if (elen) {
		sup = (struct sadb_supported *)(mtod(n, caddr_t) + off);
		sup->sadb_supported_len = PFKEY_UNIT64(elen);
		sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT;
		off += PFKEY_ALIGN8(sizeof(*sup));

		for (i = 1; i <= SADB_EALG_MAX; i++) {
			struct enc_xform *ealgo;

			ealgo = esp_algorithm_lookup(i);
			if (!ealgo)
				continue;
			alg = (struct sadb_alg *)(mtod(n, caddr_t) + off);
			alg->sadb_alg_id = i;
			alg->sadb_alg_ivlen = ealgo->blocksize;
			alg->sadb_alg_minbits = _BITS(ealgo->minkey);
			alg->sadb_alg_maxbits = _BITS(ealgo->maxkey);
			off += PFKEY_ALIGN8(sizeof(struct sadb_alg));
		}
	}
	IPSEC_ASSERT(off == len,
		("length assumption failed (off %u len %u)", off, len));

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_REGISTERED);
    }
}
/*
* free secreg entry registered.
* XXX: I want to do free a socket marked done SADB_RESIGER to socket.
*/
void
key_freereg(struct socket *so)
{
struct secreg *reg;
int i;
IPSEC_ASSERT(so != NULL, ("NULL so"));
/*
* check whether existing or not.
* check all type of SA, because there is a potential that
* one socket is registered to multiple type of SA.
*/
REGTREE_LOCK();
for (i = 0; i <= SADB_SATYPE_MAX; i++) {
LIST_FOREACH(reg, &V_regtree[i], chain) {
if (reg->so == so && __LIST_CHAINED(reg)) {
LIST_REMOVE(reg, chain);
free(reg, M_IPSEC_SAR);
break;
}
}
}
REGTREE_UNLOCK();
}
/*
* SADB_EXPIRE processing
* send
* <base, SA, SA2, lifetime(C and one of HS), address(SD)>
* to KMD by PF_KEY.
* NOTE: We send only soft lifetime extension.
*
* OUT: 0 : succeed
* others : error number
*/
static int
key_expire(struct secasvar *sav)
{
	int satype;
	struct mbuf *result = NULL, *m;
	int len;
	int error = -1;
	struct sadb_lifetime *lt;

	IPSEC_ASSERT (sav != NULL, ("null sav"));
	IPSEC_ASSERT (sav->sah != NULL, ("null sa header"));

	/* set msg header */
	satype = key_proto2satype(sav->sah->saidx.proto);
	IPSEC_ASSERT(satype != 0, ("invalid proto, satype %u", satype));
	m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	/* 'result' owns the whole chain from here on; freed on the fail path */
	result = m;

	/* create SA extension */
	m = key_setsadbsa(sav);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* create SA2 extension (mode, replay count, reqid) */
	m = key_setsadbxsa2(sav->sah->saidx.mode,
			sav->replay ? sav->replay->count : 0,
			sav->sah->saidx.reqid);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/*
	 * Create lifetime extension (current and soft): one mbuf holding
	 * both sadb_lifetime records back to back, zeroed first so the
	 * unset fields read as 0.
	 */
	len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
	m = m_get2(len, M_NOWAIT, MT_DATA, 0);
	if (m == NULL) {
		error = ENOBUFS;
		goto fail;
	}
	m_align(m, len);
	m->m_len = len;
	bzero(mtod(m, caddr_t), len);
	/* first record: CURRENT usage counters */
	lt = mtod(m, struct sadb_lifetime *);
	lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	lt->sadb_lifetime_allocations = sav->lft_c->allocations;
	lt->sadb_lifetime_bytes = sav->lft_c->bytes;
	lt->sadb_lifetime_addtime = sav->lft_c->addtime;
	lt->sadb_lifetime_usetime = sav->lft_c->usetime;
	/* second record: the SOFT limits that triggered this expiry notice */
	lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2);
	lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
	lt->sadb_lifetime_allocations = sav->lft_s->allocations;
	lt->sadb_lifetime_bytes = sav->lft_s->bytes;
	lt->sadb_lifetime_addtime = sav->lft_s->addtime;
	lt->sadb_lifetime_usetime = sav->lft_s->usetime;
	m_cat(result, m);

	/* set sadb_address for source */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &sav->sah->saidx.src.sa,
	    FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* set sadb_address for destination */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &sav->sah->saidx.dst.sa,
	    FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/*
	 * XXX-BZ Handle NAT-T extensions here.
	 */

	/* the head mbuf must carry a packet header for key_sendup_mbuf() */
	if ((result->m_flags & M_PKTHDR) == 0) {
		error = EINVAL;
		goto fail;
	}

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	/* recompute total length and stamp it into the message header */
	result->m_pkthdr.len = 0;
	for (m = result; m; m = m->m_next)
		result->m_pkthdr.len += m->m_len;

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);

 fail:
	if (result)
		m_freem(result);
	return error;
}
/*
* SADB_FLUSH processing
* receive
* <base>
* from the ikmpd, and free all entries in secastree.
* and send,
* <base>
* to the ikmpd.
* NOTE: to do is only marking SADB_SASTATE_DEAD.
*
* m will always be freed.
*/
static int
key_flush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_msg *newmsg;
	struct secashead *sah, *nextsah;
	struct secasvar *sav, *nextsav;
	u_int16_t proto;
	u_int8_t state;
	u_int stateidx;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * Mark every matching SA header and SA dead (SATYPE_UNSPEC means
	 * flush all protocols).  KEY_FREESAV drops the reference; actual
	 * reclamation happens elsewhere.
	 */
	SAHTREE_LOCK();
	for (sah = LIST_FIRST(&V_sahtree);
	     sah != NULL;
	     sah = nextsah) {
		nextsah = LIST_NEXT(sah, chain);

		if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
		 && proto != sah->saidx.proto)
			continue;

		/*
		 * NOTE(review): the bound is _ARRAYLEN(saorder_state_alive)
		 * but the index is into saorder_state_any -- presumably the
		 * arrays share a prefix so this skips the trailing DEAD
		 * state; confirm against the array definitions.
		 */
		for (stateidx = 0;
		     stateidx < _ARRAYLEN(saorder_state_alive);
		     stateidx++) {
			state = saorder_state_any[stateidx];
			for (sav = LIST_FIRST(&sah->savtree[state]);
			     sav != NULL;
			     sav = nextsav) {
				nextsav = LIST_NEXT(sav, chain);

				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
		}

		sah->state = SADB_SASTATE_DEAD;
	}
	SAHTREE_UNLOCK();

	/* reuse the request mbuf as the reply; it must fit a bare header */
	if (m->m_len < sizeof(struct sadb_msg) ||
	    sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) {
		ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
		return key_senderror(so, m, ENOBUFS);
	}

	if (m->m_next)
		m_freem(m->m_next);
	m->m_next = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg);
	newmsg = mtod(m, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);

	return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
}
/*
* SADB_DUMP processing
* dump all entries including status of DEAD in SAD.
* receive
* <base>
* from the ikmpd, and dump all secasvar leaves
* and send,
* <base> .....
* to the ikmpd.
*
* m will always be freed.
*/
static int
key_dump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct secashead *sah;
	struct secasvar *sav;
	u_int16_t proto;
	u_int stateidx;
	u_int8_t satype;
	u_int8_t state;
	int cnt;
	struct sadb_msg *newmsg;
	struct mbuf *n;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
			__func__));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * First pass: count matching SAs so the per-entry messages can
	 * carry a decreasing sequence number (--cnt below).  The SAHTREE
	 * lock is held from here through the second pass, so the count
	 * stays consistent.
	 */
	cnt = 0;
	SAHTREE_LOCK();
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
		 && proto != sah->saidx.proto)
			continue;

		for (stateidx = 0;
		     stateidx < _ARRAYLEN(saorder_state_any);
		     stateidx++) {
			state = saorder_state_any[stateidx];
			LIST_FOREACH(sav, &sah->savtree[state], chain) {
				cnt++;
			}
		}
	}

	if (cnt == 0) {
		SAHTREE_UNLOCK();
		return key_senderror(so, m, ENOENT);
	}

	/* second pass: send one message per SA to the requesting socket */
	newmsg = NULL;
	LIST_FOREACH(sah, &V_sahtree, chain) {
		if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
		 && proto != sah->saidx.proto)
			continue;

		/* map proto to satype */
		if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
			SAHTREE_UNLOCK();
			ipseclog((LOG_DEBUG, "%s: there was invalid proto in "
				"SAD.\n", __func__));
			return key_senderror(so, m, EINVAL);
		}

		for (stateidx = 0;
		     stateidx < _ARRAYLEN(saorder_state_any);
		     stateidx++) {
			state = saorder_state_any[stateidx];
			LIST_FOREACH(sav, &sah->savtree[state], chain) {
				n = key_setdumpsa(sav, SADB_DUMP, satype,
				    --cnt, mhp->msg->sadb_msg_pid);
				if (!n) {
					SAHTREE_UNLOCK();
					return key_senderror(so, m, ENOBUFS);
				}
				key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
			}
		}
	}
	SAHTREE_UNLOCK();

	/* the request mbuf is not echoed back; every reply was built fresh */
	m_freem(m);
	return 0;
}
/*
* SADB_X_PROMISC processing
*
* m will always be freed.
*/
static int
key_promisc(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	int olen;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));

	olen = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len);

	if (olen < sizeof(struct sadb_msg)) {
		/* message shorter than a bare header: malformed */
#if 1
		return key_senderror(so, m, EINVAL);
#else
		m_freem(m);
		return 0;
#endif
	} else if (olen == sizeof(struct sadb_msg)) {
		/*
		 * Bare header: enable/disable promiscuous mode.  The
		 * satype field is (ab)used as the boolean flag (0 or 1).
		 */
		struct keycb *kp;

		if ((kp = (struct keycb *)sotorawcb(so)) == NULL)
			return key_senderror(so, m, EINVAL);
		mhp->msg->sadb_msg_errno = 0;
		switch (mhp->msg->sadb_msg_satype) {
		case 0:
		case 1:
			kp->kp_promisc = mhp->msg->sadb_msg_satype;
			break;
		default:
			return key_senderror(so, m, EINVAL);
		}

		/* send the original message back to everyone */
		mhp->msg->sadb_msg_errno = 0;
		return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
	} else {
		/* strip our header and broadcast the inner payload as is */
		m_adj(m, PFKEY_ALIGN8(sizeof(struct sadb_msg)));

		/* TODO: if sadb_msg_seq is specified, send to specific pid */
		return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
	}
}
/*
 * PF_KEY message dispatch table, indexed by sadb_msg_type.  A NULL slot
 * means the message type is not handled here; key_parse() rejects it
 * with EINVAL.  Every handler consumes (or echoes) the mbuf.
 */
static int (*key_typesw[])(struct socket *, struct mbuf *,
		const struct sadb_msghdr *) = {
	NULL,			/* SADB_RESERVED */
	key_getspi,		/* SADB_GETSPI */
	key_update,		/* SADB_UPDATE */
	key_add,		/* SADB_ADD */
	key_delete,		/* SADB_DELETE */
	key_get,		/* SADB_GET */
	key_acquire2,		/* SADB_ACQUIRE */
	key_register,		/* SADB_REGISTER */
	NULL,			/* SADB_EXPIRE */
	key_flush,		/* SADB_FLUSH */
	key_dump,		/* SADB_DUMP */
	key_promisc,		/* SADB_X_PROMISC */
	NULL,			/* SADB_X_PCHANGE */
	key_spdadd,		/* SADB_X_SPDUPDATE */
	key_spdadd,		/* SADB_X_SPDADD */
	key_spddelete,		/* SADB_X_SPDDELETE */
	key_spdget,		/* SADB_X_SPDGET */
	NULL,			/* SADB_X_SPDACQUIRE */
	key_spddump,		/* SADB_X_SPDDUMP */
	key_spdflush,		/* SADB_X_SPDFLUSH */
	key_spdadd,		/* SADB_X_SPDSETIDX */
	NULL,			/* SADB_X_SPDEXPIRE */
	key_spddelete2,		/* SADB_X_SPDDELETE2 */
};
/*
* parse sadb_msg buffer to process PFKEYv2,
* and create a data to response if needed.
* I think to be dealed with mbuf directly.
* IN:
* msgp : pointer to pointer to a received buffer pulluped.
* This is rewrited to response.
* so : pointer to socket.
* OUT:
* length for buffer to send to user process.
*/
int
key_parse(struct mbuf *m, struct socket *so)
{
	struct sadb_msg *msg;
	struct sadb_msghdr mh;
	u_int orglen;
	int error;
	int target;

	IPSEC_ASSERT(so != NULL, ("null socket"));
	IPSEC_ASSERT(m != NULL, ("null mbuf"));

#if 0	/*kdebug_sadb assumes msg in linear buffer*/
	KEYDEBUG(KEYDEBUG_KEY_DUMP,
		ipseclog((LOG_DEBUG, "%s: passed sadb_msg\n", __func__));
		kdebug_sadb(msg));
#endif

	if (m->m_len < sizeof(struct sadb_msg)) {
		m = m_pullup(m, sizeof(struct sadb_msg));
		if (!m)
			return ENOBUFS;
	}
	msg = mtod(m, struct sadb_msg *);
	orglen = PFKEY_UNUNIT64(msg->sadb_msg_len);
	target = KEY_SENDUP_ONE;

	/*
	 * The packet length must match the length declared in the
	 * sadb_msg header.  (Fixed: the check previously compared
	 * m_pkthdr.len against itself, which was always false and made
	 * the declared-length validation a no-op.)
	 */
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len != orglen) {
		ipseclog((LOG_DEBUG, "%s: invalid message length.\n",__func__));
		PFKEYSTAT_INC(out_invlen);
		error = EINVAL;
		goto senderror;
	}

	if (msg->sadb_msg_version != PF_KEY_V2) {
		ipseclog((LOG_DEBUG, "%s: PF_KEY version %u is mismatched.\n",
		    __func__, msg->sadb_msg_version));
		PFKEYSTAT_INC(out_invver);
		error = EINVAL;
		goto senderror;
	}

	if (msg->sadb_msg_type > SADB_MAX) {
		ipseclog((LOG_DEBUG, "%s: invalid type %u is passed.\n",
		    __func__, msg->sadb_msg_type));
		PFKEYSTAT_INC(out_invmsgtype);
		error = EINVAL;
		goto senderror;
	}

	/* for old-fashioned code - should be nuked */
	if (m->m_pkthdr.len > MCLBYTES) {
		m_freem(m);
		return ENOBUFS;
	}
	/* linearize a multi-mbuf chain into a single (cluster) mbuf */
	if (m->m_next) {
		struct mbuf *n;

		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n && m->m_pkthdr.len > MHLEN) {
			if (!(MCLGET(n, M_NOWAIT))) {
				m_free(n);
				n = NULL;
			}
		}
		if (!n) {
			m_freem(m);
			return ENOBUFS;
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
		n->m_pkthdr.len = n->m_len = m->m_pkthdr.len;
		n->m_next = NULL;
		m_freem(m);
		m = n;
	}

	/* align the mbuf chain so that extensions are in contiguous region. */
	error = key_align(m, &mh);
	if (error)
		return error;

	msg = mh.msg;

	/*
	 * Check SA type: certain message types require a concrete satype,
	 * SPD operations must not carry one, and a few satypes are
	 * recognized but unsupported.
	 */
	switch (msg->sadb_msg_satype) {
	case SADB_SATYPE_UNSPEC:
		switch (msg->sadb_msg_type) {
		case SADB_GETSPI:
		case SADB_UPDATE:
		case SADB_ADD:
		case SADB_DELETE:
		case SADB_GET:
		case SADB_ACQUIRE:
		case SADB_EXPIRE:
			ipseclog((LOG_DEBUG, "%s: must specify satype "
			    "when msg type=%u.\n", __func__,
			    msg->sadb_msg_type));
			PFKEYSTAT_INC(out_invsatype);
			error = EINVAL;
			goto senderror;
		}
		break;
	case SADB_SATYPE_AH:
	case SADB_SATYPE_ESP:
	case SADB_X_SATYPE_IPCOMP:
	case SADB_X_SATYPE_TCPSIGNATURE:
		switch (msg->sadb_msg_type) {
		case SADB_X_SPDADD:
		case SADB_X_SPDDELETE:
		case SADB_X_SPDGET:
		case SADB_X_SPDDUMP:
		case SADB_X_SPDFLUSH:
		case SADB_X_SPDSETIDX:
		case SADB_X_SPDUPDATE:
		case SADB_X_SPDDELETE2:
			ipseclog((LOG_DEBUG, "%s: illegal satype=%u\n",
			    __func__, msg->sadb_msg_type));
			PFKEYSTAT_INC(out_invsatype);
			error = EINVAL;
			goto senderror;
		}
		break;
	case SADB_SATYPE_RSVP:
	case SADB_SATYPE_OSPFV2:
	case SADB_SATYPE_RIPV2:
	case SADB_SATYPE_MIP:
		ipseclog((LOG_DEBUG, "%s: type %u isn't supported.\n",
		    __func__, msg->sadb_msg_satype));
		PFKEYSTAT_INC(out_invsatype);
		error = EOPNOTSUPP;
		goto senderror;
	case 1:	/* XXX: What does it do? */
		if (msg->sadb_msg_type == SADB_X_PROMISC)
			break;
		/*FALLTHROUGH*/
	default:
		ipseclog((LOG_DEBUG, "%s: invalid type %u is passed.\n",
		    __func__, msg->sadb_msg_satype));
		PFKEYSTAT_INC(out_invsatype);
		error = EINVAL;
		goto senderror;
	}

	/* check field of upper layer protocol and address family */
	if (mh.ext[SADB_EXT_ADDRESS_SRC] != NULL
	 && mh.ext[SADB_EXT_ADDRESS_DST] != NULL) {
		struct sadb_address *src0, *dst0;
		u_int plen;

		src0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_SRC]);
		dst0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_DST]);

		/* check upper layer protocol */
		if (src0->sadb_address_proto != dst0->sadb_address_proto) {
			ipseclog((LOG_DEBUG, "%s: upper layer protocol "
				"mismatched.\n", __func__));
			PFKEYSTAT_INC(out_invaddr);
			error = EINVAL;
			goto senderror;
		}

		/* check family */
		if (PFKEY_ADDR_SADDR(src0)->sa_family !=
		    PFKEY_ADDR_SADDR(dst0)->sa_family) {
			ipseclog((LOG_DEBUG, "%s: address family mismatched.\n",
				__func__));
			PFKEYSTAT_INC(out_invaddr);
			error = EINVAL;
			goto senderror;
		}
		if (PFKEY_ADDR_SADDR(src0)->sa_len !=
		    PFKEY_ADDR_SADDR(dst0)->sa_len) {
			ipseclog((LOG_DEBUG, "%s: address struct size "
				"mismatched.\n", __func__));
			PFKEYSTAT_INC(out_invaddr);
			error = EINVAL;
			goto senderror;
		}

		/* sockaddr length must match its family */
		switch (PFKEY_ADDR_SADDR(src0)->sa_family) {
		case AF_INET:
			if (PFKEY_ADDR_SADDR(src0)->sa_len !=
			    sizeof(struct sockaddr_in)) {
				PFKEYSTAT_INC(out_invaddr);
				error = EINVAL;
				goto senderror;
			}
			break;
		case AF_INET6:
			if (PFKEY_ADDR_SADDR(src0)->sa_len !=
			    sizeof(struct sockaddr_in6)) {
				PFKEYSTAT_INC(out_invaddr);
				error = EINVAL;
				goto senderror;
			}
			break;
		default:
			ipseclog((LOG_DEBUG, "%s: unsupported address family\n",
				__func__));
			PFKEYSTAT_INC(out_invaddr);
			error = EAFNOSUPPORT;
			goto senderror;
		}

		switch (PFKEY_ADDR_SADDR(src0)->sa_family) {
		case AF_INET:
			plen = sizeof(struct in_addr) << 3;
			break;
		case AF_INET6:
			plen = sizeof(struct in6_addr) << 3;
			break;
		default:
			plen = 0;	/*fool gcc*/
			break;
		}

		/* check max prefix length */
		if (src0->sadb_address_prefixlen > plen ||
		    dst0->sadb_address_prefixlen > plen) {
			ipseclog((LOG_DEBUG, "%s: illegal prefixlen.\n",
				__func__));
			PFKEYSTAT_INC(out_invaddr);
			error = EINVAL;
			goto senderror;
		}

		/*
		 * prefixlen == 0 is valid because there can be a case when
		 * all addresses are matched.
		 */
	}

	if (msg->sadb_msg_type >= sizeof(key_typesw)/sizeof(key_typesw[0]) ||
	    key_typesw[msg->sadb_msg_type] == NULL) {
		PFKEYSTAT_INC(out_invmsgtype);
		error = EINVAL;
		goto senderror;
	}

	return (*key_typesw[msg->sadb_msg_type])(so, m, &mh);

senderror:
	msg->sadb_msg_errno = error;
	return key_sendup_mbuf(so, m, target);
}
/*
 * Stamp the error code into the echoed request and bounce it back to
 * the sender only.
 */
static int
key_senderror(struct socket *so, struct mbuf *m, int code)
{

	IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg),
	    ("mbuf too small, len %u", m->m_len));

	mtod(m, struct sadb_msg *)->sadb_msg_errno = code;
	return key_sendup_mbuf(so, m, KEY_SENDUP_ONE);
}
/*
* set the pointer to each header into message buffer.
* m will be freed on error.
* XXX larger-than-MCLBYTES extension?
*/
static int
key_align(struct mbuf *m, struct sadb_msghdr *mhp)
{
	struct mbuf *n;
	struct sadb_ext *ext;
	size_t off, end;
	int extlen;
	int toff;

	IPSEC_ASSERT(m != NULL, ("null mbuf"));
	IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
	IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg),
		("mbuf too small, len %u", m->m_len));

	/* initialize */
	bzero(mhp, sizeof(*mhp));

	mhp->msg = mtod(m, struct sadb_msg *);
	mhp->ext[0] = (struct sadb_ext *)mhp->msg;	/*XXX backward compat */

	/* walk each extension in the declared message length */
	end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len);
	extlen = end;	/*just in case extlen is not updated*/
	for (off = sizeof(struct sadb_msg); off < end; off += extlen) {
		/* first pulldown: just the generic ext header, to read its
		 * type and length */
		n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff);
		if (!n) {
			/* m is already freed */
			return ENOBUFS;
		}
		ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff);

		/* set pointer */
		switch (ext->sadb_ext_type) {
		case SADB_EXT_SA:
		case SADB_EXT_ADDRESS_SRC:
		case SADB_EXT_ADDRESS_DST:
		case SADB_EXT_ADDRESS_PROXY:
		case SADB_EXT_LIFETIME_CURRENT:
		case SADB_EXT_LIFETIME_HARD:
		case SADB_EXT_LIFETIME_SOFT:
		case SADB_EXT_KEY_AUTH:
		case SADB_EXT_KEY_ENCRYPT:
		case SADB_EXT_IDENTITY_SRC:
		case SADB_EXT_IDENTITY_DST:
		case SADB_EXT_SENSITIVITY:
		case SADB_EXT_PROPOSAL:
		case SADB_EXT_SUPPORTED_AUTH:
		case SADB_EXT_SUPPORTED_ENCRYPT:
		case SADB_EXT_SPIRANGE:
		case SADB_X_EXT_POLICY:
		case SADB_X_EXT_SA2:
#ifdef IPSEC_NAT_T
		case SADB_X_EXT_NAT_T_TYPE:
		case SADB_X_EXT_NAT_T_SPORT:
		case SADB_X_EXT_NAT_T_DPORT:
		case SADB_X_EXT_NAT_T_OAI:
		case SADB_X_EXT_NAT_T_OAR:
		case SADB_X_EXT_NAT_T_FRAG:
#endif
			/* duplicate check */
			/*
			 * XXX Are there duplication payloads of either
			 * KEY_AUTH or KEY_ENCRYPT ?
			 */
			if (mhp->ext[ext->sadb_ext_type] != NULL) {
				ipseclog((LOG_DEBUG, "%s: duplicate ext_type "
					"%u\n", __func__, ext->sadb_ext_type));
				m_freem(m);
				PFKEYSTAT_INC(out_dupext);
				return EINVAL;
			}
			break;
		default:
			ipseclog((LOG_DEBUG, "%s: invalid ext_type %u\n",
				__func__, ext->sadb_ext_type));
			m_freem(m);
			PFKEYSTAT_INC(out_invexttype);
			return EINVAL;
		}

		extlen = PFKEY_UNUNIT64(ext->sadb_ext_len);

		/* validate the declared length before pulling the full body */
		if (key_validate_ext(ext, extlen)) {
			m_freem(m);
			PFKEYSTAT_INC(out_invlen);
			return EINVAL;
		}

		/* second pulldown: the whole extension, now contiguous */
		n = m_pulldown(m, off, extlen, &toff);
		if (!n) {
			/* m is already freed */
			return ENOBUFS;
		}
		ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff);

		mhp->ext[ext->sadb_ext_type] = ext;
		mhp->extoff[ext->sadb_ext_type] = off;
		mhp->extlen[ext->sadb_ext_type] = extlen;
	}

	/* extensions must exactly fill the declared message length */
	if (off != end) {
		m_freem(m);
		PFKEYSTAT_INC(out_invlen);
		return EINVAL;
	}

	return 0;
}
/*
 * Sanity-check one PF_KEY extension: the declared length must match,
 * fall within the per-type minimum/maximum table, and -- for address
 * and address-identity extensions -- the embedded sockaddr's sa_len
 * must account for the remainder of the extension.
 */
static int
key_validate_ext(const struct sadb_ext *ext, int len)
{
	const struct sockaddr *sa;
	const int sal = offsetof(struct sockaddr, sa_len) + sizeof(sa->sa_len);
	int addrbase;

	if (len != PFKEY_UNUNIT64(ext->sadb_ext_len))
		return EINVAL;

	/* if it does not match minimum/maximum length, bail */
	if (ext->sadb_ext_type >= sizeof(minsize) / sizeof(minsize[0]) ||
	    ext->sadb_ext_type >= sizeof(maxsize) / sizeof(maxsize[0]))
		return EINVAL;
	if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type])
		return EINVAL;
	if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type])
		return EINVAL;

	/*
	 * addrbase is the offset of an embedded sockaddr to validate,
	 * or -1 when this extension type carries none.
	 */
	switch (ext->sadb_ext_type) {
	case SADB_EXT_ADDRESS_SRC:
	case SADB_EXT_ADDRESS_DST:
	case SADB_EXT_ADDRESS_PROXY:
		addrbase = PFKEY_ALIGN8(sizeof(struct sadb_address));
		break;
	case SADB_EXT_IDENTITY_SRC:
	case SADB_EXT_IDENTITY_DST:
		if (((const struct sadb_ident *)ext)->sadb_ident_type ==
		    SADB_X_IDENTTYPE_ADDR)
			addrbase = PFKEY_ALIGN8(sizeof(struct sadb_ident));
		else
			addrbase = -1;
		break;
	default:
		addrbase = -1;
		break;
	}

	if (addrbase >= 0) {
		sa = (const struct sockaddr *)
		    (((const u_int8_t *)ext) + addrbase);
		if (len < addrbase + sal)
			return EINVAL;
		if (addrbase + PFKEY_ALIGN8(sa->sa_len) != len)
			return EINVAL;
	}

	return 0;
}
/*
 * Per-vnet initialization of the SPD/SAD trees; locks, the periodic
 * expiry timer, and statistics are set up for the default vnet only.
 */
void
key_init(void)
{
	int i;

	for (i = 0; i < IPSEC_DIR_MAX; i++)
		TAILQ_INIT(&V_sptree[i]);

	LIST_INIT(&V_sahtree);

	for (i = 0; i <= SADB_SATYPE_MAX; i++)
		LIST_INIT(&V_regtree[i]);

	LIST_INIT(&V_acqtree);
	LIST_INIT(&V_spacqtree);

	/* everything below is global, not per-vnet */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	SPTREE_LOCK_INIT();
	REGTREE_LOCK_INIT();
	SAHTREE_LOCK_INIT();
	ACQ_LOCK_INIT();
	SPACQ_LOCK_INIT();

#ifndef IPSEC_DEBUG2
	callout_init(&key_timer, CALLOUT_MPSAFE);
	callout_reset(&key_timer, hz, key_timehandler, NULL);
#endif /*IPSEC_DEBUG2*/

	/* initialize key statistics */
	keystat.getspi_count = 1;

	printf("IPsec: Initialized Security Association Processing.\n");
}
#ifdef VIMAGE
/*
 * Tear down this vnet's SPD/SAD state: drain all security policies,
 * then free SA headers, registrations, and pending acquire records.
 */
void
key_destroy(void)
{
	TAILQ_HEAD(, secpolicy) drainq;
	struct secpolicy *sp, *nextsp;
	struct secacq *acq, *nextacq;
	struct secspacq *spacq, *nextspacq;
	struct secashead *sah, *nextsah;
	struct secreg *reg;
	int i;

	/* move all policies onto a private queue, then release them
	 * without holding the tree lock */
	TAILQ_INIT(&drainq);
	SPTREE_WLOCK();
	for (i = 0; i < IPSEC_DIR_MAX; i++) {
		TAILQ_CONCAT(&drainq, &V_sptree[i], chain);
	}
	SPTREE_WUNLOCK();
	sp = TAILQ_FIRST(&drainq);
	while (sp != NULL) {
		nextsp = TAILQ_NEXT(sp, chain);
		KEY_FREESP(&sp);
		sp = nextsp;
	}

	SAHTREE_LOCK();
	for (sah = LIST_FIRST(&V_sahtree); sah != NULL; sah = nextsah) {
		nextsah = LIST_NEXT(sah, chain);
		if (__LIST_CHAINED(sah)) {
			LIST_REMOVE(sah, chain);
			free(sah, M_IPSEC_SAH);
		}
	}
	SAHTREE_UNLOCK();

	/*
	 * NOTE(review): the inner break means at most ONE secreg is freed
	 * per satype list; any additional registrations on the same list
	 * would be leaked -- confirm whether multiple entries can exist
	 * here at destroy time.
	 */
	REGTREE_LOCK();
	for (i = 0; i <= SADB_SATYPE_MAX; i++) {
		LIST_FOREACH(reg, &V_regtree[i], chain) {
			if (__LIST_CHAINED(reg)) {
				LIST_REMOVE(reg, chain);
				free(reg, M_IPSEC_SAR);
				break;
			}
		}
	}
	REGTREE_UNLOCK();

	ACQ_LOCK();
	for (acq = LIST_FIRST(&V_acqtree); acq != NULL; acq = nextacq) {
		nextacq = LIST_NEXT(acq, chain);
		if (__LIST_CHAINED(acq)) {
			LIST_REMOVE(acq, chain);
			free(acq, M_IPSEC_SAQ);
		}
	}
	ACQ_UNLOCK();

	SPACQ_LOCK();
	for (spacq = LIST_FIRST(&V_spacqtree); spacq != NULL;
	    spacq = nextspacq) {
		nextspacq = LIST_NEXT(spacq, chain);
		if (__LIST_CHAINED(spacq)) {
			LIST_REMOVE(spacq, chain);
			free(spacq, M_IPSEC_SAQ);
		}
	}
	SPACQ_UNLOCK();
}
#endif
/*
* XXX: maybe This function is called after INBOUND IPsec processing.
*
* Special check for tunnel-mode packets.
* We must make some checks for consistency between inner and outer IP header.
*
* xxx more checks to be provided
*/
int
key_checktunnelsanity(struct secasvar *sav, u_int family, caddr_t src,
    caddr_t dst)
{

	IPSEC_ASSERT(sav->sah != NULL, ("null SA header"));

	/* XXX: inner IP header consistency checks are not implemented;
	 * every packet is accepted. */
	return (1);
}
/* record data transfer on SA, and update timestamps */
void
key_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
{
IPSEC_ASSERT(sav != NULL, ("Null secasvar"));
IPSEC_ASSERT(m != NULL, ("Null mbuf"));
if (!sav->lft_c)
return;
/*
* XXX Currently, there is a difference of bytes size
* between inbound and outbound processing.
*/
sav->lft_c->bytes += m->m_pkthdr.len;
/* to check bytes lifetime is done in key_timehandler(). */
/*
* We use the number of packets as the unit of
* allocations. We increment the variable
* whenever {esp,ah}_{in,out}put is called.
*/
sav->lft_c->allocations++;
/* XXX check for expires? */
/*
* NOTE: We record CURRENT usetime by using wall clock,
* in seconds. HARD and SOFT lifetime are measured by the time
* difference (again in seconds) from usetime.
*
* usetime
* v expire expire
* -----+-----+--------+---> t
* <--------------> HARD
* <-----> SOFT
*/
sav->lft_c->usetime = time_second;
/* XXX check for expires? */
return;
}
/*
 * Move an SA to a new state list; caller must hold the SAHTREE lock.
 * No-op when the SA is already in the requested state.
 */
static void
key_sa_chgstate(struct secasvar *sav, u_int8_t state)
{

	IPSEC_ASSERT(sav != NULL, ("NULL sav"));
	SAHTREE_LOCK_ASSERT();

	if (sav->state == state)
		return;
	if (__LIST_CHAINED(sav))
		LIST_REMOVE(sav, chain);
	sav->state = state;
	LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain);
}
/* Refill the SA's initialization vector with fresh random bytes. */
void
key_sa_stir_iv(struct secasvar *sav)
{

	IPSEC_ASSERT(sav->iv != NULL, ("null IV"));
	key_randomfill(sav->iv, sav->ivlen);
}
/*
* Take one of the kernel's security keys and convert it into a PF_KEY
* structure within an mbuf, suitable for sending up to a waiting
* application in user land.
*
* IN:
* src: A pointer to a kernel security key.
* exttype: Which type of key this is. Refer to the PF_KEY data structures.
* OUT:
* a valid mbuf or NULL indicating an error
*
*/
static struct mbuf *
key_setkey(struct seckey *src, u_int16_t exttype)
{
	struct sadb_key *key;
	struct mbuf *m;
	int len;

	if (src == NULL)
		return NULL;

	/* header plus 8-byte-aligned key material */
	len = PFKEY_ALIGN8(sizeof(struct sadb_key) + _KEYLEN(src));
	if ((m = m_get2(len, M_NOWAIT, MT_DATA, 0)) == NULL)
		return NULL;
	m_align(m, len);
	m->m_len = len;

	key = mtod(m, struct sadb_key *);
	bzero(key, len);
	key->sadb_key_exttype = exttype;
	key->sadb_key_len = PFKEY_UNIT64(len);
	key->sadb_key_bits = src->bits;
	bcopy(src->key_data, _KEYBUF(key), _KEYLEN(src));

	return m;
}
/*
* Take one of the kernel's lifetime data structures and convert it
* into a PF_KEY structure within an mbuf, suitable for sending up to
* a waiting application in user land.
*
* IN:
* src: A pointer to a kernel lifetime structure.
* exttype: Which type of lifetime this is. Refer to the PF_KEY
* data structures for more information.
* OUT:
* a valid mbuf or NULL indicating an error
*
*/
static struct mbuf *
key_setlifetime(struct seclifetime *src, u_int16_t exttype)
{
	struct sadb_lifetime *lt;
	struct mbuf *m;
	const int len = PFKEY_ALIGN8(sizeof(struct sadb_lifetime));

	if (src == NULL)
		return NULL;

	if ((m = m_get2(len, M_NOWAIT, MT_DATA, 0)) == NULL)
		return NULL;
	m_align(m, len);
	m->m_len = len;

	/* zero first, then copy the counters verbatim from the kernel record */
	lt = mtod(m, struct sadb_lifetime *);
	bzero(lt, len);
	lt->sadb_lifetime_exttype = exttype;
	lt->sadb_lifetime_len = PFKEY_UNIT64(len);
	lt->sadb_lifetime_allocations = src->allocations;
	lt->sadb_lifetime_bytes = src->bytes;
	lt->sadb_lifetime_addtime = src->addtime;
	lt->sadb_lifetime_usetime = src->usetime;

	return m;
}
Index: projects/clang360-import/sys/powerpc/pseries/plpar_iommu.c
===================================================================
--- projects/clang360-import/sys/powerpc/pseries/plpar_iommu.c (revision 277808)
+++ projects/clang360-import/sys/powerpc/pseries/plpar_iommu.c (revision 277809)
@@ -1,244 +1,244 @@
/*-
* Copyright (c) 2013, Nathan Whitehorn <nwhitehorn@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/module.h>
#include <sys/vmem.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
#include <machine/bus.h>
#include <powerpc/pseries/phyp-hvcall.h>
#include <powerpc/pseries/plpar_iommu.h>
/* Allocation tag for DMA-window bookkeeping in this file. */
MALLOC_DEFINE(M_PHYPIOMMU, "iommu", "IOMMU data for PAPR LPARs");

/*
 * NOTE(review): nothing in this file references papr_iommu_map; it looks
 * like a stale duplicate of struct iommu_map below -- confirm before
 * removing.
 */
struct papr_iommu_map {
	uint32_t iobn;
	vmem_t *vmem;
	struct papr_iommu_map *next;
};

/* All known IOMMU domains, keyed by I/O bus number (iobn). */
static SLIST_HEAD(iommu_maps, iommu_map) iommu_map_head =
    SLIST_HEAD_INITIALIZER(iommu_map_head);

/* Lazily probed: does firmware implement H_STUFF_TCE?  -1 = not yet known. */
static int papr_supports_stuff_tce = -1;

/* One entry per IOMMU domain; the vmem arena tracks allocated I/O space. */
struct iommu_map {
	uint32_t iobn;
	vmem_t *vmem;
	SLIST_ENTRY(iommu_map) entries;
};

/* Per-tag cookie: the owning domain plus its usable bus-address range. */
struct dma_window {
	struct iommu_map *map;
	bus_addr_t start;
	bus_addr_t end;
};
int
phyp_iommu_set_dma_tag(device_t bus, device_t dev, bus_dma_tag_t tag)
{
	device_t p;
	phandle_t node;
	cell_t dma_acells, dma_scells, dmawindow[6];
	struct iommu_map *i;
	int cell;

	/*
	 * Walk up the device tree looking for a node with a DMA window
	 * property.  Fixed: the previous loop condition
	 * (device_get_parent(p) != NULL) could never leave p == NULL,
	 * so the ENXIO check below was dead code and a tree with no DMA
	 * window fell through to read an uninitialized dmawindow[].
	 */
	for (p = dev; p != NULL; p = device_get_parent(p)) {
		if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
			break;
		if (ofw_bus_has_prop(p, "ibm,dma-window"))
			break;
	}

	if (p == NULL)
		return (ENXIO);

	node = ofw_bus_get_node(p);
	/* fall back to the generic cell-count properties if the
	 * ibm,#dma-* ones are absent */
	if (OF_getprop(node, "ibm,#dma-size-cells", &dma_scells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#size-cells", &dma_scells, sizeof(cell_t));
	if (OF_getprop(node, "ibm,#dma-address-cells", &dma_acells,
	    sizeof(cell_t)) <= 0)
		OF_searchprop(node, "#address-cells", &dma_acells,
		    sizeof(cell_t));

	if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
		OF_getprop(node, "ibm,my-dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));
	else
		OF_getprop(node, "ibm,dma-window", dmawindow,
		    sizeof(cell_t)*(dma_scells + dma_acells + 1));

	struct dma_window *window = malloc(sizeof(struct dma_window),
	    M_PHYPIOMMU, M_WAITOK);
	/* dmawindow[0] is the iobn; the address and size cells follow */
	window->start = 0;
	for (cell = 1; cell < 1 + dma_acells; cell++) {
		window->start <<= 32;
		window->start |= dmawindow[cell];
	}
	window->end = 0;
	for (; cell < 1 + dma_acells + dma_scells; cell++) {
		window->end <<= 32;
		window->end |= dmawindow[cell];
	}
	window->end += window->start;

	if (bootverbose)
		device_printf(dev, "Mapping IOMMU domain %#x\n", dmawindow[0]);
	window->map = NULL;
	SLIST_FOREACH(i, &iommu_map_head, entries) {
		if (i->iobn == dmawindow[0]) {
			window->map = i;
			break;
		}
	}

	if (window->map == NULL) {
		window->map = malloc(sizeof(struct iommu_map), M_PHYPIOMMU,
		    M_WAITOK);
		window->map->iobn = dmawindow[0];
		/*
		 * Allocate IOMMU range beginning at PAGE_SIZE. Some drivers
		 * (em(4), for example) do not like getting mappings at 0.
		 */
		/* NOTE(review): vmem_create with M_NOWAIT can return NULL;
		 * the result is not checked here -- confirm. */
		window->map->vmem = vmem_create("IOMMU mappings", PAGE_SIZE,
		    trunc_page(VMEM_ADDR_MAX) - PAGE_SIZE, PAGE_SIZE, 0,
		    M_BESTFIT | M_NOWAIT);
		SLIST_INSERT_HEAD(&iommu_map_head, window->map, entries);
	}

	/*
	 * Check experimentally whether we can use H_STUFF_TCE. It is required
	 * by the spec but some firmware (e.g. QEMU) does not actually support
	 * it
	 */
	if (papr_supports_stuff_tce == -1)
		papr_supports_stuff_tce = !(phyp_hcall(H_STUFF_TCE,
		    window->map->iobn, 0, 0, 0) == H_FUNCTION);

	bus_dma_tag_set_iommu(tag, bus, window);

	return (0);
}
int
phyp_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary,
void *cookie)
{
struct dma_window *window = cookie;
bus_addr_t minaddr, maxaddr;
bus_addr_t alloced;
bus_size_t allocsize;
int error, i, j;
uint64_t tce;
minaddr = window->start;
maxaddr = window->end;
/* XXX: handle exclusion range in a more useful way */
if (min < maxaddr)
maxaddr = min;
/* XXX: consolidate segs? */
for (i = 0; i < *nsegs; i++) {
allocsize = round_page(segs[i].ds_len +
(segs[i].ds_addr & PAGE_MASK));
error = vmem_xalloc(window->map->vmem, allocsize,
(alignment < PAGE_SIZE) ? PAGE_SIZE : alignment, 0,
boundary, minaddr, maxaddr, M_BESTFIT | M_NOWAIT, &alloced);
if (error != 0) {
panic("VMEM failure: %d\n", error);
return (error);
}
KASSERT(alloced % PAGE_SIZE == 0, ("Alloc not page aligned"));
KASSERT((alloced + (segs[i].ds_addr & PAGE_MASK)) %
alignment == 0,
("Allocated segment does not match alignment constraint"));
tce = trunc_page(segs[i].ds_addr);
tce |= 0x3; /* read/write */
- if (papr_supports_stuff_tce) {
- error = phyp_hcall(H_STUFF_TCE, window->map->iobn,
- alloced, tce, allocsize/PAGE_SIZE);
- } else {
- for (j = 0; j < allocsize; j += PAGE_SIZE)
- error = phyp_hcall(H_PUT_TCE, window->map->iobn,
- alloced + j, tce + j);
+ for (j = 0; j < allocsize; j += PAGE_SIZE) {
+ error = phyp_hcall(H_PUT_TCE, window->map->iobn,
+ alloced + j, tce + j);
+ if (error < 0) {
+ panic("IOMMU mapping error: %d\n", error);
+ return (ENOMEM);
+ }
}
segs[i].ds_addr = alloced + (segs[i].ds_addr & PAGE_MASK);
KASSERT(segs[i].ds_addr > 0, ("Address needs to be positive"));
KASSERT(segs[i].ds_addr + segs[i].ds_len < maxaddr,
("Address not in range"));
if (error < 0) {
panic("IOMMU mapping error: %d\n", error);
return (ENOMEM);
}
}
return (0);
}
/*
 * Tear down IOMMU translations for a set of DMA segments.
 *
 * For each segment, clears the page-granular TCE entries covering its
 * (page-rounded) range — in one H_STUFF_TCE hypercall when the firmware
 * supports it, otherwise one H_PUT_TCE per page — then returns the bus
 * address range to the domain's vmem arena.  Hypercall failures are
 * ignored here; always returns 0.
 */
int
phyp_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie)
{
struct dma_window *window = cookie;
bus_addr_t pageround;
bus_size_t roundedsize;
int i;
bus_addr_t j;
for (i = 0; i < nsegs; i++) {
/* Round the segment out to full pages, as when it was mapped. */
pageround = trunc_page(segs[i].ds_addr);
roundedsize = round_page(segs[i].ds_len +
(segs[i].ds_addr & PAGE_MASK));
if (papr_supports_stuff_tce) {
/* Bulk-invalidate: write TCE 0 to the whole range at once. */
phyp_hcall(H_STUFF_TCE, window->map->iobn, pageround, 0,
roundedsize/PAGE_SIZE);
} else {
/* Fallback: invalidate one page mapping per hypercall. */
for (j = 0; j < roundedsize; j += PAGE_SIZE)
phyp_hcall(H_PUT_TCE, window->map->iobn,
pageround + j, 0);
}
vmem_xfree(window->map->vmem, pageround, roundedsize);
}
return (0);
}
Index: projects/clang360-import/sys/sys/eventhandler.h
===================================================================
--- projects/clang360-import/sys/sys/eventhandler.h (revision 277808)
+++ projects/clang360-import/sys/sys/eventhandler.h (revision 277809)
@@ -1,272 +1,273 @@
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_EVENTHANDLER_H_
#define _SYS_EVENTHANDLER_H_
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/queue.h>
/*
 * Common header of every registered handler; type-specific entries
 * (struct eventhandler_entry_<name>) embed this as their first member.
 * Entries on a list are kept sorted by ee_priority.
 */
struct eventhandler_entry {
TAILQ_ENTRY(eventhandler_entry) ee_link; /* link on el_entries */
int ee_priority; /* invocation order; lower runs first */
#define EHE_DEAD_PRIORITY (-1) /* marks a deregistered entry awaiting prune */
void *ee_arg; /* caller-supplied argument passed to the handler */
};
#ifdef VIMAGE
/*
 * VIMAGE wrapper state: preserves the originally registered function
 * and argument so a per-vnet iterator can be interposed as the actual
 * handler (see vimage_eventhandler_register()).
 */
struct eventhandler_entry_vimage {
void (* func)(void); /* Original function registered. */
void *ee_arg; /* Original argument registered. */
void *sparep[2]; /* spare fields for future use */
};
#endif
/*
 * A named event: the mutex-protected, priority-ordered list of handlers
 * invoked by EVENTHANDLER_INVOKE().  el_runcount counts invocations in
 * progress so dead entries are only pruned once the list is idle.
 */
struct eventhandler_list {
char *el_name; /* event name used for lookup */
int el_flags;
#define EHL_INITTED (1<<0) /* el_lock/el_entries initialized */
u_int el_runcount; /* nested invocations in progress */
struct mtx el_lock; /* protects the list and runcount */
TAILQ_ENTRY(eventhandler_list) el_link; /* link on global list of lists */
TAILQ_HEAD(,eventhandler_entry) el_entries; /* handlers, by priority */
};
/* Opaque handle returned by registration, used to deregister later. */
typedef struct eventhandler_entry *eventhandler_tag;
/* Lock/unlock/assert helpers for an eventhandler_list's mutex. */
#define EHL_LOCK(p) mtx_lock(&(p)->el_lock)
#define EHL_UNLOCK(p) mtx_unlock(&(p)->el_lock)
#define EHL_LOCK_ASSERT(p, x) mtx_assert(&(p)->el_lock, x)
/*
* Macro to invoke the handlers for a given event.
*/
/*
 * Core invocation: called with the list locked; bumps el_runcount, then
 * walks the entries, dropping the list lock around each handler call
 * (so handlers may sleep or deregister) and skipping entries already
 * marked EHE_DEAD_PRIORITY.  When the last nested invocation finishes,
 * dead entries are pruned and the lock is released.
 */
#define _EVENTHANDLER_INVOKE(name, list, ...) do { \
struct eventhandler_entry *_ep; \
struct eventhandler_entry_ ## name *_t; \
\
KASSERT((list)->el_flags & EHL_INITTED, \
("eventhandler_invoke: running non-inited list")); \
EHL_LOCK_ASSERT((list), MA_OWNED); \
(list)->el_runcount++; \
KASSERT((list)->el_runcount > 0, \
("eventhandler_invoke: runcount overflow")); \
CTR0(KTR_EVH, "eventhandler_invoke(\"" __STRING(name) "\")"); \
TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) { \
if (_ep->ee_priority != EHE_DEAD_PRIORITY) { \
EHL_UNLOCK((list)); \
_t = (struct eventhandler_entry_ ## name *)_ep; \
CTR1(KTR_EVH, "eventhandler_invoke: executing %p", \
(void *)_t->eh_func); \
_t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \
EHL_LOCK((list)); \
} \
} \
KASSERT((list)->el_runcount > 0, \
("eventhandler_invoke: runcount underflow")); \
(list)->el_runcount--; \
if ((list)->el_runcount == 0) \
eventhandler_prune_list(list); \
EHL_UNLOCK((list)); \
} while (0)
/*
* Slow handlers are entirely dynamic; lists are created
* when entries are added to them, and thus have no concept of "owner",
*
* Slow handlers need to be declared, but do not need to be defined. The
* declaration must be in scope wherever the handler is to be invoked.
*/
/*
 * Declare the per-event entry type: an eventhandler_entry header plus
 * the typed function pointer _EVENTHANDLER_INVOKE casts to.  The
 * trailing "struct __hack" merely consumes the caller's semicolon.
 */
#define EVENTHANDLER_DECLARE(name, type) \
struct eventhandler_entry_ ## name \
{ \
struct eventhandler_entry ee; \
type eh_func; \
}; \
struct __hack
/*
 * Statically register a handler: emits a SYSINIT (SI_SUB_CONFIGURE,
 * SI_ORDER_ANY) that performs the EVENTHANDLER_REGISTER at boot and
 * stashes the returned tag in a file-local <name>_tag variable.
 */
#define EVENTHANDLER_DEFINE(name, func, arg, priority) \
static eventhandler_tag name ## _tag; \
static void name ## _evh_init(void *ctx) \
{ \
name ## _tag = EVENTHANDLER_REGISTER(name, func, ctx, \
priority); \
} \
SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, \
name ## _evh_init, arg); \
struct __hack
/*
 * Public entry points.  INVOKE and DEREGISTER look the list up by name
 * and quietly do nothing if no handler was ever registered for the
 * event; REGISTER creates the list on first use (NULL list argument).
 */
#define EVENTHANDLER_INVOKE(name, ...) \
do { \
struct eventhandler_list *_el; \
\
if ((_el = eventhandler_find_list(#name)) != NULL) \
_EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__); \
} while (0)
#define EVENTHANDLER_REGISTER(name, func, arg, priority) \
eventhandler_register(NULL, #name, func, arg, priority)
#define EVENTHANDLER_DEREGISTER(name, tag) \
do { \
struct eventhandler_list *_el; \
\
if ((_el = eventhandler_find_list(#name)) != NULL) \
eventhandler_deregister(_el, tag); \
} while(0)
eventhandler_tag eventhandler_register(struct eventhandler_list *list,
const char *name, void *func, void *arg, int priority);
void eventhandler_deregister(struct eventhandler_list *list,
eventhandler_tag tag);
struct eventhandler_list *eventhandler_find_list(const char *name);
void eventhandler_prune_list(struct eventhandler_list *list);
#ifdef VIMAGE
typedef void (*vimage_iterator_func_t)(void *, ...);
eventhandler_tag vimage_eventhandler_register(struct eventhandler_list *list,
const char *name, void *func, void *arg, int priority,
vimage_iterator_func_t);
#endif
/*
* Standard system event queues.
*/
/* Generic priority levels */
#define EVENTHANDLER_PRI_FIRST 0
#define EVENTHANDLER_PRI_ANY 10000
#define EVENTHANDLER_PRI_LAST 20000
/* Shutdown events */
typedef void (*shutdown_fn)(void *, int);
#define SHUTDOWN_PRI_FIRST EVENTHANDLER_PRI_FIRST
#define SHUTDOWN_PRI_DEFAULT EVENTHANDLER_PRI_ANY
#define SHUTDOWN_PRI_LAST EVENTHANDLER_PRI_LAST
EVENTHANDLER_DECLARE(shutdown_pre_sync, shutdown_fn); /* before fs sync */
EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn); /* after fs sync */
EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn);
/* Power state change events */
typedef void (*power_change_fn)(void *);
EVENTHANDLER_DECLARE(power_resume, power_change_fn);
EVENTHANDLER_DECLARE(power_suspend, power_change_fn);
+EVENTHANDLER_DECLARE(power_suspend_early, power_change_fn);
/* Low memory event */
typedef void (*vm_lowmem_handler_t)(void *, int);
#define LOWMEM_PRI_DEFAULT EVENTHANDLER_PRI_FIRST
EVENTHANDLER_DECLARE(vm_lowmem, vm_lowmem_handler_t);
/* Root mounted event */
typedef void (*mountroot_handler_t)(void *);
EVENTHANDLER_DECLARE(mountroot, mountroot_handler_t);
/* File system mount events */
struct mount;
struct vnode;
struct thread;
typedef void (*vfs_mounted_notify_fn)(void *, struct mount *, struct vnode *,
struct thread *);
typedef void (*vfs_unmounted_notify_fn)(void *, struct mount *,
struct thread *);
EVENTHANDLER_DECLARE(vfs_mounted, vfs_mounted_notify_fn);
EVENTHANDLER_DECLARE(vfs_unmounted, vfs_unmounted_notify_fn);
/*
* Process events
* process_fork and exit handlers are called without Giant.
* exec handlers are called with Giant, but that is by accident.
*/
struct proc;
struct image_params;
typedef void (*exitlist_fn)(void *, struct proc *);
typedef void (*forklist_fn)(void *, struct proc *, struct proc *, int);
typedef void (*execlist_fn)(void *, struct proc *, struct image_params *);
typedef void (*proc_ctor_fn)(void *, struct proc *);
typedef void (*proc_dtor_fn)(void *, struct proc *);
typedef void (*proc_init_fn)(void *, struct proc *);
typedef void (*proc_fini_fn)(void *, struct proc *);
EVENTHANDLER_DECLARE(process_ctor, proc_ctor_fn);
EVENTHANDLER_DECLARE(process_dtor, proc_dtor_fn);
EVENTHANDLER_DECLARE(process_init, proc_init_fn);
EVENTHANDLER_DECLARE(process_fini, proc_fini_fn);
EVENTHANDLER_DECLARE(process_exit, exitlist_fn);
EVENTHANDLER_DECLARE(process_fork, forklist_fn);
EVENTHANDLER_DECLARE(process_exec, execlist_fn);
/*
* application dump event
*/
typedef void (*app_coredump_start_fn)(void *, struct thread *, char *name);
typedef void (*app_coredump_progress_fn)(void *, struct thread *td, int byte_count);
typedef void (*app_coredump_finish_fn)(void *, struct thread *td);
typedef void (*app_coredump_error_fn)(void *, struct thread *td, char *msg, ...);
EVENTHANDLER_DECLARE(app_coredump_start, app_coredump_start_fn);
EVENTHANDLER_DECLARE(app_coredump_progress, app_coredump_progress_fn);
EVENTHANDLER_DECLARE(app_coredump_finish, app_coredump_finish_fn);
EVENTHANDLER_DECLARE(app_coredump_error, app_coredump_error_fn);
typedef void (*thread_ctor_fn)(void *, struct thread *);
typedef void (*thread_dtor_fn)(void *, struct thread *);
typedef void (*thread_fini_fn)(void *, struct thread *);
typedef void (*thread_init_fn)(void *, struct thread *);
EVENTHANDLER_DECLARE(thread_ctor, thread_ctor_fn);
EVENTHANDLER_DECLARE(thread_dtor, thread_dtor_fn);
EVENTHANDLER_DECLARE(thread_init, thread_init_fn);
EVENTHANDLER_DECLARE(thread_fini, thread_fini_fn);
typedef void (*uma_zone_chfn)(void *);
EVENTHANDLER_DECLARE(nmbclusters_change, uma_zone_chfn);
EVENTHANDLER_DECLARE(nmbufs_change, uma_zone_chfn);
EVENTHANDLER_DECLARE(maxsockets_change, uma_zone_chfn);
/* Kernel linker file load and unload events */
struct linker_file;
typedef void (*kld_load_fn)(void *, struct linker_file *);
typedef void (*kld_unload_fn)(void *, const char *, caddr_t, size_t);
typedef void (*kld_unload_try_fn)(void *, struct linker_file *, int *);
EVENTHANDLER_DECLARE(kld_load, kld_load_fn);
EVENTHANDLER_DECLARE(kld_unload, kld_unload_fn);
EVENTHANDLER_DECLARE(kld_unload_try, kld_unload_try_fn);
/* Generic graphics framebuffer interface */
struct fb_info;
typedef void (*register_framebuffer_fn)(void *, struct fb_info *);
typedef void (*unregister_framebuffer_fn)(void *, struct fb_info *);
EVENTHANDLER_DECLARE(register_framebuffer, register_framebuffer_fn);
EVENTHANDLER_DECLARE(unregister_framebuffer, unregister_framebuffer_fn);
#endif /* _SYS_EVENTHANDLER_H_ */
Index: projects/clang360-import/sys/ufs/ufs/ufs_quota.c
===================================================================
--- projects/clang360-import/sys/ufs/ufs/ufs_quota.c (revision 277808)
+++ projects/clang360-import/sys/ufs/ufs/ufs_quota.c (revision 277809)
@@ -1,1843 +1,1847 @@
/*-
* Copyright (c) 1982, 1986, 1990, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Robert Elz at The University of Melbourne.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ffs.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
CTASSERT(sizeof(struct dqblk64) == sizeof(struct dqhdr64));
static int unprivileged_get_quota = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_get_quota, CTLFLAG_RW,
&unprivileged_get_quota, 0,
"Unprivileged processes may retrieve quotas for other uids and gids");
static MALLOC_DEFINE(M_DQUOT, "ufs_quota", "UFS quota entries");
/*
* Quota name to error message mapping.
*/
static char *quotatypes[] = INITQFNAMES;
static int chkdqchg(struct inode *, ufs2_daddr_t, struct ucred *, int, int *);
static int chkiqchg(struct inode *, int, struct ucred *, int, int *);
static int dqopen(struct vnode *, struct ufsmount *, int);
static int dqget(struct vnode *,
u_long, struct ufsmount *, int, struct dquot **);
static int dqsync(struct vnode *, struct dquot *);
static int dqflush(struct vnode *);
static int quotaoff1(struct thread *td, struct mount *mp, int type);
static int quotaoff_inchange(struct thread *td, struct mount *mp, int type);
/* conversion functions - from_to() */
static void dqb32_dq(const struct dqblk32 *, struct dquot *);
static void dqb64_dq(const struct dqblk64 *, struct dquot *);
static void dq_dqb32(const struct dquot *, struct dqblk32 *);
static void dq_dqb64(const struct dquot *, struct dqblk64 *);
static void dqb32_dqb64(const struct dqblk32 *, struct dqblk64 *);
static void dqb64_dqb32(const struct dqblk64 *, struct dqblk32 *);
#ifdef DIAGNOSTIC
static void dqref(struct dquot *);
static void chkdquot(struct inode *);
#endif
/*
* Set up the quotas for an inode.
*
* This routine completely defines the semantics of quotas.
* If other criterion want to be used to establish quotas, the
* MAXQUOTAS value in quota.h should be increased, and the
* additional dquots set up here.
*/
/*
 * Set up the user and group dquot references for an inode, based on its
 * i_uid and i_gid.  A dqget() failure of EINVAL means quotas are simply
 * not enabled for that type and is not treated as an error.  Returns 0
 * on success (including the quotas-disabled cases) or a dqget() error.
 */
int
getinoquota(struct inode *ip)
{
struct ufsmount *ump;
struct vnode *vp;
int error;
vp = ITOV(ip);
/*
 * Disk quotas must be turned off for system files. Currently
 * snapshot and quota files.
 */
if ((vp->v_vflag & VV_SYSTEM) != 0)
return (0);
/*
 * XXX: Turn off quotas for files with a negative UID or GID.
 * This prevents the creation of 100GB+ quota files.
 */
if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
return (0);
ump = VFSTOUFS(vp->v_mount);
/*
 * Set up the user quota based on file uid.
 * EINVAL means that quotas are not enabled.
 */
if ((error =
dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
error != EINVAL)
return (error);
/*
 * Set up the group quota based on file gid.
 * EINVAL means that quotas are not enabled.
 */
if ((error =
dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
error != EINVAL)
return (error);
return (0);
}
/*
* Update disk usage, and take corrective action.
*/
/*
 * Update disk block usage by 'change' for every quota type attached to
 * the inode, enforcing limits via chkdqchg() unless FORCE is set or the
 * credential holds PRIV_VFS_EXCEEDQUOTA.  Negative changes always
 * succeed and clamp usage at zero.  On a limit failure for one type,
 * updates already applied to lower-numbered types are rolled back.
 * Returns 0 or EDQUOT.
 */
int
chkdq(struct inode *ip, ufs2_daddr_t change, struct ucred *cred, int flags)
{
struct dquot *dq;
ufs2_daddr_t ncurblocks;
struct vnode *vp = ITOV(ip);
int i, error, warn, do_check;
/*
 * Disk quotas must be turned off for system files. Currently
 * snapshot and quota files.
 */
if ((vp->v_vflag & VV_SYSTEM) != 0)
return (0);
/*
 * XXX: Turn off quotas for files with a negative UID or GID.
 * This prevents the creation of 100GB+ quota files.
 */
if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
return (0);
#ifdef DIAGNOSTIC
if ((flags & CHOWN) == 0)
chkdquot(ip);
#endif
if (change == 0)
return (0);
/* Releasing blocks: cannot fail; clamp at zero and clear DQ_BLKS. */
if (change < 0) {
for (i = 0; i < MAXQUOTAS; i++) {
if ((dq = ip->i_dquot[i]) == NODQUOT)
continue;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkdq1");
ncurblocks = dq->dq_curblocks + change;
if (ncurblocks >= 0)
dq->dq_curblocks = ncurblocks;
else
dq->dq_curblocks = 0;
dq->dq_flags &= ~DQ_BLKS;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
}
return (0);
}
/* Limits are enforced unless forced or privileged to exceed quota. */
if ((flags & FORCE) == 0 &&
priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
do_check = 1;
else
do_check = 0;
for (i = 0; i < MAXQUOTAS; i++) {
if ((dq = ip->i_dquot[i]) == NODQUOT)
continue;
warn = 0;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkdq2");
if (do_check) {
/* On EDQUOT, chkdqchg() has already dropped the DQI lock. */
error = chkdqchg(ip, change, cred, i, &warn);
if (error) {
/*
 * Roll back user quota changes when
 * group quota failed.
 */
while (i > 0) {
--i;
dq = ip->i_dquot[i];
if (dq == NODQUOT)
continue;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkdq3");
ncurblocks = dq->dq_curblocks - change;
if (ncurblocks >= 0)
dq->dq_curblocks = ncurblocks;
else
dq->dq_curblocks = 0;
dq->dq_flags &= ~DQ_BLKS;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
}
return (error);
}
}
/* Reset timer when crossing soft limit */
if (dq->dq_curblocks + change >= dq->dq_bsoftlimit &&
dq->dq_curblocks < dq->dq_bsoftlimit)
dq->dq_btime = time_second + ip->i_ump->um_btime[i];
dq->dq_curblocks += change;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
if (warn)
uprintf("\n%s: warning, %s disk quota exceeded\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[i]);
}
return (0);
}
/*
* Check for a valid change to a users allocation.
* Issue an error message if appropriate.
*/
/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
 *
 * Called with the dquot's DQI lock held.  Lock contract is asymmetric:
 * on EDQUOT this function drops the DQI lock before returning; on a
 * 0 return the lock is still held (the caller applies the change and
 * unlocks).  *warn is set when the owner just crossed the soft limit.
 */
static int
chkdqchg(struct inode *ip, ufs2_daddr_t change, struct ucred *cred,
int type, int *warn)
{
struct dquot *dq = ip->i_dquot[type];
ufs2_daddr_t ncurblocks = dq->dq_curblocks + change;
/*
 * If user would exceed their hard limit, disallow space allocation.
 */
if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
/* Complain only once (DQ_BLKS) and only to the file's owner. */
if ((dq->dq_flags & DQ_BLKS) == 0 &&
ip->i_uid == cred->cr_uid) {
dq->dq_flags |= DQ_BLKS;
DQI_UNLOCK(dq);
uprintf("\n%s: write failed, %s disk limit reached\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[type]);
return (EDQUOT);
}
DQI_UNLOCK(dq);
return (EDQUOT);
}
/*
 * If user is over their soft limit for too long, disallow space
 * allocation. Reset time limit as they cross their soft limit.
 */
if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
if (dq->dq_curblocks < dq->dq_bsoftlimit) {
/* Just crossing the soft limit: start the grace timer. */
dq->dq_btime = time_second + ip->i_ump->um_btime[type];
if (ip->i_uid == cred->cr_uid)
*warn = 1;
return (0);
}
if (time_second > dq->dq_btime) {
/* Grace period expired: treat like a hard-limit hit. */
if ((dq->dq_flags & DQ_BLKS) == 0 &&
ip->i_uid == cred->cr_uid) {
dq->dq_flags |= DQ_BLKS;
DQI_UNLOCK(dq);
uprintf("\n%s: write failed, %s "
"disk quota exceeded for too long\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[type]);
return (EDQUOT);
}
DQI_UNLOCK(dq);
return (EDQUOT);
}
}
return (0);
}
/*
* Check the inode limit, applying corrective action.
*/
/*
 * Update inode usage by 'change' for every quota type attached to the
 * inode, enforcing limits via chkiqchg() unless FORCE is set or the
 * credential holds PRIV_VFS_EXCEEDQUOTA.  Negative changes always
 * succeed and clamp usage at zero.  On a limit failure for one type,
 * updates already applied to lower-numbered types are rolled back.
 * Returns 0 or EDQUOT.  Mirrors chkdq() but for inode counts.
 */
int
chkiq(struct inode *ip, int change, struct ucred *cred, int flags)
{
struct dquot *dq;
int i, error, warn, do_check;
#ifdef DIAGNOSTIC
if ((flags & CHOWN) == 0)
chkdquot(ip);
#endif
if (change == 0)
return (0);
/* Releasing inodes: cannot fail; clamp at zero and clear DQ_INODS. */
if (change < 0) {
for (i = 0; i < MAXQUOTAS; i++) {
if ((dq = ip->i_dquot[i]) == NODQUOT)
continue;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkiq1");
if (dq->dq_curinodes >= -change)
dq->dq_curinodes += change;
else
dq->dq_curinodes = 0;
dq->dq_flags &= ~DQ_INODS;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
}
return (0);
}
/* Limits are enforced unless forced or privileged to exceed quota. */
if ((flags & FORCE) == 0 &&
priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
do_check = 1;
else
do_check = 0;
for (i = 0; i < MAXQUOTAS; i++) {
if ((dq = ip->i_dquot[i]) == NODQUOT)
continue;
warn = 0;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkiq2");
if (do_check) {
/* On EDQUOT, chkiqchg() has already dropped the DQI lock. */
error = chkiqchg(ip, change, cred, i, &warn);
if (error) {
/*
 * Roll back user quota changes when
 * group quota failed.
 */
while (i > 0) {
--i;
dq = ip->i_dquot[i];
if (dq == NODQUOT)
continue;
DQI_LOCK(dq);
DQI_WAIT(dq, PINOD+1, "chkiq3");
if (dq->dq_curinodes >= change)
dq->dq_curinodes -= change;
else
dq->dq_curinodes = 0;
dq->dq_flags &= ~DQ_INODS;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
}
return (error);
}
}
/* Reset timer when crossing soft limit */
if (dq->dq_curinodes + change >= dq->dq_isoftlimit &&
dq->dq_curinodes < dq->dq_isoftlimit)
dq->dq_itime = time_second + ip->i_ump->um_itime[i];
dq->dq_curinodes += change;
dq->dq_flags |= DQ_MOD;
DQI_UNLOCK(dq);
if (warn)
uprintf("\n%s: warning, %s inode quota exceeded\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[i]);
}
return (0);
}
/*
* Check for a valid change to a users allocation.
* Issue an error message if appropriate.
*/
/*
 * Check for a valid change to a users allocation.
 * Issue an error message if appropriate.
 *
 * Called with the dquot's DQI lock held.  Same asymmetric lock contract
 * as chkdqchg(): the DQI lock is dropped on the EDQUOT paths but still
 * held on a 0 return.  NOTE(review): ncurinodes is ino_t; presumably
 * unsigned wrap cannot occur because callers only pass positive
 * 'change' here — confirm.
 */
static int
chkiqchg(struct inode *ip, int change, struct ucred *cred, int type, int *warn)
{
struct dquot *dq = ip->i_dquot[type];
ino_t ncurinodes = dq->dq_curinodes + change;
/*
 * If user would exceed their hard limit, disallow inode allocation.
 */
if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) {
/* Complain only once (DQ_INODS) and only to the file's owner. */
if ((dq->dq_flags & DQ_INODS) == 0 &&
ip->i_uid == cred->cr_uid) {
dq->dq_flags |= DQ_INODS;
DQI_UNLOCK(dq);
uprintf("\n%s: write failed, %s inode limit reached\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[type]);
return (EDQUOT);
}
DQI_UNLOCK(dq);
return (EDQUOT);
}
/*
 * If user is over their soft limit for too long, disallow inode
 * allocation. Reset time limit as they cross their soft limit.
 */
if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) {
if (dq->dq_curinodes < dq->dq_isoftlimit) {
/* Just crossing the soft limit: start the grace timer. */
dq->dq_itime = time_second + ip->i_ump->um_itime[type];
if (ip->i_uid == cred->cr_uid)
*warn = 1;
return (0);
}
if (time_second > dq->dq_itime) {
/* Grace period expired: treat like a hard-limit hit. */
if ((dq->dq_flags & DQ_INODS) == 0 &&
ip->i_uid == cred->cr_uid) {
dq->dq_flags |= DQ_INODS;
DQI_UNLOCK(dq);
uprintf("\n%s: write failed, %s "
"inode quota exceeded for too long\n",
ITOV(ip)->v_mount->mnt_stat.f_mntonname,
quotatypes[type]);
return (EDQUOT);
}
DQI_UNLOCK(dq);
return (EDQUOT);
}
}
return (0);
}
#ifdef DIAGNOSTIC
/*
* On filesystems with quotas enabled, it is an error for a file to change
* size and not to have a dquot structure associated with it.
*/
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 * DIAGNOSTIC-only sanity check; panics if an inode subject to an active
 * quota type has no attached dquot.
 */
static void
chkdquot(struct inode *ip)
{
struct ufsmount *ump = ip->i_ump;
struct vnode *vp = ITOV(ip);
int i;
/*
 * Disk quotas must be turned off for system files. Currently
 * these are snapshots and quota files.
 */
if ((vp->v_vflag & VV_SYSTEM) != 0)
return;
/*
 * XXX: Turn off quotas for files with a negative UID or GID.
 * This prevents the creation of 100GB+ quota files.
 */
if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
return;
UFS_LOCK(ump);
for (i = 0; i < MAXQUOTAS; i++) {
/* Skip types that are disabled or mid-quotaon/quotaoff. */
if (ump->um_quotas[i] == NULLVP ||
(ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
continue;
if (ip->i_dquot[i] == NODQUOT) {
UFS_UNLOCK(ump);
vprint("chkdquot: missing dquot", ITOV(ip));
panic("chkdquot: missing dquot");
}
}
UFS_UNLOCK(ump);
}
#endif
/*
* Code to process quotactl commands.
*/
/*
* Q_QUOTAON - set up a quota file for a particular filesystem.
*/
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
struct ufsmount *ump;
struct vnode *vp, **vpp;
struct vnode *mvp;
struct dquot *dq;
int error, flags;
struct nameidata nd;
error = priv_check(td, PRIV_UFS_QUOTAON);
- if (error)
+ if (error != 0) {
+ vfs_unbusy(mp);
return (error);
+ }
- if (mp->mnt_flag & MNT_RDONLY)
+ if ((mp->mnt_flag & MNT_RDONLY) != 0) {
+ vfs_unbusy(mp);
return (EROFS);
+ }
ump = VFSTOUFS(mp);
dq = NODQUOT;
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
flags = FREAD | FWRITE;
vfs_ref(mp);
vfs_unbusy(mp);
error = vn_open(&nd, &flags, 0, NULL);
if (error != 0) {
vfs_rel(mp);
return (error);
}
NDFREE(&nd, NDF_ONLY_PNBUF);
vp = nd.ni_vp;
error = vfs_busy(mp, MBF_NOWAIT);
vfs_rel(mp);
if (error == 0) {
if (vp->v_type != VREG) {
error = EACCES;
vfs_unbusy(mp);
}
}
if (error != 0) {
VOP_UNLOCK(vp, 0);
(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
return (error);
}
UFS_LOCK(ump);
if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
UFS_UNLOCK(ump);
VOP_UNLOCK(vp, 0);
(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
vfs_unbusy(mp);
return (EALREADY);
}
ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
UFS_UNLOCK(ump);
if ((error = dqopen(vp, ump, type)) != 0) {
VOP_UNLOCK(vp, 0);
UFS_LOCK(ump);
ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
UFS_UNLOCK(ump);
(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
vfs_unbusy(mp);
return (error);
}
VOP_UNLOCK(vp, 0);
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_QUOTA;
MNT_IUNLOCK(mp);
vpp = &ump->um_quotas[type];
if (*vpp != vp)
quotaoff1(td, mp, type);
/*
* When the directory vnode containing the quota file is
* inactivated, due to the shared lookup of the quota file
* vput()ing the dvp, the qsyncvp() call for the containing
* directory would try to acquire the quota lock exclusive.
* At the same time, lookup already locked the quota vnode
* shared. Mark the quota vnode lock as allowing recursion
* and automatically converting shared locks to exclusive.
*
* Also mark quota vnode as system.
*/
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vp->v_vflag |= VV_SYSTEM;
VN_LOCK_AREC(vp);
VN_LOCK_DSHARE(vp);
VOP_UNLOCK(vp, 0);
*vpp = vp;
/*
* Save the credential of the process that turned on quotas.
* Set up the time limits for this quota.
*/
ump->um_cred[type] = crhold(td->td_ucred);
ump->um_btime[type] = MAX_DQ_TIME;
ump->um_itime[type] = MAX_IQ_TIME;
if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
if (dq->dq_btime > 0)
ump->um_btime[type] = dq->dq_btime;
if (dq->dq_itime > 0)
ump->um_itime[type] = dq->dq_itime;
dqrele(NULLVP, dq);
}
/*
* Allow the getdq from getinoquota below to read the quota
* from file.
*/
UFS_LOCK(ump);
ump->um_qflags[type] &= ~QTF_CLOSING;
UFS_UNLOCK(ump);
/*
* Search vnodes associated with this mount point,
* adding references to quota file being opened.
* NB: only need to add dquot's for inodes being modified.
*/
again:
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
goto again;
}
if (vp->v_type == VNON || vp->v_writecount == 0) {
VOP_UNLOCK(vp, 0);
vrele(vp);
continue;
}
error = getinoquota(VTOI(vp));
VOP_UNLOCK(vp, 0);
vrele(vp);
if (error) {
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
break;
}
}
if (error)
quotaoff_inchange(td, mp, type);
UFS_LOCK(ump);
ump->um_qflags[type] &= ~QTF_OPENING;
KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
("quotaon: leaking flags"));
UFS_UNLOCK(ump);
vfs_unbusy(mp);
return (error);
}
/*
* Main code to turn off disk quotas for a filesystem. Does not change
* flags.
*/
/*
 * Main code to turn off disk quotas for a filesystem. Does not change
 * flags.
 *
 * Caller must have set QTF_CLOSING for 'type' (asserted).  Drops every
 * inode's dquot reference for this type, flushes the dquot cache for
 * the quota vnode, detaches it from the ufsmount, and closes it.
 */
static int
quotaoff1(struct thread *td, struct mount *mp, int type)
{
struct vnode *vp;
struct vnode *qvp, *mvp;
struct ufsmount *ump;
struct dquot *dq;
struct inode *ip;
struct ucred *cr;
int error;
ump = VFSTOUFS(mp);
UFS_LOCK(ump);
KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
("quotaoff1: flags are invalid"));
/* Nothing to do if this quota type was never enabled. */
if ((qvp = ump->um_quotas[type]) == NULLVP) {
UFS_UNLOCK(ump);
return (0);
}
cr = ump->um_cred[type];
UFS_UNLOCK(ump);
/*
 * Search vnodes associated with this mount point,
 * deleting any references to quota file being closed.
 */
again:
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
if (vp->v_type == VNON) {
VI_UNLOCK(vp);
continue;
}
/* vget failure means the iteration raced; restart the walk. */
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
goto again;
}
ip = VTOI(vp);
dq = ip->i_dquot[type];
ip->i_dquot[type] = NODQUOT;
dqrele(vp, dq);
VOP_UNLOCK(vp, 0);
vrele(vp);
}
error = dqflush(qvp);
if (error != 0)
return (error);
/*
 * Clear um_quotas before closing the quota vnode to prevent
 * access to the closed vnode from dqget/dqsync
 */
UFS_LOCK(ump);
ump->um_quotas[type] = NULLVP;
ump->um_cred[type] = NOCRED;
UFS_UNLOCK(ump);
/* Strip the system flag set by quotaon() and close the quota file. */
vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY);
qvp->v_vflag &= ~VV_SYSTEM;
VOP_UNLOCK(qvp, 0);
error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
crfree(cr);
return (error);
}
/*
* Turns off quotas, assumes that ump->um_qflags are already checked
* and QTF_CLOSING is set to indicate operation in progress. Fixes
* ump->um_qflags and mp->mnt_flag after.
*/
/*
 * Turns off quotas, assumes that ump->um_qflags are already checked
 * and QTF_CLOSING is set to indicate operation in progress. Fixes
 * ump->um_qflags and mp->mnt_flag after.
 *
 * Clears MNT_QUOTA only once no quota type remains enabled on the
 * mount.  Returns the quotaoff1() result.
 */
int
quotaoff_inchange(struct thread *td, struct mount *mp, int type)
{
struct ufsmount *ump;
int i;
int error;
error = quotaoff1(td, mp, type);
ump = VFSTOUFS(mp);
UFS_LOCK(ump);
ump->um_qflags[type] &= ~QTF_CLOSING;
/* If every quota type is now off, drop the mount's quota flag. */
for (i = 0; i < MAXQUOTAS; i++)
if (ump->um_quotas[i] != NULLVP)
break;
if (i == MAXQUOTAS) {
MNT_ILOCK(mp);
mp->mnt_flag &= ~MNT_QUOTA;
MNT_IUNLOCK(mp);
}
UFS_UNLOCK(ump);
return (error);
}
/*
* Q_QUOTAOFF - turn off disk quotas for a filesystem.
*/
/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 *
 * Performs the privilege check, atomically claims the quota type for
 * closing, and delegates the actual teardown to quotaoff_inchange().
 */
int
quotaoff(struct thread *td, struct mount *mp, int type)
{
	struct ufsmount *ump;
	int err;

	/* Only privileged callers may disable quotas. */
	err = priv_check(td, PRIV_UFS_QUOTAOFF);
	if (err != 0)
		return (err);

	ump = VFSTOUFS(mp);

	/*
	 * Mark this quota type as closing under the UFS lock; refuse if
	 * a quotaon/quotaoff for the same type is already in flight.
	 */
	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING | QTF_CLOSING)) != 0) {
		UFS_UNLOCK(ump);
		return (EALREADY);
	}
	ump->um_qflags[type] |= QTF_CLOSING;
	UFS_UNLOCK(ump);

	return (quotaoff_inchange(td, mp, type));
}
/*
* Q_GETQUOTA - return current values in a dqblk structure.
*/
static int
_getquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
        struct dquot *dq;
        int error;

        switch (type) {
        case USRQUOTA:
                /*
                 * A user may always read his own record; reading another
                 * user's record requires privilege unless the
                 * unprivileged_get_quota knob is enabled.
                 */
                if ((td->td_ucred->cr_uid != id) && !unprivileged_get_quota) {
                        error = priv_check(td, PRIV_VFS_GETQUOTA);
                        if (error)
                                return (error);
                }
                break;
        case GRPQUOTA:
                /* Likewise, group members may read their group's record. */
                if (!groupmember(id, td->td_ucred) &&
                    !unprivileged_get_quota) {
                        error = priv_check(td, PRIV_VFS_GETQUOTA);
                        if (error)
                                return (error);
                }
                break;
        default:
                return (EINVAL);
        }

        /* Fetch the in-core dquot and copy out its on-disk image. */
        dq = NODQUOT;
        error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
        if (error)
                return (error);
        *dqb = dq->dq_dqb;
        dqrele(NULLVP, dq);
        return (error);
}
/*
* Q_SETQUOTA - assign an entire dqblk structure.
*/
static int
_setquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
        struct dquot *dq;
        struct dquot *ndq;
        struct ufsmount *ump;
        struct dqblk64 newlim;
        int error;

        /* Assigning limits always requires privilege. */
        error = priv_check(td, PRIV_VFS_SETQUOTA);
        if (error)
                return (error);

        newlim = *dqb;
        ndq = NODQUOT;
        ump = VFSTOUFS(mp);

        error = dqget(NULLVP, id, ump, type, &ndq);
        if (error)
                return (error);
        dq = ndq;
        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+1, "setqta");
        /*
         * Copy all but the current values.
         * Reset time limit if previously had no soft limit or were
         * under it, but now have a soft limit and are over it.
         */
        newlim.dqb_curblocks = dq->dq_curblocks;
        newlim.dqb_curinodes = dq->dq_curinodes;
        if (dq->dq_id != 0) {
                newlim.dqb_btime = dq->dq_btime;
                newlim.dqb_itime = dq->dq_itime;
        }
        if (newlim.dqb_bsoftlimit &&
            dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
            (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
                newlim.dqb_btime = time_second + ump->um_btime[type];
        if (newlim.dqb_isoftlimit &&
            dq->dq_curinodes >= newlim.dqb_isoftlimit &&
            (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
                newlim.dqb_itime = time_second + ump->um_itime[type];
        dq->dq_dqb = newlim;
        /* Clear over-limit warning flags that no longer apply. */
        if (dq->dq_curblocks < dq->dq_bsoftlimit)
                dq->dq_flags &= ~DQ_BLKS;
        if (dq->dq_curinodes < dq->dq_isoftlimit)
                dq->dq_flags &= ~DQ_INODS;
        /* With no limits at all, the entry only tracks usage. */
        if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
                dq->dq_flags |= DQ_FAKE;
        else
                dq->dq_flags &= ~DQ_FAKE;
        /* Mark the record dirty so dqsync() writes it back. */
        dq->dq_flags |= DQ_MOD;
        DQI_UNLOCK(dq);
        dqrele(NULLVP, dq);
        return (0);
}
/*
* Q_SETUSE - set current inode and block usage.
*/
static int
_setuse(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
        struct dquot *dq;
        struct ufsmount *ump;
        struct dquot *ndq;
        struct dqblk64 usage;
        int error;

        /* Overriding recorded usage always requires privilege. */
        error = priv_check(td, PRIV_UFS_SETUSE);
        if (error)
                return (error);

        usage = *dqb;
        ump = VFSTOUFS(mp);
        ndq = NODQUOT;

        error = dqget(NULLVP, id, ump, type, &ndq);
        if (error)
                return (error);
        dq = ndq;
        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+1, "setuse");
        /*
         * Reset time limit if have a soft limit and were
         * previously under it, but are now over it.
         */
        if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
            usage.dqb_curblocks >= dq->dq_bsoftlimit)
                dq->dq_btime = time_second + ump->um_btime[type];
        if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
            usage.dqb_curinodes >= dq->dq_isoftlimit)
                dq->dq_itime = time_second + ump->um_itime[type];
        /* Only the current-usage fields are taken from the caller. */
        dq->dq_curblocks = usage.dqb_curblocks;
        dq->dq_curinodes = usage.dqb_curinodes;
        /* Clear over-limit warning flags that no longer apply. */
        if (dq->dq_curblocks < dq->dq_bsoftlimit)
                dq->dq_flags &= ~DQ_BLKS;
        if (dq->dq_curinodes < dq->dq_isoftlimit)
                dq->dq_flags &= ~DQ_INODS;
        /* Mark the record dirty so dqsync() writes it back. */
        dq->dq_flags |= DQ_MOD;
        DQI_UNLOCK(dq);
        dqrele(NULLVP, dq);
        return (0);
}
/*
 * Fetch a quota record and return it to userland in the legacy
 * 32-bit dqblk format.
 */
int
getquota32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk64 native;
        struct dqblk32 legacy;
        int err;

        if ((err = _getquota(td, mp, id, type, &native)) != 0)
                return (err);
        /* Narrow the 64-bit record before copying it out. */
        dqb64_dqb32(&native, &legacy);
        return (copyout(&legacy, addr, sizeof(legacy)));
}
/*
 * Assign a full quota record supplied by userland in the legacy
 * 32-bit dqblk format.
 */
int
setquota32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk32 legacy;
        struct dqblk64 native;
        int err;

        if ((err = copyin(addr, &legacy, sizeof(legacy))) != 0)
                return (err);
        /* Widen the caller's record, then apply it. */
        dqb32_dqb64(&legacy, &native);
        return (_setquota(td, mp, id, type, &native));
}
/*
 * Set current usage from a userland record in the legacy 32-bit
 * dqblk format.
 */
int
setuse32(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk32 legacy;
        struct dqblk64 native;
        int err;

        if ((err = copyin(addr, &legacy, sizeof(legacy))) != 0)
                return (err);
        /* Widen the caller's record, then apply it. */
        dqb32_dqb64(&legacy, &native);
        return (_setuse(td, mp, id, type, &native));
}
/*
 * Fetch a quota record and return it to userland in the native
 * 64-bit dqblk format.
 */
int
getquota(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk64 rec;
        int err;

        if ((err = _getquota(td, mp, id, type, &rec)) != 0)
                return (err);
        return (copyout(&rec, addr, sizeof(rec)));
}
/*
 * Assign a full quota record supplied by userland in the native
 * 64-bit dqblk format.
 */
int
setquota(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk64 rec;
        int err;

        if ((err = copyin(addr, &rec, sizeof(rec))) != 0)
                return (err);
        return (_setquota(td, mp, id, type, &rec));
}
/*
 * Set current usage from a userland record in the native 64-bit
 * dqblk format.
 */
int
setuse(struct thread *td, struct mount *mp, u_long id, int type, void *addr)
{
        struct dqblk64 rec;
        int err;

        if ((err = copyin(addr, &rec, sizeof(rec))) != 0)
                return (err);
        return (_setuse(td, mp, id, type, &rec));
}
/*
* Q_GETQUOTASIZE - get bit-size of quota file fields
*/
int
getquotasize(struct thread *td, struct mount *mp, u_long id, int type,
    void *sizep)
{
        struct ufsmount *ump = VFSTOUFS(mp);
        int bitsize;

        UFS_LOCK(ump);
        /* Quotas must be enabled, and not shutting down, for this type. */
        if (ump->um_quotas[type] == NULLVP ||
            (ump->um_qflags[type] & QTF_CLOSING)) {
                UFS_UNLOCK(ump);
                return (EINVAL);
        }
        /* Report 64 or 32 depending on the detected quota file format. */
        if ((ump->um_qflags[type] & QTF_64BIT) != 0)
                bitsize = 64;
        else
                bitsize = 32;
        UFS_UNLOCK(ump);
        return (copyout(&bitsize, sizep, sizeof(int)));
}
/*
* Q_SYNC - sync quota files to disk.
*/
int
qsync(struct mount *mp)
{
        struct ufsmount *ump = VFSTOUFS(mp);
        struct thread *td = curthread;          /* XXX */
        struct vnode *vp, *mvp;
        struct dquot *dq;
        int i, error;

        /*
         * Check if the mount point has any quotas.
         * If not, simply return.
         */
        for (i = 0; i < MAXQUOTAS; i++)
                if (ump->um_quotas[i] != NULLVP)
                        break;
        if (i == MAXQUOTAS)
                return (0);
        /*
         * Search vnodes associated with this mount point,
         * synchronizing any modified dquot structures.
         */
again:
        MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
                if (vp->v_type == VNON) {
                        VI_UNLOCK(vp);
                        continue;
                }
                error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
                if (error) {
                        /*
                         * ENOENT here means the vnode was recycled
                         * out from under us; restart the whole scan.
                         * Any other failure just skips this vnode.
                         */
                        if (error == ENOENT) {
                                MNT_VNODE_FOREACH_ACTIVE_ABORT(mp, mvp);
                                goto again;
                        }
                        continue;
                }
                /* Write back every modified dquot held by this inode. */
                for (i = 0; i < MAXQUOTAS; i++) {
                        dq = VTOI(vp)->i_dquot[i];
                        if (dq != NODQUOT)
                                dqsync(vp, dq);
                }
                vput(vp);
        }
        return (0);
}
/*
* Sync quota file for given vnode to disk.
*/
int
qsyncvp(struct vnode *vp)
{
struct ufsmount *ump = VFSTOUFS(vp->v_mount);
struct dquot *dq;
int i;
/*
* Check if the mount point has any quotas.
* If not, simply return.
*/
for (i = 0; i < MAXQUOTAS; i++)
if (ump->um_quotas[i] != NULLVP)
break;
if (i == MAXQUOTAS)
return (0);
/*
* Search quotas associated with this vnode
* synchronizing any modified dquot structures.
*/
for (i = 0; i < MAXQUOTAS; i++) {
dq = VTOI(vp)->i_dquot[i];
if (dq != NODQUOT)
dqsync(vp, dq);
}
return (0);
}
/*
* Code pertaining to management of the in-core dquot data structures.
*/
/*
 * Hash chains are keyed on both the quota file vnode and the id so
 * that records from different quota files never alias.  The vnode
 * pointer is shifted right to discard its low alignment bits before
 * being masked with dqhash.
 */
#define DQHASH(dqvp, id) \
        (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash])
static LIST_HEAD(dqhash, dquot) *dqhashtbl;     /* hash chain heads */
static u_long dqhash;                           /* hash mask, set by hashinit() */
/*
 * Dquot free list.
 */
#define DQUOTINC 5 /* minimum free dquots desired */
static TAILQ_HEAD(dqfreelist, dquot) dqfreelist;
static long numdquot, desireddquot = DQUOTINC;
/*
 * Lock to protect quota hash, dq free list and dq_cnt ref counters of
 * _all_ dqs.
 */
struct mtx dqhlock;
#define DQH_LOCK() mtx_lock(&dqhlock)
#define DQH_UNLOCK() mtx_unlock(&dqhlock)
static struct dquot *dqhashfind(struct dqhash *dqh, u_long id,
    struct vnode *dqvp);
/*
* Initialize the quota system.
*/
void
dqinit(void)
{

        mtx_init(&dqhlock, "dqhlock", NULL, MTX_DEF);
        /* Hash table is sized relative to the system vnode target. */
        dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash);
        TAILQ_INIT(&dqfreelist);
}
/*
* Shut down the quota system.
*/
void
dquninit(void)
{
        struct dquot *dq;

        hashdestroy(dqhashtbl, M_DQUOT, dqhash);
        /* Free whatever dquots remain on the free list. */
        while ((dq = TAILQ_FIRST(&dqfreelist)) != NULL) {
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                mtx_destroy(&dq->dq_lock);
                free(dq, M_DQUOT);
        }
        /* The hash lock goes last, after all dquots are gone. */
        mtx_destroy(&dqhlock);
}
/*
 * Look up a dquot in the hash chain; on a hit the dquot is referenced
 * (and pulled off the free list if it was unreferenced).  Returns
 * NODQUOT on a miss.  Caller must hold dqhlock.
 */
static struct dquot *
dqhashfind(struct dqhash *dqh, u_long id, struct vnode *dqvp)
{
        struct dquot *dq;

        mtx_assert(&dqhlock, MA_OWNED);
        LIST_FOREACH(dq, dqh, dq_hash) {
                /* Match on both the id and the owning quota file vnode. */
                if (dq->dq_id != id ||
                    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
                        continue;
                /*
                 * Cache hit with no references. Take
                 * the structure off the free list.
                 */
                if (dq->dq_cnt == 0)
                        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                DQREF(dq);
                return (dq);
        }
        return (NODQUOT);
}
/*
* Determine the quota file type.
*
* A 32-bit quota file is simply an array of struct dqblk32.
*
* A 64-bit quota file is a struct dqhdr64 followed by an array of struct
* dqblk64. The header contains various magic bits which allow us to be
reasonably confident that it is indeed a 64-bit quota file and not just
* a 32-bit quota file that just happens to "look right".
*
*/
static int
dqopen(struct vnode *vp, struct ufsmount *ump, int type)
{
        struct dqhdr64 dqh;
        struct iovec aiov;
        struct uio auio;
        int error;

        ASSERT_VOP_LOCKED(vp, "dqopen");
        /* Read what would be the 64-bit header from the file's start. */
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = &dqh;
        aiov.iov_len = sizeof(dqh);
        auio.uio_resid = sizeof(dqh);
        auio.uio_offset = 0;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_READ;
        auio.uio_td = (struct thread *)0;

        error = VOP_READ(vp, &auio, 0, ump->um_cred[type]);
        if (error != 0)
                return (error);
        /* Short read: the file is too small to hold a 64-bit header. */
        if (auio.uio_resid > 0) {
                /* assume 32 bits */
                return (0);
        }

        UFS_LOCK(ump);
        /* Validate magic, version and the recorded structure sizes. */
        if (strcmp(dqh.dqh_magic, Q_DQHDR64_MAGIC) == 0 &&
            be32toh(dqh.dqh_version) == Q_DQHDR64_VERSION &&
            be32toh(dqh.dqh_hdrlen) == (uint32_t)sizeof(struct dqhdr64) &&
            be32toh(dqh.dqh_reclen) == (uint32_t)sizeof(struct dqblk64)) {
                /* XXX: what if the magic matches, but the sizes are wrong? */
                ump->um_qflags[type] |= QTF_64BIT;
        } else {
                ump->um_qflags[type] &= ~QTF_64BIT;
        }
        UFS_UNLOCK(ump);
        return (0);
}
/*
* Obtain a dquot structure for the specified identifier and quota file
* reading the information from the file if necessary.
*/
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
        uint8_t buf[sizeof(struct dqblk64)];
        off_t base, recsize;
        struct dquot *dq, *dq1;
        struct dqhash *dqh;
        struct vnode *dqvp;
        struct iovec aiov;
        struct uio auio;
        int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
        if (vp != NULLVP)
                ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

        /* The caller's inode already holds a dquot; nothing to do. */
        if (vp != NULLVP && *dqp != NODQUOT) {
                return (0);
        }
        /* XXX: Disallow negative id values to prevent the
         * creation of 100GB+ quota data files.
         */
        if ((int)id < 0)
                return (EINVAL);

        UFS_LOCK(ump);
        dqvp = ump->um_quotas[type];
        if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
                *dqp = NODQUOT;
                UFS_UNLOCK(ump);
                return (EINVAL);
        }
        /* Hold the quota file vnode across the unlocked sections below. */
        vref(dqvp);
        UFS_UNLOCK(ump);
        error = 0;
        dqvplocked = 0;

        /*
         * Check the cache first.
         */
        dqh = DQHASH(dqvp, id);
        DQH_LOCK();
        dq = dqhashfind(dqh, id, dqvp);
        if (dq != NULL) {
                DQH_UNLOCK();
hfound:         DQI_LOCK(dq);
                /* Wait out any in-flight read of this dquot. */
                DQI_WAIT(dq, PINOD+1, "dqget");
                DQI_UNLOCK(dq);
                if (dq->dq_ump == NULL) {
                        /* The dquot was invalidated while we waited. */
                        dqrele(vp, dq);
                        dq = NODQUOT;
                        error = EIO;
                }
                *dqp = dq;
                if (dqvplocked)
                        vput(dqvp);
                else
                        vrele(dqvp);
                return (error);
        }

        /*
         * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
         * since new dq will appear on the hash chain DQ_LOCKed.
         */
        if (vp != dqvp) {
                DQH_UNLOCK();
                vn_lock(dqvp, LK_SHARED | LK_RETRY);
                dqvplocked = 1;
                DQH_LOCK();
                /*
                 * Recheck the cache after sleep for quota vnode lock.
                 */
                dq = dqhashfind(dqh, id, dqvp);
                if (dq != NULL) {
                        DQH_UNLOCK();
                        goto hfound;
                }
        }

        /*
         * Not in cache, allocate a new one or take it from the
         * free list.
         */
        if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
            numdquot < MAXQUOTAS * desiredvnodes)
                desireddquot += DQUOTINC;
        if (numdquot < desireddquot) {
                /* Grow the pool: allocate a fresh dquot. */
                numdquot++;
                DQH_UNLOCK();
                dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
                mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
                DQH_LOCK();
                /*
                 * Recheck the cache after sleep for memory.
                 */
                dq = dqhashfind(dqh, id, dqvp);
                if (dq != NULL) {
                        /* Lost the race; discard our fresh allocation. */
                        numdquot--;
                        DQH_UNLOCK();
                        mtx_destroy(&dq1->dq_lock);
                        free(dq1, M_DQUOT);
                        goto hfound;
                }
                dq = dq1;
        } else {
                /* Recycle the oldest unreferenced dquot, if any. */
                if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
                        DQH_UNLOCK();
                        tablefull("dquot");
                        *dqp = NODQUOT;
                        if (dqvplocked)
                                vput(dqvp);
                        else
                                vrele(dqvp);
                        return (EUSERS);
                }
                if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
                        panic("dqget: free dquot isn't %p", dq);
                TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
                if (dq->dq_ump != NULL)
                        LIST_REMOVE(dq, dq_hash);
        }

        /*
         * Dq is put into hash already locked to prevent parallel
         * usage while it is being read from file.
         */
        dq->dq_flags = DQ_LOCK;
        dq->dq_id = id;
        dq->dq_type = type;
        dq->dq_ump = ump;
        LIST_INSERT_HEAD(dqh, dq, dq_hash);
        DQREF(dq);
        DQH_UNLOCK();

        /*
         * Read the requested quota record from the quota file, performing
         * any necessary conversions.
         */
        if (ump->um_qflags[type] & QTF_64BIT) {
                recsize = sizeof(struct dqblk64);
                base = sizeof(struct dqhdr64);
        } else {
                recsize = sizeof(struct dqblk32);
                base = 0;
        }
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = buf;
        aiov.iov_len = recsize;
        auio.uio_resid = recsize;
        auio.uio_offset = base + id * recsize;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_READ;
        auio.uio_td = (struct thread *)0;

        error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
        /* A record past EOF reads as all zeroes: no usage, no limits. */
        if (auio.uio_resid == recsize && error == 0) {
                bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
        } else {
                if (ump->um_qflags[type] & QTF_64BIT)
                        dqb64_dq((struct dqblk64 *)buf, dq);
                else
                        dqb32_dq((struct dqblk32 *)buf, dq);
        }
        if (dqvplocked)
                vput(dqvp);
        else
                vrele(dqvp);
        /*
         * I/O error in reading quota file, release
         * quota structure and reflect problem to caller.
         */
        if (error) {
                DQH_LOCK();
                dq->dq_ump = NULL;
                LIST_REMOVE(dq, dq_hash);
                DQH_UNLOCK();
                DQI_LOCK(dq);
                if (dq->dq_flags & DQ_WANT)
                        wakeup(dq);
                dq->dq_flags = 0;
                DQI_UNLOCK(dq);
                dqrele(vp, dq);
                *dqp = NODQUOT;
                return (error);
        }

        DQI_LOCK(dq);
        /*
         * Check for no limit to enforce.
         * Initialize time values if necessary.
         */
        if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
                dq->dq_flags |= DQ_FAKE;
        if (dq->dq_id != 0) {
                if (dq->dq_btime == 0) {
                        dq->dq_btime = time_second + ump->um_btime[type];
                        if (dq->dq_bsoftlimit &&
                            dq->dq_curblocks >= dq->dq_bsoftlimit)
                                dq->dq_flags |= DQ_MOD;
                }
                if (dq->dq_itime == 0) {
                        dq->dq_itime = time_second + ump->um_itime[type];
                        if (dq->dq_isoftlimit &&
                            dq->dq_curinodes >= dq->dq_isoftlimit)
                                dq->dq_flags |= DQ_MOD;
                }
        }
        /* Drop the DQ_LOCK taken before insertion into the hash. */
        DQI_WAKEUP(dq);
        DQI_UNLOCK(dq);
        *dqp = dq;
        return (0);
}
#ifdef DIAGNOSTIC
/*
 * Obtain a reference to a dquot.
 */
static void
dqref(struct dquot *dq)
{

        /* dq_cnt is protected by dqhlock (see lock comment above). */
        dq->dq_cnt++;
}
#endif
/*
* Release a reference to a dquot.
*/
void
dqrele(struct vnode *vp, struct dquot *dq)
{

        if (dq == NODQUOT)
                return;
        DQH_LOCK();
        KASSERT(dq->dq_cnt > 0, ("Lost dq %p reference 1", dq));
        /* Not the last reference: just drop the count. */
        if (dq->dq_cnt > 1) {
                dq->dq_cnt--;
                DQH_UNLOCK();
                return;
        }
        DQH_UNLOCK();
sync:
        /* Possibly the last reference: write the record back first. */
        (void) dqsync(vp, dq);

        DQH_LOCK();
        KASSERT(dq->dq_cnt > 0, ("Lost dq %p reference 2", dq));
        /* Someone re-referenced the dquot while we were syncing. */
        if (--dq->dq_cnt > 0)
        {
                DQH_UNLOCK();
                return;
        }

        /*
         * The dq may become dirty after it is synced but before it is
         * put to the free list. Checking the DQ_MOD there without
         * locking dq should be safe since no other references to the
         * dq exist.
         */
        if ((dq->dq_flags & DQ_MOD) != 0) {
                dq->dq_cnt++;
                DQH_UNLOCK();
                goto sync;
        }
        TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist);
        DQH_UNLOCK();
}
/*
* Update the disk quota in the quota file.
*/
static int
dqsync(struct vnode *vp, struct dquot *dq)
{
        uint8_t buf[sizeof(struct dqblk64)];
        off_t base, recsize;
        struct vnode *dqvp;
        struct iovec aiov;
        struct uio auio;
        int error;
        struct mount *mp;
        struct ufsmount *ump;

#ifdef DEBUG_VFS_LOCKS
        if (vp != NULL)
                ASSERT_VOP_ELOCKED(vp, "dqsync");
#endif

        mp = NULL;
        error = 0;
        if (dq == NODQUOT)
                panic("dqsync: dquot");
        /* A detached (invalidated) dquot has nothing to write back. */
        if ((ump = dq->dq_ump) == NULL)
                return (0);
        UFS_LOCK(ump);
        if ((dqvp = ump->um_quotas[dq->dq_type]) == NULLVP)
                panic("dqsync: file");
        /* Hold the quota file vnode across the unlocked work below. */
        vref(dqvp);
        UFS_UNLOCK(ump);

        /* Fast path: record not modified, nothing to do. */
        DQI_LOCK(dq);
        if ((dq->dq_flags & DQ_MOD) == 0) {
                DQI_UNLOCK(dq);
                vrele(dqvp);
                return (0);
        }
        DQI_UNLOCK(dq);

        (void) vn_start_secondary_write(dqvp, &mp, V_WAIT);
        if (vp != dqvp)
                vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);

        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+2, "dqsync");
        /* Someone else may have synced the record while we slept. */
        if ((dq->dq_flags & DQ_MOD) == 0)
                goto out;
        dq->dq_flags |= DQ_LOCK;
        DQI_UNLOCK(dq);

        /*
         * Write the quota record to the quota file, performing any
         * necessary conversions. See dqget() for additional details.
         */
        if (ump->um_qflags[dq->dq_type] & QTF_64BIT) {
                dq_dqb64(dq, (struct dqblk64 *)buf);
                recsize = sizeof(struct dqblk64);
                base = sizeof(struct dqhdr64);
        } else {
                dq_dqb32(dq, (struct dqblk32 *)buf);
                recsize = sizeof(struct dqblk32);
                base = 0;
        }
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        aiov.iov_base = buf;
        aiov.iov_len = recsize;
        auio.uio_resid = recsize;
        auio.uio_offset = base + dq->dq_id * recsize;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        auio.uio_td = (struct thread *)0;

        error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
        /* A short write leaves the record inconsistent: report EIO. */
        if (auio.uio_resid && error == 0)
                error = EIO;

        DQI_LOCK(dq);
        DQI_WAKEUP(dq);
        dq->dq_flags &= ~DQ_MOD;
out:
        DQI_UNLOCK(dq);
        if (vp != dqvp)
                vput(dqvp);
        else
                vrele(dqvp);
        vn_finished_secondary_write(mp);
        return (error);
}
/*
* Flush all entries from the cache for a particular vnode.
*/
static int
dqflush(struct vnode *vp)
{
        struct dquot *dq, *nextdq;
        struct dqhash *dqh;
        int error;

        /*
         * Move all dquot's that used to refer to this quota
         * file off their hash chains (they will eventually
         * fall off the head of the free list and be re-used).
         */
        error = 0;
        DQH_LOCK();
        /* Walk every hash chain from the last bucket down to the first. */
        for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) {
                for (dq = LIST_FIRST(dqh); dq; dq = nextdq) {
                        nextdq = LIST_NEXT(dq, dq_hash);
                        if (dq->dq_ump->um_quotas[dq->dq_type] != vp)
                                continue;
                        /* A still-referenced dquot blocks the flush. */
                        if (dq->dq_cnt)
                                error = EBUSY;
                        else {
                                LIST_REMOVE(dq, dq_hash);
                                dq->dq_ump = NULL;
                        }
                }
        }
        DQH_UNLOCK();
        return (error);
}
/*
* The following three functions are provided for the adjustment of
* quotas by the soft updates code.
*/
#ifdef SOFTUPDATES
/*
* Acquire a reference to the quota structures associated with a vnode.
* Return count of number of quota structures found.
*/
int
quotaref(vp, qrp)
        struct vnode *vp;
        struct dquot **qrp;
{
        struct inode *ip;
        struct dquot *dq;
        int i, found;

        /* Start with an empty result array. */
        for (i = 0; i < MAXQUOTAS; i++)
                qrp[i] = NODQUOT;
        /*
         * Disk quotas must be turned off for system files. Currently
         * snapshot and quota files.
         */
        if ((vp->v_vflag & VV_SYSTEM) != 0)
                return (0);
        /*
         * Iterate through and copy active quotas.
         */
        found = 0;
        ip = VTOI(vp);
        /* dqhlock protects the dq_cnt counters bumped by DQREF(). */
        mtx_lock(&dqhlock);
        for (i = 0; i < MAXQUOTAS; i++) {
                if ((dq = ip->i_dquot[i]) == NODQUOT)
                        continue;
                DQREF(dq);
                qrp[i] = dq;
                found++;
        }
        mtx_unlock(&dqhlock);
        return (found);
}
/*
* Release a set of quota structures obtained from a vnode.
*/
/*
 * Release a set of quota structures obtained from a vnode.
 * Drops the reference that quotaref() took on each held dquot.
 */
void
quotarele(struct dquot **qrp)
{
        struct dquot *dqp;
        int qtype;

        for (qtype = 0; qtype < MAXQUOTAS; qtype++) {
                dqp = qrp[qtype];
                if (dqp != NODQUOT)
                        dqrele(NULL, dqp);
        }
}
/*
* Adjust the number of blocks associated with a quota.
* Positive numbers when adding blocks; negative numbers when freeing blocks.
*/
void
quotaadj(qrp, ump, blkcount)
        struct dquot **qrp;
        struct ufsmount *ump;
        int64_t blkcount;
{
        struct dquot *dq;
        ufs2_daddr_t ncurblocks;
        int i;

        if (blkcount == 0)
                return;
        for (i = 0; i < MAXQUOTAS; i++) {
                if ((dq = qrp[i]) == NODQUOT)
                        continue;
                DQI_LOCK(dq);
                DQI_WAIT(dq, PINOD+1, "adjqta");
                /* Apply the delta, clamping the result at zero. */
                ncurblocks = dq->dq_curblocks + blkcount;
                if (ncurblocks >= 0)
                        dq->dq_curblocks = ncurblocks;
                else
                        dq->dq_curblocks = 0;
                if (blkcount < 0)
                        dq->dq_flags &= ~DQ_BLKS;
                else if (dq->dq_curblocks + blkcount >= dq->dq_bsoftlimit &&
                    dq->dq_curblocks < dq->dq_bsoftlimit)
                        /*
                         * NOTE(review): dq_curblocks was already updated
                         * above, so this test adds blkcount a second time;
                         * the apparent intent is "was under the soft limit,
                         * now at/over it" -- verify against upstream before
                         * relying on or changing this condition.
                         */
                        dq->dq_btime = time_second + ump->um_btime[i];
                dq->dq_flags |= DQ_MOD;
                DQI_UNLOCK(dq);
        }
}
#endif /* SOFTUPDATES */
/*
* 32-bit / 64-bit conversion functions.
*
* 32-bit quota records are stored in native byte order. Attention must
* be paid to overflow issues.
*
* 64-bit quota records are stored in network byte order.
*/
/*
 * Clamp a 64-bit quantity to UINT32_MAX.  The macro argument is fully
 * parenthesized so that compound expressions (e.g. ternaries) expand
 * with the intended precedence and the (uint32_t) cast applies to the
 * whole argument rather than only its first operand.
 */
#define CLIP32(u64) ((u64) > UINT32_MAX ? UINT32_MAX : (uint32_t)(u64))
/*
* Convert 32-bit host-order structure to dquot.
*/
/*
 * Load an in-core dquot from a 32-bit host-order on-disk record,
 * widening each field.
 */
static void
dqb32_dq(const struct dqblk32 *d32, struct dquot *dq)
{

        dq->dq_bhardlimit = d32->dqb_bhardlimit;
        dq->dq_bsoftlimit = d32->dqb_bsoftlimit;
        dq->dq_curblocks = d32->dqb_curblocks;
        dq->dq_ihardlimit = d32->dqb_ihardlimit;
        dq->dq_isoftlimit = d32->dqb_isoftlimit;
        dq->dq_curinodes = d32->dqb_curinodes;
        dq->dq_btime = d32->dqb_btime;
        dq->dq_itime = d32->dqb_itime;
}
/*
* Convert 64-bit network-order structure to dquot.
*/
/*
 * Load an in-core dquot from a 64-bit network-order on-disk record,
 * converting each field to host order.
 */
static void
dqb64_dq(const struct dqblk64 *d64, struct dquot *dq)
{

        dq->dq_bhardlimit = be64toh(d64->dqb_bhardlimit);
        dq->dq_bsoftlimit = be64toh(d64->dqb_bsoftlimit);
        dq->dq_curblocks = be64toh(d64->dqb_curblocks);
        dq->dq_ihardlimit = be64toh(d64->dqb_ihardlimit);
        dq->dq_isoftlimit = be64toh(d64->dqb_isoftlimit);
        dq->dq_curinodes = be64toh(d64->dqb_curinodes);
        dq->dq_btime = be64toh(d64->dqb_btime);
        dq->dq_itime = be64toh(d64->dqb_itime);
}
/*
* Convert dquot to 32-bit host-order structure.
*/
/*
 * Store an in-core dquot as a 32-bit host-order on-disk record,
 * clamping each field to UINT32_MAX.
 */
static void
dq_dqb32(const struct dquot *dq, struct dqblk32 *d32)
{

        d32->dqb_bhardlimit = CLIP32(dq->dq_bhardlimit);
        d32->dqb_bsoftlimit = CLIP32(dq->dq_bsoftlimit);
        d32->dqb_curblocks = CLIP32(dq->dq_curblocks);
        d32->dqb_ihardlimit = CLIP32(dq->dq_ihardlimit);
        d32->dqb_isoftlimit = CLIP32(dq->dq_isoftlimit);
        d32->dqb_curinodes = CLIP32(dq->dq_curinodes);
        d32->dqb_btime = CLIP32(dq->dq_btime);
        d32->dqb_itime = CLIP32(dq->dq_itime);
}
/*
* Convert dquot to 64-bit network-order structure.
*/
/*
 * Store an in-core dquot as a 64-bit network-order on-disk record.
 */
static void
dq_dqb64(const struct dquot *dq, struct dqblk64 *d64)
{

        d64->dqb_bhardlimit = htobe64(dq->dq_bhardlimit);
        d64->dqb_bsoftlimit = htobe64(dq->dq_bsoftlimit);
        d64->dqb_curblocks = htobe64(dq->dq_curblocks);
        d64->dqb_ihardlimit = htobe64(dq->dq_ihardlimit);
        d64->dqb_isoftlimit = htobe64(dq->dq_isoftlimit);
        d64->dqb_curinodes = htobe64(dq->dq_curinodes);
        d64->dqb_btime = htobe64(dq->dq_btime);
        d64->dqb_itime = htobe64(dq->dq_itime);
}
/*
* Convert 64-bit host-order structure to 32-bit host-order structure.
*/
/*
 * Narrow a 64-bit host-order record to the legacy 32-bit layout,
 * clamping each field to UINT32_MAX.
 */
static void
dqb64_dqb32(const struct dqblk64 *d64, struct dqblk32 *d32)
{

        d32->dqb_bhardlimit = CLIP32(d64->dqb_bhardlimit);
        d32->dqb_bsoftlimit = CLIP32(d64->dqb_bsoftlimit);
        d32->dqb_curblocks = CLIP32(d64->dqb_curblocks);
        d32->dqb_ihardlimit = CLIP32(d64->dqb_ihardlimit);
        d32->dqb_isoftlimit = CLIP32(d64->dqb_isoftlimit);
        d32->dqb_curinodes = CLIP32(d64->dqb_curinodes);
        d32->dqb_btime = CLIP32(d64->dqb_btime);
        d32->dqb_itime = CLIP32(d64->dqb_itime);
}
/*
* Convert 32-bit host-order structure to 64-bit host-order structure.
*/
/*
 * Widen a legacy 32-bit host-order record to the 64-bit layout;
 * every field widens without loss.
 */
static void
dqb32_dqb64(const struct dqblk32 *d32, struct dqblk64 *d64)
{

        d64->dqb_bhardlimit = d32->dqb_bhardlimit;
        d64->dqb_bsoftlimit = d32->dqb_bsoftlimit;
        d64->dqb_curblocks = d32->dqb_curblocks;
        d64->dqb_ihardlimit = d32->dqb_ihardlimit;
        d64->dqb_isoftlimit = d32->dqb_isoftlimit;
        d64->dqb_curinodes = d32->dqb_curinodes;
        d64->dqb_btime = d32->dqb_btime;
        d64->dqb_itime = d32->dqb_itime;
}
Index: projects/clang360-import/sys/ufs/ufs/ufs_vfsops.c
===================================================================
--- projects/clang360-import/sys/ufs/ufs/ufs_vfsops.c (revision 277808)
+++ projects/clang360-import/sys/ufs/ufs/ufs_vfsops.c (revision 277809)
@@ -1,236 +1,244 @@
/*-
* Copyright (c) 1991, 1993, 1994
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_quota.h"
#include "opt_ufs.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#ifdef UFS_DIRHASH
#include <ufs/ufs/dir.h>
#include <ufs/ufs/dirhash.h>
#endif
MALLOC_DEFINE(M_UFSMNT, "ufs_mount", "UFS mount structure");
/*
* Return the root of a filesystem.
*/
int
ufs_root(mp, flags, vpp)
struct mount *mp;
int flags;
struct vnode **vpp;
{
struct vnode *nvp;
int error;
error = VFS_VGET(mp, (ino_t)ROOTINO, flags, &nvp);
if (error)
return (error);
*vpp = nvp;
return (0);
}
/*
* Do operations associated with quotas
*/
int
ufs_quotactl(mp, cmds, id, arg)
struct mount *mp;
int cmds;
uid_t id;
void *arg;
{
#ifndef QUOTA
+ if ((cmds >> SUBCMDSHIFT) == Q_QUOTAON)
+ vfs_unbusy(mp);
+
return (EOPNOTSUPP);
#else
struct thread *td;
int cmd, type, error;
td = curthread;
cmd = cmds >> SUBCMDSHIFT;
type = cmds & SUBCMDMASK;
if (id == -1) {
switch (type) {
case USRQUOTA:
id = td->td_ucred->cr_ruid;
break;
case GRPQUOTA:
id = td->td_ucred->cr_rgid;
break;
default:
+ if (cmd == Q_QUOTAON)
+ vfs_unbusy(mp);
return (EINVAL);
}
}
- if ((u_int)type >= MAXQUOTAS)
+ if ((u_int)type >= MAXQUOTAS) {
+ if (cmd == Q_QUOTAON)
+ vfs_unbusy(mp);
return (EINVAL);
+ }
switch (cmd) {
case Q_QUOTAON:
error = quotaon(td, mp, type, arg);
break;
case Q_QUOTAOFF:
error = quotaoff(td, mp, type);
break;
case Q_SETQUOTA32:
error = setquota32(td, mp, id, type, arg);
break;
case Q_SETUSE32:
error = setuse32(td, mp, id, type, arg);
break;
case Q_GETQUOTA32:
error = getquota32(td, mp, id, type, arg);
break;
case Q_SETQUOTA:
error = setquota(td, mp, id, type, arg);
break;
case Q_SETUSE:
error = setuse(td, mp, id, type, arg);
break;
case Q_GETQUOTA:
error = getquota(td, mp, id, type, arg);
break;
case Q_GETQUOTASIZE:
error = getquotasize(td, mp, id, type, arg);
break;
case Q_SYNC:
error = qsync(mp);
break;
default:
error = EINVAL;
break;
}
return (error);
#endif
}
/*
* Initial UFS filesystems, done only once.
*/
int
ufs_init(vfsp)
struct vfsconf *vfsp;
{
#ifdef QUOTA
dqinit();
#endif
#ifdef UFS_DIRHASH
ufsdirhash_init();
#endif
return (0);
}
/*
* Uninitialise UFS filesystems, done before module unload.
*/
int
ufs_uninit(vfsp)
struct vfsconf *vfsp;
{
#ifdef QUOTA
dquninit();
#endif
#ifdef UFS_DIRHASH
ufsdirhash_uninit();
#endif
return (0);
}
/*
* This is the generic part of fhtovp called after the underlying
* filesystem has validated the file handle.
*
* Call the VFS_CHECKEXP beforehand to verify access.
*/
int
ufs_fhtovp(mp, ufhp, flags, vpp)
struct mount *mp;
struct ufid *ufhp;
int flags;
struct vnode **vpp;
{
struct inode *ip;
struct vnode *nvp;
int error;
error = VFS_VGET(mp, ufhp->ufid_ino, flags, &nvp);
if (error) {
*vpp = NULLVP;
return (error);
}
ip = VTOI(nvp);
if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen ||
ip->i_effnlink <= 0) {
vput(nvp);
*vpp = NULLVP;
return (ESTALE);
}
*vpp = nvp;
vnode_create_vobject(*vpp, DIP(ip, i_size), curthread);
return (0);
}
Index: projects/clang360-import/sys
===================================================================
--- projects/clang360-import/sys (revision 277808)
+++ projects/clang360-import/sys (revision 277809)
Property changes on: projects/clang360-import/sys
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head/sys:r277777-277803
Index: projects/clang360-import/usr.bin/sed/main.c
===================================================================
--- projects/clang360-import/usr.bin/sed/main.c (revision 277808)
+++ projects/clang360-import/usr.bin/sed/main.c (revision 277809)
@@ -1,535 +1,537 @@
/*-
* Copyright (c) 2013 Johann 'Myrkraverk' Oskarsson.
* Copyright (c) 1992 Diomidis Spinellis.
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Diomidis Spinellis of Imperial College, University of London.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef lint
static const char copyright[] =
"@(#) Copyright (c) 1992, 1993\n\
The Regents of the University of California. All rights reserved.\n";
#endif
#ifndef lint
static const char sccsid[] = "@(#)main.c 8.2 (Berkeley) 1/3/94";
#endif
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <locale.h>
#include <regex.h>
#include <stddef.h>
#define _WITH_GETLINE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "defs.h"
#include "extern.h"
/*
* Linked list of units (strings and files) to be compiled
*/
struct s_compunit {
struct s_compunit *next;
enum e_cut {CU_FILE, CU_STRING} type;
char *s; /* Pointer to string or fname */
};
/*
* Linked list pointer to compilation units and pointer to current
* next pointer.
*/
static struct s_compunit *script, **cu_nextp = &script;
/*
 * Linked list of files to be processed
 */
struct s_flist {
	char *fname;			/* Input file name (NULL means stdin) */
	struct s_flist *next;		/* Next input file */
};
/*
* Linked list pointer to files and pointer to current
* next pointer.
*/
static struct s_flist *files, **fl_nextp = &files;
FILE *infile; /* Current input file */
FILE *outfile; /* Current output file */
int aflag, eflag, nflag;
int rflags = 0;
static int rval; /* Exit status */
static int ispan; /* Whether inplace editing spans across files */
/*
* Current file and line number; line numbers restart across compilation
* units, but span across input files. The latter is optional if editing
* in place.
*/
const char *fname; /* File name. */
const char *outfname; /* Output file name */
static char oldfname[PATH_MAX]; /* Old file name (for in-place editing) */
static char tmpfname[PATH_MAX]; /* Temporary file name (for in-place editing) */
static const char *inplace; /* Inplace edit file extension. */
u_long linenum;
static void add_compunit(enum e_cut, char *);
static void add_file(char *);
static void usage(void);
/*
 * sed entry point: parse the command-line options, queue the script
 * compilation units (-e / -f, or the first operand) and the input
 * files, compile the script, then run it over all inputs.
 */
int
main(int argc, char *argv[])
{
	int c, fflag;
	char *temp_arg;

	(void) setlocale(LC_ALL, "");

	fflag = 0;
	inplace = NULL;

	while ((c = getopt(argc, argv, "EI:ae:f:i:lnru")) != -1)
		switch (c) {
		case 'r':		/* Gnu sed compat */
		case 'E':		/* extended regular expressions */
			rflags = REG_EXTENDED;
			break;
		case 'I':		/* in-place edit, spanning files */
			inplace = optarg;
			ispan = 1;	/* span across input files */
			break;
		case 'a':		/* delay 'w' file opens */
			aflag = 1;
			break;
		case 'e':		/* script text on the command line */
			eflag = 1;
			/* +2: room for the appended '\n' and the NUL */
			if ((temp_arg = malloc(strlen(optarg) + 2)) == NULL)
				err(1, "malloc");
			strcpy(temp_arg, optarg);
			strcat(temp_arg, "\n");
			add_compunit(CU_STRING, temp_arg);
			break;
		case 'f':		/* script from a file */
			fflag = 1;
			add_compunit(CU_FILE, optarg);
			break;
		case 'i':		/* in-place edit, per file */
			inplace = optarg;
			ispan = 0;	/* don't span across input files */
			break;
		case 'l':		/* line-buffered output */
			if(setvbuf(stdout, NULL, _IOLBF, 0) != 0)
				warnx("setting line buffered output failed");
			break;
		case 'n':		/* suppress automatic printing */
			nflag = 1;
			break;
		case 'u':		/* unbuffered output */
			if(setvbuf(stdout, NULL, _IONBF, 0) != 0)
				warnx("setting unbuffered output failed");
			break;
		default:
		case '?':
			usage();
		}
	argc -= optind;
	argv += optind;

	/* First usage case; script is the first arg */
	if (!eflag && !fflag && *argv) {
		add_compunit(CU_STRING, *argv);
		argv++;
	}

	compile();

	/* Continue with first and start second usage */
	if (*argv)
		for (; *argv; argv++)
			add_file(*argv);
	else
		add_file(NULL);	/* no operands: read stdin */
	process();
	cfclose(prog, NULL);
	if (fclose(stdout))
		err(1, "stdout");
	exit(rval);
}
/* Print the usage synopsis on stderr and exit with an error status. */
static void
usage(void)
{
	const char *progname;

	progname = getprogname();
	(void)fprintf(stderr,
	    "usage: %s script [-Ealnru] [-i extension] [file ...]\n"
	    "\t%s [-Ealnu] [-i extension] [-e script] ... [-f script_file]"
	    " ... [file ...]\n", progname, progname);
	exit(1);
}
/*
 * Like fgets, but go through the chain of compilation units chaining them
 * together.  Empty strings and files are ignored.  Returns the next script
 * line in buf (at most n-1 bytes plus NUL) and sets *more to whether the
 * current unit has further input.
 */
char *
cu_fgets(char *buf, int n, int *more)
{
	/* Persistent reader state across calls. */
	static enum {ST_EOF, ST_FILE, ST_STRING} state = ST_EOF;
	static FILE *f;		/* Current open file */
	static char *s;		/* Current pointer inside string */
	static char string_ident[30];	/* Display name for -e strings */
	char *p;

again:
	switch (state) {
	case ST_EOF:
		/* Advance to the next compilation unit, if any. */
		if (script == NULL) {
			if (more != NULL)
				*more = 0;
			return (NULL);
		}
		linenum = 0;
		switch (script->type) {
		case CU_FILE:
			if ((f = fopen(script->s, "r")) == NULL)
				err(1, "%s", script->s);
			fname = script->s;
			state = ST_FILE;
			goto again;
		case CU_STRING:
			/* Build a quoted identifier, elided with " ..." if long. */
			if (((size_t)snprintf(string_ident,
			    sizeof(string_ident), "\"%s\"", script->s)) >=
			    sizeof(string_ident) - 1)
				(void)strcpy(string_ident +
				    sizeof(string_ident) - 6, " ...\"");
			fname = string_ident;
			s = script->s;
			state = ST_STRING;
			goto again;
		}
	case ST_FILE:
		if ((p = fgets(buf, n, f)) != NULL) {
			linenum++;
			/* A leading "#n" on the first line implies -n. */
			if (linenum == 1 && buf[0] == '#' && buf[1] == 'n')
				nflag = 1;
			if (more != NULL)
				*more = !feof(f);
			return (p);
		}
		/* File exhausted: close it and move to the next unit. */
		script = script->next;
		(void)fclose(f);
		state = ST_EOF;
		goto again;
	case ST_STRING:
		if (linenum == 0 && s[0] == '#' && s[1] == 'n')
			nflag = 1;
		p = buf;
		for (;;) {
			if (n-- <= 1) {
				/* Output buffer full; line continues. */
				*p = '\0';
				linenum++;
				if (more != NULL)
					*more = 1;
				return (buf);
			}
			switch (*s) {
			case '\0':
				state = ST_EOF;
				if (s == script->s) {
					/* Empty string unit: skip it. */
					script = script->next;
					goto again;
				} else {
					/* Return the final partial line. */
					script = script->next;
					*p = '\0';
					linenum++;
					if (more != NULL)
						*more = 0;
					return (buf);
				}
			case '\n':
				*p++ = '\n';
				*p = '\0';
				s++;
				linenum++;
				if (more != NULL)
					*more = 0;
				return (buf);
			default:
				*p++ = *s++;
			}
		}
	}
	/* NOTREACHED */
	return (NULL);
}
/*
 * Like fgets, but go through the list of files chaining them together.
 * Set len to the length of the line.  Handles opening the next input
 * file, and for -i/-I performs the backup/rename dance for in-place
 * editing.  Returns 1 with a line in sp, or 0 at end of all input.
 */
int
mf_fgets(SPACE *sp, enum e_spflag spflag)
{
	struct stat sb;
	ssize_t len;
	static char *p = NULL;		/* getline() buffer, reused across calls */
	static size_t plen = 0;
	int c;
	static int firstfile;

	if (infile == NULL) {
		/* stdin? */
		if (files->fname == NULL) {
			if (inplace != NULL)
				errx(1, "-I or -i may not be used with stdin");
			infile = stdin;
			fname = "stdin";
			outfile = stdout;
			outfname = "stdout";
		}
		firstfile = 1;
	}

	for (;;) {
		/* Probe for available input on the current file. */
		if (infile != NULL && (c = getc(infile)) != EOF) {
			(void)ungetc(c, infile);
			break;
		}
		/* If we are here then either eof or no files are open yet */
		if (infile == stdin) {
			sp->len = 0;
			return (0);
		}
		if (infile != NULL) {
			fclose(infile);
			if (*oldfname != '\0') {
				/* if there was a backup file, remove it */
				unlink(oldfname);
				/*
				 * Backup the original.  Note that hard links
				 * are not supported on all filesystems.
				 */
				if ((link(fname, oldfname) != 0) &&
				    (rename(fname, oldfname) != 0)) {
					warn("rename()");
					if (*tmpfname)
						unlink(tmpfname);
					exit(1);
				}
				*oldfname = '\0';
			}
			if (*tmpfname != '\0') {
				/* Install the edited temp file over the original. */
				if (outfile != NULL && outfile != stdout)
					if (fclose(outfile) != 0) {
						warn("fclose()");
						unlink(tmpfname);
						exit(1);
					}
				outfile = NULL;
				if (rename(tmpfname, fname) != 0) {
					/* this should not happen really! */
					warn("rename()");
					unlink(tmpfname);
					exit(1);
				}
				*tmpfname = '\0';
			}
			outfname = NULL;
		}
		if (firstfile == 0)
			files = files->next;
		else
			firstfile = 0;
		if (files == NULL) {
			/* All input files consumed. */
			sp->len = 0;
			return (0);
		}
		fname = files->fname;
		if (inplace != NULL) {
			if (lstat(fname, &sb) != 0)
				err(1, "%s", fname);
			if (!(sb.st_mode & S_IFREG))
				errx(1, "%s: %s %s", fname,
				    "in-place editing only",
				    "works for regular files");
			if (*inplace != '\0') {
				strlcpy(oldfname, fname,
				    sizeof(oldfname));
				len = strlcat(oldfname, inplace,
				    sizeof(oldfname));
				/*
				 * NOTE(review): strlcat truncates when the
				 * result is >= sizeof(oldfname); this check
				 * uses ">" and so misses the equal case —
				 * confirm against strlcat(3).
				 */
				if (len > (ssize_t)sizeof(oldfname))
					errx(1, "%s: name too long", fname);
			}
			len = snprintf(tmpfname, sizeof(tmpfname),
			    "%s/.!%ld!%s", dirname(fname), (long)getpid(),
			    basename(fname));
			if (len >= (ssize_t)sizeof(tmpfname))
				errx(1, "%s: name too long", fname);
			unlink(tmpfname);
			if ((outfile = fopen(tmpfname, "w")) == NULL)
				err(1, "%s", fname);
			/*
			 * NOTE(review): the two patch lines below close
			 * outfile immediately after opening it and before
			 * the fchown/fchmod on its descriptor — this looks
			 * like a misapplied hunk (upstream adds this close
			 * before the fopen); confirm against r277809.
			 */
+ if (outfile != NULL && outfile != stdout)
+ fclose(outfile);
			fchown(fileno(outfile), sb.st_uid, sb.st_gid);
			fchmod(fileno(outfile), sb.st_mode & ALLPERMS);
			outfname = tmpfname;
			if (!ispan) {
				/* Per-file line numbering and range state. */
				linenum = 0;
				resetstate();
			}
		} else {
			outfile = stdout;
			outfname = "stdout";
		}
		if ((infile = fopen(fname, "r")) == NULL) {
			warn("%s", fname);
			rval = 1;
			continue;
		}
	}
	/*
	 * We are here only when infile is open and we still have something
	 * to read from it.
	 *
	 * Use getline() so that we can handle essentially infinite input
	 * data.  The p and plen are static so each invocation gives
	 * getline() the same buffer which is expanded as needed.
	 */
	len = getline(&p, &plen, infile);
	if (len == -1)
		err(1, "%s", fname);
	if (len != 0 && p[len - 1] == '\n') {
		sp->append_newline = 1;
		len--;
	} else if (!lastline()) {
		sp->append_newline = 1;
	} else {
		sp->append_newline = 0;
	}
	cspace(sp, p, len, spflag);

	linenum++;

	return (1);
}
/*
 * Append a new compilation unit (script string or script file name)
 * to the tail of the script list.
 */
static void
add_compunit(enum e_cut type, char *s)
{
	struct s_compunit *unit;

	unit = malloc(sizeof(*unit));
	if (unit == NULL)
		err(1, "malloc");
	unit->next = NULL;
	unit->type = type;
	unit->s = s;

	/* Link at the tail via the running next-pointer. */
	*cu_nextp = unit;
	cu_nextp = &unit->next;
}
/*
 * Append an input file name to the tail of the file list.
 * A NULL name stands for standard input.
 */
static void
add_file(char *s)
{
	struct s_flist *entry;

	entry = malloc(sizeof(*entry));
	if (entry == NULL)
		err(1, "malloc");
	entry->fname = s;
	entry->next = NULL;

	/* Link at the tail via the running next-pointer. */
	*fl_nextp = entry;
	fl_nextp = &entry->next;
}
static int
next_files_have_lines(void)
{
struct s_flist *file;
FILE *file_fd;
int ch;
file = files;
while ((file = file->next) != NULL) {
if ((file_fd = fopen(file->fname, "r")) == NULL)
continue;
if ((ch = getc(file_fd)) != EOF) {
/*
* This next file has content, therefore current
* file doesn't contains the last line.
*/
ungetc(ch, file_fd);
fclose(file_fd);
return (1);
}
fclose(file_fd);
}
return (0);
}
/*
 * Return 1 if the current input line is the last line of input.
 * When not editing in place (or when -I spans files), later input
 * files are also probed for remaining content.
 */
int
lastline(void)
{
	int c;

	if (!feof(infile)) {
		c = getc(infile);
		if (c != EOF) {
			/* More data in the current file. */
			ungetc(c, infile);
			return (0);
		}
	}
	/* Current file exhausted: last line unless later files have input. */
	return (!((inplace == NULL || ispan) && next_files_have_lines()));
}
Index: projects/clang360-import/usr.bin/sed/process.c
===================================================================
--- projects/clang360-import/usr.bin/sed/process.c (revision 277808)
+++ projects/clang360-import/usr.bin/sed/process.c (revision 277809)
@@ -1,783 +1,783 @@
/*-
* Copyright (c) 1992 Diomidis Spinellis.
* Copyright (c) 1992, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Diomidis Spinellis of Imperial College, University of London.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef lint
static const char sccsid[] = "@(#)process.c 8.6 (Berkeley) 4/20/94";
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <ctype.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <wchar.h>
#include <wctype.h>
#include "defs.h"
#include "extern.h"
static SPACE HS, PS, SS, YS;
#define pd PS.deleted
#define ps PS.space
#define psl PS.len
#define psanl PS.append_newline
#define hs HS.space
#define hsl HS.len
static inline int applies(struct s_command *);
static void do_tr(struct s_tr *);
static void flush_appends(void);
static void lputs(char *, size_t);
static int regexec_e(regex_t *, const char *, int, int, size_t);
static void regsub(SPACE *, char *, char *);
static int substitute(struct s_command *);
struct s_appends *appends; /* Array of pointers to strings to append. */
static int appendx; /* Index into appends array. */
int appendnum; /* Size of appends array. */
static int lastaddr; /* Set by applies if last address of a range. */
static int sdone; /* If any substitutes since last line input. */
/* Iov structure for 'w' commands. */
static regex_t *defpreg;
size_t maxnsub;
regmatch_t *match;
#define OUT() do { \
fwrite(ps, 1, psl, outfile); \
if (psanl) fputc('\n', outfile); \
} while (0)
/*
 * Main execution loop: read each input line into the pattern space via
 * mf_fgets() and run every applicable command of the compiled program
 * against it, auto-printing at the bottom unless -n or the line was
 * deleted.
 */
void
process(void)
{
	struct s_command *cp;
	SPACE tspace;
	size_t oldpsl = 0;	/* saved pattern-space length for 'P' */
	char *p;
	int oldpsanl;		/* saved append_newline flag for 'P' */

	p = NULL;
	for (linenum = 0; mf_fgets(&PS, REPLACE);) {
		pd = 0;
top:
		cp = prog;
redirect:
		while (cp != NULL) {
			if (!applies(cp)) {
				cp = cp->next;
				continue;
			}
			switch (cp->code) {
			case '{':		/* enter command group */
				cp = cp->u.c;
				goto redirect;
			case 'a':		/* queue text for append */
				if (appendx >= appendnum)
					if ((appends = realloc(appends,
					    sizeof(struct s_appends) *
					    (appendnum *= 2))) == NULL)
						err(1, "realloc");
				appends[appendx].type = AP_STRING;
				appends[appendx].s = cp->t;
				appends[appendx].len = strlen(cp->t);
				appendx++;
				break;
			case 'b':		/* unconditional branch */
				cp = cp->u.c;
				goto redirect;
			case 'c':		/* change: delete, print text at range end */
				pd = 1;
				psl = 0;
				if (cp->a2 == NULL || lastaddr || lastline())
					(void)fprintf(outfile, "%s", cp->t);
				break;
			case 'd':		/* delete pattern space */
				pd = 1;
				goto new;
			case 'D':		/* delete through first newline */
				if (pd)
					goto new;
				if (psl == 0 ||
				    (p = memchr(ps, '\n', psl)) == NULL) {
					pd = 1;
					goto new;
				} else {
					psl -= (p + 1) - ps;
					memmove(ps, p + 1, psl);
					goto top;
				}
			case 'g':		/* hold -> pattern */
				cspace(&PS, hs, hsl, REPLACE);
				break;
			case 'G':		/* append hold to pattern */
				cspace(&PS, "\n", 1, APPEND);
				cspace(&PS, hs, hsl, APPEND);
				break;
			case 'h':		/* pattern -> hold */
				cspace(&HS, ps, psl, REPLACE);
				break;
			case 'H':		/* append pattern to hold */
				cspace(&HS, "\n", 1, APPEND);
				cspace(&HS, ps, psl, APPEND);
				break;
			case 'i':		/* insert text */
				(void)fprintf(outfile, "%s", cp->t);
				break;
			case 'l':		/* list line unambiguously */
				lputs(ps, psl);
				break;
			case 'n':		/* print, then read next line */
				if (!nflag && !pd)
					OUT();
				flush_appends();
				if (!mf_fgets(&PS, REPLACE))
					exit(0);
				pd = 0;
				break;
			case 'N':		/* append next line to pattern space */
				flush_appends();
				cspace(&PS, "\n", 1, APPEND);
				if (!mf_fgets(&PS, APPEND))
					exit(0);
				break;
			case 'p':		/* print pattern space */
				if (pd)
					break;
				OUT();
				break;
			case 'P':		/* print up to first newline */
				if (pd)
					break;
				if ((p = memchr(ps, '\n', psl)) != NULL) {
					/* Temporarily clip at the newline. */
					oldpsl = psl;
					oldpsanl = psanl;
					psl = p - ps;
					psanl = 1;
				}
				OUT();
				if (p != NULL) {
					psl = oldpsl;
					psanl = oldpsanl;
				}
				break;
			case 'q':		/* quit */
				if (!nflag && !pd)
					OUT();
				flush_appends();
				exit(0);
			case 'r':		/* queue file contents for append */
				if (appendx >= appendnum)
					if ((appends = realloc(appends,
					    sizeof(struct s_appends) *
					    (appendnum *= 2))) == NULL)
						err(1, "realloc");
				appends[appendx].type = AP_FILE;
				appends[appendx].s = cp->t;
				appends[appendx].len = strlen(cp->t);
				appendx++;
				break;
			case 's':		/* substitute */
				sdone |= substitute(cp);
				break;
			case 't':		/* branch if a substitution was made */
				if (sdone) {
					sdone = 0;
					cp = cp->u.c;
					goto redirect;
				}
				break;
			case 'w':		/* write pattern space to file */
				if (pd)
					break;
				if (cp->u.fd == -1 && (cp->u.fd = open(cp->t,
				    O_WRONLY|O_APPEND|O_CREAT|O_TRUNC,
				    DEFFILEMODE)) == -1)
					err(1, "%s", cp->t);
				if (write(cp->u.fd, ps, psl) != (ssize_t)psl ||
				    write(cp->u.fd, "\n", 1) != 1)
					err(1, "%s", cp->t);
				break;
			case 'x':		/* exchange pattern and hold spaces */
				/*
				 * If the hold space is null, make it empty
				 * but not null.  Otherwise the pattern space
				 * will become null after the swap, which is
				 * an abnormal condition.
				 */
				if (hs == NULL)
					cspace(&HS, "", 0, REPLACE);
				tspace = PS;
				PS = HS;
				psanl = tspace.append_newline;
				HS = tspace;
				break;
			case 'y':		/* transliterate */
				if (pd || psl == 0)
					break;
				do_tr(cp->u.y);
				break;
			case ':':		/* label / group end: no-ops here */
			case '}':
				break;
			case '=':		/* print current line number */
				(void)fprintf(outfile, "%lu\n", linenum);
			}
			cp = cp->next;
		} /* for all cp */

new:		if (!nflag && !pd)
			OUT();
		flush_appends();
	} /* for all lines */
}
/*
* TRUE if the address passed matches the current program state
* (lastline, linenumber, ps).
*/
#define MATCH(a) \
((a)->type == AT_RE ? regexec_e((a)->u.r, ps, 0, 1, psl) : \
(a)->type == AT_LINE ? linenum == (a)->u.l : lastline())
/*
 * Return TRUE if the command applies to the current line.  Sets the start
 * line for process ranges.  Interprets the non-select (``!'') flag.
 */
static inline int
applies(struct s_command *cp)
{
	int r;

	lastaddr = 0;
	if (cp->a1 == NULL && cp->a2 == NULL)
		r = 1;			/* no addresses: always applies */
	else if (cp->a2)
		if (cp->startline > 0) {
			/* Inside an active range: look for the end address. */
			switch (cp->a2->type) {
			case AT_RELLINE:
				if (linenum - cp->startline <= cp->a2->u.l)
					r = 1;
				else {
					cp->startline = 0;
					r = 0;
				}
				break;
			default:
				if (MATCH(cp->a2)) {
					/* End address matched: close range. */
					cp->startline = 0;
					lastaddr = 1;
					r = 1;
				} else if (cp->a2->type == AT_LINE &&
				    linenum > cp->a2->u.l) {
					/*
					 * We missed the 2nd address due to a
					 * branch, so just close the range and
					 * return false.
					 */
					cp->startline = 0;
					r = 0;
				} else
					r = 1;
			}
		/*
		 * The patch below adds a null check on a1 before MATCH();
		 * both diff lines are preserved verbatim.
		 */
- } else if (MATCH(cp->a1)) {
+ } else if (cp->a1 && MATCH(cp->a1)) {
			/*
			 * If the second address is a number less than or
			 * equal to the line number first selected, only
			 * one line shall be selected.
			 *	-- POSIX 1003.2
			 * Likewise if the relative second line address is zero.
			 */
			if ((cp->a2->type == AT_LINE &&
			    linenum >= cp->a2->u.l) ||
			    (cp->a2->type == AT_RELLINE && cp->a2->u.l == 0))
				lastaddr = 1;
			else {
				/* Open the range at the current line. */
				cp->startline = linenum;
			}
			r = 1;
		} else
			r = 0;
	else
		r = MATCH(cp->a1);	/* single address */
	return (cp->nonsel ? ! r : r);
}
/*
* Reset the sed processor to its initial state.
*/
void
resetstate(void)
{
struct s_command *cp;
/*
* Reset all in-range markers.
*/
for (cp = prog; cp; cp = cp->code == '{' ? cp->u.c : cp->next)
if (cp->a2)
cp->startline = 0;
/*
* Clear out the hold space.
*/
cspace(&HS, "", 0, REPLACE);
}
/*
 * substitute --
 *	Do substitutions in the pattern space.  Currently, we build a
 *	copy of the new pattern space in the substitute space structure
 *	and then swap them.  Returns 1 if a substitution was made.
 */
static int
substitute(struct s_command *cp)
{
	SPACE tspace;
	regex_t *re;
	regoff_t re_off, slen;
	int lastempty, n;
	char *s;

	s = ps;
	re = cp->u.s->re;
	if (re == NULL) {
		/* Empty RE reuses the last one; check its backreferences. */
		if (defpreg != NULL && cp->u.s->maxbref > defpreg->re_nsub) {
			linenum = cp->u.s->linenum;
			errx(1, "%lu: %s: \\%u not defined in the RE",
					linenum, fname, cp->u.s->maxbref);
		}
	}
	if (!regexec_e(re, s, 0, 0, psl))
		return (0);

	SS.len = 0;				/* Clean substitute space. */
	slen = psl;
	n = cp->u.s->n;
	lastempty = 1;

	switch (n) {
	case 0:					/* Global */
		do {
			if (lastempty || match[0].rm_so != match[0].rm_eo) {
				/* Locate start of replaced string. */
				re_off = match[0].rm_so;
				/* Copy leading retained string. */
				cspace(&SS, s, re_off, APPEND);
				/* Add in regular expression. */
				regsub(&SS, s, cp->u.s->new);
			}

			/* Move past this match. */
			if (match[0].rm_so != match[0].rm_eo) {
				s += match[0].rm_eo;
				slen -= match[0].rm_eo;
				lastempty = 0;
			} else {
				/*
				 * Empty match: copy one character forward
				 * so we make progress and don't loop.
				 */
				if (match[0].rm_so < slen)
					cspace(&SS, s + match[0].rm_so, 1,
					    APPEND);
				s += match[0].rm_so + 1;
				slen -= match[0].rm_so + 1;
				lastempty = 1;
			}
		} while (slen >= 0 && regexec_e(re, s, REG_NOTBOL, 0, slen));
		/* Copy trailing retained string. */
		if (slen > 0)
			cspace(&SS, s, slen, APPEND);
		break;
	default:				/* Nth occurrence */
		while (--n) {
			/* Skip forward over the first n-1 matches. */
			if (match[0].rm_eo == match[0].rm_so)
				match[0].rm_eo = match[0].rm_so + 1;
			s += match[0].rm_eo;
			slen -= match[0].rm_eo;
			if (slen < 0)
				return (0);
			if (!regexec_e(re, s, REG_NOTBOL, 0, slen))
				return (0);
		}
		/* FALLTHROUGH */
	case 1:					/* 1st occurrence */
		/* Locate start of replaced string. */
		re_off = match[0].rm_so + (s - ps);
		/* Copy leading retained string. */
		cspace(&SS, ps, re_off, APPEND);
		/* Add in regular expression. */
		regsub(&SS, s, cp->u.s->new);
		/* Copy trailing retained string. */
		s += match[0].rm_eo;
		slen -= match[0].rm_eo;
		cspace(&SS, s, slen, APPEND);
		break;
	}

	/*
	 * Swap the substitute space and the pattern space, and make sure
	 * that any leftover pointers into stdio memory get lost.
	 */
	tspace = PS;
	PS = SS;
	psanl = tspace.append_newline;
	SS = tspace;
	SS.space = SS.back;

	/* Handle the 'p' flag. */
	if (cp->u.s->p)
		OUT();

	/* Handle the 'w' flag. */
	if (cp->u.s->wfile && !pd) {
		if (cp->u.s->wfd == -1 && (cp->u.s->wfd = open(cp->u.s->wfile,
		    O_WRONLY|O_APPEND|O_CREAT|O_TRUNC, DEFFILEMODE)) == -1)
			err(1, "%s", cp->u.s->wfile);
		if (write(cp->u.s->wfd, ps, psl) != (ssize_t)psl ||
		    write(cp->u.s->wfd, "\n", 1) != 1)
			err(1, "%s", cp->u.s->wfile);
	}
	return (1);
}
/*
 * do_tr --
 *	Perform translation ('y' command) in the pattern space.
 *	Single-byte locales translate in place; multibyte locales build
 *	the result in the translation space and swap it in.
 */
static void
do_tr(struct s_tr *y)
{
	SPACE tmp;
	char c, *p;
	size_t clen, left;
	int i;

	if (MB_CUR_MAX == 1) {
		/*
		 * Single-byte encoding: perform in-place translation
		 * of the pattern space.
		 */
		for (p = ps; p < &ps[psl]; p++)
			*p = y->bytetab[(u_char)*p];
	} else {
		/*
		 * Multi-byte encoding: perform translation into the
		 * translation space, then swap the translation and
		 * pattern spaces.
		 */
		/* Clean translation space. */
		YS.len = 0;
		for (p = ps, left = psl; left > 0; p += clen, left -= clen) {
			/* Fast path: single-byte mapping via bytetab. */
			if ((c = y->bytetab[(u_char)*p]) != '\0') {
				cspace(&YS, &c, 1, APPEND);
				clen = 1;
				continue;
			}
			/* Look for a multibyte source sequence here. */
			for (i = 0; i < y->nmultis; i++)
				if (left >= y->multis[i].fromlen &&
				    memcmp(p, y->multis[i].from,
				    y->multis[i].fromlen) == 0)
					break;
			if (i < y->nmultis) {
				cspace(&YS, y->multis[i].to,
				    y->multis[i].tolen, APPEND);
				clen = y->multis[i].fromlen;
			} else {
				/* No mapping: copy the byte through. */
				cspace(&YS, p, 1, APPEND);
				clen = 1;
			}
		}
		/* Swap the translation space and the pattern space. */
		tmp = PS;
		PS = YS;
		psanl = tmp.append_newline;
		YS = tmp;
		YS.space = YS.back;
	}
}
/*
 * Flush append requests.  Always called before reading a line,
 * therefore it also resets the substitution done (sdone) flag.
 */
static void
flush_appends(void)
{
	FILE *fp;
	char buf[8 * 1024];
	int i, n;

	for (i = 0; i < appendx; i++) {
		switch (appends[i].type) {
		case AP_STRING:
			fwrite(appends[i].s, sizeof(char), appends[i].len,
			    outfile);
			break;
		case AP_FILE:
			/*
			 * Read files probably shouldn't be cached.  Since
			 * it's not an error to read a non-existent file,
			 * it's possible that another program is interacting
			 * with the sed script through the filesystem.  It
			 * would be truly bizarre, but possible.  It's probably
			 * not that big a performance win, anyhow.
			 */
			fp = fopen(appends[i].s, "r");
			if (fp == NULL)
				break;
			while ((n = fread(buf, sizeof(char), sizeof(buf), fp)))
				(void)fwrite(buf, sizeof(char), n, outfile);
			(void)fclose(fp);
			break;
		}
	}
	if (ferror(outfile))
		errx(1, "%s: %s", outfname, strerror(errno ? errno : EIO));
	appendx = sdone = 0;
}
/*
 * Write len bytes of s to outfile in the unambiguous form required by
 * the 'l' command: non-printables are escaped or octal-encoded, long
 * lines are folded with a backslash at the terminal width, and the
 * line is terminated with '$'.
 */
static void
lputs(char *s, size_t len)
{
	static const char escapes[] = "\\\a\b\f\r\t\v";
	int c, col, width;
	const char *p;
	struct winsize win;
	static int termwidth = -1;	/* cached; computed once for a tty */
	size_t clen, i;
	wchar_t wc;
	mbstate_t mbs;

	/* Fixed width when not writing to the terminal. */
	if (outfile != stdout)
		termwidth = 60;
	if (termwidth == -1) {
		/* COLUMNS, then the tty size, then a 60-column default. */
		if ((p = getenv("COLUMNS")) && *p != '\0')
			termwidth = atoi(p);
		else if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) == 0 &&
		    win.ws_col > 0)
			termwidth = win.ws_col;
		else
			termwidth = 60;
	}
	if (termwidth <= 0)
		termwidth = 1;

	memset(&mbs, 0, sizeof(mbs));

	col = 0;
	while (len != 0) {
		clen = mbrtowc(&wc, s, len, &mbs);
		if (clen == 0)
			clen = 1;	/* embedded NUL: one byte */
		if (clen == (size_t)-1 || clen == (size_t)-2) {
			/* Invalid/incomplete sequence: emit raw byte. */
			wc = (unsigned char)*s;
			clen = 1;
			memset(&mbs, 0, sizeof(mbs));
		}
		if (wc == '\n') {
			if (col + 1 >= termwidth)
				fprintf(outfile, "\\\n");
			fputc('$', outfile);
			fputc('\n', outfile);
			col = 0;
		} else if (iswprint(wc)) {
			width = wcwidth(wc);
			if (col + width >= termwidth) {
				fprintf(outfile, "\\\n");
				col = 0;
			}
			fwrite(s, 1, clen, outfile);
			col += width;
		} else if (wc != L'\0' && (c = wctob(wc)) != EOF &&
		    (p = strchr(escapes, c)) != NULL) {
			/* C escape: 2 output columns ("\x"). */
			if (col + 2 >= termwidth) {
				fprintf(outfile, "\\\n");
				col = 0;
			}
			fprintf(outfile, "\\%c", "\\abfrtv"[p - escapes]);
			col += 2;
		} else {
			/* Octal escape: 4 columns per byte ("\ooo"). */
			if (col + 4 * clen >= (unsigned)termwidth) {
				fprintf(outfile, "\\\n");
				col = 0;
			}
			for (i = 0; i < clen; i++)
				fprintf(outfile, "\\%03o",
				    (int)(unsigned char)s[i]);
			col += 4 * clen;
		}
		s += clen;
		len -= clen;
	}
	if (col + 1 >= termwidth)
		fprintf(outfile, "\\\n");
	(void)fputc('$', outfile);
	(void)fputc('\n', outfile);
	if (ferror(outfile))
		errx(1, "%s: %s", outfname, strerror(errno ? errno : EIO));
}
static int
regexec_e(regex_t *preg, const char *string, int eflags, int nomatch,
size_t slen)
{
int eval;
if (preg == NULL) {
if (defpreg == NULL)
errx(1, "first RE may not be empty");
} else
defpreg = preg;
/* Set anchors */
match[0].rm_so = 0;
match[0].rm_eo = slen;
eval = regexec(defpreg, string,
nomatch ? 0 : maxnsub + 1, match, eflags | REG_STARTEND);
switch(eval) {
case 0:
return (1);
case REG_NOMATCH:
return (0);
}
errx(1, "RE error: %s", strregerror(eval, defpreg));
/* NOTREACHED */
}
/*
 * regsub - perform substitutions after a regexp match
 * Based on a routine by Henry Spencer
 *
 * Appends the replacement text src to sp, expanding '&' to the whole
 * match and \1..\9 to the corresponding subexpressions of the last
 * regexec_e() against "string".
 */
static void
regsub(SPACE *sp, char *string, char *src)
{
	int len, no;
	char c, *dst;

/*
 * Grow sp's buffer as needed; note this rebinds the local "dst"
 * because realloc may move the buffer.
 */
#define	NEEDSP(reqlen) \
	/* XXX What is the +1 for? */ \
	if (sp->len + (reqlen) + 1 >= sp->blen) { \
		sp->blen += (reqlen) + 1024; \
		if ((sp->space = sp->back = realloc(sp->back, sp->blen)) \
		    == NULL) \
			err(1, "realloc"); \
		dst = sp->space + sp->len; \
	}

	dst = sp->space + sp->len;
	while ((c = *src++) != '\0') {
		if (c == '&')
			no = 0;		/* whole match */
		else if (c == '\\' && isdigit((unsigned char)*src))
			no = *src++ - '0';	/* backreference \N */
		else
			no = -1;
		if (no < 0) {		/* Ordinary character. */
			/* \\ and \& are literal backslash/ampersand. */
			if (c == '\\' && (*src == '\\' || *src == '&'))
				c = *src++;
			NEEDSP(1);
			*dst++ = c;
			++sp->len;
		} else if (match[no].rm_so != -1 && match[no].rm_eo != -1) {
			/* Copy the matched (sub)expression text. */
			len = match[no].rm_eo - match[no].rm_so;
			NEEDSP(len);
			memmove(dst, string + match[no].rm_so, len);
			dst += len;
			sp->len += len;
		}
	}
	NEEDSP(1);
	*dst = '\0';
}
/*
 * cspace --
 *	Concatenate space: append the source space to the destination space,
 *	allocating new space as necessary.  REPLACE empties the destination
 *	first; the result is always NUL-terminated.
 */
void
cspace(SPACE *sp, const char *p, size_t len, enum e_spflag spflag)
{
	size_t need;

	/* Make sure SPACE has enough memory, with 1 KiB of headroom. */
	need = sp->len + len + 1;
	if (need > sp->blen) {
		sp->blen = need + 1024;
		sp->space = sp->back = realloc(sp->back, sp->blen);
		if (sp->space == NULL)
			err(1, "realloc");
	}

	if (spflag == REPLACE)
		sp->len = 0;

	memmove(sp->space + sp->len, p, len);
	sp->len += len;
	sp->space[sp->len] = '\0';
}
/*
 * Close all cached opened files and report any errors.  Recurses into
 * command groups; "end" bounds the walk (NULL for the whole program).
 */
void
cfclose(struct s_command *cp, struct s_command *end)
{
	struct s_command *c;

	for (c = cp; c != end; c = c->next) {
		switch (c->code) {
		case 's':		/* s///w output file */
			if (c->u.s->wfd != -1 && close(c->u.s->wfd))
				err(1, "%s", c->u.s->wfile);
			c->u.s->wfd = -1;
			break;
		case 'w':		/* w command output file */
			if (c->u.fd != -1 && close(c->u.fd))
				err(1, "%s", c->t);
			c->u.fd = -1;
			break;
		case '{':		/* descend into the group */
			cfclose(c->u.c, c->next);
			break;
		}
	}
}
Index: projects/clang360-import/usr.sbin/pmcstudy/pmcstudy.c
===================================================================
--- projects/clang360-import/usr.sbin/pmcstudy/pmcstudy.c (revision 277808)
+++ projects/clang360-import/usr.sbin/pmcstudy/pmcstudy.c (revision 277809)
@@ -1,2428 +1,2434 @@
/*-
* Copyright (c) 2014, 2015 Netflix Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <sys/errno.h>
#include <signal.h>
#include <sys/wait.h>
#include <getopt.h>
#include "eval_expr.h"
__FBSDID("$FreeBSD$");
#define MAX_COUNTER_SLOTS 1024
#define MAX_NLEN 64
#define MAX_CPU 64
static int verbose = 0;
extern char **environ;
extern struct expression *master_exp;
struct expression *master_exp=NULL;
#define PMC_INITIAL_ALLOC 512
extern char **valid_pmcs;
char **valid_pmcs = NULL;
extern int valid_pmc_cnt;
int valid_pmc_cnt=0;
extern int pmc_allocated_cnt;
int pmc_allocated_cnt=0;
/*
* The following two varients on popen and pclose with
* the cavet that they get you the PID so that you
* can supply it to pclose so it can send a SIGTERM
* to the process.
*/
static FILE *
my_popen(const char *command, const char *dir, pid_t *p_pid)
{
FILE *io_out, *io_in;
int pdesin[2], pdesout[2];
char *argv[4];
pid_t pid;
char cmd[4];
char cmd2[1024];
char arg1[4];
if ((strcmp(dir, "r") != 0) &&
(strcmp(dir, "w") != 0)) {
errno = EINVAL;
return(NULL);
}
if (pipe(pdesin) < 0)
return (NULL);
if (pipe(pdesout) < 0) {
(void)close(pdesin[0]);
(void)close(pdesin[1]);
return (NULL);
}
strcpy(cmd, "sh");
strcpy(arg1, "-c");
strcpy(cmd2, command);
argv[0] = cmd;
argv[1] = arg1;
argv[2] = cmd2;
argv[3] = NULL;
switch (pid = fork()) {
case -1: /* Error. */
(void)close(pdesin[0]);
(void)close(pdesin[1]);
(void)close(pdesout[0]);
(void)close(pdesout[1]);
return (NULL);
/* NOTREACHED */
case 0: /* Child. */
/* Close out un-used sides */
(void)close(pdesin[1]);
(void)close(pdesout[0]);
/* Now prepare the stdin of the process */
close(0);
(void)dup(pdesin[0]);
(void)close(pdesin[0]);
/* Now prepare the stdout of the process */
close(1);
(void)dup(pdesout[1]);
/* And lets do stderr just in case */
close(2);
(void)dup(pdesout[1]);
(void)close(pdesout[1]);
/* Now run it */
execve("/bin/sh", argv, environ);
exit(127);
/* NOTREACHED */
}
/* Parent; assume fdopen can't fail. */
/* Store the pid */
*p_pid = pid;
if (strcmp(dir, "r") != 0) {
io_out = fdopen(pdesin[1], "w");
(void)close(pdesin[0]);
(void)close(pdesout[0]);
(void)close(pdesout[1]);
return(io_out);
} else {
/* Prepare the input stream */
io_in = fdopen(pdesout[0], "r");
(void)close(pdesout[1]);
(void)close(pdesin[0]);
(void)close(pdesin[1]);
return (io_in);
}
}
/*
 * pclose --
 *	Companion to my_popen(): closes the stream, sends SIGTERM to the
 *	child, then reaps it, retrying the wait across EINTR.
 */
static void
my_pclose(FILE *io, pid_t the_pid)
{
	int status;
	pid_t waited;

	/* Release the stdio stream first. */
	(void)fclose(io);
	/* Die if you are not dead! */
	kill(the_pid, SIGTERM);
	do {
		waited = wait4(the_pid, &status, 0, (struct rusage *)0);
	} while (waited == -1 && errno == EINTR);
}
/* Per-CPU sample history for one PMC counter. */
struct counters {
	struct counters *next_cpu;		/* Same counter on the next CPU */
	char counter_name[MAX_NLEN];	/* Name of counter */
	int cpu;			/* CPU we are on */
	int pos;			/* Index we are filling to. */
	uint64_t vals[MAX_COUNTER_SLOTS]; /* Last 64 entries */
	uint64_t sum;			/* Summary of entries */
};
extern struct counters *glob_cpu[MAX_CPU];
struct counters *glob_cpu[MAX_CPU];
extern struct counters *cnts;
struct counters *cnts=NULL;
extern int ncnts;
int ncnts=0;
extern int (*expression)(struct counters *, int);
int (*expression)(struct counters *, int);
static const char *threshold=NULL;
static const char *command;
struct cpu_entry {
const char *name;
const char *thresh;
const char *command;
int (*func)(struct counters *, int);
};
/* A CPU model and the table of analyses it supports. */
struct cpu_type {
	char cputype[32];		/* CPU model identifier string */
	int number;			/* Number of entries in ents[] */
	struct cpu_entry *ents;		/* Supported analyses */
	void (*explain)(const char *name);	/* Per-analysis help printer */
};
extern struct cpu_type the_cpu;
struct cpu_type the_cpu;
/*
 * explain_name_sb --
 *	Print, for a Sandy Bridge analysis entry, the counter formula the
 *	user should examine and the threshold at which it indicates a
 *	potential performance problem.  Unknown names get a diagnostic.
 *
 *	Fix: the "splitload" message was missing its opening parenthesis
 *	("MEM_UOP_RETIRED.SPLIT_LOADS * 5)"), leaving the printed formula
 *	unbalanced; it now reads "(MEM_UOP_RETIRED.SPLIT_LOADS * 5)".
 */
static void
explain_name_sb(const char *name)
{
	const char *mythresh;

	if (strcmp(name, "allocstall1") == 0) {
		printf("Examine PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "allocstall2") == 0) {
		printf("Examine PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "br_miss") == 0) {
		printf("Examine (20 * BR_MISP_RETIRED.ALL_BRANCHES)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "splitload") == 0) {
		printf("Examine (MEM_UOP_RETIRED.SPLIT_LOADS * 5) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "splitstore") == 0) {
		printf("Examine MEM_UOP_RETIRED.SPLIT_STORES / MEM_UOP_RETIRED.ALL_STORES\n");
		mythresh = "thresh >= .01";
	} else if (strcmp(name, "contested") == 0) {
		printf("Examine (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "blockstorefwd") == 0) {
		printf("Examine (LD_BLOCKS_STORE_FORWARD * 13) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "cache2") == 0) {
		printf("Examine ((MEM_LOAD_RETIRED.L3_HIT * 26) + \n");
		printf(" (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 43) + \n");
		printf(" (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60)) / CPU_CLK_UNHALTED.THREAD_P\n");
		printf("**Note we have it labeled MEM_LOAD_UOPS_RETIRED.LLC_HIT not MEM_LOAD_RETIRED.L3_HIT\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "cache1") == 0) {
		printf("Examine (MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS * 180) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "dtlbmissload") == 0) {
		printf("Examine (((DTLB_LOAD_MISSES.STLB_HIT * 7) + DTLB_LOAD_MISSES.WALK_DURATION)\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "frontendstall") == 0) {
		printf("Examine IDQ_UOPS_NOT_DELIVERED.CORE / (CPU_CLK_UNHALTED.THREAD_P * 4)\n");
		mythresh = "thresh >= .15";
	} else if (strcmp(name, "clears") == 0) {
		printf("Examine ((MACHINE_CLEARS.MEMORY_ORDERING + \n");
		printf("          MACHINE_CLEARS.SMC + \n");
		printf("          MACHINE_CLEARS.MASKMOV ) * 100 ) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .02";
	} else if (strcmp(name, "microassist") == 0) {
		printf("Examine IDQ.MS_CYCLES / (CPU_CLK_UNHALTED.THREAD_P * 4)\n");
		printf("***We use IDQ.MS_UOPS,cmask=1 to get cycles\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "aliasing_4k") == 0) {
		printf("Examine (LD_BLOCKS_PARTIAL.ADDRESS_ALIAS * 5) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "fpassist") == 0) {
		printf("Examine FP_ASSIST.ANY/INST_RETIRED.ANY_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistavx") == 0) {
		printf("Examine (OTHER_ASSISTS.AVX_TO_SSE * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistsse") == 0) {
		printf("Examine (OTHER_ASSISTS.SSE_TO_AVX * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "eff1") == 0) {
		printf("Examine (UOPS_RETIRED.RETIRE_SLOTS)/(4 *CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh < .9";
	} else if (strcmp(name, "eff2") == 0) {
		printf("Examine CPU_CLK_UNHALTED.THREAD_P/INST_RETIRED.ANY_P\n");
		mythresh = "thresh > 1.0";
	} else if (strcmp(name, "dtlbmissstore") == 0) {
		printf("Examine (((DTLB_STORE_MISSES.STLB_HIT * 7) + DTLB_STORE_MISSES.WALK_DURATION)\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .05";
	} else {
		printf("Unknown name:%s\n", name);
		mythresh = "unknown entry";
	}
	printf("If the value printed is %s we may have the ability to improve performance\n", mythresh);
}
/*
 * explain_name_ib --
 *	Print, for an Ivy Bridge analysis entry, the counter formula the
 *	user should examine and the threshold at which it indicates a
 *	potential performance problem.  Unknown names get a diagnostic.
 *
 *	Fix: the "cache2" message opened a parenthesis that was never
 *	closed ("(MEM_LOAD_UOPS_RETIRED.LLC_HIT / CPU_CLK_UNHALTED.THREAD_P");
 *	the closing parenthesis has been added.
 */
static void
explain_name_ib(const char *name)
{
	const char *mythresh;

	if (strcmp(name, "br_miss") == 0) {
		printf("Examine ((BR_MISP_RETIRED.ALL_BRANCHES /(BR_MISP_RETIRED.ALL_BRANCHES +\n");
		printf(" MACHINE_CLEAR.COUNT) * ((UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES)\n");
		printf("/ (4 * CPU_CLK_UNHALTED.THREAD))))\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "eff1") == 0) {
		printf("Examine (UOPS_RETIRED.RETIRE_SLOTS)/(4 *CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh < .9";
	} else if (strcmp(name, "eff2") == 0) {
		printf("Examine CPU_CLK_UNHALTED.THREAD_P/INST_RETIRED.ANY_P\n");
		mythresh = "thresh > 1.0";
	} else if (strcmp(name, "cache1") == 0) {
		printf("Examine (MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * 180) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "cache2") == 0) {
		printf("Examine (MEM_LOAD_UOPS_RETIRED.LLC_HIT / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "itlbmiss") == 0) {
		printf("Examine ITLB_MISSES.WALK_DURATION / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "icachemiss") == 0) {
		printf("Examine (ICACHE.IFETCH_STALL - ITLB_MISSES.WALK_DURATION)/ CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "lcpstall") == 0) {
		printf("Examine ILD_STALL.LCP/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "datashare") == 0) {
		printf("Examine (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * 43)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "blockstorefwd") == 0) {
		printf("Examine (LD_BLOCKS_STORE_FORWARD * 13) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "splitload") == 0) {
		printf("Examine ((L1D_PEND_MISS.PENDING / MEM_LOAD_UOPS_RETIRED.L1_MISS) *\n");
		printf(" LD_BLOCKS.NO_SR)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "splitstore") == 0) {
		printf("Examine MEM_UOP_RETIRED.SPLIT_STORES / MEM_UOP_RETIRED.ALL_STORES\n");
		mythresh = "thresh >= .01";
	} else if (strcmp(name, "aliasing_4k") == 0) {
		printf("Examine (LD_BLOCKS_PARTIAL.ADDRESS_ALIAS * 5) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "dtlbmissload") == 0) {
		printf("Examine (((DTLB_LOAD_MISSES.STLB_HIT * 7) + DTLB_LOAD_MISSES.WALK_DURATION)\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "dtlbmissstore") == 0) {
		printf("Examine (((DTLB_STORE_MISSES.STLB_HIT * 7) + DTLB_STORE_MISSES.WALK_DURATION)\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "contested") == 0) {
		printf("Examine (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "clears") == 0) {
		printf("Examine ((MACHINE_CLEARS.MEMORY_ORDERING + \n");
		printf("          MACHINE_CLEARS.SMC + \n");
		printf("          MACHINE_CLEARS.MASKMOV ) * 100 ) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .02";
	} else if (strcmp(name, "microassist") == 0) {
		printf("Examine IDQ.MS_CYCLES / (4 * CPU_CLK_UNHALTED.THREAD_P)\n");
		printf("***We use IDQ.MS_UOPS,cmask=1 to get cycles\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "fpassist") == 0) {
		printf("Examine FP_ASSIST.ANY/INST_RETIRED.ANY_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistavx") == 0) {
		printf("Examine (OTHER_ASSISTS.AVX_TO_SSE * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistsse") == 0) {
		printf("Examine (OTHER_ASSISTS.SSE_TO_AVX * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else {
		printf("Unknown name:%s\n", name);
		mythresh = "unknown entry";
	}
	printf("If the value printed is %s we may have the ability to improve performance\n", mythresh);
}
/*
 * explain_name_has --
 *	Print, for a Haswell analysis entry, the counter formula the user
 *	should examine and the threshold at which it indicates a potential
 *	performance problem.  Unknown names get a diagnostic.
 */
static void
explain_name_has(const char *name)
{
	const char *mythresh;

	if (strcmp(name, "eff1") == 0) {
		printf("Examine (UOPS_RETIRED.RETIRE_SLOTS)/(4 *CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh < .75";
	} else if (strcmp(name, "eff2") == 0) {
		printf("Examine CPU_CLK_UNHALTED.THREAD_P/INST_RETIRED.ANY_P\n");
		mythresh = "thresh > 1.0";
	} else if (strcmp(name, "itlbmiss") == 0) {
		printf("Examine ITLB_MISSES.WALK_DURATION / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "icachemiss") == 0) {
		printf("Examine (36 * ICACHE.MISSES)/ CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "lcpstall") == 0) {
		printf("Examine ILD_STALL.LCP/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "cache1") == 0) {
		printf("Examine (MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * 180) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "cache2") == 0) {
		printf("Examine ((MEM_LOAD_UOPS_RETIRED.LLC_HIT * 36) + \n");
		printf(" (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 72) + \n");
		printf(" (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 84))\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "contested") == 0) {
		printf("Examine (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 84) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "datashare") == 0) {
		printf("Examine (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * 72)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh > .05";
	} else if (strcmp(name, "blockstorefwd") == 0) {
		printf("Examine (LD_BLOCKS_STORE_FORWARD * 13) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "splitload") == 0) {
		printf("Examine (MEM_UOP_RETIRED.SPLIT_LOADS * 5) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "splitstore") == 0) {
		printf("Examine MEM_UOP_RETIRED.SPLIT_STORES / MEM_UOP_RETIRED.ALL_STORES\n");
		mythresh = "thresh >= .01";
	} else if (strcmp(name, "aliasing_4k") == 0) {
		printf("Examine (LD_BLOCKS_PARTIAL.ADDRESS_ALIAS * 5) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "dtlbmissload") == 0) {
		printf("Examine (((DTLB_LOAD_MISSES.STLB_HIT * 7) + DTLB_LOAD_MISSES.WALK_DURATION)\n");
		printf(" / CPU_CLK_UNHALTED.THREAD_P)\n");
		mythresh = "thresh >= .1";
	} else if (strcmp(name, "br_miss") == 0) {
		printf("Examine (20 * BR_MISP_RETIRED.ALL_BRANCHES)/CPU_CLK_UNHALTED.THREAD\n");
		mythresh = "thresh >= .2";
	} else if (strcmp(name, "clears") == 0) {
		printf("Examine ((MACHINE_CLEARS.MEMORY_ORDERING + \n");
		printf("          MACHINE_CLEARS.SMC + \n");
		printf("          MACHINE_CLEARS.MASKMOV ) * 100 ) / CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "thresh >= .02";
	} else if (strcmp(name, "microassist") == 0) {
		printf("Examine IDQ.MS_CYCLES / (4 * CPU_CLK_UNHALTED.THREAD_P)\n");
		printf("***We use IDQ.MS_UOPS,cmask=1 to get cycles\n");
		mythresh = "thresh >= .05";
	} else if (strcmp(name, "fpassist") == 0) {
		printf("Examine FP_ASSIST.ANY/INST_RETIRED.ANY_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistavx") == 0) {
		printf("Examine (OTHER_ASSISTS.AVX_TO_SSE * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else if (strcmp(name, "otherassistsse") == 0) {
		printf("Examine (OTHER_ASSISTS.SSE_TO_AVX * 75)/CPU_CLK_UNHALTED.THREAD_P\n");
		mythresh = "look for a excessive value";
	} else {
		printf("Unknown name:%s\n", name);
		mythresh = "unknown entry";
	}
	printf("If the value printed is %s we may have the ability to improve performance\n", mythresh);
}
/*
 * find_counter --
 *	Look up a counter by name on the given list.  Matching is by
 *	*prefix* (strncmp over strlen(name)), which is relied upon
 *	elsewhere: e.g. allocstall2() searches for
 *	"PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP" which matches the stored
 *	"..._UOP_CYCLES" name, and microassist() finds "IDQ.MS_UOPS" with
 *	a ",cmask=1" suffix.  On failure the known counters are listed and
 *	the program exits.
 *
 *	Fix: len is now size_t to match strlen()'s return type, avoiding
 *	a signed/unsigned conversion.
 */
static struct counters *
find_counter(struct counters *base, const char *name)
{
	struct counters *at;
	size_t len;

	len = strlen(name);
	for (at = base; at != NULL; at = at->next_cpu) {
		if (strncmp(at->counter_name, name, len) == 0)
			return (at);
	}
	printf("Can't find counter %s\n", name);
	printf("We have:\n");
	for (at = base; at != NULL; at = at->next_cpu)
		printf("- %s\n", at->counter_name);
	exit(-1);
}
/*
 * allocstall1 -- print PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .05).
 * pos == -1 means use the running sums; otherwise use slot pos.
 */
static int
allocstall1(struct counters *cpu, int pos)
{
	struct counters *slow_lea, *clk;
	double stalls, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	slow_lea = find_counter(cpu, "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW");
	if (pos == -1) {
		stalls = (double)slow_lea->sum;
		cycles = (double)clk->sum;
	} else {
		stalls = (double)slow_lea->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", stalls / cycles));
}
/*
 * allocstall2 -- print PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .05).  The lookup
 * key omits the "_CYCLES" suffix; find_counter() matches by prefix.
 */
static int
allocstall2(struct counters *cpu, int pos)
{
	struct counters *merge, *clk;
	double stalls, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	merge = find_counter(cpu, "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP");
	if (pos == -1) {
		stalls = (double)merge->sum;
		cycles = (double)clk->sum;
	} else {
		stalls = (double)merge->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", stalls / cycles));
}
/*
 * br_mispredict -- print (20 * BR_MISP_RETIRED.ALL_BRANCHES) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh >= .2).
 */
static int
br_mispredict(struct counters *cpu, int pos)
{
	struct counters *misp, *clk;
	double misses, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	misp = find_counter(cpu, "BR_MISP_RETIRED.ALL_BRANCHES");
	if (pos == -1) {
		misses = (double)misp->sum;
		cycles = (double)clk->sum;
	} else {
		misses = (double)misp->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (20.0 * misses) / cycles));
}
/*
 * br_mispredictib -- Ivy Bridge branch-mispredict cost estimate:
 *
 *	(BR_MISP_RETIRED.ALL_BRANCHES /
 *	 (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEAR.COUNT)) *
 *	((UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS +
 *	  4 * INT_MISC.RECOVERY_CYCLES) / (4 * CPU_CLK_UNHALTED.THREAD))
 *
 * where MACHINE_CLEAR.COUNT is approximated here by the sum of the three
 * MACHINE_CLEARS.* counters.  pos == -1 selects the running sums.
 */
static int
br_mispredictib(struct counters *cpu, int pos)
{
	struct counters *brctr;
	struct counters *unhalt;
	struct counters *clear, *clear2, *clear3;
	struct counters *uops;
	struct counters *recv;
	struct counters *iss;
/* "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s BR_MISP_RETIRED.ALL_BRANCHES -s MACHINE_CLEARS.MEMORY_ORDERING -s MACHINE_CLEARS.SMC -s MACHINE_CLEARS.MASKMOV -s UOPS_ISSUED.ANY -s UOPS_RETIRED.RETIRE_SLOTS -s INT_MISC.RECOVERY_CYCLES -w 1",*/
	int ret;
	/*
	 * (BR_MISP_RETIRED.ALL_BRANCHES /
	 *       (BR_MISP_RETIRED.ALL_BRANCHES +
	 *        MACHINE_CLEAR.COUNT) *
	 * ((UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES) / (4 * CPU_CLK_UNHALTED.THREAD)))
	 *
	 */
	double br, cl, cl2, cl3, uo, re, un, con, res, is;

	con = 4.0;	/* issue width: 4 uop slots per cycle */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	brctr = find_counter(cpu, "BR_MISP_RETIRED.ALL_BRANCHES");
	clear = find_counter(cpu, "MACHINE_CLEARS.MEMORY_ORDERING");
	clear2 = find_counter(cpu, "MACHINE_CLEARS.SMC");
	clear3 = find_counter(cpu, "MACHINE_CLEARS.MASKMOV");
	uops = find_counter(cpu, "UOPS_RETIRED.RETIRE_SLOTS");
	iss = find_counter(cpu, "UOPS_ISSUED.ANY");
	recv = find_counter(cpu, "INT_MISC.RECOVERY_CYCLES");
	if (pos != -1) {
		br = brctr->vals[pos] * 1.0;
		cl = clear->vals[pos] * 1.0;
		cl2 = clear2->vals[pos] * 1.0;
		cl3 = clear3->vals[pos] * 1.0;
		uo = uops->vals[pos] * 1.0;
		re = recv->vals[pos] * 1.0;
		is = iss->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		br = brctr->sum * 1.0;
		cl = clear->sum * 1.0;
		cl2 = clear2->sum * 1.0;
		cl3 = clear3->sum * 1.0;
		uo = uops->sum * 1.0;
		re = recv->sum * 1.0;
		is = iss->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = (br/(br + cl + cl2 + cl3) * ((is - uo + con * re) / (con * un)));
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * splitloadib -- Ivy Bridge split-load estimate:
 * ((L1D_PEND_MISS.PENDING / MEM_LOAD_UOPS_RETIRED.L1_MISS) *
 *  LD_BLOCKS.NO_SR) / CPU_CLK_UNHALTED.THREAD_P
 */
static int
splitloadib(struct counters *cpu, int pos)
{
	struct counters *l1miss, *pend, *noblk, *clk;
	double misses, pending, blocks, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	l1miss = find_counter(cpu, "MEM_LOAD_UOPS_RETIRED.L1_MISS");
	pend = find_counter(cpu, "L1D_PEND_MISS.PENDING");
	noblk = find_counter(cpu, "LD_BLOCKS.NO_SR");
	if (pos == -1) {
		misses = (double)l1miss->sum;
		pending = (double)pend->sum;
		blocks = (double)noblk->sum;
		cycles = (double)clk->sum;
	} else {
		misses = (double)l1miss->vals[pos];
		pending = (double)pend->vals[pos];
		blocks = (double)noblk->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", ((pending / misses) * blocks) / cycles));
}
/*
 * splitload -- print (MEM_UOP_RETIRED.SPLIT_LOADS * 5) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh >= .1).
 */
static int
splitload(struct counters *cpu, int pos)
{
	struct counters *split_ld, *clk;
	double loads, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	split_ld = find_counter(cpu, "MEM_UOP_RETIRED.SPLIT_LOADS");
	if (pos == -1) {
		loads = (double)split_ld->sum;
		cycles = (double)clk->sum;
	} else {
		loads = (double)split_ld->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (loads * 5.0) / cycles));
}
/*
 * splitstore -- print MEM_UOP_RETIRED.SPLIT_STORES /
 * MEM_UOP_RETIRED.ALL_STORES (interesting when thresh > .01).
 */
static int
splitstore(struct counters *cpu, int pos)
{
	struct counters *split_st, *all_st;
	double splits, stores;

	split_st = find_counter(cpu, "MEM_UOP_RETIRED.SPLIT_STORES");
	all_st = find_counter(cpu, "MEM_UOP_RETIRED.ALL_STORES");
	if (pos == -1) {
		splits = (double)split_st->sum;
		stores = (double)all_st->sum;
	} else {
		splits = (double)split_st->vals[pos];
		stores = (double)all_st->vals[pos];
	}
	return (printf("%1.3f", splits / stores));
}
/*
 * contested -- print (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .05).
 */
static int
contested(struct counters *cpu, int pos)
{
	struct counters *hitm, *clk;
	double hits, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	hitm = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM");
	if (pos == -1) {
		hits = (double)hitm->sum;
		cycles = (double)clk->sum;
	} else {
		hits = (double)hitm->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (hits * 60.0) / cycles));
}
/*
 * contested_has -- Haswell variant of contested():
 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 84) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .05).
 */
static int
contested_has(struct counters *cpu, int pos)
{
	struct counters *hitm, *clk;
	double hits, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	hitm = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM");
	if (pos == -1) {
		hits = (double)hitm->sum;
		cycles = (double)clk->sum;
	} else {
		hits = (double)hitm->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (hits * 84.0) / cycles));
}
/*
 * blockstoreforward -- print (LD_BLOCKS_STORE_FORWARD * 13) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh >= .05).
 */
static int
blockstoreforward(struct counters *cpu, int pos)
{
	struct counters *blocked, *clk;
	double blocks, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	blocked = find_counter(cpu, "LD_BLOCKS_STORE_FORWARD");
	if (pos == -1) {
		blocks = (double)blocked->sum;
		cycles = (double)clk->sum;
	} else {
		blocks = (double)blocked->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (blocks * 13.0) / cycles));
}
/*
 * cache2 -- Sandy Bridge L3-hit cost estimate (flagged suspect by the
 * original author):
 *
 *	((MEM_LOAD_RETIRED.L3_HIT * 26) +
 *	 (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 43) +
 *	 (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60)) /
 *	CPU_CLK_UNHALTED.THREAD_P	(thresh > .2)
 *
 * MEM_LOAD_UOPS_RETIRED.LLC_HIT is used in place of
 * MEM_LOAD_RETIRED.L3_HIT (see comment below).
 */
static int
cache2(struct counters *cpu, int pos)
{
/* ** Suspect ***
 * 8 - ((MEM_LOAD_RETIRED.L3_HIT * 26) + (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 43) +
 *      (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 60)) / CPU_CLK_UNHALTED.THREAD_P (thresh >.2)
 */
	int ret;
	struct counters *mem1, *mem2, *mem3;
	struct counters *unhalt;
	double con1, con2, con3, un, me_1, me_2, me_3, res;

	con1 = 26.0;	/* cost weight for an LLC hit */
	con2 = 43.0;	/* cost weight for a cross-core clean snoop hit */
	con3 = 60.0;	/* cost weight for a cross-core modified snoop hit */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	/* Call for MEM_LOAD_RETIRED.L3_HIT possibly MEM_LOAD_UOPS_RETIRED.LLC_HIT ?*/
	mem1 = find_counter(cpu, "MEM_LOAD_UOPS_RETIRED.LLC_HIT");
	mem2 = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT");
	mem3 = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM");
	if (pos != -1) {
		me_1 = mem1->vals[pos] * 1.0;
		me_2 = mem2->vals[pos] * 1.0;
		me_3 = mem3->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		me_1 = mem1->sum * 1.0;
		me_2 = mem2->sum * 1.0;
		me_3 = mem3->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = ((me_1 * con1) + (me_2 * con2) + (me_3 * con3))/un;
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * datasharing -- cross-core data sharing cost:
 *
 *	(MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 43) /
 *	CPU_CLK_UNHALTED.THREAD_P	(thresh > .2)
 *
 * (The original comment named the Haswell-style MEM_LOAD_UOPS_L3_HIT_RETIRED
 * counter; the code actually reads MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT.)
 */
static int
datasharing(struct counters *cpu, int pos)
{
	int ret;
	struct counters *mem;
	struct counters *unhalt;
	double con, res, me, un;

	con = 43.0;	/* cost weight for a clean snoop hit */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	mem = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT");
	if (pos != -1) {
		me = mem->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		me = mem->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = (me * con)/un;
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * datasharing_has -- Haswell variant of datasharing():
 *
 *	(MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 72) /
 *	CPU_CLK_UNHALTED.THREAD_P	(thresh > .2)
 *
 * (The original comment said "* 43", copied from the Sandy/Ivy Bridge
 * version; the code uses the Haswell weight 72.)
 */
static int
datasharing_has(struct counters *cpu, int pos)
{
	int ret;
	struct counters *mem;
	struct counters *unhalt;
	double con, res, me, un;

	con = 72.0;	/* Haswell cost weight for a clean snoop hit */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	mem = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT");
	if (pos != -1) {
		me = mem->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		me = mem->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = (me * con)/un;
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * cache2ib -- Ivy Bridge L3-hit cost estimate:
 *
 *	(29 * MEM_LOAD_UOPS_RETIRED.LLC_HIT) /
 *	CPU_CLK_UNHALTED.THREAD_P	(thresh > .2)
 *
 * (The original comment had an unbalanced parenthesis.)
 */
static int
cache2ib(struct counters *cpu, int pos)
{
	int ret;
	struct counters *mem;
	struct counters *unhalt;
	double con, un, me, res;

	con = 29.0;	/* cost weight for an LLC hit */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	mem = find_counter(cpu, "MEM_LOAD_UOPS_RETIRED.LLC_HIT");
	if (pos != -1) {
		me = mem->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		me = mem->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = (con * me)/un;
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * cache2has -- Haswell weighted L3-hit cost:
 * ((MEM_LOAD_UOPS_RETIRED.LLC_HIT * 36) +
 *  (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * 72) +
 *  (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * 84)) /
 * CPU_CLK_UNHALTED.THREAD_P
 */
static int
cache2has(struct counters *cpu, int pos)
{
	struct counters *llc_hit, *snp_hit, *snp_hitm, *clk;
	double h1, h2, h3, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	llc_hit = find_counter(cpu, "MEM_LOAD_UOPS_RETIRED.LLC_HIT");
	snp_hit = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT");
	snp_hitm = find_counter(cpu, "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM");
	if (pos == -1) {
		h1 = (double)llc_hit->sum;
		h2 = (double)snp_hit->sum;
		h3 = (double)snp_hitm->sum;
		cycles = (double)clk->sum;
	} else {
		h1 = (double)llc_hit->vals[pos];
		h2 = (double)snp_hit->vals[pos];
		h3 = (double)snp_hitm->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", ((h1 * 36.0) + (h2 * 72.0) + (h3 * 84.0)) / cycles));
}
/*
 * cache1 -- print (MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS * 180) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh >= .2).
 */
static int
cache1(struct counters *cpu, int pos)
{
	struct counters *llc_miss, *clk;
	double misses, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	llc_miss = find_counter(cpu, "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS");
	if (pos == -1) {
		misses = (double)llc_miss->sum;
		cycles = (double)clk->sum;
	} else {
		misses = (double)llc_miss->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (misses * 180.0) / cycles));
}
/*
 * cache1ib -- Ivy Bridge local-DRAM access cost:
 *
 *	(MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * 180) /
 *	CPU_CLK_UNHALTED.THREAD_P	(thresh >= .2)
 *
 * (The original comment's "MEM_LOAD_UOPS_L3_MISS_RETIRED.LCOAL_DRAM" was
 * a typo for the counter the code actually reads.)
 */
static int
cache1ib(struct counters *cpu, int pos)
{
	int ret;
	struct counters *mem;
	struct counters *unhalt;
	double con, un, me, res;

	con = 180.0;	/* cost weight for a local-DRAM access */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	mem = find_counter(cpu, "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM");
	if (pos != -1) {
		me = mem->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		me = mem->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = (me * con)/un;
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * dtlb_missload -- print ((DTLB_LOAD_MISSES.STLB_HIT * 7) +
 * DTLB_LOAD_MISSES.WALK_DURATION) / CPU_CLK_UNHALTED.THREAD_P
 * (interesting when thresh >= .1).
 */
static int
dtlb_missload(struct counters *cpu, int pos)
{
	struct counters *stlb, *walk, *clk;
	double hits, duration, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	stlb = find_counter(cpu, "DTLB_LOAD_MISSES.STLB_HIT");
	walk = find_counter(cpu, "DTLB_LOAD_MISSES.WALK_DURATION");
	if (pos == -1) {
		hits = (double)stlb->sum;
		duration = (double)walk->sum;
		cycles = (double)clk->sum;
	} else {
		hits = (double)stlb->vals[pos];
		duration = (double)walk->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", ((hits * 7.0) + duration) / cycles));
}
/*
 * dtlb_missstore -- print ((DTLB_STORE_MISSES.STLB_HIT * 7) +
 * DTLB_STORE_MISSES.WALK_DURATION) / CPU_CLK_UNHALTED.THREAD_P
 * (interesting when thresh >= .1).
 */
static int
dtlb_missstore(struct counters *cpu, int pos)
{
	struct counters *stlb, *walk, *clk;
	double hits, duration, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	stlb = find_counter(cpu, "DTLB_STORE_MISSES.STLB_HIT");
	walk = find_counter(cpu, "DTLB_STORE_MISSES.WALK_DURATION");
	if (pos == -1) {
		hits = (double)stlb->sum;
		duration = (double)walk->sum;
		cycles = (double)clk->sum;
	} else {
		hits = (double)stlb->vals[pos];
		duration = (double)walk->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", ((hits * 7.0) + duration) / cycles));
}
/*
 * itlb_miss -- print ITLB_MISSES.WALK_DURATION /
 * CPU_CLK_UNHALTED.THREAD_P (Ivy Bridge).
 */
static int
itlb_miss(struct counters *cpu, int pos)
{
	struct counters *walk, *clk;
	double duration, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	walk = find_counter(cpu, "ITLB_MISSES.WALK_DURATION");
	if (pos == -1) {
		duration = (double)walk->sum;
		cycles = (double)clk->sum;
	} else {
		duration = (double)walk->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", duration / cycles));
}
/*
 * icache_miss -- print (ICACHE.IFETCH_STALL -
 * ITLB_MISSES.WALK_DURATION) / CPU_CLK_UNHALTED.THREAD_P (Ivy Bridge).
 */
static int
icache_miss(struct counters *cpu, int pos)
{
	struct counters *walk, *fetch, *clk;
	double duration, stall, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	walk = find_counter(cpu, "ITLB_MISSES.WALK_DURATION");
	fetch = find_counter(cpu, "ICACHE.IFETCH_STALL");
	if (pos == -1) {
		duration = (double)walk->sum;
		stall = (double)fetch->sum;
		cycles = (double)clk->sum;
	} else {
		duration = (double)walk->vals[pos];
		stall = (double)fetch->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (stall - duration) / cycles));
}
/*
 * icache_miss_has -- Haswell variant: print (36 * ICACHE.MISSES) /
 * CPU_CLK_UNHALTED.THREAD_P.
 */
static int
icache_miss_has(struct counters *cpu, int pos)
{
	struct counters *misses, *clk;
	double missed, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	misses = find_counter(cpu, "ICACHE.MISSES");
	if (pos == -1) {
		missed = (double)misses->sum;
		cycles = (double)clk->sum;
	} else {
		missed = (double)misses->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (36.0 * missed) / cycles));
}
/*
 * lcp_stall -- print ILD_STALL.LCP / CPU_CLK_UNHALTED.THREAD_P
 * (Ivy Bridge).
 */
static int
lcp_stall(struct counters *cpu, int pos)
{
	struct counters *lcp, *clk;
	double stalls, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	lcp = find_counter(cpu, "ILD_STALL.LCP");
	if (pos == -1) {
		stalls = (double)lcp->sum;
		cycles = (double)clk->sum;
	} else {
		stalls = (double)lcp->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", stalls / cycles));
}
/*
 * frontendstall -- print IDQ_UOPS_NOT_DELIVERED.CORE /
 * (CPU_CLK_UNHALTED.THREAD_P * 4) (interesting when thresh >= .15).
 */
static int
frontendstall(struct counters *cpu, int pos)
{
	struct counters *notdel, *clk;
	double missed, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	notdel = find_counter(cpu, "IDQ_UOPS_NOT_DELIVERED.CORE");
	if (pos == -1) {
		missed = (double)notdel->sum;
		cycles = (double)clk->sum;
	} else {
		missed = (double)notdel->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", missed / (cycles * 4.0)));
}
/*
 * clears -- print ((MACHINE_CLEARS.MEMORY_ORDERING +
 * MACHINE_CLEARS.SMC + MACHINE_CLEARS.MASKMOV) * 100) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh >= .02).
 */
static int
clears(struct counters *cpu, int pos)
{
	struct counters *order, *smc, *mask, *clk;
	double c_order, c_smc, c_mask, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	order = find_counter(cpu, "MACHINE_CLEARS.MEMORY_ORDERING");
	smc = find_counter(cpu, "MACHINE_CLEARS.SMC");
	mask = find_counter(cpu, "MACHINE_CLEARS.MASKMOV");
	if (pos == -1) {
		c_order = (double)order->sum;
		c_smc = (double)smc->sum;
		c_mask = (double)mask->sum;
		cycles = (double)clk->sum;
	} else {
		c_order = (double)order->vals[pos];
		c_smc = (double)smc->vals[pos];
		c_mask = (double)mask->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", ((c_order + c_smc + c_mask) * 100.0) / cycles));
}
/*
 * microassist -- microcode assist estimate:
 *
 *	IDQ.MS_CYCLES / (4 * CPU_CLK_UNHALTED.THREAD_P)	(thresh > .05)
 *
 * (The original comment omitted the "4 *" divisor the code applies.)
 * The IDQ.MS_UOPS counter is used here; per the explain text it is
 * programmed with cmask=1 so it counts cycles.
 */
static int
microassist(struct counters *cpu, int pos)
{
	int ret;
	struct counters *idq;
	struct counters *unhalt;
	double un, id, res, con;

	con = 4.0;	/* issue width: 4 uop slots per cycle */
	unhalt = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	idq = find_counter(cpu, "IDQ.MS_UOPS");
	if (pos != -1) {
		id = idq->vals[pos] * 1.0;
		un = unhalt->vals[pos] * 1.0;
	} else {
		id = idq->sum * 1.0;
		un = unhalt->sum * 1.0;
	}
	res = id/(un * con);
	ret = printf("%1.3f", res);
	return(ret);
}
/*
 * aliasing -- print (LD_BLOCKS_PARTIAL.ADDRESS_ALIAS * 5) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .1).
 */
static int
aliasing(struct counters *cpu, int pos)
{
	struct counters *alias, *clk;
	double aliased, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	alias = find_counter(cpu, "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS");
	if (pos == -1) {
		aliased = (double)alias->sum;
		cycles = (double)clk->sum;
	} else {
		aliased = (double)alias->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (aliased * 5.0) / cycles));
}
/*
 * fpassists -- print FP_ASSIST.ANY / INST_RETIRED.ANY_P
 * (look for an excessive value).
 */
static int
fpassists(struct counters *cpu, int pos)
{
	struct counters *assists, *insts;
	double assisted, retired;

	insts = find_counter(cpu, "INST_RETIRED.ANY_P");
	assists = find_counter(cpu, "FP_ASSIST.ANY");
	if (pos == -1) {
		assisted = (double)assists->sum;
		retired = (double)insts->sum;
	} else {
		assisted = (double)assists->vals[pos];
		retired = (double)insts->vals[pos];
	}
	return (printf("%1.3f", assisted / retired));
}
/*
 * otherassistavx -- print (OTHER_ASSISTS.AVX_TO_SSE * 75) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .1).
 */
static int
otherassistavx(struct counters *cpu, int pos)
{
	struct counters *assists, *clk;
	double transitions, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	assists = find_counter(cpu, "OTHER_ASSISTS.AVX_TO_SSE");
	if (pos == -1) {
		transitions = (double)assists->sum;
		cycles = (double)clk->sum;
	} else {
		transitions = (double)assists->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (transitions * 75.0) / cycles));
}
/*
 * otherassistsse -- print (OTHER_ASSISTS.SSE_TO_AVX * 75) /
 * CPU_CLK_UNHALTED.THREAD_P (interesting when thresh > .1).
 */
static int
otherassistsse(struct counters *cpu, int pos)
{
	struct counters *assists, *clk;
	double transitions, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	assists = find_counter(cpu, "OTHER_ASSISTS.SSE_TO_AVX");
	if (pos == -1) {
		transitions = (double)assists->sum;
		cycles = (double)clk->sum;
	} else {
		transitions = (double)assists->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", (transitions * 75.0) / cycles));
}
/*
 * efficiency1 -- print UOPS_RETIRED.RETIRE_SLOTS /
 * (4 * CPU_CLK_UNHALTED.THREAD_P); a value below ~.9 suggests lost
 * retirement bandwidth.
 */
static int
efficiency1(struct counters *cpu, int pos)
{
	struct counters *slots, *clk;
	double retired, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	slots = find_counter(cpu, "UOPS_RETIRED.RETIRE_SLOTS");
	if (pos == -1) {
		retired = (double)slots->sum;
		cycles = (double)clk->sum;
	} else {
		retired = (double)slots->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", retired / (4.0 * cycles)));
}
/*
 * efficiency2 -- print CPU_CLK_UNHALTED.THREAD_P / INST_RETIRED.ANY_P
 * (cycles per instruction; values above 1.0 are worth a look).
 */
static int
efficiency2(struct counters *cpu, int pos)
{
	struct counters *insts, *clk;
	double retired, cycles;

	clk = find_counter(cpu, "CPU_CLK_UNHALTED.THREAD_P");
	insts = find_counter(cpu, "INST_RETIRED.ANY_P");
	if (pos == -1) {
		retired = (double)insts->sum;
		cycles = (double)clk->sum;
	} else {
		retired = (double)insts->vals[pos];
		cycles = (double)clk->vals[pos];
	}
	return (printf("%1.3f", cycles / retired));
}
#define SANDY_BRIDGE_COUNT 20
/*
 * Canned expressions for Sandy Bridge CPUs. Each entry carries the -e
 * name the user types, the threshold text shown to the user, the
 * pmcstat command that gathers the needed counters, and the function
 * that evaluates the expression over those counters.
 */
static struct cpu_entry sandy_bridge[SANDY_BRIDGE_COUNT] = {
/*01*/	{ "allocstall1", "thresh > .05",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW -w 1",
	  allocstall1 },
/*02*/	{ "allocstall2", "thresh > .05",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES -w 1",
	  allocstall2 },
/*03*/	{ "br_miss", "thresh >= .2",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s BR_MISP_RETIRED.ALL_BRANCHES -w 1",
	  br_mispredict },
/*04*/	{ "splitload", "thresh >= .1",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s MEM_UOP_RETIRED.SPLIT_LOADS -w 1",
	  splitload },
/*05*/	{ "splitstore", "thresh >= .01",
	  "pmcstat -s MEM_UOP_RETIRED.SPLIT_STORES -s MEM_UOP_RETIRED.ALL_STORES -w 1",
	  splitstore },
/*06*/	{ "contested", "thresh >= .05",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  contested },
/*07*/	{ "blockstorefwd", "thresh >= .05",
	  "pmcstat -s LD_BLOCKS_STORE_FORWARD -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  blockstoreforward },
/*08*/	{ "cache2", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_RETIRED.LLC_HIT -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache2 },
/*09*/	{ "cache1", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache1 },
/*10*/	{ "dtlbmissload", "thresh >= .1",
	  "pmcstat -s DTLB_LOAD_MISSES.STLB_HIT -s DTLB_LOAD_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  dtlb_missload },
/*11*/	{ "dtlbmissstore", "thresh >= .05",
	  "pmcstat -s DTLB_STORE_MISSES.STLB_HIT -s DTLB_STORE_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  dtlb_missstore },
/*12*/	{ "frontendstall", "thresh >= .15",
	  "pmcstat -s IDQ_UOPS_NOT_DELIVERED.CORE -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  frontendstall },
/*13*/	{ "clears", "thresh >= .02",
	  "pmcstat -s MACHINE_CLEARS.MEMORY_ORDERING -s MACHINE_CLEARS.SMC -s MACHINE_CLEARS.MASKMOV -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  clears },
/*14*/	{ "microassist", "thresh >= .05",
	  "pmcstat -s IDQ.MS_UOPS,cmask=1 -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  microassist },
/*15*/	{ "aliasing_4k", "thresh >= .1",
	  "pmcstat -s LD_BLOCKS_PARTIAL.ADDRESS_ALIAS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  aliasing },
/*16*/	{ "fpassist", "look for a excessive value",
	  "pmcstat -s FP_ASSIST.ANY -s INST_RETIRED.ANY_P -w 1",
	  fpassists },
/*17*/	{ "otherassistavx", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.AVX_TO_SSE -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistavx },
/*18*/	{ "otherassistsse", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.SSE_TO_AVX -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistsse },
/*19*/	{ "eff1", "thresh < .9",
	  "pmcstat -s UOPS_RETIRED.RETIRE_SLOTS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency1 },
/*20*/	{ "eff2", "thresh > 1.0",
	  "pmcstat -s INST_RETIRED.ANY_P -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency2 },
};
#define IVY_BRIDGE_COUNT 21
/*
 * Canned expressions for Ivy Bridge CPUs; same layout as sandy_bridge:
 * user-visible name, threshold text, counter-gathering pmcstat command,
 * and the evaluation function.
 */
static struct cpu_entry ivy_bridge[IVY_BRIDGE_COUNT] = {
/*1*/	{ "eff1", "thresh < .75",
	  "pmcstat -s UOPS_RETIRED.RETIRE_SLOTS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency1 },
/*2*/	{ "eff2", "thresh > 1.0",
	  "pmcstat -s INST_RETIRED.ANY_P -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency2 },
/*3*/	{ "itlbmiss", "thresh > .05",
	  "pmcstat -s ITLB_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  itlb_miss },
/*4*/	{ "icachemiss", "thresh > .05",
	  "pmcstat -s ICACHE.IFETCH_STALL -s ITLB_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  icache_miss },
/*5*/	{ "lcpstall", "thresh > .05",
	  "pmcstat -s ILD_STALL.LCP -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  lcp_stall },
/*6*/	{ "cache1", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache1ib },
/*7*/	{ "cache2", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_RETIRED.LLC_HIT -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache2ib },
/*8*/	{ "contested", "thresh >= .05",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  contested },
/*9*/	{ "datashare", "thresh >= .05",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  datasharing },
/*10*/	{ "blockstorefwd", "thresh >= .05",
	  "pmcstat -s LD_BLOCKS_STORE_FORWARD -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  blockstoreforward },
/*11*/	{ "splitload", "thresh >= .1",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s L1D_PEND_MISS.PENDING -s MEM_LOAD_UOPS_RETIRED.L1_MISS -s LD_BLOCKS.NO_SR -w 1",
	  splitloadib },
/*12*/	{ "splitstore", "thresh >= .01",
	  "pmcstat -s MEM_UOP_RETIRED.SPLIT_STORES -s MEM_UOP_RETIRED.ALL_STORES -w 1",
	  splitstore },
/*13*/	{ "aliasing_4k", "thresh >= .1",
	  "pmcstat -s LD_BLOCKS_PARTIAL.ADDRESS_ALIAS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  aliasing },
/*14*/	{ "dtlbmissload", "thresh >= .1",
	  "pmcstat -s DTLB_LOAD_MISSES.STLB_HIT -s DTLB_LOAD_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  dtlb_missload },
/*15*/	{ "dtlbmissstore", "thresh >= .05",
	  "pmcstat -s DTLB_STORE_MISSES.STLB_HIT -s DTLB_STORE_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  dtlb_missstore },
/*16*/	{ "br_miss", "thresh >= .2",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s BR_MISP_RETIRED.ALL_BRANCHES -s MACHINE_CLEARS.MEMORY_ORDERING -s MACHINE_CLEARS.SMC -s MACHINE_CLEARS.MASKMOV -s UOPS_ISSUED.ANY -s UOPS_RETIRED.RETIRE_SLOTS -s INT_MISC.RECOVERY_CYCLES -w 1",
	  br_mispredictib },
/*17*/	{ "clears", "thresh >= .02",
	  "pmcstat -s MACHINE_CLEARS.MEMORY_ORDERING -s MACHINE_CLEARS.SMC -s MACHINE_CLEARS.MASKMOV -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  clears },
/*18*/	{ "microassist", "thresh >= .05",
	  "pmcstat -s IDQ.MS_UOPS,cmask=1 -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  microassist },
/*19*/	{ "fpassist", "look for a excessive value",
	  "pmcstat -s FP_ASSIST.ANY -s INST_RETIRED.ANY_P -w 1",
	  fpassists },
/*20*/	{ "otherassistavx", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.AVX_TO_SSE -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistavx },
/*21*/	{ "otherassistsse", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.SSE_TO_AVX -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistsse },
};
#define HASWELL_COUNT 20
/*
 * Canned expressions for Haswell CPUs; same layout as sandy_bridge:
 * user-visible name, threshold text, counter-gathering pmcstat command,
 * and the evaluation function.
 *
 * Fix: entry 4's command used "--s" (double dash) before
 * CPU_CLK_UNHALTED.THREAD_P, which is not a valid pmcstat option, so
 * the icachemiss expression could never collect its cycle counter.
 */
static struct cpu_entry haswell[HASWELL_COUNT] = {
/*1*/	{ "eff1", "thresh < .75",
	  "pmcstat -s UOPS_RETIRED.RETIRE_SLOTS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency1 },
/*2*/	{ "eff2", "thresh > 1.0",
	  "pmcstat -s INST_RETIRED.ANY_P -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  efficiency2 },
/*3*/	{ "itlbmiss", "thresh > .05",
	  "pmcstat -s ITLB_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  itlb_miss },
/*4*/	{ "icachemiss", "thresh > .05",
	  "pmcstat -s ICACHE.MISSES -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  icache_miss_has },
/*5*/	{ "lcpstall", "thresh > .05",
	  "pmcstat -s ILD_STALL.LCP -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  lcp_stall },
/*6*/	{ "cache1", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache1ib },
/*7*/	{ "cache2", "thresh >= .2",
	  "pmcstat -s MEM_LOAD_UOPS_RETIRED.LLC_HIT -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  cache2has },
/*8*/	{ "contested", "thresh >= .05",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  contested_has },
/*9*/	{ "datashare", "thresh >= .05",
	  "pmcstat -s MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  datasharing_has },
/*10*/	{ "blockstorefwd", "thresh >= .05",
	  "pmcstat -s LD_BLOCKS_STORE_FORWARD -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  blockstoreforward },
/*11*/	{ "splitload", "thresh >= .1",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s MEM_UOP_RETIRED.SPLIT_LOADS -w 1",
	  splitload },
/*12*/	{ "splitstore", "thresh >= .01",
	  "pmcstat -s MEM_UOP_RETIRED.SPLIT_STORES -s MEM_UOP_RETIRED.ALL_STORES -w 1",
	  splitstore },
/*13*/	{ "aliasing_4k", "thresh >= .1",
	  "pmcstat -s LD_BLOCKS_PARTIAL.ADDRESS_ALIAS -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  aliasing },
/*14*/	{ "dtlbmissload", "thresh >= .1",
	  "pmcstat -s DTLB_LOAD_MISSES.STLB_HIT -s DTLB_LOAD_MISSES.WALK_DURATION -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  dtlb_missload },
/*15*/	{ "br_miss", "thresh >= .2",
	  "pmcstat -s CPU_CLK_UNHALTED.THREAD_P -s BR_MISP_RETIRED.ALL_BRANCHES -w 1",
	  br_mispredict },
/*16*/	{ "clears", "thresh >= .02",
	  "pmcstat -s MACHINE_CLEARS.MEMORY_ORDERING -s MACHINE_CLEARS.SMC -s MACHINE_CLEARS.MASKMOV -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  clears },
/*17*/	{ "microassist", "thresh >= .05",
	  "pmcstat -s IDQ.MS_UOPS,cmask=1 -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  microassist },
/*18*/	{ "fpassist", "look for a excessive value",
	  "pmcstat -s FP_ASSIST.ANY -s INST_RETIRED.ANY_P -w 1",
	  fpassists },
/*19*/	{ "otherassistavx", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.AVX_TO_SSE -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistavx },
/*20*/	{ "otherassistsse", "look for a excessive value",
	  "pmcstat -s OTHER_ASSISTS.SSE_TO_AVX -s CPU_CLK_UNHALTED.THREAD_P -w 1",
	  otherassistsse },
};
static void
set_sandybridge(void)
{
	/* Select the Sandy Bridge table of canned expressions. */
	the_cpu.number = SANDY_BRIDGE_COUNT;
	the_cpu.ents = sandy_bridge;
	the_cpu.explain = explain_name_sb;
	strcpy(the_cpu.cputype, "SandyBridge PMC");
}
static void
set_ivybridge(void)
{
	/* Select the Ivy Bridge table of canned expressions. */
	the_cpu.number = IVY_BRIDGE_COUNT;
	the_cpu.ents = ivy_bridge;
	the_cpu.explain = explain_name_ib;
	strcpy(the_cpu.cputype, "IvyBridge PMC");
}
static void
set_haswell(void)
{
	/* Select the Haswell table of canned expressions. */
	the_cpu.number = HASWELL_COUNT;
	the_cpu.ents = haswell;
	the_cpu.explain = explain_name_has;
	strcpy(the_cpu.cputype, "HASWELL PMC");
}
static void
set_expression(char *name)
{
	int i;

	/*
	 * Look the name up in the current CPU's table; on a match latch
	 * the expression function, the pmcstat command that feeds it,
	 * and the threshold text. Unknown names are fatal.
	 */
	for (i = 0; i < the_cpu.number; i++) {
		if (strcmp(name, the_cpu.ents[i].name) != 0)
			continue;
		expression = the_cpu.ents[i].func;
		command = the_cpu.ents[i].command;
		threshold = the_cpu.ents[i].thresh;
		return;
	}
	printf("For CPU type %s we have no expression:%s\n",
	    the_cpu.cputype, name);
	exit(-1);
}
static int
validate_expression(char *name)
{
	int i;

	/* Return 0 iff the name exists in the current CPU's table. */
	for (i = 0; i < the_cpu.number; i++) {
		if (strcmp(name, the_cpu.ents[i].name) == 0)
			return (0);
	}
	return (-1);
}
static void
do_expression(struct counters *cpu, int pos)
{
	/* Evaluate the selected expression, if any was configured. */
	if (expression != NULL)
		(*expression)(cpu, pos);
}
static void
process_header(int idx, char *p)
{
	struct counters *up;
	char *name;

	/*
	 * Given header element idx, at p in form 's/NN/nameof',
	 * pull out the counter name and the CPU number.
	 */
	if (strncmp(p, "s/", 2)) {
		printf("Check -- invalid header no s/ in %s\n",
		       p);
		return;
	}
	up = &cnts[idx];
	/* NN starts at p[2]; strtol stops at the '/' separator. */
	up->cpu = strtol(&p[2], NULL, 10);
	/*
	 * The counter name is everything after the last '/'. (The old
	 * scan also ended up copying from the final '/', one strcpy per
	 * separator; strrchr does it once.)
	 */
	name = strrchr(&p[2], '/');
	if (name == NULL)
		return;		/* malformed: leave counter_name untouched */
	name++;
	/*
	 * snprintf always NUL-terminates (truncating at MAX_NLEN-1 like
	 * before); the old strncpy path silently relied on the caller
	 * having zeroed the struct for termination.
	 */
	snprintf(up->counter_name, MAX_NLEN, "%s", name);
}
static void
build_counters_from_header(FILE *io)
{
	char buffer[8192], *p;
	int i, len, cnt;

	/* We have a new start, lets
	 * setup our headers and cpus.
	 */
	if (fgets(buffer, sizeof(buffer), io) == NULL) {
		printf("First line can't be read from file err:%d\n", errno);
		return;
	}
	/*
	 * Ok output is an array of counters. Once
	 * we start to read the values in we must
	 * put them in there slot to match there CPU and
	 * counter being updated. We create a mass array
	 * of the counters, filling in the CPU and
	 * counter name.
	 */
	/* How many do we get? Each counter token starts with "s/". */
	len = strlen(buffer);
	for (i = 0, cnt = 0; i < len; i++) {
		if (strncmp(&buffer[i], "s/", 2) == 0) {
			cnt++;
			for (; i < len; i++) {
				if (buffer[i] == ' ')
					break;
			}
		}
	}
	/*
	 * Fix: the old code called malloc(0) when no "s/" tokens were
	 * found, which can return a non-NULL pointer and defeat the
	 * caller's "cnts == NULL" guard. Bail out explicitly instead.
	 */
	if (cnt == 0) {
		printf("No counter headers found in first line\n");
		return;
	}
	/* calloc() zeroes the slots and overflow-checks the sizing. */
	cnts = calloc(cnt, sizeof(struct counters));
	if (cnts == NULL) {
		printf("No memory err:%d\n", errno);
		return;
	}
	/* Only publish the count once the array actually exists. */
	ncnts = cnt;
	/* Second pass: NUL-terminate each token and parse it. */
	for (i = 0, cnt = 0; i < len; i++) {
		if (strncmp(&buffer[i], "s/", 2) == 0) {
			p = &buffer[i];
			for (; i < len; i++) {
				if (buffer[i] == ' ') {
					buffer[i] = 0;
					break;
				}
			}
			process_header(cnt, p);
			cnt++;
		}
	}
	if (verbose)
		printf("We have %d entries\n", cnt);
}
/* Upper bound on sample lines read per run; the -m option can lower it
 * but it is clamped to MAX_COUNTER_SLOTS (the size of each vals[] array). */
extern int max_to_collect;
int max_to_collect = MAX_COUNTER_SLOTS;
static int
read_a_line(FILE *io)
{
	char buffer[8192], *cursor, *end;
	int idx, slot;

	if (fgets(buffer, sizeof(buffer), io) == NULL)
		return (0);
	/*
	 * Each input line carries one sample per known counter, in the
	 * same order as the header; append each value to its counter's
	 * slot array and running sum.
	 */
	cursor = buffer;
	for (idx = 0; idx < ncnts; idx++) {
		slot = cnts[idx].pos;
		cnts[idx].vals[slot] = strtol(cursor, &end, 0);
		cnts[idx].sum += cnts[idx].vals[slot];
		cnts[idx].pos++;
		cursor = end;
	}
	return (1);
}
/* Number of CPUs that actually have counters attached; computed by
 * print_header() and used to decide when an output row is complete. */
extern int cpu_count_out;
int cpu_count_out=0;
static void
print_header(void)
{
	int i, ncpu, shown;

	printf("*********************************\n");
	/* Count the CPUs that actually have counters attached. */
	ncpu = 0;
	for (i = 0; i < MAX_CPU; i++) {
		if (glob_cpu[i])
			ncpu++;
	}
	cpu_count_out = ncpu;
	/*
	 * Print one column label per present CPU. Note: a tab is emitted
	 * even for absent CPU slots (matching the historical layout of
	 * the per-line output), and an empty machine prints only "\n".
	 */
	shown = 0;
	for (i = 0; i < MAX_CPU; i++) {
		if (glob_cpu[i]) {
			printf("CPU%d", i);
			shown++;
		}
		if (shown == ncpu) {
			printf("\n");
			break;
		} else {
			printf("\t");
		}
	}
}
/*
 * Link every counter that belongs to the same CPU into a chain rooted
 * at glob_cpu[cpu] via the next_cpu pointers, so per-CPU expressions
 * can walk all of that CPU's counters from one head pointer.
 */
static void
lace_cpus_together(void)
{
	int i, j, lace_cpu;
	struct counters *cpat, *at;

	for (i = 0; i < ncnts; i++) {
		cpat = &cnts[i];
		if (cpat->next_cpu) {
			/* Already laced in */
			continue;
		}
		lace_cpu = cpat->cpu;
		if (lace_cpu >= MAX_CPU) {
			printf("CPU %d to big\n", lace_cpu);
			continue;
		}
		if (glob_cpu[lace_cpu] == NULL) {
			/* First counter seen for this CPU becomes the head. */
			glob_cpu[lace_cpu] = cpat;
		} else {
			/* Already processed this cpu */
			continue;
		}
		/* Ok look forward for cpu->cpu and link in */
		for (j = (i + 1); j < ncnts; j++) {
			at = &cnts[j];
			if (at->next_cpu) {
				continue;
			}
			if (at->cpu == lace_cpu) {
				/* Found one: append and advance the tail. */
				cpat->next_cpu = at;
				cpat = at;
			}
		}
	}
}
/*
 * Read counter samples either from a file (-i) or from a freshly
 * spawned pmcstat pipe, evaluate the selected expression per line for
 * the live case, and clean up the stream with the matching close call.
 * (This body applies the embedded diff: the raw '+'/'-' patch markers
 * in the pasted source are not valid C.)
 */
static void
process_file(char *filename)
{
	FILE *io;
	int i;
	int line_at, not_done;
	pid_t pid_of_command = 0;

	if (filename == NULL) {
		io = my_popen(command, "r", &pid_of_command);
		if (io == NULL) {
			printf("Can't popen the command %s\n", command);
			return;
		}
	} else {
		io = fopen(filename, "r");
		if (io == NULL) {
			printf("Can't process file %s err:%d\n",
			       filename, errno);
			return;
		}
	}
	build_counters_from_header(io);
	if (cnts == NULL) {
		/* Nothing we can do */
		printf("Nothing to do -- no counters built\n");
		/* A pipe must be reaped with my_pclose, not fclose. */
		if (filename) {
			fclose(io);
		} else {
			my_pclose(io, pid_of_command);
		}
		return;
	}
	lace_cpus_together();
	print_header();
	if (verbose) {
		for (i = 0; i < ncnts; i++) {
			printf("Counter:%s cpu:%d index:%d\n",
			       cnts[i].counter_name,
			       cnts[i].cpu, i);
		}
	}
	line_at = 0;
	not_done = 1;
	while (not_done) {
		if (read_a_line(io)) {
			line_at++;
		} else {
			break;
		}
		if (line_at >= max_to_collect) {
			not_done = 0;
		}
		if (filename == NULL) {
			int cnt;
			/* For the ones we dynamically open we print now */
			/* NOTE(review): glob_cpu[i] may be NULL for absent
			 * CPU slots and is passed through unchecked --
			 * presumably do_expression/find_counter tolerate
			 * it; confirm against the rest of the file. */
			for (i = 0, cnt = 0; i < MAX_CPU; i++) {
				do_expression(glob_cpu[i], (line_at - 1));
				cnt++;
				if (cnt == cpu_count_out) {
					printf("\n");
					break;
				} else {
					printf("\t");
				}
			}
		}
	}
	if (filename) {
		fclose(io);
	} else {
		my_pclose(io, pid_of_command);
	}
}
#if defined(__amd64__)
/* Execute CPUID with %eax = in; a/b/c/d receive %eax/%ebx/%ecx/%edx. */
#define cpuid(in,a,b,c,d)\
  asm("cpuid": "=a" (a), "=b" (b), "=c" (c), "=d" (d) : "a" (in));
#else
/* Non-amd64 build: cpuid() expands to nothing, outputs are untouched. */
#define cpuid(in, a, b, c, d)
#endif
/*
 * Identify the CPU via CPUID, select the matching canned-expression
 * table (Sandy Bridge / Ivy Bridge / Haswell only), then load the list
 * of PMCs this machine supports from "pmccontrol -L" into valid_pmcs[].
 * Unsupported vendors/models print a message and exit.
 */
static void
get_cpuid_set(void)
{
	unsigned long eax, ebx, ecx, edx;
	int model;
	pid_t pid_of_command = 0;
	size_t sz, len;
	FILE *io;
	char linebuf[1024], *str;

	eax = ebx = ecx = edx = 0;
	cpuid(0, eax, ebx, ecx, edx);
	if (ebx == 0x68747541) {		/* "Auth" of AuthenticAMD */
		printf("AMD processors are not supported by this program\n");
		printf("Sorry\n");
		exit(0);
	} else if (ebx == 0x69727943) {		/* "Cyri" of CyrixInstead */
		/* Fix: the old constant 0x6972794 was missing its final
		 * nibble, so this branch could never match and Cyrix CPUs
		 * fell into the generic unknown-vendor message. */
		printf("Cyrix processors are not supported by this program\n");
		printf("Sorry\n");
		exit(0);
	} else if (ebx == 0x756e6547) {		/* "Genu" of GenuineIntel */
		printf("Genuine Intel\n");
	} else {
		printf("Unknown processor type 0x%lx Only Intel AMD64 types are supported by this routine!\n", ebx);
		exit(0);
	}
	cpuid(1, eax, ebx, ecx, edx);
	/* Combine extended-model (bits 19:16) and model (bits 7:4). */
	model = (((eax & 0xF0000) >> 12) | ((eax & 0xF0) >> 4));
	printf("CPU model is 0x%x id:0x%lx\n", model, eax);
	switch (eax & 0xF00) {
	case 0x500:	/* Pentium family processors */
		printf("Intel Pentium P5\n");
		goto not_supported;
		break;
	case 0x600:	/* Pentium Pro, Celeron, Pentium II & III */
		switch (model) {
		case 0x1:
			printf("Intel Pentium P6\n");
			goto not_supported;
			break;
		case 0x3:
		case 0x5:
			printf("Intel PII\n");
			goto not_supported;
			break;
		case 0x6: case 0x16:
			printf("Intel CL\n");
			goto not_supported;
			break;
		case 0x7: case 0x8: case 0xA: case 0xB:
			printf("Intel PIII\n");
			goto not_supported;
			break;
		case 0x9: case 0xD:
			printf("Intel PM\n");
			goto not_supported;
			break;
		case 0xE:
			printf("Intel CORE\n");
			goto not_supported;
			break;
		case 0xF:
			printf("Intel CORE2\n");
			goto not_supported;
			break;
		case 0x17:
			printf("Intel CORE2EXTREME\n");
			goto not_supported;
			break;
		case 0x1C:	/* Per Intel document 320047-002. */
			printf("Intel ATOM\n");
			goto not_supported;
			break;
		case 0x1A:
		case 0x1E:	/*
				 * Per Intel document 253669-032 9/2009,
				 * pages A-2 and A-57
				 */
		case 0x1F:	/*
				 * Per Intel document 253669-032 9/2009,
				 * pages A-2 and A-57
				 */
			printf("Intel COREI7\n");
			goto not_supported;
			break;
		case 0x2E:
			printf("Intel NEHALEM\n");
			goto not_supported;
			break;
		case 0x25:	/* Per Intel document 253669-033US 12/2009. */
		case 0x2C:	/* Per Intel document 253669-033US 12/2009. */
			printf("Intel WESTMERE\n");
			goto not_supported;
			break;
		case 0x2F:	/* Westmere-EX, seen in wild */
			printf("Intel WESTMERE\n");
			goto not_supported;
			break;
		case 0x2A:	/* Per Intel document 253669-039US 05/2011. */
			printf("Intel SANDYBRIDGE\n");
			set_sandybridge();
			break;
		case 0x2D:	/* Per Intel document 253669-044US 08/2012. */
			printf("Intel SANDYBRIDGE_XEON\n");
			set_sandybridge();
			break;
		case 0x3A:	/* Per Intel document 253669-043US 05/2012. */
			printf("Intel IVYBRIDGE\n");
			set_ivybridge();
			break;
		case 0x3E:	/* Per Intel document 325462-045US 01/2013. */
			printf("Intel IVYBRIDGE_XEON\n");
			set_ivybridge();
			break;
		case 0x3F:	/* Per Intel document 325462-045US 09/2014. */
			printf("Intel HASWELL (Xeon)\n");
			set_haswell();
			break;
		case 0x3C:	/* Per Intel document 325462-045US 01/2013. */
		case 0x45:
		case 0x46:
			printf("Intel HASWELL\n");
			set_haswell();
			break;
		case 0x4D:
			/* Per Intel document 330061-001 01/2014. */
			printf("Intel ATOM_SILVERMONT\n");
			goto not_supported;
			break;
		default:
			printf("Intel model 0x%x is not known -- sorry\n",
			       model);
			goto not_supported;
			break;
		}
		break;
	case 0xF00:	/* P4 */
		printf("Intel unknown model %d\n", model);
		goto not_supported;
		break;
	}
	/* Ok lets load the list of all known PMC's */
	io = my_popen("/usr/sbin/pmccontrol -L", "r", &pid_of_command);
	if (io == NULL) {
		/* Fix: the stream was previously used unchecked. */
		printf("Can't popen /usr/sbin/pmccontrol -L\n");
		exit(-1);
	}
	if (valid_pmcs == NULL) {
		/* Likely */
		pmc_allocated_cnt = PMC_INITIAL_ALLOC;
		sz = sizeof(char *) * pmc_allocated_cnt;
		valid_pmcs = malloc(sz);
		if (valid_pmcs == NULL) {
			printf("No memory allocation fails at startup?\n");
			exit(-1);
		}
		memset(valid_pmcs, 0, sz);
	}
	while (fgets(linebuf, sizeof(linebuf), io) != NULL) {
		if (linebuf[0] != '\t') {
			/* sometimes headers ;-) */
			continue;
		}
		len = strlen(linebuf);
		if (linebuf[(len - 1)] == '\n') {
			/* Likely */
			linebuf[(len - 1)] = 0;
		}
		str = &linebuf[1];
		len = strlen(str) + 1;
		valid_pmcs[valid_pmc_cnt] = malloc(len);
		if (valid_pmcs[valid_pmc_cnt] == NULL) {
			printf("No memory2 allocation fails at startup?\n");
			exit(-1);
		}
		memset(valid_pmcs[valid_pmc_cnt], 0, len);
		strcpy(valid_pmcs[valid_pmc_cnt], str);
		valid_pmc_cnt++;
		if (valid_pmc_cnt >= pmc_allocated_cnt) {
			/* Got to expand -- unlikely */
			char **more;

			/*
			 * Fix: the old code malloc'd the doubled size and
			 * then memcpy'd that many bytes out of the old
			 * half-sized array -- a heap over-read. realloc()
			 * copies exactly what exists; we then zero only
			 * the newly added half.
			 */
			sz = sizeof(char *) * (pmc_allocated_cnt * 2);
			more = realloc(valid_pmcs, sz);
			if (more == NULL) {
				printf("No memory3 allocation fails at startup?\n");
				exit(-1);
			}
			memset(&more[pmc_allocated_cnt], 0,
			    sizeof(char *) * pmc_allocated_cnt);
			pmc_allocated_cnt *= 2;
			valid_pmcs = more;
		}
	}
	my_pclose(io, pid_of_command);
	return;
not_supported:
	printf("Not supported\n");
	exit(-1);
}
static void
explain_all(void)
{
	int idx;

	/* Walk the current CPU's table and print each canned expression's
	 * name plus its CPU-specific explanation text. */
	printf("For CPU's of type %s the following expressions are available:\n", the_cpu.cputype);
	printf("-------------------------------------------------------------\n");
	for (idx = 0; idx < the_cpu.number; idx++) {
		printf("For -e %s ", the_cpu.ents[idx].name);
		(*the_cpu.explain)(the_cpu.ents[idx].name);
		printf("----------------------------\n");
	}
}
/*
 * Sanity-test one PMC by sampling it briefly with pmcstat and checking
 * that the tool echoes the counter name (Pass) rather than an ERROR
 * line (Failed). out_so_far is how many columns the caller already
 * printed, used to align the result column.
 */
static void
test_for_a_pmc(const char *pmc, int out_so_far)
{
	FILE *io;
	pid_t pid_of_command=0;
	char my_command[1024];
	char line[1024];
	char resp[1024];
	int len, llen, i;

	/* Pad the name out to column 50 so the Pass/Failed column lines up. */
	if (out_so_far < 50) {
		len = 50 - out_so_far;
		for(i=0; i<len; i++) {
			printf(" ");
		}
	}
	/* NOTE(review): unbounded sprintf -- safe only while PMC names stay
	 * well under ~980 chars; pmccontrol output presumably does. */
	sprintf(my_command, "/usr/sbin/pmcstat -w .25 -c 0 -s %s", pmc);
	io = my_popen(my_command, "r", &pid_of_command);
	if (io == NULL) {
		printf("Failed -- popen fails\n");
		return;
	}
	/* Setup what we expect */
	len = sprintf(resp, "%s", pmc);
	if (fgets(line, sizeof(line), io) == NULL) {
		printf("Failed -- no output from pmstat\n");
		goto out;
	}
	llen = strlen(line);
	if (line[(llen-1)] == '\n') {
		/* Strip the trailing newline. */
		line[(llen-1)] = 0;
		llen--;
	}
	/* Scan the header line for either "ERROR" or the counter name. */
	for(i=2; i<(llen-len); i++) {
		if (strncmp(&line[i], "ERROR", 5) == 0) {
			printf("Failed %s\n", line);
			goto out;
		} else if (strncmp(&line[i], resp, len) == 0) {
			int j, k;

			/* Name seen: the next line carries the sample value. */
			if (fgets(line, sizeof(line), io) == NULL) {
				printf("Failed -- no second output from pmstat\n");
				goto out;
			}
			len = strlen(line);
			/* Skip the leading blank padding before the value. */
			for (j=0; j<len; j++) {
				if (line[j] == ' ') {
					j++;
				} else {
					break;
				}
			}
			printf("Pass");
			len = strlen(&line[j]);
			/* Right-pad so values line up across rows. */
			if (len < 20) {
				for(k=0; k<(20-len); k++) {
					printf(" ");
				}
			}
			printf("%s", &line[j]);
			goto out;
		}
	}
	printf("Failed -- '%s' not '%s'\n", line, resp);
out:
	my_pclose(io, pid_of_command);
}
static int
add_it_to(char **vars, int cur_cnt, char *name)
{
	size_t len;
	int idx;

	/* Duplicate? The command line wants each PMC listed only once. */
	for (idx = 0; idx < cur_cnt; idx++) {
		if (strcmp(vars[idx], name) == 0)
			return (0);
	}
	/* The slot we are about to fill must still be empty. */
	if (vars[cur_cnt] != NULL) {
		printf("Cur_cnt:%d filled with %s??\n",
		    cur_cnt, vars[cur_cnt]);
		exit(-1);
	}
	/* New name: store a private zero-filled copy and report growth. */
	len = strlen(name) + 1;
	vars[cur_cnt] = malloc(len);
	if (vars[cur_cnt] == NULL) {
		printf("No memory %s\n", __FUNCTION__);
		exit(-1);
	}
	memset(vars[cur_cnt], 0, len);
	strcpy(vars[cur_cnt], name);
	return (1);
}
static char *
build_command_for_exp(struct expression *exp)
{
	/*
	 * Build the pmcstat command to handle
	 * the passed in expression.
	 * /usr/sbin/pmcstat -w 1 -s NNN -s QQQ
	 * where NNN and QQQ represent the PMC's in the expression
	 * uniquely..
	 */
	int cnt_pmc, alloced_pmcs, i;
	struct expression *at;
	char **vars, *cmd;
	size_t mal;

	alloced_pmcs = cnt_pmc = 0;
	/* First pass: how many PMC leaves does the expression hold? */
	for (at = exp; at != NULL; at = at->next) {
		if (at->type == TYPE_VALUE_PMC)
			cnt_pmc++;
	}
	if (cnt_pmc == 0) {
		printf("No PMC's in your expression -- nothing to do!!\n");
		exit(0);
	}
	/* calloc() zeroes the table and overflow-checks the sizing. */
	vars = calloc(cnt_pmc, sizeof(char *));
	if (vars == NULL) {
		printf("No memory\n");
		exit(-1);
	}
	/* Second pass: collect each distinct PMC name exactly once. */
	for (at = exp; at != NULL; at = at->next) {
		if (at->type == TYPE_VALUE_PMC &&
		    add_it_to(vars, alloced_pmcs, at->name))
			alloced_pmcs++;
	}
	/* Now we have a unique list in vars so create our command */
	mal = 23;	/* "/usr/sbin/pmcstat -w 1" + \0 */
	for (i = 0; i < alloced_pmcs; i++)
		mal += strlen(vars[i]) + 4;	/* " -s " + var */
	cmd = malloc(mal + 2);
	if (cmd == NULL) {
		printf("%s out of mem\n", __FUNCTION__);
		exit(-1);
	}
	memset(cmd, 0, (mal + 2));
	strcpy(cmd, "/usr/sbin/pmcstat -w 1");
	/*
	 * Fixes: the old code had a dead "at = exp;" store before this
	 * loop, and sprintf'd each " -s NAME" through a fixed 1024-byte
	 * scratch buffer that a very long name could overflow. cmd was
	 * sized exactly above, so cat straight into it.
	 */
	for (i = 0; i < alloced_pmcs; i++) {
		strcat(cmd, " -s ");
		strcat(cmd, vars[i]);
		free(vars[i]);
		vars[i] = NULL;
	}
	free(vars);
	return (cmd);
}
static int
user_expr(struct counters *cpu, int pos)
{
	struct expression *node;
	struct counters *ctr;
	double val;

	/*
	 * Bind every PMC leaf of the parsed expression to its sampled
	 * value: one interval when pos >= 0, the whole-run sum when
	 * pos == -1. A PMC we never collected is fatal.
	 */
	for (node = master_exp; node != NULL; node = node->next) {
		if (node->type != TYPE_VALUE_PMC)
			continue;
		ctr = find_counter(cpu, node->name);
		if (ctr == NULL) {
			printf("%s:Can't find counter %s?\n", __FUNCTION__, node->name);
			exit(-1);
		}
		if (pos != -1)
			node->value = ctr->vals[pos] * 1.0;
		else
			node->value = ctr->sum * 1.0;
	}
	val = run_expr(master_exp, 1, NULL);
	return (printf("%1.3f", val));
}
static void
set_manual_exp(struct expression *exp)
{
	/* Route evaluation through the user-expression interpreter and
	 * synthesize the pmcstat command that feeds it. */
	threshold = "User defined threshold";
	command = build_command_for_exp(exp);
	expression = user_expr;
}
static void
run_tests(void)
{
	int idx, width;

	/* Probe every PMC the CPU advertised; each probe samples briefly
	 * via pmcstat, so this can take a while. */
	printf("Running tests on %d PMC's this may take some time\n", valid_pmc_cnt);
	printf("------------------------------------------------------------------------\n");
	for (idx = 0; idx < valid_pmc_cnt; idx++) {
		width = printf("%s", valid_pmcs[idx]);
		/* Flush so the name shows before the slow probe runs. */
		fflush(stdout);
		test_for_a_pmc(valid_pmcs[idx], width);
	}
}
static void
list_all(void)
{
	int i, col;

	/* Print every known PMC padded to column 52, followed by its
	 * %N shorthand usable in -E expressions. */
	printf("PMC Abbreviation\n");
	printf("--------------------------------------------------------------\n");
	for (i = 0; i < valid_pmc_cnt; i++) {
		for (col = printf("%s", valid_pmcs[i]); col < 52; col++)
			printf(" ");
		printf("%%%d\n", i);
	}
}
/*
 * Entry point: identify the CPU, parse options, then either run the
 * PMC self-tests (-T), explain expressions (-H/-h), or collect samples
 * (live via pmcstat, or from a file with -i) and print the selected
 * expression's value per CPU.
 */
int
main(int argc, char **argv)
{
	int i, j, cnt;
	char *filename=NULL;
	char *name=NULL;
	int help_only = 0;
	int test_mode = 0;

	/* Must run first: selects the_cpu table and loads valid_pmcs[]. */
	get_cpuid_set();
	memset(glob_cpu, 0, sizeof(glob_cpu));
	while ((i = getopt(argc, argv, "LHhvm:i:?e:TE:")) != -1) {
		switch (i) {
		case 'L':
			list_all();
			return(0);
		case 'H':
			printf("**********************************\n");
			explain_all();
			printf("**********************************\n");
			return(0);
			break;
		case 'T':
			test_mode = 1;
			break;
		case 'E':
			/* User-supplied expression string. */
			master_exp = parse_expression(optarg);
			if (master_exp) {
				set_manual_exp(master_exp);
			}
			break;
		case 'e':
			/* Canned expression by name; must exist in table. */
			if (validate_expression(optarg)) {
				printf("Unknown expression %s\n", optarg);
				return(0);
			}
			name = optarg;
			set_expression(optarg);
			break;
		case 'm':
			max_to_collect = strtol(optarg, NULL, 0);
			if (max_to_collect > MAX_COUNTER_SLOTS) {
				/* You can't collect more than max in array */
				max_to_collect = MAX_COUNTER_SLOTS;
			}
			break;
		case 'v':
			verbose++;
			break;
		case 'h':
			help_only = 1;
			break;
		case 'i':
			filename = optarg;
			break;
		case '?':
		default:
			/* Also jumped to from below when no work was requested. */
		use:
			printf("Use %s [ -i inputfile -v -m max_to_collect -e expr -E -h -? -H]\n",
			       argv[0]);
			printf("-i inputfile -- use source as inputfile not stdin (if stdin collect)\n");
			printf("-v -- verbose dump debug type things -- you don't want this\n");
			printf("-m N -- maximum to collect is N measurments\n");
			printf("-e expr-name -- Do expression expr-name\n");
			printf("-E 'your expression' -- Do your expression\n");
			printf("-h -- Don't do the expression I put in -e xxx just explain what it does and exit\n");
			printf("-H -- Don't run anything, just explain all canned expressions\n");
			printf("-T -- Test all PMC's defined by this processor\n");
			return(0);
			break;
		};
	}
	if ((name == NULL) && (filename == NULL) && (test_mode == 0) && (master_exp == NULL)) {
		printf("Without setting an expression we cannot dynamically gather information\n");
		printf("you must supply a filename (and you probably want verbosity)\n");
		goto use;
	}
	if (test_mode) {
		run_tests();
		return(0);
	}
	printf("*********************************\n");
	if (master_exp == NULL) {
		(*the_cpu.explain)(name);
	} else {
		printf("Examine your expression ");
		print_exp(master_exp);
		printf("User defined threshold\n");
	}
	if (help_only) {
		return(0);
	}
	process_file(filename);
	if (verbose >= 2) {
		/* Dump every raw sample plus the running sums. */
		for (i=0; i<ncnts; i++) {
			printf("Counter:%s cpu:%d index:%d\n",
			       cnts[i].counter_name,
			       cnts[i].cpu, i);
			for(j=0; j<cnts[i].pos; j++) {
				printf(" val - %ld\n", (long int)cnts[i].vals[j]);
			}
			printf(" sum - %ld\n", (long int)cnts[i].sum);
		}
	}
	if (expression == NULL) {
		return(0);
	}
	/* Final summary row: evaluate over the whole run (pos == -1). */
	for(i=0, cnt=0; i<MAX_CPU; i++) {
		if (glob_cpu[i]) {
			do_expression(glob_cpu[i], -1);
			cnt++;
			if (cnt == cpu_count_out) {
				printf("\n");
				break;
			} else {
				printf("\t");
			}
		}
	}
	return(0);
}
Index: projects/clang360-import
===================================================================
--- projects/clang360-import (revision 277808)
+++ projects/clang360-import (revision 277809)
Property changes on: projects/clang360-import
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /head:r277777-277803

File Metadata

Mime Type
application/octet-stream
Expires
Sun, Apr 5, 7:47 PM (2 d)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
30839009
Default Alt Text
(799 KB)

Event Timeline