Changeset View
Standalone View
sys/dev/tpm/tpm20.c
Show All 22 Lines | |||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||||
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||||
* POSSIBILITY OF SUCH DAMAGE. | * POSSIBILITY OF SUCH DAMAGE. | ||||
*/ | */ | ||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#include <sys/random.h> | |||||
#include "tpm20.h" | #include "tpm20.h" | ||||
#define TPM_HARVEST_SIZE 16 | |||||
/* | |||||
delphij: Could you please add a comment here to explain why 10 seconds was chosen? (It's not consistent… | |||||
Done Inline ActionsThanks for the feedback.
I've added a comment. To expand on that: discrete TPMs are painfully slow, and I wanted to avoid a situation where the chip is used so often by the harvest command that it has an impact on userspace applications. Because of that I'd prefer the interval to be on the order of a few seconds. kd: Thanks for the feedback.
> Could you please add a comment here to explain why 10 seconds was… | |||||
* Perform a harvest every 10 seconds. | |||||
* Since discrete TPMs are painfully slow | |||||
* we don't want to execute this too often | |||||
* as the chip is likely to be used by others too. | |||||
*/ | |||||
#define TPM_HARVEST_INTERVAL 10000000 | |||||
MALLOC_DECLARE(M_TPM20); | MALLOC_DECLARE(M_TPM20); | ||||
MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver"); | MALLOC_DEFINE(M_TPM20, "tpm_buffer", "buffer for tpm 2.0 driver"); | ||||
static void tpm20_discard_buffer(void *arg); | static void tpm20_discard_buffer(void *arg); | ||||
#ifdef TPM_HARVEST | |||||
static void tpm20_harvest(void *arg); | |||||
#endif | |||||
static int tpm20_save_state(device_t dev, bool suspend); | static int tpm20_save_state(device_t dev, bool suspend); | ||||
static d_open_t tpm20_open; | static d_open_t tpm20_open; | ||||
static d_close_t tpm20_close; | static d_close_t tpm20_close; | ||||
static d_read_t tpm20_read; | static d_read_t tpm20_read; | ||||
static d_write_t tpm20_write; | static d_write_t tpm20_write; | ||||
static d_ioctl_t tpm20_ioctl; | static d_ioctl_t tpm20_ioctl; | ||||
static struct cdevsw tpm20_cdevsw = { | static struct cdevsw tpm20_cdevsw = { | ||||
▲ Show 20 Lines • Show All 124 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
struct make_dev_args args; | struct make_dev_args args; | ||||
int result; | int result; | ||||
sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK); | sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK); | ||||
sx_init(&sc->dev_lock, "TPM driver lock"); | sx_init(&sc->dev_lock, "TPM driver lock"); | ||||
cv_init(&sc->buf_cv, "TPM buffer cv"); | cv_init(&sc->buf_cv, "TPM buffer cv"); | ||||
callout_init(&sc->discard_buffer_callout, 1); | callout_init(&sc->discard_buffer_callout, 1); | ||||
#ifdef TPM_HARVEST | |||||
sc->harvest_ticks = TPM_HARVEST_INTERVAL / tick; | |||||
callout_init(&sc->harvest_callout, 1); | |||||
Not Done Inline ActionsHere we are scheduling a callout after 1 tick (0 is silently converted to 1 here), should we wait for TPM_HARVEST_INTERVAL for the first invoke? Note that because we are scheduling a callout here, we should perform a callout_drain() on device detach. delphij: Here we are scheduling a callout after 1 tick (0 is silently converted to 1 here), should we… | |||||
Done Inline Actions
This is intentional. I wanted to fire it as soon as we are ready. IMHO it doesn't make any sense to wait for a rather long amount of time before the first harvest is performed.
Whoops. You're right obviously. Fixed it. kd: > Here we are scheduling a callout after 1 tick (0 is silently converted to 1 here), should we… | |||||
callout_reset(&sc->harvest_callout, 0, tpm20_harvest, sc); | |||||
#endif | |||||
sc->pending_data_length = 0; | sc->pending_data_length = 0; | ||||
make_dev_args_init(&args); | make_dev_args_init(&args); | ||||
args.mda_devsw = &tpm20_cdevsw; | args.mda_devsw = &tpm20_cdevsw; | ||||
args.mda_uid = UID_ROOT; | args.mda_uid = UID_ROOT; | ||||
args.mda_gid = GID_WHEEL; | args.mda_gid = GID_WHEEL; | ||||
args.mda_mode = TPM_CDEV_PERM_FLAG; | args.mda_mode = TPM_CDEV_PERM_FLAG; | ||||
args.mda_si_drv1 = sc; | args.mda_si_drv1 = sc; | ||||
result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME); | result = make_dev_s(&args, &sc->sc_cdev, TPM_CDEV_NAME); | ||||
if (result != 0) | if (result != 0) | ||||
tpm20_release(sc); | tpm20_release(sc); | ||||
return (result); | return (result); | ||||
} | } | ||||
void | void | ||||
tpm20_release(struct tpm_sc *sc) | tpm20_release(struct tpm_sc *sc) | ||||
{ | { | ||||
callout_drain(&sc->harvest_callout); | |||||
delphijUnsubmitted Not Done Inline ActionsMaybe also discard_buffer_callout here? (Not really related to your change and therefore optional). delphij: Maybe also discard_buffer_callout here? (Not really related to your change and therefore… | |||||
if (sc->buf != NULL) | if (sc->buf != NULL) | ||||
free(sc->buf, M_TPM20); | free(sc->buf, M_TPM20); | ||||
sx_destroy(&sc->dev_lock); | sx_destroy(&sc->dev_lock); | ||||
cv_destroy(&sc->buf_cv); | cv_destroy(&sc->buf_cv); | ||||
if (sc->sc_cdev != NULL) | if (sc->sc_cdev != NULL) | ||||
destroy_dev(sc->sc_cdev); | destroy_dev(sc->sc_cdev); | ||||
} | } | ||||
int | int | ||||
tpm20_suspend(device_t dev) | tpm20_suspend(device_t dev) | ||||
{ | { | ||||
return (tpm20_save_state(dev, true)); | return (tpm20_save_state(dev, true)); | ||||
} | } | ||||
int | int | ||||
tpm20_shutdown(device_t dev) | tpm20_shutdown(device_t dev) | ||||
{ | { | ||||
return (tpm20_save_state(dev, false)); | return (tpm20_save_state(dev, false)); | ||||
} | } | ||||
#ifdef TPM_HARVEST

/*
 * Get TPM_HARVEST_SIZE random bytes from the TPM and add them
 * into the system entropy pool.
 */
static void
tpm20_harvest(void *arg)
{
	struct tpm_sc *sc;
	unsigned char entropy[TPM_HARVEST_SIZE];
	uint16_t entropy_size;
	uint8_t cmd[] = {
		0x80, 0x01,		/* TPM_ST_NO_SESSIONS tag */
		0x00, 0x00, 0x00, 0x0c,	/* cmd length */
		0x00, 0x00, 0x01, 0x7b,	/* cmd TPM_CC_GetRandom */
		0x00, TPM_HARVEST_SIZE	/* number of bytes requested */
	};

	sc = arg;
	entropy_size = 0;

	sx_xlock(&sc->dev_lock);

	memcpy(sc->buf, cmd, sizeof(cmd));
	/*
	 * A failed transmit is treated as transient: skip this harvest
	 * but still reschedule below, so one error does not permanently
	 * stop entropy collection.
	 */
	if (sc->transmit(sc, sizeof(cmd)) != 0)
		goto out;

	/* Ignore response size */
	sc->pending_data_length = 0;

	/*
	 * The count of random bytes returned is a 16-bit big-endian
	 * field placed right after the response header.  Decode both
	 * bytes; reading only the low byte would misparse counts >= 256.
	 */
	entropy_size = ((uint16_t)sc->buf[TPM_HEADER_SIZE] << 8) |
	    sc->buf[TPM_HEADER_SIZE + 1];
	if (entropy_size > 0) {
		/* Never copy more than our local buffer can hold. */
		entropy_size = MIN(entropy_size, TPM_HARVEST_SIZE);
		memcpy(entropy,
		    sc->buf + TPM_HEADER_SIZE + sizeof(uint16_t),
		    entropy_size);
	}

out:
	sx_xunlock(&sc->dev_lock);

	/* Queue entropy outside the lock. */
	if (entropy_size > 0)
		random_harvest_queue(entropy, entropy_size, RANDOM_PURE_TPM);

	callout_reset(&sc->harvest_callout, sc->harvest_ticks,
	    tpm20_harvest, sc);
}
#endif	/* TPM_HARVEST */
static int | static int | ||||
tpm20_save_state(device_t dev, bool suspend) | tpm20_save_state(device_t dev, bool suspend) | ||||
{ | { | ||||
struct tpm_sc *sc; | struct tpm_sc *sc; | ||||
uint8_t save_cmd[] = { | uint8_t save_cmd[] = { | ||||
0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/ | 0x80, 0x01, /* TPM_ST_NO_SESSIONS tag*/ | ||||
0x00, 0x00, 0x00, 0x0C, /* cmd length */ | 0x00, 0x00, 0x00, 0x0C, /* cmd length */ | ||||
▲ Show 20 Lines • Show All 50 Lines • Show Last 20 Lines |
Could you please add a comment here to explain why 10 seconds was chosen? (It's not consistent across the tree: some drivers choose 0.1 seconds, while some others uses 4 seconds, 5 seconds, etc.)
Optional suggestions: