diff --git a/.gitignore b/.gitignore index 38ec1bd..821737b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,10 @@ /.idea/ /.idea_modules/ -/target/ -/project/project/ -/project/target/ +target +/project/typesafe.properties /project/activator-* /logs/ /RUNNING_PID *.iml +.DS_Store +.sbtserver* diff --git a/LICENSE b/LICENSE index a021544..39707a6 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2013 Typesafe, Inc. +Copyright 2013-2015 Typesafe, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/activator.properties b/activator.properties index 615798b..e31bcb1 100644 --- a/activator.properties +++ b/activator.properties @@ -1,4 +1,4 @@ -name=hello-slick -title=Hello Slick! +name=hello-slick-3.1 +title=Hello Slick! (Slick 3.1) description=Slick is Typesafe's modern database query and access library for Scala. It allows you to work with stored data almost as if you were using Scala collections while at the same time giving you full control over when a database access happens and which data is transferred. You can also use SQL directly. This tutorial will get you started with a simple standalone Scala application that uses Slick. 
-tags=Basics,slick,starter +tags=Basics,slick,starter,reactive diff --git a/build.sbt b/build.sbt index 3ced607..803b19c 100644 --- a/build.sbt +++ b/build.sbt @@ -1,14 +1,12 @@ name := "hello-slick" -version := "1.0" - -scalaVersion := "2.10.3" - mainClass in Compile := Some("HelloSlick") libraryDependencies ++= List( - "com.typesafe.slick" %% "slick" % "2.0.0", - "org.slf4j" % "slf4j-nop" % "1.6.4", - "com.h2database" % "h2" % "1.3.170", - "org.scalatest" %% "scalatest" % "2.0" % "test" + "com.typesafe.slick" %% "slick" % "3.1.1", + "org.slf4j" % "slf4j-nop" % "1.7.10", + "com.h2database" % "h2" % "1.4.187", + "org.scalatest" %% "scalatest" % "2.2.4" % "test" ) + +fork in run := true diff --git a/project/build.properties b/project/build.properties index 0974fce..748703f 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.0 +sbt.version=0.13.7 diff --git a/project/play-fork-run.sbt b/project/play-fork-run.sbt new file mode 100644 index 0000000..a631d58 --- /dev/null +++ b/project/play-fork-run.sbt @@ -0,0 +1,5 @@ +// This plugin adds forked run capabilities to Play projects which is needed for Activator. 
+ +resolvers += Resolver.typesafeRepo("releases") + +addSbtPlugin("com.typesafe.play" % "sbt-fork-run-plugin" % "2.3.9") diff --git a/project/sbt-ui.sbt b/project/sbt-ui.sbt new file mode 100644 index 0000000..7c28b97 --- /dev/null +++ b/project/sbt-ui.sbt @@ -0,0 +1,3 @@ +// This plugin represents functionality that is to be added to sbt in the future + +addSbtPlugin("org.scala-sbt" % "sbt-core-next" % "0.1.1") \ No newline at end of file diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf new file mode 100644 index 0000000..a7775d2 --- /dev/null +++ b/src/main/resources/application.conf @@ -0,0 +1,6 @@ +h2mem1 = { + url = "jdbc:h2:mem:test1" + driver = org.h2.Driver + connectionPool = disabled + keepAliveConnection = true +} diff --git a/src/main/scala/CaseClassMapping.scala b/src/main/scala/CaseClassMapping.scala index 288b233..bccac63 100644 --- a/src/main/scala/CaseClassMapping.scala +++ b/src/main/scala/CaseClassMapping.scala @@ -1,24 +1,27 @@ -import scala.slick.driver.H2Driver.simple._ +import scala.concurrent.Await +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.duration.Duration +import slick.driver.H2Driver.api._ object CaseClassMapping extends App { // the base query for the Users table val users = TableQuery[Users] - val db = Database.forURL("jdbc:h2:mem:hello", driver = "org.h2.Driver") - db.withSession { implicit session => - - // create the schema - users.ddl.create - - // insert two User instances - users += User("John Doe") - users += User("Fred Smith") - - // print the users (select * from USERS) - println(users.list) - } - + val db = Database.forConfig("h2mem1") + try { + Await.result(db.run(DBIO.seq( + // create the schema + users.schema.create, + + // insert two User instances + users += User("John Doe"), + users += User("Fred Smith"), + + // print the users (select * from USERS) + users.result.map(println) + )), Duration.Inf) + } finally db.close } case class User(name: 
String, id: Option[Int] = None) @@ -27,7 +30,7 @@ class Users(tag: Tag) extends Table[User](tag, "USERS") { // Auto Increment the id primary key column def id = column[Int]("ID", O.PrimaryKey, O.AutoInc) // The name can't be null - def name = column[String]("NAME", O.NotNull) + def name = column[String]("NAME") // the * projection (e.g. select * ...) auto-transforms the tupled // column values to / from a User def * = (name, id.?) <> (User.tupled, User.unapply) diff --git a/src/main/scala/HelloSlick.scala b/src/main/scala/HelloSlick.scala index f09d135..eecaf15 100644 --- a/src/main/scala/HelloSlick.scala +++ b/src/main/scala/HelloSlick.scala @@ -1,176 +1,194 @@ -import scala.slick.driver.H2Driver.simple._ +import scala.concurrent.{Future, Await} +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.duration.Duration +import slick.backend.DatabasePublisher +import slick.driver.H2Driver.api._ // The main application object HelloSlick extends App { + val db = Database.forConfig("h2mem1") + try { - // The query interface for the Suppliers table - val suppliers: TableQuery[Suppliers] = TableQuery[Suppliers] - - // the query interface for the Coffees table - val coffees: TableQuery[Coffees] = TableQuery[Coffees] - - // Create a connection (called a "session") to an in-memory H2 database - val db = Database.forURL("jdbc:h2:mem:hello", driver = "org.h2.Driver") - db.withSession { implicit session => - - // Create the schema by combining the DDLs for the Suppliers and Coffees - // tables using the query interfaces - (suppliers.ddl ++ coffees.ddl).create - - - /* Create / Insert */ - - // Insert some suppliers - suppliers += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199") - suppliers += ( 49, "Superior Coffee", "1 Party Place", "Mendocino", "CA", "95460") - suppliers += (150, "The High Ground", "100 Coffee Lane", "Meadows", "CA", "93966") - - // Insert some coffees (using JDBC's batch insert feature) - val 
coffeesInsertResult: Option[Int] = coffees ++= Seq ( - ("Colombian", 101, 7.99, 0, 0), - ("French_Roast", 49, 8.99, 0, 0), - ("Espresso", 150, 9.99, 0, 0), - ("Colombian_Decaf", 101, 8.99, 0, 0), - ("French_Roast_Decaf", 49, 9.99, 0, 0) + // The query interface for the Suppliers table + val suppliers: TableQuery[Suppliers] = TableQuery[Suppliers] + + // the query interface for the Coffees table + val coffees: TableQuery[Coffees] = TableQuery[Coffees] + + val setupAction: DBIO[Unit] = DBIO.seq( + // Create the schema by combining the DDLs for the Suppliers and Coffees + // tables using the query interfaces + (suppliers.schema ++ coffees.schema).create, + + // Insert some suppliers + suppliers += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199"), + suppliers += ( 49, "Superior Coffee", "1 Party Place", "Mendocino", "CA", "95460"), + suppliers += (150, "The High Ground", "100 Coffee Lane", "Meadows", "CA", "93966") ) - - val allSuppliers: List[(Int, String, String, String, String, String)] = - suppliers.list - // Print the number of rows inserted - coffeesInsertResult foreach { numRows => - println(s"Inserted $numRows rows into the Coffees table") - } + val setupFuture: Future[Unit] = db.run(setupAction) + val f = setupFuture.flatMap { _ => - - /* Read / Query / Select */ - - // Print the SQL for the Coffees query - println("Generated SQL for base Coffees query:\n" + coffees.selectStatement) + // Insert some coffees (using JDBC's batch insert feature) + val insertAction: DBIO[Option[Int]] = coffees ++= Seq ( + ("Colombian", 101, 7.99, 0, 0), + ("French_Roast", 49, 8.99, 0, 0), + ("Espresso", 150, 9.99, 0, 0), + ("Colombian_Decaf", 101, 8.99, 0, 0), + ("French_Roast_Decaf", 49, 9.99, 0, 0) + ) - // Query the Coffees table using a foreach and print each row - coffees foreach { case (name, supID, price, sales, total) => - println(" " + name + "\t" + supID + "\t" + price + "\t" + sales + "\t" + total) - } + val insertAndPrintAction: DBIO[Unit] = 
insertAction.map { coffeesInsertResult => + // Print the number of rows inserted + coffeesInsertResult foreach { numRows => + println(s"Inserted $numRows rows into the Coffees table") + } + } + + val allSuppliersAction: DBIO[Seq[(Int, String, String, String, String, String)]] = + suppliers.result + + val combinedAction: DBIO[Seq[(Int, String, String, String, String, String)]] = + insertAndPrintAction >> allSuppliersAction + + val combinedFuture: Future[Seq[(Int, String, String, String, String, String)]] = + db.run(combinedAction) + + combinedFuture.map { allSuppliers => + allSuppliers.foreach(println) + } + + }.flatMap { _ => + + /* Streaming */ + + val coffeeNamesAction: StreamingDBIO[Seq[String], String] = + coffees.map(_.name).result + + val coffeeNamesPublisher: DatabasePublisher[String] = + db.stream(coffeeNamesAction) + + coffeeNamesPublisher.foreach(println) + + }.flatMap { _ => + + /* Filtering / Where */ + + // Construct a query where the price of Coffees is > 9.0 + val filterQuery: Query[Coffees, (String, Int, Double, Int, Int), Seq] = + coffees.filter(_.price > 9.0) + + // Print the SQL for the filter query + println("Generated SQL for filter query:\n" + filterQuery.result.statements) + + // Execute the query and print the Seq of results + db.run(filterQuery.result.map(println)) + + }.flatMap { _ => + + /* Update */ + + // Construct an update query with the sales column being the one to update + val updateQuery: Query[Rep[Int], Int, Seq] = coffees.map(_.sales) + + val updateAction: DBIO[Int] = updateQuery.update(1) + + // Print the SQL for the Coffees update query + println("Generated SQL for Coffees update:\n" + updateQuery.updateStatement) + + // Perform the update + db.run(updateAction.map { numUpdatedRows => + println(s"Updated $numUpdatedRows rows") + }) + + }.flatMap { _ => + + /* Delete */ + + // Construct a delete query that deletes coffees with a price less than 8.0 + val deleteQuery: Query[Coffees,(String, Int, Double, Int, Int), Seq] = + 
coffees.filter(_.price < 8.0) + + val deleteAction = deleteQuery.delete + // Print the SQL for the Coffees delete query + println("Generated SQL for Coffees delete:\n" + deleteAction.statements) + + // Perform the delete + db.run(deleteAction).map { numDeletedRows => + println(s"Deleted $numDeletedRows rows") + } + + }.flatMap { _ => + + /* Sorting / Order By */ + + val sortByPriceQuery: Query[Coffees, (String, Int, Double, Int, Int), Seq] = + coffees.sortBy(_.price) + + println("Generated SQL for query sorted by price:\n" + + sortByPriceQuery.result.statements) + + // Execute the query + db.run(sortByPriceQuery.result).map(println) + + }.flatMap { _ => + + /* Query Composition */ + + val composedQuery: Query[Rep[String], String, Seq] = + coffees.sortBy(_.name).take(3).filter(_.price > 9.0).map(_.name) + + println("Generated SQL for composed query:\n" + + composedQuery.result.statements) + + // Execute the composed query + db.run(composedQuery.result).map(println) + + }.flatMap { _ => + + /* Joins */ + + // Join the tables using the relationship defined in the Coffees table + val joinQuery: Query[(Rep[String], Rep[String]), (String, String), Seq] = for { + c <- coffees if c.price > 9.0 + s <- c.supplier + } yield (c.name, s.name) + + println("Generated SQL for the join query:\n" + joinQuery.result.statements) + + // Print the rows which contain the coffee name and the supplier name + db.run(joinQuery.result).map(println) + + }.flatMap { _ => + + /* Computed Values */ + + // Create a new computed column that calculates the max price + val maxPriceColumn: Rep[Option[Double]] = coffees.map(_.price).max + + println("Generated SQL for max price column:\n" + maxPriceColumn.result.statements) + + // Execute the computed value query + db.run(maxPriceColumn.result).map(println) + + }.flatMap { _ => + + /* Manual SQL / String Interpolation */ + + // A value to insert into the statement + val state = "CA" + + // Construct a SQL statement manually with an interpolated value + 
val plainQuery = sql"select SUP_NAME from SUPPLIERS where STATE = $state".as[String] + + println("Generated SQL for plain query:\n" + plainQuery.statements) + + // Execute the query + db.run(plainQuery).map(println) + + } + Await.result(f, Duration.Inf) - /* Filtering / Where */ - - // Construct a query where the price of Coffees is > 9.0 - val filterQuery: Query[Coffees, (String, Int, Double, Int, Int)] = - coffees.filter(_.price > 9.0) - - println("Generated SQL for filter query:\n" + filterQuery.selectStatement) - - // Execute the query - println(filterQuery.list) - - - /* Update */ - - // Construct an update query with the sales column being the one to update - val updateQuery: Query[Column[Int], Int] = coffees.map(_.sales) - - // Print the SQL for the Coffees update query - println("Generated SQL for Coffees update:\n" + updateQuery.updateStatement) - - // Perform the update - val numUpdatedRows = updateQuery.update(1) - - println(s"Updated $numUpdatedRows rows") - - - /* Delete */ - - // Construct a delete query that deletes coffees with a price less than 8.0 - val deleteQuery: Query[Coffees,(String, Int, Double, Int, Int)] = - coffees.filter(_.price < 8.0) - - // Print the SQL for the Coffees delete query - println("Generated SQL for Coffees delete:\n" + deleteQuery.deleteStatement) - - // Perform the delete - val numDeletedRows = deleteQuery.delete - - println(s"Deleted $numDeletedRows rows") - - - /* Selecting Specific Columns */ - - // Construct a new coffees query that just selects the name - val justNameQuery: Query[Column[String], String] = coffees.map(_.name) - - println("Generated SQL for query returning just the name:\n" + - justNameQuery.selectStatement) - - // Execute the query - println(justNameQuery.list) - - - /* Sorting / Order By */ - - val sortByPriceQuery: Query[Coffees, (String, Int, Double, Int, Int)] = - coffees.sortBy(_.price) - - println("Generated SQL for query sorted by price:\n" + - sortByPriceQuery.selectStatement) - - // Execute 
the query - println(sortByPriceQuery.list) - - - /* Query Composition */ - - val composedQuery: Query[Column[String], String] = - coffees.sortBy(_.name).take(3).filter(_.price > 9.0).map(_.name) - - println("Generated SQL for composed query:\n" + - composedQuery.selectStatement) - - // Execute the composed query - println(composedQuery.list) - - - /* Joins */ - - // Join the tables using the relationship defined in the Coffees table - val joinQuery: Query[(Column[String], Column[String]), (String, String)] = for { - c <- coffees if c.price > 9.0 - s <- c.supplier - } yield (c.name, s.name) - - println("Generated SQL for the join query:\n" + joinQuery.selectStatement) - - // Print the rows which contain the coffee name and the supplier name - println(joinQuery.list) - - - /* Computed Values */ - - // Create a new computed column that calculates the max price - val maxPriceColumn: Column[Option[Double]] = coffees.map(_.price).max - - println("Generated SQL for max price column:\n" + maxPriceColumn.selectStatement) - - // Execute the computed value query - println(maxPriceColumn.run) - - - /* Manual SQL / String Interpolation */ - - // Required import for the sql interpolator - import scala.slick.jdbc.StaticQuery.interpolation - - // A value to insert into the statement - val state = "CA" - - // Construct a SQL statement manually with an interpolated value - val plainQuery = sql"select SUP_NAME from SUPPLIERS where STATE = $state".as[String] - - println("Generated SQL for plain query:\n" + plainQuery.getStatement) - - // Execute the query - println(plainQuery.list) - - } + } finally db.close } diff --git a/src/main/scala/InvokerMethods.scala b/src/main/scala/InvokerMethods.scala deleted file mode 100644 index 4cbe293..0000000 --- a/src/main/scala/InvokerMethods.scala +++ /dev/null @@ -1,65 +0,0 @@ -import scala.slick.driver.H2Driver.simple._ - -// Demonstrates various ways of reading data from an Invoker. 
-object InvokerMethods extends App { - - // A simple dictionary table with keys and values - class Dict(tag: Tag) extends Table[(Int, String)](tag, "INT_DICT") { - def key = column[Int]("KEY", O.PrimaryKey) - def value = column[String]("VALUE") - def * = (key, value) - } - val dict = TableQuery[Dict] - - val db = Database.forURL("jdbc:h2:mem:invoker", driver = "org.h2.Driver") - db.withSession { implicit session => - - // Create the dictionary table and insert some data - dict.ddl.create - dict ++= Seq(1 -> "a", 2 -> "b", 3 -> "c", 4 -> "d", 5 -> "e") - - // Define a pre-compiled parameterized query for reading all key/value - // pairs up to a given key. - val upTo = Compiled { k: Column[Int] => - dict.filter(_.key <= k).sortBy(_.key) - } - - println("List of k/v pairs up to 3 with .list") - println("- " + upTo(3).list) - - println("IndexedSeq of k/v pairs up to 3 with .buildColl") - println("- " + upTo(3).buildColl[IndexedSeq]) - - println("Set of k/v pairs up to 3 with .buildColl") - println("- " + upTo(3).buildColl[Set]) - - println("Array of k/v pairs up to 3 with .buildColl") - println("- " + upTo(3).buildColl[Array]) - - println("All keys in an unboxed Array[Int]") - val allKeys = dict.map(_.key) - println(" " + allKeys.buildColl[Array]()) - - println("Stream k/v pairs up to 3 via an Iterator") - val it = upTo(3).iterator - try { - it.foreach { case (k, v) => println(s"- $k -> $v") } - } finally { - // Make sure to close the Iterator in case of an error. (It is - // automatically closed when all data has been read.) 
- it.close - } - - println("Only get the first result, failing if there is none") - println("- " + upTo(3).first) - - println("Get the first result as an Option, or None") - println("- " + upTo(3).firstOption) - - println("Map of k/v pairs up to 3 with .toMap") - println("- " + upTo(3).toMap) - - println("Combine the k/v pairs up to 3 with .foldLeft") - println("- " + upTo(3).foldLeft("") { case (z, (k, v)) => s"$z[$k -> $v] " }) - } -} diff --git a/src/main/scala/QueryActions.scala b/src/main/scala/QueryActions.scala new file mode 100644 index 0000000..7d137bf --- /dev/null +++ b/src/main/scala/QueryActions.scala @@ -0,0 +1,73 @@ +import slick.driver.H2Driver.api._ + +import scala.concurrent.Await +import scala.concurrent.duration.Duration +import scala.concurrent.ExecutionContext.Implicits.global + +// Demonstrates various ways of reading data +object QueryActions extends App { + + // A simple dictionary table with keys and values + class Dict(tag: Tag) extends Table[(Int, String)](tag, "INT_DICT") { + def key = column[Int]("KEY", O.PrimaryKey) + def value = column[String]("VALUE") + def * = (key, value) + } + val dict = TableQuery[Dict] + + val db = Database.forConfig("h2mem1") + try { + + // Define a pre-compiled parameterized query for reading all key/value + // pairs up to a given key. 
+ val upTo = Compiled { k: Rep[Int] => + dict.filter(_.key <= k).sortBy(_.key) + } + + // A second pre-compiled query which returns a Set[String] + val upToSet = upTo.map(_.andThen(_.to[Set])) + + Await.result(db.run(DBIO.seq( + + // Create the dictionary table and insert some data + dict.schema.create, + dict ++= Seq(1 -> "a", 2 -> "b", 3 -> "c", 4 -> "d", 5 -> "e"), + + upTo(3).result.map { r => + println("Seq (Vector) of k/v pairs up to 3") + println("- " + r) + }, + + upToSet(3).result.map { r => + println("Set of k/v pairs up to 3") + println("- " + r) + }, + + dict.map(_.key).to[Array].result.map { r => + println("All keys in an unboxed Array[Int]") + println("- " + r) + }, + + upTo(3).result.head.map { r => + println("Only get the first result, failing if there is none") + println("- " + r) + }, + + upTo(3).result.headOption.map { r => + println("Get the first result as an Option, or None") + println("- " + r) + } + + )), Duration.Inf) + + // The Publisher captures a Database plus a DBIO action. + // The action does not run until you consume the stream. 
+ val p = db.stream(upTo(3).result) + + println("Stream k/v pairs up to 3 via Reactive Streams") + Await.result(p.foreach { v => + println("- " + v) + }, Duration.Inf) + + } finally db.close +} diff --git a/src/main/scala/Tables.scala b/src/main/scala/Tables.scala index e05522c..7201722 100644 --- a/src/main/scala/Tables.scala +++ b/src/main/scala/Tables.scala @@ -1,17 +1,17 @@ -import scala.slick.driver.H2Driver.simple._ -import scala.slick.lifted.{ProvenShape, ForeignKeyQuery} +import slick.driver.H2Driver.api._ +import slick.lifted.{ProvenShape, ForeignKeyQuery} // A Suppliers table with 6 columns: id, name, street, city, state, zip class Suppliers(tag: Tag) extends Table[(Int, String, String, String, String, String)](tag, "SUPPLIERS") { // This is the primary key column: - def id: Column[Int] = column[Int]("SUP_ID", O.PrimaryKey) - def name: Column[String] = column[String]("SUP_NAME") - def street: Column[String] = column[String]("STREET") - def city: Column[String] = column[String]("CITY") - def state: Column[String] = column[String]("STATE") - def zip: Column[String] = column[String]("ZIP") + def id: Rep[Int] = column[Int]("SUP_ID", O.PrimaryKey) + def name: Rep[String] = column[String]("SUP_NAME") + def street: Rep[String] = column[String]("STREET") + def city: Rep[String] = column[String]("CITY") + def state: Rep[String] = column[String]("STATE") + def zip: Rep[String] = column[String]("ZIP") // Every table needs a * projection with the same type as the table's type parameter def * : ProvenShape[(Int, String, String, String, String, String)] = @@ -22,11 +22,11 @@ class Suppliers(tag: Tag) class Coffees(tag: Tag) extends Table[(String, Int, Double, Int, Int)](tag, "COFFEES") { - def name: Column[String] = column[String]("COF_NAME", O.PrimaryKey) - def supID: Column[Int] = column[Int]("SUP_ID") - def price: Column[Double] = column[Double]("PRICE") - def sales: Column[Int] = column[Int]("SALES") - def total: Column[Int] = column[Int]("TOTAL") + def name: 
Rep[String] = column[String]("COF_NAME", O.PrimaryKey) + def supID: Rep[Int] = column[Int]("SUP_ID") + def price: Rep[Double] = column[Double]("PRICE") + def sales: Rep[Int] = column[Int]("SALES") + def total: Rep[Int] = column[Int]("TOTAL") def * : ProvenShape[(String, Int, Double, Int, Int)] = (name, supID, price, sales, total) diff --git a/src/test/scala/TablesSuite.scala b/src/test/scala/TablesSuite.scala index 4281636..9080e37 100644 --- a/src/test/scala/TablesSuite.scala +++ b/src/test/scala/TablesSuite.scala @@ -1,27 +1,29 @@ import org.scalatest._ -import scala.slick.driver.H2Driver.simple._ -import scala.slick.jdbc.meta._ +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.time.{Seconds, Span} +import slick.driver.H2Driver.api._ +import slick.jdbc.meta._ - -class TablesSuite extends FunSuite with BeforeAndAfter { +class TablesSuite extends FunSuite with BeforeAndAfter with ScalaFutures { + implicit override val patienceConfig = PatienceConfig(timeout = Span(5, Seconds)) val suppliers = TableQuery[Suppliers] val coffees = TableQuery[Coffees] - implicit var session: Session = _ + var db: Database = _ - def createSchema() = (suppliers.ddl ++ coffees.ddl).create + def createSchema() = + db.run((suppliers.schema ++ coffees.schema).create).futureValue - def insertSupplier(): Int = suppliers += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199") + def insertSupplier(): Int = + db.run(suppliers += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199")).futureValue - before { - session = Database.forURL("jdbc:h2:mem:test1", driver = "org.h2.Driver").createSession() - } + before { db = Database.forConfig("h2mem1") } test("Creating the Schema works") { createSchema() - val tables = MTable.getTables().list() + val tables = db.run(MTable.getTables).futureValue assert(tables.size == 2) assert(tables.count(_.name.name.equalsIgnoreCase("suppliers")) == 1) @@ -38,13 +40,10 @@ class TablesSuite extends FunSuite with 
BeforeAndAfter { test("Query Suppliers works") { createSchema() insertSupplier() - val results = suppliers.list() + val results = db.run(suppliers.result).futureValue assert(results.size == 1) assert(results.head._1 == 101) } - after { - session.close() - } - -} \ No newline at end of file + after { db.close } +} diff --git a/tutorial/index.html b/tutorial/index.html index a9c0d15..73f255f 100644 --- a/tutorial/index.html +++ b/tutorial/index.html @@ -6,20 +6,20 @@
Slick is a Functional Relational Mapping (FRM) library for Scala where you work with relational data in a type-safe and functional way. Here is an example:
coffees.filter(_.price < 10.0).map(_.name)
This will produce a query equivalent to the following SQL:
select COF_NAME from COFFEES where PRICE < 10.0
- Developers benefit from the type-safety and composability of FRM as well as being able to reuse the typical Scala collection APIs like filter, map, foreach, etc.
+ Developers benefit from the type-safety and composability of FRM as well as being able to reuse the typical Scala collection APIs like filter, map, groupBy, etc.
This template will get you started learning Slick using a working application. Continue the tutorial to learn about how to run the application, run the tests, and explore the basics of Slick.
This template includes a simple Scala application, HelloSlick.scala, that does basic FRM operations with Slick. This application automatically runs when Activator is started and then re-runs after every successful compile. You can see the output in Run. Note: The example code in this app has intentionally verbose type information. In normal applications type inference is used more extensively but to assist with learning the type information was included.
@@ -27,123 +27,183 @@The TablesSuite.scala file contains ScalaTest tests which do some basic integration tests. Check out the test results in Test.
-Slick is a library that is easy to include in any project. This project uses the sbt build tool so the dependency for Slick is specified in the build.sbt file. To make things simple this project uses an embedded H2 in-memory database.
+ +Learn more about connecting to databases in the Slick docs.
-- Slick is a library that is easy to include in any project. This project uses the sbt build tool so the dependency for Slick is specified in the build.sbt file. To make things simple this project uses the H2 database in-memory. Learn more about connecting to other databases in the Slick docs. -
- The Tables.scala file contains the mappings for a Suppliers and a Coffees table. These Table create a mapping between a database table and a class. The table's columns are also mapped to functions. This mapping is called Lifted Embedding since the types of a column mappings are not the actual column value's type, but a wrapper type. For a column that contains a Double value the type of mapping will be Column[Double]. This enables type-safe queries to be built around meta-data and then executed against the database.
+
The Tables.scala file contains the mappings for a Suppliers and a Coffees table. These Table classes create a mapping between a database table and a class. The table's columns are also mapped to functions. This mapping is called Lifted Embedding since the types of the column mappings are not the actual column value's type, but a wrapper type. For a column that contains a Double value the type of the mapping will be Rep[Double]. This enables type-safe queries to be built around meta-data and then executed against the database.
Using a table mapping object requires creating a TableQuery instance for the Table classes. For example, in HelloSlick.scala the suppliers val is the TableQuery instance for the Suppliers class.
Learn more about mapping tables and columns in the Slick docs.
- Using a table mapping object requires creating aTableQuery instance for the Table classes. In HelloSlick.scala the suppliers is the TableQuery instance for the Suppliers class.- Every query that runs against the database needs a database session to run with. The HelloSlick.scala file sets up a database connection and gets a session: -
val db = Database.forURL("jdbc:h2:mem:hello", driver = "org.h2.Driver")
-db.withSession { implicit session => ... }
- Note: the session can be implicit to avoid specifying it explicitly with every query.session can now be used in the scope of the provided function to make queries to the database. The session is automatically closed after the function completes.
-
- It is also possible to create a session and manually close it. The TablesSuite.scala tests do this in the before and after functions.
-
- Learn more about session and connnection handling in the Slick docs.
-
+ Database connections are usually configured via Typesafe Config in your application.conf, which is also used by Play and Akka for their configuration:
+ +h2mem1 = {
+ url = "jdbc:h2:mem:test1"
+ driver = org.h2.Driver
+ connectionPool = disabled
+ keepAliveConnection = true
+}
+
+ The default connection pool is HikariCP. Since a connection pool is not necessary for an embedded H2 database, we disable it here. When you use a real, external database server, the connection pool provides improved performance and resilience. The keepAliveConnection option (which is only available without a connection pool) keeps an extra connection open for the lifetime of the Database object in the application. It is useful for managing the lifecycle of named in-memory databases which keep their data as long as there are still open connections.
In the body of HelloSlick.scala we create a Database object from the configuration. This causes a thread pool (and usually also a connection pool) to be created in the background. You should always close the Database object at the end to release these resources. HelloSlick is a standalone command-line application, not running inside of a container which takes care of resource management, so we have to do it ourselves. Since all database calls in Slick are asynchronous, we are going to compose Futures throughout the app, but eventually we have to wait for the result. This gives us the following scaffolding:
val db = Database.forConfig("h2mem1")
+try {
+ val f: Future[_] = {
+ // body of the application
+ }
+ Await.result(f, Duration.Inf)
+} finally db.close
+
+ If you are not familiar with asynchronous, Future-based programming in Scala, you can learn more about Futures and Promises in the Scala documentation.
- Once a session is available you can use it to perform operations on the database. To create corresponding tables from a mapping you can get the DDL via its TableQuery and then call the create method, like:
-
suppliers.ddl.create
-
- Multiple DDLs can also be combined together and created, like in HelloSlick.scala:
- (suppliers.ddl ++ coffees.ddl).create
- This will create all database entities and links (like foreign key references) in the correct order, even in the presence of cyclic dependencies between tables.
-
+ To create corresponding tables from a mapping you can get the schema via its TableQuery and then call the create method, like:
suppliers.schema.create
+
+ Multiple schemas can also be combined as in HelloSlick.scala, to create all database entities and links (like foreign key references) in the correct order, even in the presence of cyclic dependencies between tables:
+ +(suppliers.schema ++ coffees.schema).create
+
+ The result of .create is a database I/O action which encapsulates the DDL statements.
Creates / Inserts are as simple as appending the values to a TableQuery instance using either the += operator for a single row, or ++= for multiple rows. In HelloSlick.scala both of these ways of doing inserts are used.
- Creates / Inserts are as simple as appending the values to a TableQuery instance using either the += operator for a single row, or ++= for multiple rows. In HelloSlick.scala both of these ways of doing inserts are used.
-
- Basic reads / queries can be done through the TableQuery instance using Invoker functions. A simple example of invoking a query is to just call list on the Query, like:
-
suppliers.list
- That would produce a List[(Int, String, String, String, String, String)] that corresponds to the columns defined in the Table mapping. Other methods like foreach, first, foldLeft also perform queries. Filtering, sorting, and joining will be covered in the next few sections of the tutorial. In HelloSlick.scala you can see how a foreach is used to do a select * query and then print each row.
-
+ Like all other database operations, += and ++= return database I/O actions. If you do not care about more advanced features like streaming, effect tracking or extension methods for certain actions, you can denote their type as DBIO[T] (for an operation which will eventually produce a value of type T). Instead of running all actions separately, you can combine them with other actions in various ways. The simplest combinator is DBIO.seq which takes a variable number of actions of any type and combines them into a single DBIO[Unit] that runs the actions in the specified order. We use it in HelloSlick.scala to define setupAction which combines schema creation with some insert actions.
- Filtering / adding where statements to a query is done using functions like filter and take on a TableQuery to construct a new query. For example, to create a new query on the Coffees table that selects only rows where the price is higher than 9.0, just do:
-
coffees.filter(_.price > 9.0)
- This produces a SQL statement equivalent to:
- select * from COFFEES where PRICE > 9.0
-
-
-
- Updates are done through the TableQuery instance by calling the update function. To update the sales column on all rows of the Suppliers table, create a new query for just that column:
-
val updateQuery: Query[Column[Int], Int] = coffees.map(_.sales)
- Then call the update with the new value:
- updateQuery.update(1)
-
+ So far we have only staged the operations. We can run them with db.run:
val setupFuture: Future[Unit] =
+ db.run(setupAction)
+
+ This performs the database calls asynchronously, eventually completing the returned Future.
When inserting data, the database usually returns the number of affected rows, therefore the return type is Option[Int] as can be seen in the definition of insertAction:
val insertAction: DBIO[Option[Int]] = ...
+
+ We can use the map combinator to run some code and compute a new value from the value returned by the action (or in this case run it only for its side effects and return Unit):
+
+
val insertAndPrintAction: DBIO[Unit] = insertAction.map { coffeesInsertResult =>
  // Print the number of rows inserted
  coffeesInsertResult foreach { numRows =>
    println(s"Inserted $numRows rows into the Coffees table")
  }
}
+
+ Note that map and all other combinators which run user code (e.g. flatMap, cleanup, filter) take an implicit ExecutionContext on which to run this code. Slick uses its own ExecutionContext internally for running blocking database I/O but it always maintains a clean separation and prevents you from running non-I/O code on it.
- Deletes are done by just running delete on a query. So to delete coffees with a price less than 8.0, just do:
-
coffees.filter(_.price < 8.0).delete
- This will produce SQL equivalent to:
- delete from COFFEES where PRICE < 8.0
-
Queries usually start with a TableQuery instance. In the simplest case you read the contents of an entire table by calling .result directly on the TableQuery to get a DBIO action, as shown in HelloSlick.scala:
val allSuppliersAction: DBIO[Seq[(Int, String, String, String, String, String)]] =
+ suppliers.result
+
+ This produces a Seq[(Int, String, String, String, String, String)] that corresponds to the columns defined in the Table mapping. Filtering, sorting, and joining will be covered in the next few sections of the tutorial. We use another new combinator to combine the previously defined insertAndPrintAction with the new allSuppliersAction:
val combinedAction: DBIO[Seq[(Int, String, String, String, String, String)]] =
+ insertAndPrintAction >> allSuppliersAction
+
+val combinedFuture: Future[Seq[(Int, String, String, String, String, String)]] =
+ db.run(combinedAction)
+
+ The >> combinator (also available under the name andThen) runs the second action after the first, similar to DBIO.seq but it does not discard the return value of the second action.
The default query we've been using uses the * method on the Table mapping class. For instance, the suppliers TableQuery uses the * method defined in Tables.scala and returns all of the columns when executed because the * combines all of the columns. Often we just want to select a subset of the columns. To do this use the map method on a query, like:
val coffeeNamesAction: StreamingDBIO[Seq[String], String] =
+ coffees.map(_.name).result
+
+ This will create a new query that when executed just returns the name column. The generated SQL will be something like:
select COF_NAME from COFFEES
+
+ The type annotation above uses the type StreamingDBIO[Seq[String], String] instead of DBIO[Seq[String]] to also allow streaming. The first type parameter denotes the fully materialized result (as in DBIO) whereas the second type parameter is only the element type. Note that these types can always be inferred by the compiler. They are only spelled out explicitly in this tutorial to facilitate understanding. If you have a streaming action, you can use db.stream instead of db.run to get a Reactive Streams Publisher instead of a Future. This allows data to be streamed asynchronously from the database with any compatible library like Akka Streams. Slick itself does not provide a full set of tools for working with streams but it has a .foreach utility method for consuming a stream:
val coffeeNamesPublisher: DatabasePublisher[String] =
+ db.stream(coffeeNamesAction)
+
+coffeeNamesPublisher.foreach(println)
+
+ Note that a database I/O action does not yet start running when you call db.stream. You must attach a Subscriber to the stream (i.e. start consuming the stream) to actually run the action.
- The default query we've been using uses the * function on the Table mapping class. For instance, the suppliers TableQuery uses the * function defined in Tables.scala and returns all of the columns when executed because the * combines all of the columns. Often we just want to select a subset of the columns. To do this use the map function on a query, like:
-
suppliers.map(_.name)
- This will create a new query that when executed just returns the name column. The generated SQL will be something like:
- select SUP_NAME from SUPPLIERS
-
- Sorting / adding order by statements is done using functions like sortBy on a TableQuery to create a new query. For example in the HelloSlick.scala you can see an example sorting of coffees by price:
-
coffees.sortBy(_.price)
- This would produce SQL equivalent to:
- select * from COFFEES order by PRICE
-
Filtering / adding where statements to a query is done using methods like filter and take on a Query to construct a new query. For example, to create a new query on the Coffees table that selects only rows where the price is higher than 9.0, we use the following code in HelloSlick.scala:

coffees.filter(_.price > 9.0)
+
+ This produces a SQL statement equivalent to:
select * from COFFEES where PRICE > 9.0
+
Updates are done through a Query object by calling the update method. To update the sales column on all rows of the Coffees table, create a new query for just that column:

val updateQuery: Query[Rep[Int], Int, Seq] =
  coffees.map(_.sales)
+
+ Then call the update with the new value to produce an action that will perform the update. Updates, like inserts, return the number of affected rows:
val updateAction: DBIO[Int] =
+ updateQuery.update(1)
+
+ Deletes are done by just calling delete on a query to get an action. So to delete coffees with a price less than 8.0, you do:
val deleteQuery: Query[Coffees,(String, Int, Double, Int, Int), Seq] =
+ coffees.filter(_.price < 8.0)
+
+val deleteAction = deleteQuery.delete
+
+ This will produce SQL equivalent to:
delete from COFFEES where PRICE < 8.0
- The examples so far have taken a base query and used a function to produce a new, more specific query. Due to the functional nature of the query API, this can be done repeatedly to produce more specific queries. For example, to create a query on the Coffees table that sorts them by name, takes the first three rows, filters those with a prices greater than 9.0, and finally just returns the names, simply do:
-
coffees.sortBy(_.name).take(3).filter(_.price > 9.0).map(_.name)
- That results in a new query that has a fairly complex implementation in SQL.
-
+ Sorting / adding order by clauses is done using methods like sortBy on a Query to create a new query. For example in HelloSlick.scala you can see an example sorting of coffees by price:
coffees.sortBy(_.price)
+
+ This would produce SQL equivalent to:
select * from COFFEES order by PRICE
+
The examples so far have taken a basic TableQuery and used a method to produce a new, more specific query. Due to the functional nature of the query API, this can be done repeatedly to produce more specific queries. For example, to create a query on the Coffees table that sorts them by name, takes the first three rows, filters those with a price greater than 9.0, and finally just returns the names, simply do:
coffees.sortBy(_.name).take(3).filter(_.price > 9.0).map(_.name)
+
+ This results in a new query that has a fairly complex implementation in SQL.
- Sometimes writing manual SQL is the easiest and best way to go but we don't want to lose SQL injection protection that Slick includes. SQL String Interpolation provides a nice API for doing this. Start by importing the interpolation API: -
import scala.slick.jdbc.StaticQuery.interpolation
-
- Then use the sql String Interpolator:
- val state = "CA"
+ Sometimes writing manual SQL is the easiest and best way to go but we don't want to lose SQL injection protection that Slick includes. SQL String Interpolation provides a nice API for doing this. In HelloSlick.scala we use the sql interpolator:
+
+ val state = "CA"
val plainQuery = sql"select SUP_NAME from SUPPLIERS where STATE = $state".as[String]
-
- This produces a query that can be run using the normal functions like list.
-
- You can learn more about Slick's Plain SQL queries in the Slick Plain SQL Queries template for Activator.
+ This produces a database I/O action that can be run or streamed in the usual way.
+
+ You can learn more about Slick's Plain SQL queries in the Slick Plain SQL Queries (Slick 3.0) template for Activator.
So far you have seen how to get a Seq from a collection-valued query and how to stream individual elements. There are several other useful methods which are shown in QueryActions.scala. They are equally applicable to Lifted Embedding and Plain SQL queries.
Note the use of Compiled in this app. It is used to define a pre-compiled query that can be executed with different parameters without having to recompile the SQL statement each time. This is the preferred way of defining queries in real-world applications. It prevents the (possibly expensive) compilation each time and leads to the same SQL statement (or a small, fixed set of SQL statements) so that the database system can also reuse a previously computed execution plan. As a side-effect, all parameters are automatically turned into bind variables:
-
val upTo = Compiled { k: Rep[Int] =>
ts.filter(_.k <= k).sortBy(_.k)
}
@@ -225,7 +281,7 @@ Running Queries
Next Steps
Check out the full Slick manual and API docs.
You can also find more Slick templates, contributed by both the Slick team and the community, here in Activator.